diff --git a/.github/actions/run-gradle/action.yml b/.github/actions/run-gradle/action.yml new file mode 100644 index 0000000000000..18d6bdeb1f558 --- /dev/null +++ b/.github/actions/run-gradle/action.yml @@ -0,0 +1,75 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +--- +name: "Run Tests with Gradle" +description: "Run specified Gradle test tasks with configuration for timeout and test catalog." +inputs: + # Composite actions do not support typed parameters. Everything is treated as a string + # See: https://github.com/actions/runner/issues/2238 + test-task: + description: "The test suite to run. Either 'test' or 'quarantinedTest'." + required: true + timeout-minutes: + description: "The timeout for the tests, in minutes." + required: true + test-catalog-path: + description: "The file path of the test catalog file." + required: true + build-scan-artifact-name: + description: "The name to use for archiving the build scan." + required: true +outputs: + gradle-exitcode: + description: "The result of the Gradle test task." + value: ${{ steps.run-tests.outputs.exitcode }} +runs: + using: "composite" + steps: + - name: Run JUnit Tests (${{ inputs.test-task }}) + # Gradle flags + # --build-cache: Let Gradle restore the build cache + # --no-scan: Don't attempt to publish the scan yet. We want to archive it first. + # --continue: Keep running even if a test fails + # -PcommitId Prevent the Git SHA being written into the jar files (which breaks caching) + shell: bash + id: run-tests + env: + TIMEOUT_MINUTES: ${{ inputs.timeout-minutes}} + TEST_CATALOG: ${{ inputs.test-catalog-path }} + TEST_TASK: ${{ inputs.test-task }} + run: | + set +e + ./.github/scripts/thread-dump.sh & + timeout ${TIMEOUT_MINUTES}m ./gradlew --build-cache --continue --no-scan \ + -PtestLoggingEvents=started,passed,skipped,failed \ + -PmaxParallelForks=2 \ + -PmaxTestRetries=1 -PmaxTestRetryFailures=3 \ + -PmaxQuarantineTestRetries=3 -PmaxQuarantineTestRetryFailures=0 \ + -Pkafka.test.catalog.file=$TEST_CATALOG \ + -PcommitId=xxxxxxxxxxxxxxxx \ + $TEST_TASK + exitcode="$?" 
+ echo "exitcode=$exitcode" >> $GITHUB_OUTPUT + - name: Archive build scan (${{ inputs.test-task }}) + if: always() + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.build-scan-artifact-name }} + path: ~/.gradle/build-scan-data + compression-level: 9 + if-no-files-found: ignore \ No newline at end of file diff --git a/.github/configs/labeler.yml b/.github/configs/labeler.yml index be6944c0ab90e..24a7a643c9042 100644 --- a/.github/configs/labeler.yml +++ b/.github/configs/labeler.yml @@ -124,3 +124,15 @@ kraft: - any-glob-to-any-file: - 'metadata/**' - 'raft/**' + +dependencies: + - changed-files: + - any-glob-to-any-file: + - "grade/dependencies.gradle" + - "LICENSE-binary" + +tiered-storage: + - changed-files: + - any-glob-to-any-file: + - '*/src/*/java/org/apache/kafka/server/log/remote/**' + - '*/src/*/java/kafka/log/remote/**' diff --git a/.github/scripts/develocity_reports.py b/.github/scripts/develocity_reports.py new file mode 100644 index 0000000000000..74df32a3edeaf --- /dev/null +++ b/.github/scripts/develocity_reports.py @@ -0,0 +1,1270 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import requests +import json +from dataclasses import dataclass, field, asdict +from typing import Dict, List, Tuple, Optional +from datetime import datetime, timedelta +import pytz # Add this import for timezone handling +from collections import defaultdict +import time +import logging +import concurrent.futures # Add this import at the top +import pathlib +import pickle +from abc import ABC, abstractmethod + +logger = logging.getLogger(__name__) + +@dataclass +class TestOutcome: + passed: int + failed: int + skipped: int + flaky: int + not_selected: int = field(metadata={'name': 'notSelected'}) + total: int + +@dataclass +class BuildInfo: + id: str + timestamp: datetime + duration: int + has_failed: bool + +@dataclass +class TestTimelineEntry: + build_id: str + timestamp: datetime + outcome: str # "passed", "failed", "flaky", etc. 
+ +@dataclass +class TestResult: + name: str + outcome_distribution: TestOutcome + first_seen: datetime + timeline: List[TestTimelineEntry] = field(default_factory=list) + recent_failure_rate: float = 0.0 # Added to track recent failure trends + +@dataclass +class TestContainerResult: + build_id: str + outcome: str + timestamp: Optional[datetime] = None + +@dataclass +class TestCaseResult(TestResult): + """Extends TestResult to include container-specific information""" + container_name: str = "" + +@dataclass +class BuildCache: + last_update: datetime + builds: Dict[str, 'BuildInfo'] + + def to_dict(self): + return { + 'last_update': self.last_update.isoformat(), + 'builds': {k: asdict(v) for k, v in self.builds.items()} + } + + @classmethod + def from_dict(cls, data: dict) -> 'BuildCache': + return cls( + last_update=datetime.fromisoformat(data['last_update']), + builds={k: BuildInfo(**v) for k, v in data['builds'].items()} + ) + +class CacheProvider(ABC): + @abstractmethod + def get_cache(self) -> Optional[BuildCache]: + pass + + @abstractmethod + def save_cache(self, cache: BuildCache): + pass + +class LocalCacheProvider(CacheProvider): + def __init__(self, cache_dir: str = None): + if cache_dir is None: + cache_dir = os.path.join(os.path.expanduser("~"), ".develocity_cache") + self.cache_file = os.path.join(cache_dir, "build_cache.pkl") + os.makedirs(cache_dir, exist_ok=True) + + def get_cache(self) -> Optional[BuildCache]: + try: + if os.path.exists(self.cache_file): + with open(self.cache_file, 'rb') as f: + return pickle.load(f) + except Exception as e: + logger.warning(f"Failed to load local cache: {e}") + return None + + def save_cache(self, cache: BuildCache): + try: + with open(self.cache_file, 'wb') as f: + pickle.dump(cache, f) + except Exception as e: + logger.warning(f"Failed to save local cache: {e}") + +class GitHubActionsCacheProvider(CacheProvider): + def __init__(self): + self.cache_key = "develocity-build-cache" + + def get_cache(self) -> Optional[BuildCache]: + try: + # Check if running in GitHub Actions + if not os.environ.get('GITHUB_ACTIONS'): + return None + + cache_path = os.environ.get('GITHUB_WORKSPACE', '') + cache_file = os.path.join(cache_path, self.cache_key + '.json') + + if os.path.exists(cache_file): + with open(cache_file, 'r') as f: + data = json.load(f) + return BuildCache.from_dict(data) + except Exception as e: + logger.warning(f"Failed to load GitHub Actions cache: {e}") + return None + + def save_cache(self, cache: BuildCache): + try: + if not os.environ.get('GITHUB_ACTIONS'): + return + + cache_path = os.environ.get('GITHUB_WORKSPACE', '') + cache_file = os.path.join(cache_path, self.cache_key + '.json') + + with open(cache_file, 'w') as f: + json.dump(cache.to_dict(), f) + except Exception as e: + logger.warning(f"Failed to save GitHub Actions cache: {e}") + +class TestAnalyzer: + def __init__(self, base_url: str, auth_token: str): + self.base_url = base_url + self.headers = { + 'Authorization': f'Bearer {auth_token}', + 'Accept': 'application/json' + } + self.default_chunk_size = timedelta(days=14) + self.api_retry_delay = 2 # seconds + self.max_api_retries = 3 + + # Initialize cache providers + self.cache_providers = [ + GitHubActionsCacheProvider(), + LocalCacheProvider() + ] + self.build_cache = None + self._load_cache() + + def _load_cache(self): + """Load cache from the first available provider""" + for provider in self.cache_providers: + cache = provider.get_cache() + if cache is not None: + self.build_cache = cache + logger.info(f"Loaded 
cache from {provider.__class__.__name__}") + return + logger.info("No existing cache found") + + def _save_cache(self): + """Save cache to all providers""" + if self.build_cache: + for provider in self.cache_providers: + provider.save_cache(self.build_cache) + logger.info(f"Saved cache to {provider.__class__.__name__}") + + def build_query(self, project: str, chunk_start: datetime, chunk_end: datetime, test_type: str) -> str: + """ + Constructs the query string to be used in both build info and test containers API calls. + + Args: + project: The project name. + chunk_start: The start datetime for the chunk. + chunk_end: The end datetime for the chunk. + test_type: The type of tests to query. + + Returns: + A formatted query string. + """ + return f'project:{project} buildStartTime:[{chunk_start.isoformat()} TO {chunk_end.isoformat()}] gradle.requestedTasks:{test_type} tag:github tag:trunk' + + def process_chunk( + self, + chunk_start: datetime, + chunk_end: datetime, + project: str, + test_type: str, + remaining_build_ids: set | None, + max_builds_per_request: int + ) -> Dict[str, BuildInfo]: + """Helper method to process a single chunk of build information""" + chunk_builds = {} + + # Use the helper method to build the query + query = self.build_query(project, chunk_start, chunk_end, test_type) + + # Initialize pagination for this chunk + from_build = None + continue_chunk = True + + while continue_chunk and (remaining_build_ids is None or remaining_build_ids): + query_params = { + 'query': query, + 'models': ['gradle-attributes'], + 'allModels': 'false', + 'maxBuilds': max_builds_per_request, + 'reverse': 'false', + 'fromInstant': int(chunk_start.timestamp() * 1000) + } + + if from_build: + query_params['fromBuild'] = from_build + + for attempt in range(self.max_api_retries): + try: + response = requests.get( + f'{self.base_url}/api/builds', + headers=self.headers, + params=query_params, + timeout=(5, 30) + ) + response.raise_for_status() + break + except requests.exceptions.Timeout: + if attempt == self.max_api_retries - 1: + raise + time.sleep(self.api_retry_delay * (attempt + 1)) + except requests.exceptions.RequestException: + raise + + response_json = response.json() + + if not response_json: + break + + for build in response_json: + build_id = build['id'] + + if 'models' in build and 'gradleAttributes' in build['models']: + gradle_attrs = build['models']['gradleAttributes'] + if 'model' in gradle_attrs: + attrs = gradle_attrs['model'] + build_timestamp = datetime.fromtimestamp(attrs['buildStartTime'] / 1000, pytz.UTC) + + if build_timestamp >= chunk_end: + continue_chunk = False + break + + if remaining_build_ids is None or build_id in remaining_build_ids: + if 'problem' not in gradle_attrs: + chunk_builds[build_id] = BuildInfo( + id=build_id, + timestamp=build_timestamp, + duration=attrs.get('buildDuration'), + has_failed=attrs.get('hasFailed', False) + ) + if remaining_build_ids is not None: + remaining_build_ids.remove(build_id) + + if continue_chunk and response_json: + from_build = response_json[-1]['id'] + else: + continue_chunk = False + + time.sleep(0.5) # Rate limiting between pagination requests + + return chunk_builds + def get_build_info(self, build_ids: List[str] = None, project: str = None, test_type: str = None, query_days: int = None, bypass_cache: bool = False, fetch_all: bool = False) -> Dict[str, BuildInfo]: + builds = {} + max_builds_per_request = 100 + cutoff_date = datetime.now(pytz.UTC) - timedelta(days=query_days) + current_time = datetime.now(pytz.UTC) + + 
if not fetch_all and not build_ids: + raise ValueError("Either build_ids must be provided or fetch_all must be True") + + # Get builds from cache if available and bypass_cache is False + if not bypass_cache and self.build_cache: + cached_builds = self.build_cache.builds + cached_cutoff = self.build_cache.last_update - timedelta(days=query_days) + + if fetch_all: + # Use all cached builds within the time period + for build_id, build in cached_builds.items(): + if build.timestamp >= cached_cutoff: + builds[build_id] = build + else: + # Use cached data for specific builds within the cache period + for build_id in build_ids: + if build_id in cached_builds: + build = cached_builds[build_id] + if build.timestamp >= cached_cutoff: + builds[build_id] = build + + # Update cutoff date to only fetch new data + cutoff_date = self.build_cache.last_update + logger.info(f"Using cached data up to {cutoff_date.isoformat()}") + + if not fetch_all: + # Remove already found builds from the search list + build_ids = [bid for bid in build_ids if bid not in builds] + + if not build_ids: + logger.info("All builds found in cache") + return builds + + # Fetch remaining builds from API + remaining_build_ids = set(build_ids) if not fetch_all else None + chunk_size = self.default_chunk_size + + # Create time chunks + chunks = [] + chunk_start = cutoff_date + while chunk_start < current_time: + chunk_end = min(chunk_start + chunk_size, current_time) + chunks.append((chunk_start, chunk_end)) + chunk_start = chunk_end + + total_start_time = time.time() + + # Process chunks in parallel + with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: + future_to_chunk = { + executor.submit( + self.process_chunk, + chunk[0], + chunk[1], + project, + test_type, + remaining_build_ids.copy() if remaining_build_ids else None, + max_builds_per_request + ): chunk for chunk in chunks + } + + for future in concurrent.futures.as_completed(future_to_chunk): + try: + chunk_builds = future.result() + builds.update(chunk_builds) + if remaining_build_ids: + remaining_build_ids -= set(chunk_builds.keys()) + except Exception as e: + logger.error(f"Chunk processing generated an exception: {str(e)}") + + total_duration = time.time() - total_start_time + logger.info( + f"\nBuild Info Performance:" + f"\n Total Duration: {total_duration:.2f}s" + f"\n Builds Retrieved: {len(builds)}" + f"\n Builds Not Found: {len(remaining_build_ids) if remaining_build_ids else 0}" + ) + + # Update cache with new data if not bypassing cache + if builds and not bypass_cache: + if not self.build_cache: + self.build_cache = BuildCache(current_time, {}) + self.build_cache.builds.update(builds) + self.build_cache.last_update = current_time + self._save_cache() + + return builds + def get_test_results(self, project: str, threshold_days: int, test_type: str = "quarantinedTest", + outcomes: List[str] = None) -> List[TestResult]: + """Fetch test results with timeline information""" + if outcomes is None: + outcomes = ["failed", "flaky"] + + logger.debug(f"Fetching test results for project {project}, last {threshold_days} days") + + end_time = datetime.now(pytz.UTC) + start_time = end_time - timedelta(days=threshold_days) + + all_results = {} + build_ids = set() + test_container_results = defaultdict(list) + + chunk_size = self.default_chunk_size + chunk_start = start_time + + while chunk_start < end_time: + chunk_end = min(chunk_start + chunk_size, end_time) + logger.debug(f"Processing chunk: {chunk_start} to {chunk_end}") + + # Use the helper method to build 
the query + query = self.build_query(project, chunk_start, chunk_end, test_type) + + query_params = { + 'query': query, + 'testOutcomes': outcomes, + 'container': '*', + 'include': ['buildScanIds'] # Explicitly request build scan IDs + } + + response = requests.get( + f'{self.base_url}/api/tests/containers', + headers=self.headers, + params=query_params + ) + response.raise_for_status() + + for test in response.json()['content']: + test_name = test['name'] + logger.debug(f"Processing test: {test_name}") + + if test_name not in all_results: + outcome_data = test['outcomeDistribution'] + if 'notSelected' in outcome_data: + outcome_data['not_selected'] = outcome_data.pop('notSelected') + outcome = TestOutcome(**outcome_data) + all_results[test_name] = TestResult(test_name, outcome, chunk_start) + + # Collect build IDs by outcome + if 'buildScanIdsByOutcome' in test: + scan_ids = test['buildScanIdsByOutcome'] + + for outcome, ids in scan_ids.items(): + if ids: # Only process if we have IDs + for build_id in ids: + build_ids.add(build_id) + test_container_results[test_name].append( + TestContainerResult(build_id=build_id, outcome=outcome) + ) + + chunk_start = chunk_end + + logger.debug(f"Total unique build IDs collected: {len(build_ids)}") + + # Fetch build information using the updated get_build_info method + builds = self.get_build_info(list(build_ids), project, test_type, threshold_days) + logger.debug(f"Retrieved {len(builds)} builds from API") + logger.debug(f"Retrieved build IDs: {sorted(builds.keys())}") + + # Update test results with timeline information + for test_name, result in all_results.items(): + logger.debug(f"\nProcessing timeline for test: {test_name}") + timeline = [] + for container_result in test_container_results[test_name]: + logger.debug(f"Processing container result: {container_result}") + if container_result.build_id in builds: + build_info = builds[container_result.build_id] + timeline.append(TestTimelineEntry( + build_id=container_result.build_id, + timestamp=build_info.timestamp, + outcome=container_result.outcome + )) + else: + logger.warning(f"Build ID {container_result.build_id} not found in builds response") + + # Sort timeline by timestamp + result.timeline = sorted(timeline, key=lambda x: x.timestamp) + logger.debug(f"Final timeline entries for {test_name}: {len(result.timeline)}") + + # Print build details for debugging + logger.debug("Timeline entries:") + for entry in timeline: + logger.debug(f"Build ID: {entry.build_id}, Timestamp: {entry.timestamp}, Outcome: {entry.outcome}") + + # Calculate recent failure rate + recent_cutoff = datetime.now(pytz.UTC) - timedelta(days=30) + recent_runs = [t for t in timeline if t.timestamp >= recent_cutoff] + if recent_runs: + recent_failures = sum(1 for t in recent_runs if t.outcome in ('failed', 'flaky')) + result.recent_failure_rate = recent_failures / len(recent_runs) + + return list(all_results.values()) + + def get_defective_tests(self, results: List[TestResult]) -> Dict[str, TestResult]: + """ + Analyze test results to find defective tests (failed or flaky) + """ + defective_tests = {} + + for result in results: + if result.outcome_distribution.failed > 0 or result.outcome_distribution.flaky > 0: + defective_tests[result.name] = result + + return defective_tests + + def get_long_quarantined_tests(self, results: List[TestResult], quarantine_threshold_days: int = 60) -> Dict[str, TestResult]: + """ + Find tests that have been quarantined longer than the threshold. + These are candidates for removal or rewriting. 
+ + Args: + results: List of test results + quarantine_threshold_days: Number of days after which a quarantined test should be considered for removal/rewrite + """ + long_quarantined = {} + current_time = datetime.now(pytz.UTC) + + for result in results: + days_quarantined = (current_time - result.first_seen).days + if days_quarantined >= quarantine_threshold_days: + long_quarantined[result.name] = (result, days_quarantined) + + return long_quarantined + + def get_problematic_quarantined_tests( + self, + results: List[TestResult], + quarantine_threshold_days: int = 60, + min_failure_rate: float = 0.3, + recent_failure_threshold: float = 0.5 + ) -> Dict[str, Dict]: + """Enhanced version that includes test case details""" + problematic_tests = {} + current_time = datetime.now(pytz.UTC) + chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases + + for result in results: + days_quarantined = (current_time - result.first_seen).days + if days_quarantined >= quarantine_threshold_days: + total_runs = result.outcome_distribution.total + if total_runs > 0: + problem_runs = result.outcome_distribution.failed + result.outcome_distribution.flaky + failure_rate = problem_runs / total_runs + + if failure_rate >= min_failure_rate or result.recent_failure_rate >= recent_failure_threshold: + # Get detailed test case information + try: + test_cases = self.get_test_case_details( + result.name, + "kafka", + chunk_start, + current_time, + test_type="quarantinedTest" + ) + + problematic_tests[result.name] = { + 'container_result': result, + 'days_quarantined': days_quarantined, + 'failure_rate': failure_rate, + 'recent_failure_rate': result.recent_failure_rate, + 'test_cases': test_cases + } + except Exception as e: + logger.error(f"Error getting test case details for {result.name}: {str(e)}") + + return problematic_tests + + def get_test_case_details( + self, + container_name: str, + project: str, + chunk_start: datetime, + chunk_end: datetime, + test_type: str = "quarantinedTest" + ) -> List[TestCaseResult]: + """ + Fetch detailed test case results for a specific container. 
+ + Args: + container_name: Name of the test container + project: The project name + chunk_start: Start time for the query + chunk_end: End time for the query + test_type: Type of tests to query (default: "quarantinedTest") + """ + # Use the helper method to build the query, similar to get_test_results + query = self.build_query(project, chunk_start, chunk_end, test_type) + + query_params = { + 'query': query, + 'testOutcomes': ['failed', 'flaky'], + 'container': container_name, + 'include': ['buildScanIds'], # Explicitly request build scan IDs + 'limit': 1000 + } + + try: + response = requests.get( + f'{self.base_url}/api/tests/cases', + headers=self.headers, + params=query_params + ) + response.raise_for_status() + + test_cases = [] + content = response.json().get('content', []) + + # Collect all build IDs first + build_ids = set() + for test in content: + if 'buildScanIdsByOutcome' in test: + for outcome_type, ids in test['buildScanIdsByOutcome'].items(): + build_ids.update(ids) + + # Get build info for all build IDs + builds = self.get_build_info(list(build_ids), project, test_type, 7) # 7 days for test cases + + for test in content: + outcome_data = test['outcomeDistribution'] + if 'notSelected' in outcome_data: + outcome_data['not_selected'] = outcome_data.pop('notSelected') + outcome = TestOutcome(**outcome_data) + + test_case = TestCaseResult( + name=test['name'], + outcome_distribution=outcome, + first_seen=chunk_start, + container_name=container_name + ) + + # Add build information with proper timestamps + if 'buildScanIdsByOutcome' in test: + for outcome_type, build_ids in test['buildScanIdsByOutcome'].items(): + for build_id in build_ids: + if build_id in builds: + build_info = builds[build_id] + test_case.timeline.append( + TestTimelineEntry( + build_id=build_id, + timestamp=build_info.timestamp, + outcome=outcome_type + ) + ) + else: + logger.warning(f"Build ID {build_id} not found for test case {test['name']}") + + # Sort timeline by timestamp + test_case.timeline.sort(key=lambda x: x.timestamp) + test_cases.append(test_case) + + return test_cases + + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching test case details for {container_name}: {str(e)}") + raise + + def get_flaky_test_regressions(self, project: str, results: List[TestResult], + recent_days: int = 7, min_flaky_rate: float = 0.2) -> Dict[str, Dict]: + """ + Identify tests that have recently started showing flaky behavior. 
+ + Args: + project: The project name + results: List of test results + recent_days: Number of days to consider for recent behavior + min_flaky_rate: Minimum flaky rate to consider a test as problematic + """ + flaky_regressions = {} + current_time = datetime.now(pytz.UTC) + recent_cutoff = current_time - timedelta(days=recent_days) + + for result in results: + # Skip tests with no timeline data + if not result.timeline: + continue + + # Split timeline into recent and historical periods + recent_entries = [t for t in result.timeline if t.timestamp >= recent_cutoff] + historical_entries = [t for t in result.timeline if t.timestamp < recent_cutoff] + + if not recent_entries or not historical_entries: + continue + + # Calculate flaky rates + recent_flaky = sum(1 for t in recent_entries if t.outcome == 'flaky') + recent_total = len(recent_entries) + recent_flaky_rate = recent_flaky / recent_total if recent_total > 0 else 0 + + historical_flaky = sum(1 for t in historical_entries if t.outcome == 'flaky') + historical_total = len(historical_entries) + historical_flaky_rate = historical_flaky / historical_total if historical_total > 0 else 0 + + # Check if there's a significant increase in flakiness + if recent_flaky_rate >= min_flaky_rate and recent_flaky_rate > historical_flaky_rate * 1.5: + flaky_regressions[result.name] = { + 'result': result, + 'recent_flaky_rate': recent_flaky_rate, + 'historical_flaky_rate': historical_flaky_rate, + 'recent_executions': recent_entries, + 'historical_executions': historical_entries + } + + return flaky_regressions + + def get_cleared_tests(self, project: str, results: List[TestResult], + success_threshold: float = 0.7, min_executions: int = 5) -> Dict[str, Dict]: + """ + Identify quarantined tests that are consistently passing and could be cleared. 
+ + Args: + project: The project name + results: List of test results + success_threshold: Required percentage of successful builds to be considered cleared + min_executions: Minimum number of executions required to make a determination + """ + cleared_tests = {} + current_time = datetime.now(pytz.UTC) + chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases + + for result in results: + # Only consider tests with sufficient recent executions + recent_executions = result.timeline + if len(recent_executions) < min_executions: + continue + + # Calculate success rate at class level + successful_runs = sum(1 for t in recent_executions + if t.outcome == 'passed') + success_rate = successful_runs / len(recent_executions) + + # Check if the test meets clearing criteria at class level + if success_rate >= success_threshold: + # Verify no recent failures or flaky behavior + has_recent_issues = any(t.outcome in ['failed', 'flaky'] + for t in recent_executions[-min_executions:]) + + if not has_recent_issues: + try: + # Get test case details + test_cases = self.get_test_case_details( + result.name, + project, + chunk_start, + current_time, + test_type="quarantinedTest" + ) + + # Only include if all test cases are also passing consistently + all_cases_passing = True + passing_test_cases = [] + + for test_case in test_cases: + case_total = test_case.outcome_distribution.total + if case_total >= min_executions: + case_success_rate = test_case.outcome_distribution.passed / case_total + + # Check recent executions for the test case + recent_case_issues = any(t.outcome in ['failed', 'flaky'] + for t in test_case.timeline[-min_executions:]) + + if case_success_rate >= success_threshold and not recent_case_issues: + passing_test_cases.append({ + 'name': test_case.name, + 'success_rate': case_success_rate, + 'total_executions': case_total, + 'recent_executions': sorted(test_case.timeline, + key=lambda x: x.timestamp)[-min_executions:] + }) + else: + all_cases_passing = False + break + + if all_cases_passing and passing_test_cases: + cleared_tests[result.name] = { + 'result': result, + 'success_rate': success_rate, + 'total_executions': len(recent_executions), + 'successful_runs': successful_runs, + 'recent_executions': recent_executions[-min_executions:], + 'test_cases': passing_test_cases + } + + except Exception as e: + logger.error(f"Error getting test case details for {result.name}: {str(e)}") + + return cleared_tests + + def update_cache(self, builds: Dict[str, BuildInfo]): + """ + Update the build cache with new build information. + + Args: + builds: Dictionary of build IDs to BuildInfo objects + """ + current_time = datetime.now(pytz.UTC) + + # Initialize cache if it doesn't exist + if not self.build_cache: + self.build_cache = BuildCache(current_time, {}) + + # Update builds and last update time + self.build_cache.builds.update(builds) + self.build_cache.last_update = current_time + + # Save to all cache providers + self._save_cache() + + logger.info(f"Updated cache with {len(builds)} builds") + + def get_persistent_failing_tests(self, results: List[TestResult], + min_failure_rate: float = 0.2, + min_executions: int = 5) -> Dict[str, Dict]: + """ + Identify tests that have been consistently failing/flaky over time. + Groups by test class and includes individual test cases. 
+ """ + persistent_failures = {} + current_time = datetime.now(pytz.UTC) + chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases + + # Group results by class + class_groups = {} + for result in results: + class_name = result.name.split('#')[0] # Get class name + if class_name not in class_groups: + class_groups[class_name] = [] + class_groups[class_name].append(result) + + # Analyze each class and its test cases + for class_name, class_results in class_groups.items(): + class_total = sum(r.outcome_distribution.total for r in class_results) + class_problems = sum(r.outcome_distribution.failed + r.outcome_distribution.flaky + for r in class_results) + + if class_total < min_executions: + continue + + class_failure_rate = class_problems / class_total if class_total > 0 else 0 + + # Only include if class has significant failures + if class_failure_rate >= min_failure_rate: + try: + # Get detailed test case information using the same method as other reports + test_cases = self.get_test_case_details( + class_name, + "kafka", + chunk_start, + current_time, + test_type="test" + ) + + failing_test_cases = {} + for test_case in test_cases: + total_runs = test_case.outcome_distribution.total + if total_runs >= min_executions: + problem_runs = (test_case.outcome_distribution.failed + + test_case.outcome_distribution.flaky) + failure_rate = problem_runs / total_runs if total_runs > 0 else 0 + + if failure_rate >= min_failure_rate: + # Extract just the method name + method_name = test_case.name.split('.')[-1] + failing_test_cases[method_name] = { + 'result': test_case, + 'failure_rate': failure_rate, + 'total_executions': total_runs, + 'failed_executions': problem_runs, + 'timeline': sorted(test_case.timeline, key=lambda x: x.timestamp) + } + + if failing_test_cases: # Only include classes that have problematic test cases + persistent_failures[class_name] = { + 'failure_rate': class_failure_rate, + 'total_executions': class_total, + 'failed_executions': class_problems, + 'test_cases': failing_test_cases + } + + except Exception as e: + logger.error(f"Error getting test case details for {class_name}: {str(e)}") + + return persistent_failures + +def get_develocity_class_link(class_name: str, threshold_days: int, test_type: str = None) -> str: + """ + Generate Develocity link for a test class + + Args: + class_name: Name of the test class + threshold_days: Number of days to look back in search + test_type: Type of test (e.g., "quarantinedTest", "test") + """ + base_url = "https://ge.apache.org/scans/tests" + params = { + "search.rootProjectNames": "kafka", + "search.tags": "github,trunk", + "search.timeZoneId": "UTC", + "search.relativeStartTime": f"P{threshold_days}D", + "tests.container": class_name + } + + if test_type: + params["search.tasks"] = test_type + + return f"{base_url}?{'&'.join(f'{k}={requests.utils.quote(str(v))}' for k, v in params.items())}" + +def get_develocity_method_link(class_name: str, method_name: str, threshold_days: int, test_type: str = None) -> str: + """ + Generate Develocity link for a test method + + Args: + class_name: Name of the test class + method_name: Name of the test method + threshold_days: Number of days to look back in search + test_type: Type of test (e.g., "quarantinedTest", "test") + """ + base_url = "https://ge.apache.org/scans/tests" + + # Extract just the method name without the class prefix + if '.' 
in method_name: + method_name = method_name.split('.')[-1] + + params = { + "search.rootProjectNames": "kafka", + "search.tags": "github,trunk", + "search.timeZoneId": "UTC", + "search.relativeStartTime": f"P{threshold_days}D", + "tests.container": class_name, + "tests.test": method_name + } + + if test_type: + params["search.tasks"] = test_type + + return f"{base_url}?{'&'.join(f'{k}={requests.utils.quote(str(v))}' for k, v in params.items())}" + +def print_most_problematic_tests(problematic_tests: Dict[str, Dict], threshold_days: int, test_type: str = None): + """Print a summary of the most problematic tests""" + print("\n## Most Problematic Tests") + if not problematic_tests: + print("No high-priority problematic tests found.") + return + + print(f"Found {len(problematic_tests)} tests that have been quarantined for {threshold_days} days and are still failing frequently.") + + # Print table with class and method information + print("\n") + print("") + + for test_name, details in sorted(problematic_tests.items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + class_link = get_develocity_class_link(test_name, threshold_days, test_type=test_type) + print(f"") + + for test_case in sorted(details['test_cases'], + key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total + if x.outcome_distribution.total > 0 else 0, + reverse=True): + method_name = test_case.name.split('.')[-1] + if method_name != 'N/A': + method_link = get_develocity_method_link(test_name, test_case.name, threshold_days, test_type="quarantinedTest") + total_runs = test_case.outcome_distribution.total + failure_rate = (test_case.outcome_distribution.failed + test_case.outcome_distribution.flaky) / total_runs if total_runs > 0 else 0 + print(f"" + f"" + f"") + print("
Class | Test Case | Failure Rate | Build Scans | Link
{test_name} ↗️
{method_name} | {failure_rate:.2%} | {total_runs} | ↗️
") + + # Print detailed execution history + print("\n
") + print("Detailed Execution History\n") + + for test_name, details in sorted(problematic_tests.items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + print(f"\n### {test_name}") + print(f"* Days Quarantined: {details['days_quarantined']}") + print(f"* Recent Failure Rate: {details['recent_failure_rate']:.2%}") + print(f"* Total Runs: {details['container_result'].outcome_distribution.total}") + print(f"* Build Outcomes: Passed: {details['container_result'].outcome_distribution.passed} | " + f"Failed: {details['container_result'].outcome_distribution.failed} | " + f"Flaky: {details['container_result'].outcome_distribution.flaky}") + + for test_method in sorted(details['test_cases'], + key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total + if x.outcome_distribution.total > 0 else 0, + reverse=True): + if test_method.timeline: + print(f"\n#### {method_name}") + print("Recent Executions:") + print("```") + print("Date/Time (UTC) Outcome Build ID") + print("-" * 44) + for entry in sorted(test_method.timeline, key=lambda x: x.timestamp, reverse=True)[:5]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") + print("```") + + print("
") + +def print_flaky_regressions(flaky_regressions: Dict[str, Dict], threshold_days: int): + """Print tests that have recently started showing flaky behavior""" + print("\n## Flaky Test Regressions") + if not flaky_regressions: + print("No flaky test regressions found.") + return + + print(f"Found {len(flaky_regressions)} tests that have started showing increased flaky behavior recently.") + + # Print table with test details + print("\n") + print("") + + for test_name, details in flaky_regressions.items(): + class_link = get_develocity_class_link(test_name, threshold_days) + print(f"") + print(f"" + f"" + f"") + + # Add recent execution details in sub-rows + print("") + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f"") + print("
Test Class | Recent Flaky Rate | Historical Rate | Recent Executions | Link
{test_name} ↗️
{details['recent_flaky_rate']:.2%} | {details['historical_flaky_rate']:.2%} | {len(details['recent_executions'])}
Recent Executions:
{date_str} - {entry.outcome}
") + + # Print detailed history + print("\n
") + print("Detailed Execution History\n") + + for test_name, details in sorted(flaky_regressions.items(), + key=lambda x: x[1]['recent_flaky_rate'], + reverse=True): + print(f"\n### {test_name}") + print(f"* Recent Flaky Rate: {details['recent_flaky_rate']:.2%}") + print(f"* Historical Flaky Rate: {details['historical_flaky_rate']:.2%}") + print("\nRecent Executions:") + print("```") + print("Date/Time (UTC) Outcome Build ID") + print("-" * 44) + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") + print("```") + + print("
") + +def print_persistent_failing_tests(persistent_failures: Dict[str, Dict], threshold_days: int): + """Print tests that have been consistently failing over time""" + print("\n## Persistently Failing/Flaky Tests") + if not persistent_failures: + print("No persistently failing tests found.") + return + + print(f"Found {len(persistent_failures)} tests that have been consistently failing or flaky.") + + # Print table with test details + print("\n") + print("") + + for class_name, class_details in sorted(persistent_failures.items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + class_link = get_develocity_class_link(class_name, threshold_days) + + # Print class row + print(f"" + f"") + + # Print test case rows + for test_name, test_details in sorted(class_details['test_cases'].items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + test_link = get_develocity_method_link(class_name, test_name, threshold_days) + print(f"" + f"" + f"" + f"" + f"" + f"") + print("
Test Class | Test Case | Failure Rate | Total Runs | Failed/Flaky | Link
{class_name} ↗️
{test_name} | {test_details['failure_rate']:.2%} | {test_details['total_executions']} | {test_details['failed_executions']} | ↗️
") + + # Print detailed history + print("\n
") + print("Detailed Execution History\n") + + for class_name, class_details in sorted(persistent_failures.items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + print(f"\n### {class_name}") + print(f"* Overall Failure Rate: {class_details['failure_rate']:.2%}") + print(f"* Total Executions: {class_details['total_executions']}") + print(f"* Failed/Flaky Executions: {class_details['failed_executions']}") + + for test_name, test_details in sorted(class_details['test_cases'].items(), + key=lambda x: x[1]['failure_rate'], + reverse=True): + print("\nRecent Executions:") + print("```") + print("Date/Time (UTC) Outcome Build ID") + print("-" * 44) + for entry in sorted(test_details['timeline'], key=lambda x: x.timestamp, reverse=True)[:5]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") + print("```") + + print("
") + +def print_cleared_tests(cleared_tests: Dict[str, Dict], threshold_days: int, test_type: str = None): + """Print tests that are ready to be unquarantined""" + print("\n## Cleared Tests (Ready for Unquarantine)") + if not cleared_tests: + print("No tests ready to be cleared from quarantine.") + return + + # Calculate total number of test methods + total_methods = sum(len(details['test_cases']) for details in cleared_tests.values()) + + print(f"Found {len(cleared_tests)} test classes with {total_methods} test methods that have been consistently passing. " + f"These tests could be candidates for removing quarantine annotations at either class or method level.") + + # Print table with class and method information + print("\n") + print("") + + for test_name, details in sorted(cleared_tests.items(), + key=lambda x: x[1]['success_rate'], + reverse=True): + class_link = get_develocity_class_link(test_name, threshold_days, test_type=test_type) + print(f"") + print(f"" + f"" + f"" + f"") + + for test_case in details['test_cases']: + method_name = test_case['name'].split('.')[-1] + method_link = get_develocity_method_link(test_name, test_case['name'], threshold_days) + recent_status = "N/A" + if test_case['recent_executions']: + recent_status = test_case['recent_executions'][-1].outcome + + print(f"" + f"" + f"" + f"" + f"") + print("") + print("
Test Class | Test Method | Success Rate | Total Runs | Recent Status | Link
{test_name} ↗️
Class Overall | {details['success_rate']:.2%} | {details['total_executions']} | {details['successful_runs']} passed
{method_name} | {test_case['success_rate']:.2%} | {test_case['total_executions']} | {recent_status} | ↗️
 
") + + # Print detailed history + print("\n
") + print("Detailed Test Method History\n") + + for test_name, details in sorted(cleared_tests.items(), + key=lambda x: x[1]['success_rate'], + reverse=True): + print(f"\n### {test_name}") + print(f"* Overall Success Rate: {details['success_rate']:.2%}") + print(f"* Total Executions: {details['total_executions']}") + print(f"* Consecutive Successful Runs: {details['successful_runs']}") + + for test_case in details['test_cases']: + method_name = test_case['name'].split('.')[-1] + print(f"\n#### {method_name}") + print(f"* Success Rate: {test_case['success_rate']:.2%}") + print(f"* Total Runs: {test_case['total_executions']}") + print("\nRecent Executions:") + print("```") + print("Date/Time (UTC) Outcome Build ID") + print("-" * 44) + for entry in sorted(test_case['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") + print("```") + + print("
") + +def main(): + token = None + if os.environ.get("DEVELOCITY_ACCESS_TOKEN"): + token = os.environ.get("DEVELOCITY_ACCESS_TOKEN") + else: + print("No auth token was specified. You must set DEVELOCITY_ACCESS_TOKEN to your personal access token.") + exit(1) + + # Configuration + BASE_URL = "https://ge.apache.org" + PROJECT = "kafka" + QUARANTINE_THRESHOLD_DAYS = 7 + MIN_FAILURE_RATE = 0.1 + RECENT_FAILURE_THRESHOLD = 0.5 + SUCCESS_THRESHOLD = 0.7 # For cleared tests + MIN_FLAKY_RATE = 0.2 # For flaky regressions + + analyzer = TestAnalyzer(BASE_URL, token) + + try: + quarantined_builds = analyzer.get_build_info([], PROJECT, "quarantinedTest", 7, bypass_cache=True, fetch_all=True) + regular_builds = analyzer.get_build_info([], PROJECT, "test", 7, bypass_cache=True, fetch_all=True) + + analyzer.update_cache(quarantined_builds) + analyzer.update_cache(regular_builds) + + # Get test results + quarantined_results = analyzer.get_test_results( + PROJECT, + threshold_days=QUARANTINE_THRESHOLD_DAYS, + test_type="quarantinedTest" + ) + + regular_results = analyzer.get_test_results( + PROJECT, + threshold_days=7, # Last 7 days for regular tests + test_type="test" + ) + + # Generate reports + problematic_tests = analyzer.get_problematic_quarantined_tests( + quarantined_results, + QUARANTINE_THRESHOLD_DAYS, + MIN_FAILURE_RATE, + RECENT_FAILURE_THRESHOLD + ) + + flaky_regressions = analyzer.get_flaky_test_regressions( + PROJECT, + regular_results, + recent_days=7, + min_flaky_rate=MIN_FLAKY_RATE + ) + + cleared_tests = analyzer.get_cleared_tests( + PROJECT, + quarantined_results, + success_threshold=SUCCESS_THRESHOLD + ) + + # Get persistent failing tests (add after getting regular_results) + persistent_failures = analyzer.get_persistent_failing_tests( + regular_results, + min_failure_rate=0.2, # 20% failure rate threshold + min_executions=5 + ) + + # Print report header + print(f"\n# Flaky Test Report for {datetime.now(pytz.UTC).strftime('%Y-%m-%d')}") + print(f"This report was run on {datetime.now(pytz.UTC).strftime('%Y-%m-%d %H:%M:%S')} UTC") + + # Print each section + print_most_problematic_tests(problematic_tests, QUARANTINE_THRESHOLD_DAYS, test_type="quarantinedTest") + print_flaky_regressions(flaky_regressions, QUARANTINE_THRESHOLD_DAYS) + print_persistent_failing_tests(persistent_failures, QUARANTINE_THRESHOLD_DAYS) + print_cleared_tests(cleared_tests, QUARANTINE_THRESHOLD_DAYS, test_type="quarantinedTest") + + except Exception as e: + logger.exception("Error occurred during report generation") + print(f"Error occurred: {str(e)}") + +if __name__ == "__main__": + # Configure logging + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + handlers=[ + logging.FileHandler("flaky_test_report.log") + ] + ) + main() diff --git a/.github/scripts/junit.py b/.github/scripts/junit.py index 48d2f528a2631..b23f444720ba3 100644 --- a/.github/scripts/junit.py +++ b/.github/scripts/junit.py @@ -142,6 +142,7 @@ def parse_report(workspace_path, report_path, fp) -> Iterable[TestSuite]: cur_suite: Optional[TestSuite] = None partial_test_case = None test_case_failed = False + test_case_skipped = False for (event, elem) in xml.etree.ElementTree.iterparse(fp, events=["start", "end"]): if event == "start": if elem.tag == "testsuite": @@ -158,6 +159,7 @@ def parse_report(workspace_path, report_path, fp) -> Iterable[TestSuite]: test_time = float(elem.get("time", 0.0)) partial_test_case = partial(TestCase, test_name, class_name, test_time) test_case_failed = False + 
test_case_skipped = False elif elem.tag == "failure": failure_message = elem.get("message") if failure_message: @@ -171,11 +173,12 @@ def parse_report(workspace_path, report_path, fp) -> Iterable[TestSuite]: elif elem.tag == "skipped": skipped = partial_test_case(None, None, None) cur_suite.skipped_tests.append(skipped) + test_case_skipped = True else: pass elif event == "end": if elem.tag == "testcase": - if not test_case_failed: + if not test_case_failed and not test_case_skipped: passed = partial_test_case(None, None, None) cur_suite.passed_tests.append(passed) partial_test_case = None @@ -303,7 +306,7 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: logger.debug(f"Found skipped test: {skipped_test}") skipped_table.append((simple_class_name, skipped_test.test_name)) - # Collect all tests that were run as part of quarantinedTest + # Only collect quarantined tests from the "quarantinedTest" task if task == "quarantinedTest": for test in all_suite_passed.values(): simple_class_name = test.class_name.split(".")[-1] @@ -329,59 +332,89 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: # The stdout (print) goes to the workflow step console output. # The stderr (logger) is redirected to GITHUB_STEP_SUMMARY which becomes part of the HTML job summary. report_url = get_env("JUNIT_REPORT_URL") - report_md = f"Download [HTML report]({report_url})." - summary = (f"{total_run} tests cases run in {duration}. " + if report_url: + report_md = f"Download [HTML report]({report_url})." + else: + report_md = "No report available. JUNIT_REPORT_URL was missing." + summary = (f"{total_run} tests cases run in {duration}.\n\n" f"{total_success} {PASSED}, {total_failures} {FAILED}, " - f"{total_flaky} {FLAKY}, {total_skipped} {SKIPPED}, and {total_errors} errors.") + f"{total_flaky} {FLAKY}, {total_skipped} {SKIPPED}, {len(quarantined_table)} {QUARANTINED}, and {total_errors} errors.") print("## Test Summary\n") - print(f"{summary} {report_md}\n") + print(f"{summary}\n\n{report_md}\n") + + # Failed if len(failed_table) > 0: - logger.info(f"Found {len(failed_table)} test failures:") - print("### Failed Tests\n") + print("
") + print(f"Failed Tests {FAILED} ({len(failed_table)})\n") print(f"| Module | Test | Message | Time |") print(f"| ------ | ---- | ------- | ---- |") + logger.info(f"Found {len(failed_table)} test failures:") for row in failed_table: logger.info(f"{FAILED} {row[0]} > {row[1]}") row_joined = " | ".join(row) print(f"| {row_joined} |") + print("\n
") print("\n") + + # Flaky if len(flaky_table) > 0: - logger.info(f"Found {len(flaky_table)} flaky test failures:") - print("### Flaky Tests\n") + print("
") + print(f"Flaky Tests {FLAKY} ({len(flaky_table)})\n") print(f"| Module | Test | Message | Time |") print(f"| ------ | ---- | ------- | ---- |") + logger.info(f"Found {len(flaky_table)} flaky test failures:") for row in flaky_table: logger.info(f"{FLAKY} {row[0]} > {row[1]}") row_joined = " | ".join(row) print(f"| {row_joined} |") + print("\n
") print("\n") + + # Skipped if len(skipped_table) > 0: print("
") - print(f"{len(skipped_table)} Skipped Tests\n") + print(f"Skipped Tests {SKIPPED} ({len(skipped_table)})\n") print(f"| Module | Test |") print(f"| ------ | ---- |") + logger.debug(f"::group::Found {len(skipped_table)} skipped tests") for row in skipped_table: row_joined = " | ".join(row) print(f"| {row_joined} |") + logger.debug(f"{row[0]} > {row[1]}") print("\n
") + logger.debug("::endgroup::") + print("\n") + # Quarantined if len(quarantined_table) > 0: - logger.info(f"Ran {len(quarantined_table)} quarantined test:") print("
") - print(f"{len(quarantined_table)} Quarantined Tests\n") + print(f"Quarantined Tests {QUARANTINED} ({len(quarantined_table)})\n") print(f"| Module | Test |") print(f"| ------ | ---- |") + logger.debug(f"::group::Found {len(quarantined_table)} quarantined tests") for row in quarantined_table: - logger.info(f"{QUARANTINED} {row[0]} > {row[1]}") row_joined = " | ".join(row) print(f"| {row_joined} |") + logger.debug(f"{row[0]} > {row[1]}") print("\n
") + logger.debug("::endgroup::") + + print("
") # Print special message if there was a timeout - exit_code = get_env("GRADLE_EXIT_CODE", int) - if exit_code == 124: + test_exit_code = get_env("GRADLE_TEST_EXIT_CODE", int) + quarantined_test_exit_code = get_env("GRADLE_QUARANTINED_TEST_EXIT_CODE", int) + + if test_exit_code == 124 or quarantined_test_exit_code == 124: + # Special handling for timeouts. The exit code 124 is emitted by 'timeout' command used in build.yml. + # A watchdog script "thread-dump.sh" will use jstack to force a thread dump for any Gradle process + # still running after the timeout. We capture the exit codes of the two test tasks and pass them to + # this script. If either "test" or "quarantinedTest" fails due to timeout, we want to fail the overall build. thread_dump_url = get_env("THREAD_DUMP_URL") - logger.debug(f"Gradle command timed out. These are partial results!") + if test_exit_code == 124: + logger.debug(f"Gradle task for 'test' timed out. These are partial results!") + else: + logger.debug(f"Gradle task for 'quarantinedTest' timed out. These are partial results!") logger.debug(summary) if thread_dump_url: print(f"\nThe JUnit tests were cancelled due to a timeout. Thread dumps were generated before the job was cancelled. " @@ -390,7 +423,7 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: else: logger.debug(f"Failing this step because the tests timed out. Thread dumps were not archived, check logs in JUnit step.") exit(1) - elif exit_code in (0, 1): + elif test_exit_code in (0, 1): logger.debug(summary) if total_failures > 0: logger.debug(f"Failing this step due to {total_failures} test failures") @@ -401,5 +434,5 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: else: exit(0) else: - logger.debug(f"Gradle had unexpected exit code {exit_code}. Failing this step") + logger.debug(f"Gradle had unexpected exit code {test_exit_code}. Failing this step") exit(1) diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt index 900ce9a9b61bc..d59455f79dac6 100644 --- a/.github/scripts/requirements.txt +++ b/.github/scripts/requirements.txt @@ -13,3 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. PyYAML~=6.0 +pytz==2024.2 +requests==2.32.3 diff --git a/.github/workflows/README.md b/.github/workflows/README.md index f921ad78393ca..1087a3c1d6042 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -51,6 +51,67 @@ using this for very simple tasks such as applying labels or adding comments to P _We must never run the untrusted PR code in the elevated `pull_request_target` context_ +## Our Workflows + +### Trunk Build + +The [ci.yml](ci.yml) is run when commits are pushed to trunk. This calls into [build.yml](build.yml) +to run our main build. In the trunk build, we do not read from the Gradle cache, +but we do write to it. Also, the test catalog is only updated from trunk builds. + +### PR Build + +Similar to trunk, this workflow starts in [ci.yml](ci.yml) and calls into [build.yml](build.yml). +Unlike trunk, the PR builds _will_ utilize the Gradle cache. + +### PR Triage + +In order to get the attention of committers, we have a triage workflow for Pull Requests +opened by non-committers. This workflow consists of two files: + +* [pr-update.yml](pr-update.yml) When a PR is created, add the `triage` label if + the PR was opened by a non-committer. 
+* [pr-labels-cron.yml](pr-labels-cron.yml) Cron job to add `needs-attention` label to community + PRs that have not been reviewed after 7 days. Also includes a cron job to + remove the `triage` and `needs-attention` labels from PRs which have been reviewed. + +_The pr-update.yml workflow includes pull_request_target!_ + +For committers to avoid having this label added, their membership in the ASF GitHub +organization must be public. Here are the steps to take: + +* Navigate to the ASF organization's "People" page https://github.com/orgs/apache/people +* Find yourself +* Change "Organization Visibility" to Public + +Full documentation for this process can be found in GitHub's docs: +https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-your-membership-in-organizations/publicizing-or-hiding-organization-membership + +If you are a committer and do not want your membership in the ASF org listed as public, +you will need to remove the `triage` label manually. + +### CI Approved + +Due to a combination of GitHub security and ASF's policy, we required explicit +approval of workflows on PRs submitted by non-committers (and non-contributors). +To simply this process, we have a `ci-approved` label which automatically approves +these workflows. + +There are two files related to this workflow: + +* [pr-labeled.yml](pr-labeled.yml) approves a pending approval for PRs that have +been labeled with `ci-approved` +* [ci-requested.yml](ci-requested.yml) approves future workflow requests automatically +if the PR has the `ci-approved` label + +_The pr-labeled.yml workflow includes pull_request_target!_ + +### Stale PRs + +This one is straightforward. Using the "actions/stale" GitHub Action, we automatically +label and eventually close PRs which have not had activity for some time. See the +[stale.yml](stale.yml) workflow file for specifics. + ## GitHub Actions Quirks ### Composite Actions diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fb08bb2a9f510..d878a52ddac29 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,14 +21,10 @@ name: Check and Test on: workflow_call: inputs: - gradle-cache-read-only: - description: "Should the Gradle cache be read-only?" + is-trunk: + description: "Is this a trunk build?" default: true type: boolean - gradle-cache-write-only: - description: "Should the Gradle cache be write-only?" - default: false - type: boolean is-public-fork: description: "Is this CI run from a public fork?" default: true @@ -105,8 +101,8 @@ jobs: uses: ./.github/actions/setup-gradle with: java-version: 23 - gradle-cache-read-only: ${{ inputs.gradle-cache-read-only }} - gradle-cache-write-only: ${{ inputs.gradle-cache-write-only }} + gradle-cache-read-only: ${{ !inputs.is-trunk }} + gradle-cache-write-only: ${{ inputs.is-trunk }} develocity-access-key: ${{ secrets.GE_ACCESS_TOKEN }} - name: Compile and validate env: @@ -117,7 +113,7 @@ jobs: # --scan: Publish the build scan. 
This will only work on PRs from apache/kafka and trunk # --no-scan: For public fork PRs, we won't attempt to publish the scan run: | - ./gradlew --build-cache --info $SCAN_ARG check -x test + ./gradlew --build-cache --info $SCAN_ARG check siteDocTar -x test - name: Archive check reports if: always() uses: actions/upload-artifact@v4 @@ -137,6 +133,16 @@ jobs: run: python .github/scripts/rat.py env: GITHUB_WORKSPACE: ${{ github.workspace }} + - name: Check generated documentation + # Check if there are any empty files under ./site-docs/generated, If any empty files are found, print an error + # message and list the empty files + run: | + tar zxvf core/build/distributions/kafka_2.13-$(./gradlew properties | grep version: | awk '{print $NF}' | head -n 1)-site-docs.tgz + if find ./site-docs/generated -type f -exec grep -L "." {} \; | grep -q "."; then + echo "One or more documentation files are empty!" >&2 + find ./site-docs/generated -type f -exec grep -L "." {} \; >&2 + exit 1 + fi test: needs: [validate, load-catalog] @@ -145,7 +151,9 @@ jobs: strategy: fail-fast: false matrix: - java: [ 23, 11 ] # If we change these, make sure to adjust ci-complete.yml + java: [ 23, 17 ] # If we change these, make sure to adjust ci-complete.yml + outputs: + timed-out: ${{ (steps.junit-test.outputs.gradle-exitcode == '124' || steps.junit-quarantined-test.outputs.gradle-exitcode == '124') }} name: JUnit tests Java ${{ matrix.java }} steps: - name: Checkout code @@ -158,39 +166,37 @@ jobs: uses: ./.github/actions/setup-gradle with: java-version: ${{ matrix.java }} - gradle-cache-read-only: ${{ inputs.gradle-cache-read-only }} - gradle-cache-write-only: ${{ inputs.gradle-cache-write-only }} + gradle-cache-read-only: ${{ !inputs.is-trunk }} + gradle-cache-write-only: ${{ inputs.is-trunk }} develocity-access-key: ${{ secrets.GE_ACCESS_TOKEN }} # If the load-catalog job failed, we won't be able to download the artifact. Since we don't want this to fail # the overall workflow, so we'll continue here without a test catalog. - name: Load Test Catalog + id: load-test-catalog uses: actions/download-artifact@v4 continue-on-error: true with: name: combined-test-catalog - - name: Test - # Gradle flags - # --build-cache: Let Gradle restore the build cache - # --no-scan: Don't attempt to publish the scan yet. We want to archive it first. - # --continue: Keep running even if a test fails - # -PcommitId Prevent the Git SHA being written into the jar files (which breaks caching) + - name: JUnit Quarantined Tests + id: junit-quarantined-test + uses: ./.github/actions/run-gradle + with: + test-task: quarantinedTest + timeout-minutes: 180 + test-catalog-path: ${{ steps.load-test-catalog.outputs.download-path }}/combined-test-catalog.txt + build-scan-artifact-name: build-scan-quarantined-test-${{ matrix.java }} + + - name: JUnit Tests id: junit-test - env: - TIMEOUT_MINUTES: 180 # 3 hours - run: | - set +e - ./.github/scripts/thread-dump.sh & - timeout ${TIMEOUT_MINUTES}m ./gradlew --build-cache --continue --no-scan \ - -PtestLoggingEvents=started,passed,skipped,failed \ - -PmaxParallelForks=2 \ - -PmaxTestRetries=1 -PmaxTestRetryFailures=3 \ - -PmaxQuarantineTestRetries=3 -PmaxQuarantineTestRetryFailures=0 \ - -PcommitId=xxxxxxxxxxxxxxxx \ - quarantinedTest test - exitcode="$?" 
- echo "exitcode=$exitcode" >> $GITHUB_OUTPUT + uses: ./.github/actions/run-gradle + with: + test-task: test + timeout-minutes: 180 # 3 hours + test-catalog-path: ${{ steps.load-test-catalog.outputs.download-path }}/combined-test-catalog.txt + build-scan-artifact-name: build-scan-test-${{ matrix.java }} + - name: Archive JUnit HTML reports uses: actions/upload-artifact@v4 id: junit-upload-artifact @@ -200,6 +206,7 @@ jobs: **/build/reports/tests/* compression-level: 9 if-no-files-found: ignore + - name: Archive JUnit XML uses: actions/upload-artifact@v4 with: @@ -208,9 +215,10 @@ jobs: build/junit-xml/**/*.xml compression-level: 9 if-no-files-found: ignore + - name: Archive Thread Dumps id: thread-dump-upload-artifact - if: always() && steps.junit-test.outputs.exitcode == '124' + if: always() && (steps.junit-test.outputs.gradle-exitcode == '124' || steps.junit-quarantined-test.outputs.gradle-exitcode == '124') uses: actions/upload-artifact@v4 with: name: junit-thread-dumps-${{ matrix.java }} @@ -218,13 +226,16 @@ jobs: thread-dumps/* compression-level: 9 if-no-files-found: ignore + - name: Parse JUnit tests run: python .github/scripts/junit.py --export-test-catalog ./test-catalog >> $GITHUB_STEP_SUMMARY env: GITHUB_WORKSPACE: ${{ github.workspace }} JUNIT_REPORT_URL: ${{ steps.junit-upload-artifact.outputs.artifact-url }} THREAD_DUMP_URL: ${{ steps.thread-dump-upload-artifact.outputs.artifact-url }} - GRADLE_EXIT_CODE: ${{ steps.junit-test.outputs.exitcode }} + GRADLE_TEST_EXIT_CODE: ${{ steps.junit-test.outputs.gradle-exitcode }} + GRADLE_QUARANTINED_TEST_EXIT_CODE: ${{ steps.junit-quarantined-test.outputs.gradle-exitcode }} + - name: Archive Test Catalog if: ${{ always() && matrix.java == '23' }} uses: actions/upload-artifact@v4 @@ -233,19 +244,11 @@ jobs: path: test-catalog compression-level: 9 if-no-files-found: ignore - - name: Archive Build Scan - if: always() - uses: actions/upload-artifact@v4 - with: - name: build-scan-test-${{ matrix.java }} - path: ~/.gradle/build-scan-data - compression-level: 9 - if-no-files-found: ignore update-test-catalog: name: Update Test Catalog needs: test - if: ${{ always() && !inputs.is-public-fork }} + if: ${{ always() && inputs.is-trunk && needs.test.outputs.timed-out == 'false' }} runs-on: ubuntu-latest permissions: contents: write diff --git a/.github/workflows/ci-complete.yml b/.github/workflows/ci-complete.yml index 6478ae2c6daef..982eb7725e0a0 100644 --- a/.github/workflows/ci-complete.yml +++ b/.github/workflows/ci-complete.yml @@ -43,7 +43,8 @@ jobs: strategy: fail-fast: false matrix: - java: [ 23, 11 ] + java: [ 23, 17 ] + artifact-prefix: [ "build-scan-test-", "build-scan-quarantined-test-"] steps: - name: Env run: printenv @@ -66,7 +67,7 @@ jobs: with: github-token: ${{ github.token }} run-id: ${{ github.event.workflow_run.id }} - name: build-scan-test-${{ matrix.java }} + name: ${{ matrix.artifact-prefix }}${{ matrix.java }} path: ~/.gradle/build-scan-data # This is where Gradle buffers unpublished build scan data when --no-scan is given - name: Handle missing scan if: ${{ steps.download-build-scan.outcome == 'failure' }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dabec1a6eed6e..c02c47c7584be 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,6 +19,7 @@ on: push: branches: - 'trunk' + - '4.0' schedule: - cron: '0 0 * * 6,0' # Run on Saturday and Sunday at midnight UTC @@ -27,17 +28,17 @@ on: types: [ opened, synchronize, ready_for_review, reopened ] branches: - 'trunk' + - '4.0' 
concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/trunk' }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} jobs: build: uses: ./.github/workflows/build.yml with: - gradle-cache-read-only: ${{ github.ref != 'refs/heads/trunk' }} - gradle-cache-write-only: ${{ github.ref == 'refs/heads/trunk' }} + is-trunk: ${{ github.ref == 'refs/heads/trunk' }} is-public-fork: ${{ github.event.pull_request.head.repo.fork || false }} secrets: inherit diff --git a/.github/workflows/docker_build_and_test.yml b/.github/workflows/docker_build_and_test.yml index 0fe70ff023420..67acdf9fb7424 100644 --- a/.github/workflows/docker_build_and_test.yml +++ b/.github/workflows/docker_build_and_test.yml @@ -37,6 +37,11 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.10" + - name: Setup Docker Compose + run: | + sudo curl -L "https://github.com/docker/compose/releases/download/v2.30.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + sudo mv /usr/local/bin/docker-compose /usr/bin/docker-compose + sudo chmod +x /usr/bin/docker-compose - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/docker_official_image_build_and_test.yml b/.github/workflows/docker_official_image_build_and_test.yml index 419bfadc1416d..58866a19d6cab 100644 --- a/.github/workflows/docker_official_image_build_and_test.yml +++ b/.github/workflows/docker_official_image_build_and_test.yml @@ -36,6 +36,11 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.10" + - name: Setup Docker Compose + run: | + sudo curl -L "https://github.com/docker/compose/releases/download/v2.30.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + sudo mv /usr/local/bin/docker-compose /usr/bin/docker-compose + sudo chmod +x /usr/bin/docker-compose - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/docker_scan.yml b/.github/workflows/docker_scan.yml index 693b80c0626b6..a76916fffa916 100644 --- a/.github/workflows/docker_scan.yml +++ b/.github/workflows/docker_scan.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: # This is an array of supported tags. Make sure this array only contains the supported tags - supported_image_tag: ['latest', '3.7.1', "3.8.0", "3.8.1", "3.9.0"] + supported_image_tag: ['latest', '3.7.2', '3.8.1', '3.9.0'] steps: - name: Run CVE scan uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 diff --git a/bin/zookeeper-server-start.sh b/.github/workflows/generate-reports.yml old mode 100755 new mode 100644 similarity index 51% rename from bin/zookeeper-server-start.sh rename to .github/workflows/generate-reports.yml index bd9c1142817c0..e2d52cea53a97 --- a/bin/zookeeper-server-start.sh +++ b/.github/workflows/generate-reports.yml @@ -1,4 +1,3 @@ -#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -14,31 +13,31 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] zookeeper.properties" - exit 1 -fi -base_dir=$(dirname $0) +name: Flaky Test Report +on: + workflow_dispatch: # Let us run manually -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" -fi + schedule: + - cron: '0 6 * * *' # Run daily at 6am UTC -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M" -fi - -EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'} - -COMMAND=$1 -case $COMMAND in - -daemon) - EXTRA_ARGS="-daemon "$EXTRA_ARGS - shift - ;; - *) - ;; -esac - -exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@" +jobs: + flaky-test-report: + name: Flaky Test Report + if : github.event.repository.fork == 'false' + permissions: + contents: read + runs-on: ubuntu-latest + steps: + - name: Env + run: printenv + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Python + uses: ./.github/actions/setup-python + - name: Run Report + env: + DEVELOCITY_ACCESS_TOKEN: ${{ secrets.DV_API_ACCESS }} + run: | + python ./.github/scripts/develocity_reports.py >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/pr-labeled.yml b/.github/workflows/pr-labeled.yml index 34d5702ab1fce..87b39a659ec0c 100644 --- a/.github/workflows/pr-labeled.yml +++ b/.github/workflows/pr-labeled.yml @@ -44,10 +44,11 @@ jobs: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} PR_NUMBER: ${{ github.event.number }} SHA: ${{ github.event.pull_request.head.sha }} + # Find the CI workflow run that is pending approval and approve it run: | set +e echo "Found 'ci-approved' label on PR #$PR_NUMBER." - RUN_ID=$(gh run list -L 1 -c $SHA -s action_required --json databaseId --jq '.[].databaseId') + RUN_ID=$(gh run list -L 1 -c $SHA -s action_required -w CI --json databaseId --jq '.[].databaseId') if [ -z "$RUN_ID" ]; then echo "No workflow run found for SHA $SHA"; exit 0; @@ -64,4 +65,4 @@ jobs: repository: ${{ github.repository }} run_id: ${{ env.RUN_ID }} pr_number: ${{ env.PR_NUMBER }} - commit_sha: ${{ github.event.workflow_run.head_sha }} + commit_sha: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/pr-labels-cron.yml b/.github/workflows/pr-labels-cron.yml new file mode 100644 index 0000000000000..5faaca72ed36b --- /dev/null +++ b/.github/workflows/pr-labels-cron.yml @@ -0,0 +1,92 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Fixup PR Labels + +on: + workflow_dispatch: # Let us run manually + + schedule: + - cron: '0 3 * * *' # Run at 3:00 UTC nightly + +jobs: + # This job removes the triage and needs-attention labels from any PRs that have been reviewed. 
Once reviewed, it is + # assumed that a PR does _not_ need extra attention from the committers, so these labels can be removed. + fixup-pr-labels: + runs-on: ubuntu-latest + strategy: + matrix: + label: [triage, needs-attention] + steps: + - name: Env + run: printenv + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + - name: Remove label + uses: actions/github-script@v7 + continue-on-error: true + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + console.log("Finding PRs with label '${{ matrix.label }}'"); + github.paginate("GET /search/issues{?q}", { + q: "repo:apache/kafka label:${{ matrix.label }} is:pull-request" + }) + .then((pulls) => { + pulls.forEach(pull => { + github.request("GET /repos/{owner}/{repo}/pulls/{pull_number}/reviews", { + owner: "apache", + repo: "kafka", + pull_number: pull.number, + headers: { + "X-GitHub-Api-Version": "2022-11-28" + } + }).then((resp) => { + console.log("Found " + resp.data.length + " reviews for PR " + pull.number); + if (resp.data.length > 0) { + console.log("Removing '${{ matrix.label }}' label from PR " + pull.number + " : " + pull.title); + github.rest.issues.removeLabel({ + owner: "apache", + repo: "kafka", + issue_number: pull.number, + name: "${{ matrix.label }}" + }); + } + }); + }); + }); + + # This job adds a 'needs-attention' label to any PR that has not been updated in 7 days and has been labeled with 'triage'. + # The idea here is to give committers a quick way to see which PRs have languished and need attention. + needs-attention: + runs-on: ubuntu-latest + needs: [fixup-pr-labels] + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v9 + with: + debug-only: ${{ inputs.dryRun || false }} + operations-per-run: ${{ inputs.operationsPerRun || 500 }} + days-before-stale: 7 + days-before-close: -1 + ignore-pr-updates: true + only-pr-labels: 'triage' + stale-pr-label: 'needs-attention' + stale-pr-message: | + A label of 'needs-attention' was automatically added to this PR in order to raise the + attention of the committers. Once this issue has been triaged, the `triage` label + should be removed to prevent this automation from happening again. 
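The label automation above can also be exercised by hand when a committer wants to act before the nightly cron run. The following is a minimal sketch only, assuming an authenticated `gh` CLI with access to apache/kafka; the PR number argument is illustrative and not part of this change:

```bash
#!/usr/bin/env bash
# Hedged sketch: manual equivalents of the pr-labels-cron.yml automation above.
# Assumes the GitHub CLI (`gh`) is installed and authenticated for apache/kafka.
set -euo pipefail

REPO="apache/kafka"
PR_NUMBER="${1:?usage: $0 <pr-number>}"   # illustrative argument

# List the open PRs that still carry the 'triage' label.
gh pr list --repo "$REPO" --label triage --json number,title,author

# After reviewing a PR, drop the labels the cron job would otherwise clear.
gh pr edit "$PR_NUMBER" --repo "$REPO" --remove-label triage --remove-label needs-attention
```

The same CLI underpins the `ci-approved` flow described earlier: pr-labeled.yml runs `gh run list -c <sha> -s action_required -w CI` to find the pending workflow run for a labeled PR and then approves it.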
diff --git a/.github/workflows/pr-update.yml b/.github/workflows/pr-update.yml index 31e0038705499..e1cd7214d6c36 100644 --- a/.github/workflows/pr-update.yml +++ b/.github/workflows/pr-update.yml @@ -25,9 +25,11 @@ on: # * https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/ pull_request_target: types: [opened, reopened, synchronize] + branches: + - trunk jobs: - label_PRs: + add-labeler-labels: name: Labeler permissions: contents: read @@ -45,3 +47,24 @@ jobs: PR_NUM: ${{github.event.number}} run: | ./.github/scripts/label_small.sh + + add-triage-label: + if: github.event.action == 'opened' || github.event.action == 'reopened' + name: Add triage label + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Env + run: printenv + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + # If the PR is from a non-committer, add triage label + - if: | + github.event.pull_request.author_association != 'MEMBER' && + github.event.pull_request.author_association != 'OWNER' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_REPO: ${{ github.repository }} + NUMBER: ${{ github.event.pull_request.number }} + run: gh pr edit "$NUMBER" --add-label triage diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 816b754a968cd..9382d4173e94c 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -41,7 +41,7 @@ jobs: - uses: actions/stale@v9 with: debug-only: ${{ inputs.dryRun || false }} - operations-per-run: ${{ inputs.operationsPerRun || 100 }} + operations-per-run: ${{ inputs.operationsPerRun || 500 }} ascending: true days-before-stale: 90 days-before-close: 30 # Since adding 'stale' will update the PR, days-before-close is relative to that. diff --git a/LICENSE-binary b/LICENSE-binary index 97e045fd7534e..c5f65ca15da1f 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -206,63 +206,64 @@ This project bundles some components that are also licensed under the Apache License Version 2.0: audience-annotations-0.12.0 -caffeine-2.9.3 +caffeine-3.1.1 commons-beanutils-1.9.4 -commons-cli-1.4 commons-collections-3.2.2 commons-digester-2.1 -commons-io-2.14.0 commons-lang3-3.12.0 commons-logging-1.3.2 commons-validator-1.9.0 -error_prone_annotations-2.10.0 +error_prone_annotations-2.14.0 jackson-annotations-2.16.2 jackson-core-2.16.2 jackson-databind-2.16.2 jackson-dataformat-csv-2.16.2 +jackson-dataformat-yaml-2.16.2 jackson-datatype-jdk8-2.16.2 +jackson-jakarta-rs-base-2.16.2 +jackson-jakarta-rs-json-provider-2.16.2 jackson-jaxrs-base-2.16.2 jackson-jaxrs-json-provider-2.16.2 -jackson-module-afterburner-2.16.2 +jackson-module-blackbird-2.16.2 +jackson-module-jakarta-xmlbind-annotations-2.16.2 jackson-module-jaxb-annotations-2.16.2 jackson-module-scala_2.13-2.16.2 -jakarta.validation-api-2.0.2 +jakarta.inject-api-2.0.1 +jakarta.validation-api-3.0.2 javassist-3.29.2-GA -jetty-client-9.4.56.v20240826 +jetty-alpn-client-12.0.15 +jetty-client-12.0.15 jetty-continuation-9.4.56.v20240826 -jetty-http-9.4.56.v20240826 -jetty-io-9.4.56.v20240826 -jetty-security-9.4.56.v20240826 -jetty-server-9.4.56.v20240826 +jetty-ee10-servlet-12.0.15 +jetty-ee10-servlets-12.0.15 +jetty-http-12.0.15 +jetty-io-12.0.15 +jetty-security-12.0.15 +jetty-server-12.0.15 jetty-servlet-9.4.56.v20240826 jetty-servlets-9.4.56.v20240826 -jetty-util-9.4.56.v20240826 +jetty-session-12.0.15 +jetty-util-12.0.15 jetty-util-ajax-9.4.56.v20240826 jose4j-0.9.4 +log4j-api-2.24.1 +log4j-core-2.24.1 +log4j-core-test-2.24.1 +log4j-slf4j-impl-2.24.1 +log4j-1.2-api-2.24.1 
lz4-java-1.8.0 maven-artifact-3.9.6 metrics-core-4.1.12.1 metrics-core-2.2.0 -netty-buffer-4.1.111.Final -netty-codec-4.1.111.Final -netty-common-4.1.111.Final -netty-handler-4.1.111.Final -netty-resolver-4.1.111.Final -netty-transport-4.1.111.Final -netty-transport-classes-epoll-4.1.111.Final -netty-transport-native-epoll-4.1.111.Final -netty-transport-native-unix-common-4.1.111.Final opentelemetry-proto-1.0.0-alpha plexus-utils-3.5.1 -reload4j-1.2.25 -rocksdbjni-7.9.2 +rocksdbjni-9.7.3 scala-library-2.13.15 scala-logging_2.13-3.9.5 scala-reflect-2.13.15 snappy-java-1.1.10.5 +snakeyaml-2.2 swagger-annotations-2.2.25 -zookeeper-3.8.4 -zookeeper-jute-3.8.4 =============================================================================== This product bundles various third-party components under other open source @@ -273,36 +274,36 @@ See licenses/ for text of these licenses. Eclipse Distribution License - v 1.0 see: licenses/eclipse-distribution-license-1.0 -jakarta.activation-api-1.2.2 -jakarta.xml.bind-api-2.3.3 +jakarta.activation-api-2.1.0 +jakarta.xml.bind-api-3.0.1 --------------------------------------- Eclipse Public License - v 2.0 see: licenses/eclipse-public-license-2.0 -jakarta.annotation-api-1.3.5 -jakarta.ws.rs-api-2.1.6 -hk2-api-2.6.1 -hk2-locator-2.6.1 -hk2-utils-2.6.1 +jakarta.annotation-api-2.1.1 +jakarta.ws.rs-api-3.1.0 +hk2-api-3.0.6 +hk2-locator-3.0.6 +hk2-utils-3.0.6 osgi-resource-locator-1.0.3 -aopalliance-repackaged-2.6.1 +aopalliance-repackaged-3.0.6 jakarta.inject-2.6.1 -jersey-client-2.39.1 -jersey-common-2.39.1 -jersey-container-servlet-2.39.1 -jersey-container-servlet-core-2.39.1 -jersey-hk2-2.39.1 -jersey-server-2.39.1 +jersey-client-3.1.9 +jersey-common-3.1.9 +jersey-container-servlet-3.1.9 +jersey-container-servlet-core-3.1.9 +jersey-hk2-3.1.9 +jersey-server-3.1.9 --------------------------------------- CDDL 1.1 + GPLv2 with classpath exception see: licenses/CDDL+GPL-1.1 +jakarta.servlet-api-6.0.0 javax.activation-api-1.2.0 javax.annotation-api-1.3.2 javax.servlet-api-3.1.0 -javax.ws.rs-api-2.1.1 jaxb-api-2.3.1 activation-1.1.1 @@ -313,7 +314,6 @@ argparse4j-0.7.0, see: licenses/argparse-MIT classgraph-4.8.173, see: licenses/classgraph-MIT jopt-simple-5.0.4, see: licenses/jopt-simple-MIT slf4j-api-1.7.36, see: licenses/slf4j-MIT -slf4j-reload4j-1.7.36, see: licenses/slf4j-MIT pcollections-4.0.1, see: licenses/pcollections-MIT --------------------------------------- @@ -329,6 +329,7 @@ jline-3.25.1, see: licenses/jline-BSD-3-clause jsr305-3.0.2, see: licenses/jsr305-BSD-3-clause paranamer-2.8, see: licenses/paranamer-BSD-3-clause protobuf-java-3.25.5, see: licenses/protobuf-java-BSD-3-clause +jakarta.activation-2.0.1, see: licenses/jakarta-BSD-3-clause --------------------------------------- Go License diff --git a/NOTICE b/NOTICE index c9806e02513a9..36f87db19a965 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache Kafka -Copyright 2024 The Apache Software Foundation. +Copyright 2025 The Apache Software Foundation. This product includes software developed at The Apache Software Foundation (https://www.apache.org/). @@ -15,9 +15,5 @@ The streams-scala (streams/streams-scala) module was donated by Lightbend and th Copyright (C) 2018 Lightbend Inc. Copyright (C) 2017-2018 Alexis Seigneurin. -This project contains the following code copied from Apache Hadoop: -clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java -Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. 
- This project contains the following code copied from Apache Hive: streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java diff --git a/NOTICE-binary b/NOTICE-binary index d3207a131e25b..50dabb33d61de 100644 --- a/NOTICE-binary +++ b/NOTICE-binary @@ -15,10 +15,6 @@ The streams-scala (streams/streams-scala) module was donated by Lightbend and th Copyright (C) 2018 Lightbend Inc. Copyright (C) 2017-2018 Alexis Seigneurin. -This project contains the following code copied from Apache Hadoop: -clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java -Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. - This project contains the following code copied from Apache Hive: streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java @@ -600,45 +596,6 @@ limitations under the License. This software includes projects with other licenses -- see `doc/LICENSE.md`. -Apache ZooKeeper - Server -Copyright 2008-2021 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -Apache ZooKeeper - Jute -Copyright 2008-2021 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -The Netty Project - ================= - -Please visit the Netty web site for more information: - - * https://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at: - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE..txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - ------------------------------------------------------------------------------- This product contains the extensions to Java Collections Framework which has been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: diff --git a/README.md b/README.md index e996365475b37..efcddbcd35966 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,24 @@ -Apache Kafka -================= -See our [web site](https://kafka.apache.org) for details on the project. +

+<!-- Kafka Logo image -->

+ +[![CI](https://github.com/apache/kafka/actions/workflows/ci.yml/badge.svg?branch=trunk&event=push)](https://github.com/apache/kafka/actions/workflows/ci.yml?query=event%3Apush+branch%3Atrunk) +[![Flaky Test Report](https://github.com/apache/kafka/actions/workflows/generate-reports.yml/badge.svg?branch=trunk&event=schedule)](https://github.com/apache/kafka/actions/workflows/generate-reports.yml?query=event%3Aschedule+branch%3Atrunk) + +[**Apache Kafka**](https://kafka.apache.org) is an open-source distributed event streaming platform used by thousands of + +companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed. -We build and test Apache Kafka with 11, 17 and 21. We set the `release` parameter in javac and scalac -to `11` to ensure the generated binaries are compatible with Java 11 or higher (independently of the Java version -used for compilation). Java 11 support for the broker and tools has been deprecated since Apache Kafka 3.7 and removal -of both is planned for Apache Kafka 4.0.([KIP-1013](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=284789510) for more details). +We build and test Apache Kafka with 17 and 23. The `release` parameter in javac is set to `11` for the clients +and streams modules, and `17` for the rest, ensuring compatibility with their respective +minimum Java versions. Similarly, the `release` parameter in scalac is set to `11` for the streams modules and `17` +for the rest. Scala 2.13 is the only supported version in Apache Kafka. @@ -52,10 +63,10 @@ Follow instructions in https://kafka.apache.org/quickstart ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testTimeToNextUpdate ### Running a particular unit/integration test with log4j output ### -By default, there will be only small number of logs output while testing. You can adjust it by changing the `log4j.properties` file in the module's `src/test/resources` directory. +By default, there will be only small number of logs output while testing. You can adjust it by changing the `log4j2.yaml` file in the module's `src/test/resources` directory. -For example, if you want to see more logs for clients project tests, you can modify [the line](https://github.com/apache/kafka/blob/trunk/clients/src/test/resources/log4j.properties#L21) in `clients/src/test/resources/log4j.properties` -to `log4j.logger.org.apache.kafka=INFO` and then run: +For example, if you want to see more logs for clients project tests, you can modify [the line](https://github.com/apache/kafka/blob/trunk/clients/src/test/resources/log4j2.yaml#L35) in `clients/src/test/resources/log4j2.yaml` +to `level: INFO` and then run: ./gradlew cleanTest clients:test --tests NetworkClientTest @@ -94,12 +105,12 @@ fail due to code changes. 
You can just run: ./gradlew processMessages processTestMessages -### Running a Kafka broker in KRaft mode +### Running a Kafka broker Using compiled files: KAFKA_CLUSTER_ID="$(./bin/kafka-storage.sh random-uuid)" - ./bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/reconfig-server.properties + ./bin/kafka-storage.sh format --standalone -t $KAFKA_CLUSTER_ID -c config/kraft/reconfig-server.properties ./bin/kafka-server-start.sh config/kraft/reconfig-server.properties Using docker image: @@ -123,10 +134,14 @@ Streams has multiple sub-projects, but you can run all the tests: ./gradlew tasks ### Building IDE project #### -*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).* +*Note Please ensure that JDK17 is used when developing Kafka.* + +IntelliJ supports Gradle natively and it will automatically check Java syntax and compatibility for each module, even if +the Java version shown in the `Structure > Project Settings > Modules` may not be the correct one. + +When it comes to Eclipse, run: ./gradlew eclipse - ./gradlew idea The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory and we don't use Gradle's build directory @@ -161,6 +176,10 @@ Please note for this to work you should create/update user maven settings (typic ... +### Installing all projects to the local Maven repository ### + + ./gradlew -PskipSigning=true publishToMavenLocal + ### Installing specific projects to the local Maven repository ### ./gradlew -PskipSigning=true :streams:publishToMavenLocal @@ -179,6 +198,7 @@ You can run checkstyle using: The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails. +For experiments (or regression testing purposes) add `-PcheckstyleVersion=X.y.z` switch (to override project-defined checkstyle version). #### Spotless #### The import order is a part of static check. please call `spotlessApply` to optimize the imports of Java codes before filing pull request. 
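Several of the README steps above are typically combined into one local pass before opening a pull request. This is a sketch only: it assumes the build exposes the conventional checkstyle task names (`checkstyleMain`, `checkstyleTest`), and the `-PcheckstyleVersion` value is a placeholder rather than a pinned version.

```bash
#!/usr/bin/env bash
# Hedged sketch of a local pre-PR pass over the checks described in the README above.
set -euo pipefail

# Fix import ordering before filing the pull request (Spotless section above).
./gradlew spotlessApply

# Run checkstyle; append the property only when experimenting with a different
# checkstyle release (X.y.z is a placeholder).
./gradlew checkstyleMain checkstyleTest   # -PcheckstyleVersion=X.y.z

# Re-run a single test after raising the log level in the module's log4j2.yaml.
./gradlew cleanTest clients:test --tests NetworkClientTest

# Optionally install everything to the local Maven repository for downstream testing.
./gradlew -PskipSigning=true publishToMavenLocal
```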
diff --git a/Vagrantfile b/Vagrantfile index a053be28d01dc..3a6b5a5afe1d9 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -55,11 +55,11 @@ ec2_iam_instance_profile_name = nil ebs_volume_type = 'gp3' -jdk_major = '8' -jdk_full = '8u202-linux-x64' +jdk_major = '17' +jdk_full = '17-linux-x64' local_config_file = File.join(File.dirname(__FILE__), "Vagrantfile.local") -if File.exists?(local_config_file) then +if File.exist?(local_config_file) then eval(File.read(local_config_file), binding, "Vagrantfile.local") end diff --git a/bin/connect-distributed.sh b/bin/connect-distributed.sh index b8088ad923451..2fd00f025dc51 100755 --- a/bin/connect-distributed.sh +++ b/bin/connect-distributed.sh @@ -22,8 +22,8 @@ fi base_dir=$(dirname $0) -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$base_dir/../config/connect-log4j2.yaml" fi if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then diff --git a/bin/connect-mirror-maker.sh b/bin/connect-mirror-maker.sh index 8e2b2e162daac..daf6e32042d7d 100755 --- a/bin/connect-mirror-maker.sh +++ b/bin/connect-mirror-maker.sh @@ -22,8 +22,8 @@ fi base_dir=$(dirname $0) -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$base_dir/../config/connect-log4j2.yaml" fi if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then diff --git a/bin/connect-standalone.sh b/bin/connect-standalone.sh index bef78d658fda9..d0d8b8bff32ba 100755 --- a/bin/connect-standalone.sh +++ b/bin/connect-standalone.sh @@ -22,8 +22,8 @@ fi base_dir=$(dirname $0) -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$base_dir/../config/connect-log4j2.yaml" fi if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then diff --git a/bin/kafka-acls.sh b/bin/kafka-acls.sh index 8fa65542e10bf..ffbb1e198109b 100755 --- a/bin/kafka-acls.sh +++ b/bin/kafka-acls.sh @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@" +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.AclCommand "$@" diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh index b3291e461f2ba..8bd1b17623b12 100755 --- a/bin/kafka-run-class.sh +++ b/bin/kafka-run-class.sh @@ -23,7 +23,7 @@ fi # WINDOWS_OS_FORMAT == 1 if Cygwin or MinGW is detected, else 0. 
if [[ $(uname -a) =~ "CYGWIN" || $(uname -a) =~ "MINGW" || $(uname -a) =~ "MSYS" ]]; then WINDOWS_OS_FORMAT=1 - export MSYS2_ARG_CONV_EXCL="-Xlog:gc*:file=;-Dlog4j.configuration=;$MSYS2_ARG_CONV_EXCL" + export MSYS2_ARG_CONV_EXCL="-Xlog:gc*:file=;-Dlog4j2.configurationFile=;$MSYS2_ARG_CONV_EXCL" else WINDOWS_OS_FORMAT=0 fi @@ -116,14 +116,6 @@ else CLASSPATH="$file":"$CLASSPATH" fi done - if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH" - fi - if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH" - fi fi for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar; @@ -228,11 +220,18 @@ fi # Log4j settings if [ -z "$KAFKA_LOG4J_OPTS" ]; then # Log to console. This is a tool. - LOG4J_DIR="$base_dir/config/tools-log4j.properties" + LOG4J_DIR="$base_dir/config/tools-log4j2.yaml" # If Cygwin is detected, LOG4J_DIR is converted to Windows format. (( WINDOWS_OS_FORMAT )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}") - KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}" + KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=${LOG4J_DIR}" else + if echo "$KAFKA_LOG4J_OPTS" | grep -E "log4j\.[^[:space:]]+(\.properties|\.xml)$"; then + # Enable Log4j 1.x configuration compatibility mode for Log4j 2 + export LOG4J_COMPATIBILITY=true + echo DEPRECATED: A Log4j 1.x configuration file has been detected, which is no longer recommended. >&2 + echo To use a Log4j 2.x configuration, please see https://logging.apache.org/log4j/2.x/migrate-from-log4j1.html#Log4j2ConfigurationFormat for details about Log4j configuration file migration. >&2 + echo You can also use the \$KAFKA_HOME/config/tools-log4j2.yaml file as a starting point. Make sure to remove the Log4j 1.x configuration after completing the migration. >&2 + fi # create logs directory if [ ! -d "$LOG_DIR" ]; then mkdir -p "$LOG_DIR" diff --git a/bin/kafka-server-start.sh b/bin/kafka-server-start.sh index 5a53126172de9..b98a8bebe7f85 100755 --- a/bin/kafka-server-start.sh +++ b/bin/kafka-server-start.sh @@ -21,8 +21,8 @@ then fi base_dir=$(dirname $0) -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$base_dir/../config/log4j2.yaml" fi if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then diff --git a/shell/src/test/resources/log4j.properties b/bin/kafka-share-consumer-perf-test.sh old mode 100644 new mode 100755 similarity index 78% rename from shell/src/test/resources/log4j.properties rename to bin/kafka-share-consumer-perf-test.sh index a72a9693de2af..8e490ca02c430 --- a/shell/src/test/resources/log4j.properties +++ b/bin/kafka-share-consumer-perf-test.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -12,8 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-log4j.rootLogger=DEBUG, stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ShareConsumerPerformance "$@" diff --git a/bin/kafka-verifiable-share-consumer.sh b/bin/kafka-verifiable-share-consumer.sh new file mode 100755 index 0000000000000..e30178cebcafa --- /dev/null +++ b/bin/kafka-verifiable-share-consumer.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableShareConsumer "$@" diff --git a/bin/windows/connect-distributed.bat b/bin/windows/connect-distributed.bat index 0535085bde507..396e5baf27801 100644 --- a/bin/windows/connect-distributed.bat +++ b/bin/windows/connect-distributed.bat @@ -27,7 +27,7 @@ popd rem Log4j settings IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties + set KAFKA_LOG4J_OPTS=-Dlog4j2.configurationFile=%BASE_DIR%/config/connect-log4j2.yaml ) "%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %* diff --git a/bin/windows/connect-standalone.bat b/bin/windows/connect-standalone.bat index 12ebb21dc9a85..7ad655d5d39b0 100644 --- a/bin/windows/connect-standalone.bat +++ b/bin/windows/connect-standalone.bat @@ -27,7 +27,7 @@ popd rem Log4j settings IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties + set KAFKA_LOG4J_OPTS=-Dlog4j2.configurationFile=%BASE_DIR%/config/connect-log4j2.yaml ) "%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %* diff --git a/bin/windows/kafka-acls.bat b/bin/windows/kafka-acls.bat index 8f0be85c0455a..12c4a9a69a7e5 100644 --- a/bin/windows/kafka-acls.bat +++ b/bin/windows/kafka-acls.bat @@ -14,4 +14,4 @@ rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. rem See the License for the specific language governing permissions and rem limitations under the License. 
-"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %* +"%~dp0kafka-run-class.bat" org.apache.kafka.tools.AclCommand %* diff --git a/bin/windows/kafka-run-class.bat b/bin/windows/kafka-run-class.bat index a163ccd0a7c08..ca151e5df96ed 100755 --- a/bin/windows/kafka-run-class.bat +++ b/bin/windows/kafka-run-class.bat @@ -116,8 +116,17 @@ IF ["%LOG_DIR%"] EQU [""] ( rem Log4j settings IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties + set KAFKA_LOG4J_OPTS=-Dlog4j2.configurationFile=file:%BASE_DIR%/config/tools-log4j2.yaml ) ELSE ( + rem Check if Log4j 1.x configuration options are present in KAFKA_LOG4J_OPTS + echo %KAFKA_LOG4J_OPTS% | findstr /r /c:"log4j\.[^ ]*(\.properties|\.xml)$" >nul + IF %ERRORLEVEL% == 0 ( + rem Enable Log4j 1.x configuration compatibility mode for Log4j 2 + set LOG4J_COMPATIBILITY=true + echo DEPRECATED: A Log4j 1.x configuration file has been detected, which is no longer recommended. >&2 + echo To use a Log4j 2.x configuration, please see https://logging.apache.org/log4j/2.x/migrate-from-log4j1.html#Log4j2ConfigurationFormat for details about Log4j configuration file migration. >&2 + echo You can also use the %BASE_DIR%/config/tool-log4j2.yaml file as a starting point. Make sure to remove the Log4j 1.x configuration after completing the migration. >&2 + ) rem create logs directory IF not exist "%LOG_DIR%" ( mkdir "%LOG_DIR%" diff --git a/bin/windows/kafka-server-start.bat b/bin/windows/kafka-server-start.bat index 8624eda9ff089..6cf0533bd05b5 100644 --- a/bin/windows/kafka-server-start.bat +++ b/bin/windows/kafka-server-start.bat @@ -21,7 +21,7 @@ IF [%1] EQU [] ( SetLocal IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties + set KAFKA_LOG4J_OPTS=-Dlog4j2.configurationFile=%~dp0../../config/log4j2.yaml ) IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( rem detect OS architecture diff --git a/bin/windows/zookeeper-server-stop.bat b/bin/windows/kafka-share-consumer-perf-test.bat similarity index 85% rename from bin/windows/zookeeper-server-stop.bat rename to bin/windows/kafka-share-consumer-perf-test.bat index 8b57dd8d63069..5bfcb1ed11bcf 100644 --- a/bin/windows/zookeeper-server-stop.bat +++ b/bin/windows/kafka-share-consumer-perf-test.bat @@ -14,4 +14,7 @@ rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. rem See the License for the specific language governing permissions and rem limitations under the License. -wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete +SetLocal +set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M +"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ShareConsumerPerformance %* +EndLocal diff --git a/bin/windows/zookeeper-server-start.bat b/bin/windows/zookeeper-server-start.bat deleted file mode 100644 index f201a585135d2..0000000000000 --- a/bin/windows/zookeeper-server-start.bat +++ /dev/null @@ -1,30 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. 
You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 zookeeper.properties - EXIT /B 1 -) - -SetLocal -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties -) -IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( - set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M -) -"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %* -EndLocal diff --git a/bin/windows/zookeeper-shell.bat b/bin/windows/zookeeper-shell.bat deleted file mode 100644 index f1c86c430c170..0000000000000 --- a/bin/windows/zookeeper-shell.bat +++ /dev/null @@ -1,22 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...] - EXIT /B 1 -) - -"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %* diff --git a/bin/zookeeper-server-stop.sh b/bin/zookeeper-server-stop.sh deleted file mode 100755 index 11665f32707f8..0000000000000 --- a/bin/zookeeper-server-stop.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-SIGNAL=${SIGNAL:-TERM} - -OSNAME=$(uname -s) -if [[ "$OSNAME" == "OS/390" ]]; then - if [ -z $JOBNAME ]; then - JOBNAME="ZKEESTRT" - fi - PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}') -elif [[ "$OSNAME" == "OS400" ]]; then - PIDS=$(ps -Af | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $2}') -else - PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}') -fi - -if [ -z "$PIDS" ]; then - echo "No zookeeper server to stop" - exit 1 -else - kill -s $SIGNAL $PIDS -fi diff --git a/bin/zookeeper-shell.sh b/bin/zookeeper-shell.sh deleted file mode 100755 index 2f1d0f2c61670..0000000000000 --- a/bin/zookeeper-shell.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]" - exit 1 -fi - -exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@" diff --git a/build.gradle b/build.gradle index 20eeb3eff4714..25340f0a9eed5 100644 --- a/build.gradle +++ b/build.gradle @@ -47,7 +47,10 @@ plugins { ext { gradleVersion = versions.gradle - minJavaVersion = 11 + minClientJavaVersion = 11 + minNonClientJavaVersion = 17 + modulesNeedingJava11 = [":clients", ":generator", ":streams", ":streams:test-utils", ":streams-scala", ":test-common:test-common-runtime"] + buildVersionFileName = "kafka-version.properties" defaultMaxHeapSize = "2g" @@ -113,28 +116,48 @@ ext { commitId = determineCommitId() - configureJavaCompiler = { name, options -> + configureJavaCompiler = { name, options, projectPath -> // -parameters generates arguments with parameter names in TestInfo#getDisplayName. // ref: https://github.com/junit-team/junit5/blob/4c0dddad1b96d4a20e92a2cd583954643ac56ac0/junit-jupiter-params/src/main/java/org/junit/jupiter/params/ParameterizedTest.java#L161-L164 - if (name == "compileTestJava" || name == "compileTestScala") { + + def releaseVersion = modulesNeedingJava11.any { projectPath == it } ? 
minClientJavaVersion : minNonClientJavaVersion + + options.compilerArgs << "-encoding" << "UTF-8" + options.release = releaseVersion + + if (name in ["compileTestJava", "compileTestScala"]) { options.compilerArgs << "-parameters" - options.compilerArgs += ["--release", String.valueOf(minJavaVersion)] - } else if (name == "compileJava" || name == "compileScala") { - options.compilerArgs << "-Xlint:all" - if (!project.path.startsWith(":connect") && !project.path.startsWith(":storage")) - options.compilerArgs << "-Xlint:-rawtypes" - options.compilerArgs << "-encoding" << "UTF-8" + } else if (name in ["compileJava", "compileScala"]) { options.compilerArgs << "-Xlint:-rawtypes" + options.compilerArgs << "-Xlint:all" options.compilerArgs << "-Xlint:-serial" options.compilerArgs << "-Xlint:-try" options.compilerArgs << "-Werror" - options.compilerArgs += ["--release", String.valueOf(minJavaVersion)] } } runtimeTestLibs = [ - libs.slf4jReload4j, + libs.slf4jLog4j2, libs.junitPlatformLanucher, + project(":test-common:test-common-runtime") + ] + + log4jRuntimeLibs = [ + libs.slf4jLog4j2, + libs.log4j1Bridge2Api, + libs.jacksonDatabindYaml + ] + + log4j2Libs = [ + libs.log4j2Api, + libs.log4j2Core + ] + + testLog4j2Libs = [ + libs.slf4jApi, + libs.slf4jLog4j2, + libs.log4j2Api, + libs.log4j2Core ] } @@ -171,13 +194,7 @@ allprojects { // ensure we have a single version in the classpath despite transitive dependencies libs.scalaLibrary, libs.scalaReflect, - libs.jacksonAnnotations, - // be explicit about the Netty dependency version instead of relying on the version set by - // ZooKeeper (potentially older and containing CVEs) - libs.nettyHandler, - libs.nettyTransportNativeEpoll, - // be explicit about the reload4j version instead of relying on the transitive versions - libs.reload4j + libs.jacksonAnnotations ) } } @@ -196,6 +213,11 @@ allprojects { options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/" } + tasks.withType(Checkstyle) { + minHeapSize = "200m" + maxHeapSize = "1g" + } + clean { delete "${projectDir}/src/generated" delete "${projectDir}/src/generated-test" @@ -224,7 +246,7 @@ static def projectToJUnitXmlPath(project) { projectNames.push(p.name) p = p.parent if (p.name == "kafka") { - break; + break } } return projectNames.join("/") @@ -320,7 +342,7 @@ subprojects { } tasks.withType(JavaCompile) { - configureJavaCompiler(name, options) + configureJavaCompiler(name, options, project.path) } if (shouldPublish) { @@ -483,6 +505,8 @@ subprojects { // KAFKA-17433 Used by deflake.yml github action to repeat individual tests systemProperty("kafka.cluster.test.repeat", project.findProperty("kafka.cluster.test.repeat")) + systemProperty("kafka.test.catalog.file", project.findProperty("kafka.test.catalog.file")) + systemProperty("kafka.test.run.quarantined", "false") testLogging { events = userTestLoggingEvents ?: testLoggingEvents @@ -537,6 +561,7 @@ subprojects { task quarantinedTest(type: Test, dependsOn: compileJava) { ext { isGithubActions = System.getenv('GITHUB_ACTIONS') != null + hadFailure = false // Used to track if any tests failed, see afterSuite below } // Disable caching and up-to-date for this task. 
We always want quarantined tests @@ -546,13 +571,15 @@ subprojects { outputs.cacheIf { false } maxParallelForks = maxTestForks - ignoreFailures = userIgnoreFailures + ignoreFailures = userIgnoreFailures || ext.isGithubActions maxHeapSize = defaultMaxHeapSize jvmArgs = defaultJvmArgs // KAFKA-17433 Used by deflake.yml github action to repeat individual tests systemProperty("kafka.cluster.test.repeat", project.findProperty("kafka.cluster.test.repeat")) + systemProperty("kafka.test.catalog.file", project.findProperty("kafka.test.catalog.file")) + systemProperty("kafka.test.run.quarantined", "true") testLogging { events = userTestLoggingEvents ?: testLoggingEvents @@ -564,7 +591,6 @@ subprojects { useJUnitPlatform { includeEngines 'junit-jupiter' - includeTags 'flaky' } develocity { @@ -574,6 +600,13 @@ subprojects { } } + // As we process results, check if there were any test failures. + afterSuite { desc, result -> + if (result.resultType == TestResult.ResultType.FAILURE) { + ext.hadFailure = true + } + } + // This closure will copy JUnit XML files out of the sub-project's build directory and into // a top-level build/junit-xml directory. This is necessary to avoid reporting on tests which // were not run, but instead were restored via FROM-CACHE. See KAFKA-17479 for more details. @@ -587,6 +620,11 @@ subprojects { ant.include(name: "**/*.xml") } } + // If there were any test failures, we want to fail the task to prevent the failures + // from being cached. + if (ext.hadFailure) { + throw new GradleException("Failing this task since '${project.name}:${name}' had test failures.") + } } } } @@ -726,7 +764,7 @@ subprojects { } tasks.withType(ScalaCompile) { - + def releaseVersion = modulesNeedingJava11.any { project.path == it } ? minClientJavaVersion : minNonClientJavaVersion scalaCompileOptions.keepAliveMode = userKeepAliveMode scalaCompileOptions.additionalParameters = [ @@ -770,10 +808,14 @@ subprojects { scalaCompileOptions.additionalParameters += ["-opt-warnings", "-Xlint:strict-unsealed-patmat"] // Scala 2.13.2 introduces compiler warnings suppression, which is a pre-requisite for -Xfatal-warnings scalaCompileOptions.additionalParameters += ["-Xfatal-warnings"] + scalaCompileOptions.additionalParameters += ["--release", String.valueOf(releaseVersion)] - scalaCompileOptions.additionalParameters += ["-release", String.valueOf(minJavaVersion)] + // Gradle does not support the `release` configuration when performing joint Java-Scala compilation. + // For more details, refer to https://github.com/gradle/gradle/issues/13762. + // As a result, we need to explicitly configure the Scala compiler with this setting. 
+ options.compilerArgs += ["--release", String.valueOf(releaseVersion)] - configureJavaCompiler(name, options) + configureJavaCompiler(name, options, project.path) configure(scalaCompileOptions.forkOptions) { memoryMaximumSize = defaultMaxHeapSize @@ -932,24 +974,22 @@ project(':server') { implementation project(':group-coordinator') implementation project(':transaction-coordinator') implementation project(':raft') - implementation libs.metrics + implementation project(':share-coordinator') implementation libs.jacksonDatabind - + implementation libs.metrics implementation libs.slf4jApi - compileOnly libs.reload4j - testImplementation project(':clients').sourceSets.test.output testImplementation libs.mockitoCore testImplementation libs.junitJupiter - testImplementation libs.slf4jReload4j + testImplementation testLog4j2Libs - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -969,13 +1009,13 @@ project(':server') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } checkstyle { @@ -987,35 +1027,6 @@ project(':server') { } } -project(':share') { - base { - archivesName = "kafka-share" - } - - dependencies { - implementation project(':server-common') - - testImplementation project(':clients').sourceSets.test.output - testImplementation project(':server-common').sourceSets.test.output - - implementation libs.slf4jApi - - testImplementation libs.junitJupiter - testImplementation libs.mockitoCore - testImplementation libs.slf4jReload4j - - testRuntimeOnly libs.junitPlatformLanucher - } - - checkstyle { - configProperties = checkstyleConfigProperties("import-control-share.xml") - } - - javadoc { - enabled = false - } -} - project(':core') { apply plugin: 'scala' @@ -1033,12 +1044,29 @@ project(':core') { archivesName = "kafka_${versions.baseScala}" } + configurations { + // manually excludes some unnecessary dependencies + implementation.exclude module: 'javax' + implementation.exclude module: 'jline' + implementation.exclude module: 'jms' + implementation.exclude module: 'jmxri' + implementation.exclude module: 'jmxtools' + implementation.exclude module: 'mail' + // To prevent a UniqueResourceException due the same resource existing in both + // org.apache.directory.api/api-all and org.apache.directory.api/api-ldap-schema-data + testImplementation.exclude module: 'api-ldap-schema-data' + releaseOnly + } + dependencies { + releaseOnly log4jRuntimeLibs // `core` is often used in users' tests, define the following dependencies as `api` for backwards compatibility // even though the `core` module doesn't expose any public API api project(':clients') api libs.scalaLibrary + compileOnly log4j2Libs + implementation project(':server-common') implementation project(':group-coordinator:group-coordinator-api') implementation project(':group-coordinator') @@ -1050,7 +1078,6 @@ project(':core') { implementation project(':storage') implementation project(':server') implementation project(':coordinator-common') - implementation project(':share') implementation project(':share-coordinator') 
implementation libs.argparse4j @@ -1066,22 +1093,6 @@ project(':core') { implementation libs.scalaReflect implementation libs.scalaLogging implementation libs.slf4jApi - implementation libs.commonsIo // ZooKeeper dependency. Do not use, this is going away. - implementation(libs.zookeeper) { - // Dropwizard Metrics are required by ZooKeeper as of v3.6.0, - // but the library should *not* be used in Kafka code - implementation libs.dropwizardMetrics - exclude module: 'slf4j-log4j12' - exclude module: 'log4j' - // Both Kafka and Zookeeper use slf4j. ZooKeeper moved from log4j to logback in v3.8.0, but Kafka relies on reload4j. - // We are removing Zookeeper's dependency on logback so we have a singular logging backend. - exclude module: 'logback-classic' - exclude module: 'logback-core' - } - // ZooKeeperMain depends on commons-cli but declares the dependency as `provided` - implementation libs.commonsCli - - compileOnly libs.reload4j testImplementation project(':clients').sourceSets.test.output testImplementation project(':group-coordinator').sourceSets.test.output @@ -1091,7 +1102,6 @@ project(':core') { testImplementation project(':server-common').sourceSets.test.output testImplementation project(':storage:storage-api').sourceSets.test.output testImplementation project(':server').sourceSets.test.output - testImplementation project(':share').sourceSets.test.output testImplementation project(':test-common') testImplementation project(':test-common:test-common-api') testImplementation libs.bcpkix @@ -1111,10 +1121,10 @@ project(':core') { testImplementation libs.apachedsMavibotPartition testImplementation libs.apachedsJdbmPartition testImplementation libs.junitJupiter - testImplementation libs.slf4jReload4j testImplementation libs.caffeine + testImplementation testLog4j2Libs - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs } if (userEnableTestCoverage) { @@ -1123,33 +1133,18 @@ project(':core') { if (versions.baseScala == '2.13') { scoverageScalaVersion = '2.13.9' // there's no newer 2.13 artifact, org.scoverage:scalac-scoverage-plugin_2.13.9:2.0.11 is the latest as of now } - reportDir = file("${rootProject.buildDir}/scoverage") + reportDir = file("${layout.buildDirectory.get().asFile.path}/scoverage") highlighting = false minimumRate = 0.0 } } - configurations { - // manually excludes some unnecessary dependencies - implementation.exclude module: 'javax' - implementation.exclude module: 'jline' - implementation.exclude module: 'jms' - implementation.exclude module: 'jmxri' - implementation.exclude module: 'jmxtools' - implementation.exclude module: 'mail' - // To prevent a UniqueResourceException due the same resource existing in both - // org.apache.directory.api/api-all and org.apache.directory.api/api-ldap-schema-data - testImplementation.exclude module: 'api-ldap-schema-data' - } - tasks.create(name: "copyDependantLibs", type: Copy) { - from (configurations.compileClasspath) { - include('reload4j*jar') - } from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "$buildDir/dependant-libs-${versions.scala}" + from (configurations.releaseOnly) + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -1258,6 +1253,7 @@ project(':core') { from "$rootDir/NOTICE-binary" rename {String filename -> filename.replace("-binary", "")} from(configurations.runtimeClasspath) { into("libs/") } from(configurations.archives.artifacts.files) { into("libs/") } + from(configurations.releaseOnly) { 
into("libs/") } from(project.siteDocsTar) { into("site-docs/") } from(project(':tools').jar) { into("libs/") } from(project(':tools').configurations.runtimeClasspath) { into("libs/") } @@ -1308,10 +1304,10 @@ project(':core') { from (configurations.testRuntimeClasspath) { include('*.jar') } - into "$buildDir/dependant-testlibs" + into "${layout.buildDirectory.get().asFile.path}/dependant-testlibs" //By default gradle does not handle test dependencies between the sub-projects //This line is to include clients project test jar to dependant-testlibs - from (project(':clients').testJar ) { "$buildDir/dependant-testlibs" } + from (project(':clients').testJar ) { "${layout.buildDirectory.get().asFile.path}/dependant-testlibs" } duplicatesStrategy 'exclude' } @@ -1358,16 +1354,17 @@ project(':metadata') { implementation libs.jacksonDatabind implementation libs.jacksonJDK8Datatypes implementation libs.metrics - compileOnly libs.reload4j + implementation libs.slf4jApi + + testImplementation testLog4j2Libs testImplementation libs.junitJupiter testImplementation libs.jqwik testImplementation libs.mockitoCore - testImplementation libs.slf4jReload4j testImplementation project(':clients').sourceSets.test.output testImplementation project(':raft').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs generator project(':generator') } @@ -1423,7 +1420,7 @@ project(':group-coordinator:group-coordinator-api') { } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -1443,13 +1440,13 @@ project(':group-coordinator:group-coordinator-api') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } javadoc { @@ -1479,16 +1476,18 @@ project(':group-coordinator') { implementation project(':coordinator-common') implementation libs.jacksonDatabind implementation libs.jacksonJDK8Datatypes - implementation libs.slf4jApi implementation libs.metrics implementation libs.hdrHistogram implementation libs.re2j + implementation libs.slf4jApi testImplementation project(':clients').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output testImplementation project(':coordinator-common').sourceSets.test.output + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs @@ -1522,7 +1521,8 @@ project(':group-coordinator') { args = [ "-p", "org.apache.kafka.coordinator.group.generated", "-o", "${projectDir}/build/generated/main/java/org/apache/kafka/coordinator/group/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator", "JsonConverterGenerator" + "-m", "MessageDataGenerator", "JsonConverterGenerator", + "-t", "CoordinatorRecordTypeGenerator", "CoordinatorRecordJsonConvertersGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1536,6 +1536,7 @@ project(':group-coordinator') { } project(':test-common') { + // Test framework stuff. 
Implementations that support test-common-api base { archivesName = "kafka-test-common" } @@ -1547,10 +1548,13 @@ project(':test-common') { implementation project(':raft') implementation project(':storage') implementation project(':server-common') + implementation libs.jacksonDatabindYaml implementation libs.slf4jApi + testImplementation libs.junitJupiter testImplementation libs.mockitoCore - + testImplementation testLog4j2Libs + testRuntimeOnly runtimeTestLibs } @@ -1564,11 +1568,11 @@ project(':test-common') { } project(':test-common:test-common-api') { + // Interfaces, config classes, and other test APIs base { archivesName = "kafka-test-common-api" } - dependencies { implementation project(':clients') implementation project(':core') @@ -1583,6 +1587,7 @@ project(':test-common:test-common-api') { testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -1596,6 +1601,29 @@ project(':test-common:test-common-api') { } } +project(':test-common:test-common-runtime') { + // Runtime-only test code including JUnit extentions + base { + archivesName = "kafka-test-common-runtime" + } + + dependencies { + implementation libs.junitPlatformLanucher + implementation libs.junitJupiterApi + implementation libs.junitJupiter + implementation libs.slf4jApi + testImplementation testLog4j2Libs + } + + checkstyle { + configProperties = checkstyleConfigProperties("import-control-test-common-api.xml") + } + + javadoc { + enabled = false + } +} + project(':transaction-coordinator') { base { archivesName = "kafka-transaction-coordinator" @@ -1609,13 +1637,16 @@ project(':transaction-coordinator') { implementation libs.jacksonDatabind implementation project(':clients') implementation project(':server-common') + implementation project(':coordinator-common') implementation libs.slf4jApi + testImplementation testLog4j2Libs testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation project(':clients').sourceSets.test.output testImplementation project(':test-common') testImplementation project(':test-common:test-common-api') + testRuntimeOnly runtimeTestLibs generator project(':generator') @@ -1679,6 +1710,7 @@ project(':coordinator-common') { testImplementation project(':server-common').sourceSets.test.output testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -1706,17 +1738,16 @@ project(':share-coordinator') { implementation project(':clients') implementation project(':coordinator-common') implementation project(':metadata') - implementation project(':server') implementation project(':server-common') - implementation project(':share') - implementation libs.slf4jApi implementation libs.metrics + implementation libs.slf4jApi testImplementation project(':clients').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output testImplementation project(':coordinator-common').sourceSets.test.output testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs @@ -1746,7 +1777,8 @@ project(':share-coordinator') { args = [ "-p", "org.apache.kafka.coordinator.share.generated", "-o", "${projectDir}/build/generated/main/java/org/apache/kafka/coordinator/share/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator", "JsonConverterGenerator" + "-m", "MessageDataGenerator", 
"JsonConverterGenerator", + "-t", "CoordinatorRecordTypeGenerator", "CoordinatorRecordJsonConvertersGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1786,10 +1818,17 @@ project(':generator') { implementation libs.argparse4j implementation libs.jacksonDatabind implementation libs.jacksonJDK8Datatypes - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider + + implementation 'org.eclipse.jgit:org.eclipse.jgit:6.4.0.202211300538-r' + // SSH support for JGit based on Apache MINA sshd + implementation 'org.eclipse.jgit:org.eclipse.jgit.ssh.apache:6.4.0.202211300538-r' + // GPG support for JGit based on BouncyCastle (commit signing) + implementation 'org.eclipse.jgit:org.eclipse.jgit.gpg.bc:6.4.0.202211300538-r' + testImplementation libs.junitJupiter - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs } javadoc { @@ -1811,9 +1850,9 @@ project(':clients') { implementation libs.zstd implementation libs.lz4 implementation libs.snappy - implementation libs.slf4jApi implementation libs.opentelemetryProto implementation libs.protobuf + implementation libs.slf4jApi // libraries which should be added as runtime dependencies in generated pom.xml should be defined here: shadowed libs.zstd @@ -1825,23 +1864,28 @@ project(':clients') { compileOnly libs.jacksonJDK8Datatypes compileOnly libs.jose4j // for SASL/OAUTHBEARER JWT validation; only used by broker + testImplementation libs.bcpkix - testImplementation libs.jacksonJaxrsJsonProvider + testImplementation libs.jacksonJakartarsJsonProvider testImplementation libs.jose4j testImplementation libs.junitJupiter - testImplementation libs.reload4j + testImplementation libs.spotbugs testImplementation libs.mockitoCore testImplementation libs.mockitoJunitJupiter // supports MockitoExtension + testImplementation testLog4j2Libs + + testCompileOnly libs.bndlib testRuntimeOnly libs.jacksonDatabind testRuntimeOnly libs.jacksonJDK8Datatypes testRuntimeOnly runtimeTestLibs + testRuntimeOnly log4jRuntimeLibs generator project(':generator') } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -1881,7 +1925,7 @@ project(':clients') { exclude "**/google/protobuf/*.proto" } - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } @@ -1895,7 +1939,7 @@ project(':clients') { } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } task processMessages(type:JavaExec) { @@ -1988,16 +2032,18 @@ project(':raft') { dependencies { implementation project(':server-common') implementation project(':clients') - implementation libs.slf4jApi implementation libs.jacksonDatabind + implementation libs.slf4jApi testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output testImplementation project(':clients') testImplementation project(':clients').sourceSets.test.output + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation libs.jqwik + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs @@ -2005,7 +2051,7 @@ project(':raft') { } task createVersionFile() { - def receiptFile = 
file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2055,7 +2101,7 @@ project(':raft') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } @@ -2067,7 +2113,7 @@ project(':raft') { } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } javadoc { @@ -2082,23 +2128,24 @@ project(':server-common') { dependencies { api project(':clients') - implementation libs.slf4jApi implementation libs.metrics implementation libs.joptSimple implementation libs.jacksonDatabind implementation libs.pcollections + implementation libs.slf4jApi testImplementation project(':clients') testImplementation project(':clients').sourceSets.test.output + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs - testRuntimeOnly project(":test-common") } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2118,13 +2165,13 @@ project(':server-common') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } checkstyle { @@ -2151,12 +2198,13 @@ project(':storage:storage-api') { testImplementation project(':clients').sourceSets.test.output testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2176,13 +2224,13 @@ project(':storage:storage-api') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } javadoc { @@ -2207,7 +2255,6 @@ project(':storage') { implementation project(':storage:storage-api') implementation project(':server-common') implementation project(':clients') - implementation project(':transaction-coordinator') implementation(libs.caffeine) { exclude group: 'org.checkerframework', module: 'checker-qual' } @@ -2224,9 +2271,11 @@ project(':storage') { testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output testImplementation libs.hamcrest + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation libs.bcpkix + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs @@ -2234,7 +2283,7 @@ project(':storage') { } task createVersionFile() { - def receiptFile = 
file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2299,13 +2348,13 @@ project(':storage') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } javadoc { @@ -2325,11 +2374,12 @@ project(':tools:tools-api') { dependencies { implementation project(':clients') testImplementation libs.junitJupiter - testRuntimeOnly libs.junitPlatformLanucher + testImplementation testLog4j2Libs + testRuntimeOnly runtimeTestLibs } task createVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2349,13 +2399,13 @@ project(':tools:tools-api') { jar { dependsOn createVersionFile - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "$buildDir/kafka/" + delete "${layout.buildDirectory.get().asFile.path}/kafka/" } javadoc { @@ -2368,7 +2418,13 @@ project(':tools') { archivesName = "kafka-tools" } + configurations { + releaseOnly + } + dependencies { + releaseOnly log4jRuntimeLibs + implementation project(':clients') implementation project(':metadata') implementation project(':storage') @@ -2380,17 +2436,18 @@ project(':tools') { implementation project(':group-coordinator') implementation project(':coordinator-common') implementation project(':share-coordinator') - implementation project(':share') implementation libs.argparse4j implementation libs.jacksonDatabind implementation libs.jacksonDataformatCsv implementation libs.jacksonJDK8Datatypes - implementation libs.slf4jApi - implementation libs.slf4jReload4j implementation libs.joptSimple + implementation libs.slf4jApi + implementation libs.re2j implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider + + compileOnly libs.spotbugs testImplementation project(':clients') testImplementation project(':clients').sourceSets.test.output @@ -2417,17 +2474,16 @@ project(':tools') { testImplementation(libs.jfreechart) { exclude group: 'junit', module: 'junit' } - testImplementation libs.reload4j testImplementation libs.apachedsCoreApi testImplementation libs.apachedsInterceptorKerberos testImplementation libs.apachedsProtocolShared testImplementation libs.apachedsProtocolKerberos testImplementation libs.apachedsProtocolLdap testImplementation libs.apachedsLdifPartition + testImplementation testLog4j2Libs - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs testRuntimeOnly libs.hamcrest - testRuntimeOnly project(':test-common') } javadoc { @@ -2438,7 +2494,8 @@ project(':tools') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "$buildDir/dependant-libs-${versions.scala}" + from (configurations.releaseOnly) + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2458,23 +2515,34 @@ project(':trogdor') { implementation libs.jacksonDatabind 
implementation libs.jacksonJDK8Datatypes implementation libs.slf4jApi - runtimeOnly libs.reload4j - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + + implementation project(':group-coordinator') + implementation project(':group-coordinator:group-coordinator-api') testImplementation project(':clients') - testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output + testImplementation project(':group-coordinator') + testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs + testRuntimeOnly libs.junitPlatformLanucher } javadoc { @@ -2485,7 +2553,7 @@ project(':trogdor') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2512,14 +2580,16 @@ project(':shell') { implementation project(':raft') implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider testImplementation project(':clients') testImplementation project(':clients').sourceSets.test.output testImplementation project(':core') testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -2532,7 +2602,7 @@ project(':shell') { from (configurations.runtimeClasspath) { include('jline-*jar') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2557,26 +2627,27 @@ project(':streams') { // `org.rocksdb.Options` is part of Kafka Streams public api via `RocksDBConfigSetter` api libs.rocksDBJni - implementation libs.slf4jApi implementation libs.jacksonAnnotations implementation libs.jacksonDatabind + implementation libs.slf4jApi // testCompileOnly prevents streams from exporting a dependency on test-utils, which would cause a dependency cycle testCompileOnly project(':streams:test-utils') + testCompileOnly libs.bndlib - testImplementation project(':metadata') testImplementation project(':clients').sourceSets.test.output - testImplementation libs.reload4j + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.bcpkix testImplementation libs.hamcrest testImplementation libs.mockitoCore testImplementation libs.mockitoJunitJupiter // supports MockitoExtension testImplementation libs.junitPlatformSuiteEngine // supports suite test - testImplementation project(':group-coordinator') + 
testImplementation testLog4j2Libs testRuntimeOnly project(':streams:test-utils') testRuntimeOnly runtimeTestLibs + testRuntimeOnly log4jRuntimeLibs generator project(':generator') } @@ -2621,12 +2692,12 @@ project(':streams') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } task createStreamsVersionFile() { - def receiptFile = file("$buildDir/kafka/$buildStreamsVersionFileName") + def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildStreamsVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2645,7 +2716,7 @@ project(':streams') { jar { dependsOn 'createStreamsVersionFile' - from("$buildDir") { + from("${layout.buildDirectory.get().asFile.path}") { include "kafka/$buildStreamsVersionFileName" } dependsOn 'copyDependantLibs' @@ -2665,12 +2736,9 @@ project(':streams') { task testAll( dependsOn: [ ':streams:test', - ':streams:integration-tests', + ':streams:integration-tests:test', ':streams:test-utils:test', ':streams:streams-scala:test', - ':streams:upgrade-system-tests-0100:test', - ':streams:upgrade-system-tests-0101:test', - ':streams:upgrade-system-tests-0102:test', ':streams:upgrade-system-tests-0110:test', ':streams:upgrade-system-tests-10:test', ':streams:upgrade-system-tests-11:test', @@ -2692,6 +2760,7 @@ project(':streams') { ':streams:upgrade-system-tests-36:test', ':streams:upgrade-system-tests-37:test', ':streams:upgrade-system-tests-38:test', + ':streams:upgrade-system-tests-39:test', ':streams:examples:test' ] ) @@ -2712,8 +2781,11 @@ project(':streams:streams-scala') { testImplementation project(':clients').sourceSets.test.output testImplementation project(':streams:test-utils') + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoJunitJupiter // supports MockitoExtension + testImplementation testLog4j2Libs + testRuntimeOnly runtimeTestLibs } @@ -2729,7 +2801,7 @@ project(':streams:streams-scala') { from (configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2755,6 +2827,9 @@ project(':streams:integration-tests') { } dependencies { + implementation libs.slf4jApi + implementation libs.scalaLibrary + testImplementation project(':clients').sourceSets.test.output testImplementation project(':group-coordinator') testImplementation project(':server') @@ -2771,8 +2846,7 @@ project(':streams:integration-tests') { testImplementation libs.junitJupiter testImplementation libs.junitPlatformSuiteEngine // supports suite test testImplementation libs.mockitoCore - testImplementation libs.reload4j - testImplementation libs.slf4jApi + testImplementation testLog4j2Libs testImplementation project(':streams:test-utils') testRuntimeOnly runtimeTestLibs @@ -2811,9 +2885,11 @@ project(':streams:test-utils') { implementation libs.slf4jApi testImplementation project(':clients').sourceSets.test.output + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation libs.hamcrest + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -2822,7 +2898,7 @@ project(':streams:test-utils') { from 
(configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2839,21 +2915,17 @@ project(':streams:examples') { dependencies { // this dependency should be removed after we unify data API - implementation(project(':connect:json')) { - // this transitive dependency is not used in Streams, and it breaks SBT builds - exclude module: 'javax.ws.rs-api' - } - + implementation(project(':connect:json')) implementation project(':streams') - - implementation libs.slf4jReload4j + implementation libs.slf4jApi testImplementation project(':streams:test-utils') testImplementation project(':clients').sourceSets.test.output // for org.apache.kafka.test.IntegrationTest testImplementation libs.junitJupiter testImplementation libs.hamcrest + testImplementation testLog4j2Libs - testRuntimeOnly libs.junitPlatformLanucher + testRuntimeOnly runtimeTestLibs } javadoc { @@ -2864,7 +2936,7 @@ project(':streams:examples') { from (configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "$buildDir/dependant-libs-${versions.scala}" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2873,57 +2945,6 @@ project(':streams:examples') { } } -project(':streams:upgrade-system-tests-0100') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0100" - } - - dependencies { - testImplementation(libs.kafkaStreams_0100) { - exclude group: 'org.slf4j', module: 'slf4j-log4j12' - exclude group: 'log4j', module: 'log4j' - } - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-0101') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0101" - } - - dependencies { - testImplementation(libs.kafkaStreams_0101) { - exclude group: 'org.slf4j', module: 'slf4j-log4j12' - exclude group: 'log4j', module: 'log4j' - } - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-0102') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0102" - } - - dependencies { - testImplementation libs.kafkaStreams_0102 - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - project(':streams:upgrade-system-tests-0110') { base{ archivesName = "kafka-streams-upgrade-system-tests-0110" @@ -3239,6 +3260,21 @@ project(':streams:upgrade-system-tests-38') { } } +project(':streams:upgrade-system-tests-39') { + base { + archivesName = "kafka-streams-upgrade-system-tests-39" + } + + dependencies { + testImplementation libs.kafkaStreams_39 + testRuntimeOnly libs.junitJupiter + } + + systemTestLibs { + dependsOn testJar + } +} + project(':jmh-benchmarks') { apply plugin: 'io.github.goooler.shadow' @@ -3262,6 +3298,7 @@ project(':jmh-benchmarks') { implementation project(':metadata') implementation project(':storage') implementation project(':streams') + implementation project(':transaction-coordinator') implementation project(':core') implementation project(':connect:api') implementation project(':connect:transforms') @@ -3276,8 +3313,8 @@ project(':jmh-benchmarks') { implementation libs.jacksonDatabind implementation libs.metrics implementation libs.mockitoCore - implementation libs.slf4jReload4j implementation libs.scalaLibrary + implementation libs.slf4jApi } tasks.withType(JavaCompile) { @@ -3319,12 
+3356,12 @@ project(':connect:api') { dependencies { api project(':clients') + implementation libs.jakartaRsApi implementation libs.slf4jApi - runtimeOnly libs.reload4j - implementation libs.jaxrsApi testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -3338,7 +3375,7 @@ project(':connect:api') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3356,11 +3393,10 @@ project(':connect:transforms') { api project(':connect:api') implementation libs.slf4jApi - runtimeOnly libs.reload4j testImplementation libs.junitJupiter - testImplementation project(':clients').sourceSets.test.output + testImplementation testLog4j2Libs testRuntimeOnly runtimeTestLibs } @@ -3374,7 +3410,7 @@ project(':connect:transforms') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3393,13 +3429,12 @@ project(':connect:json') { api libs.jacksonDatabind api libs.jacksonJDK8Datatypes - api libs.jacksonAfterburner + api libs.jacksonBlackbird implementation libs.slf4jApi - runtimeOnly libs.reload4j testImplementation libs.junitJupiter - + testImplementation testLog4j2Libs testImplementation project(':clients').sourceSets.test.output testRuntimeOnly runtimeTestLibs @@ -3414,7 +3449,7 @@ project(':connect:json') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3440,26 +3475,39 @@ project(':connect:runtime') { api project(':connect:json') api project(':connect:transforms') + compileOnly log4j2Libs + implementation libs.slf4jApi - implementation libs.reload4j implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation implementation libs.jacksonAnnotations - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets - implementation libs.jettyClient + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyClient) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation libs.classgraph implementation libs.mavenArtifact implementation libs.swaggerAnnotations + compileOnly libs.bndlib + compileOnly libs.spotbugs + // We use this library to generate OpenAPI docs for the REST API, but we don't want or need it at compile // or run time. 
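The REST-related modules move from the javax.ws.rs stack (jaxrsApi, jacksonJaxrsJsonProvider) to the Jakarta equivalents (jakartaRsApi, jacksonJakartarsJsonProvider). For resource classes that is essentially a package rename; a minimal, hypothetical resource under the new namespace (not taken from the Connect codebase) looks like:

import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;

// Illustrative Jakarta REST resource; the javax.ws.rs imports it replaces are gone after this change.
@Path("/status")
public class StatusResource {
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    public String status() {
        return "{\"state\":\"RUNNING\"}";
    }
}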
So, we add it to a separate configuration, which we use later on during docs generation - swagger libs.swaggerJaxrs2 + swagger libs.jakartaServletApi + swagger libs.jaxrs2Jakarta testImplementation project(':clients').sourceSets.test.output testImplementation project(':core') @@ -3475,10 +3523,14 @@ project(':connect:runtime') { testImplementation project(':server-common').sourceSets.test.output testImplementation project(':test-common:test-common-api') + testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation libs.mockitoJunitJupiter testImplementation libs.httpclient + testImplementation testLog4j2Libs + + testCompileOnly libs.bndlib testRuntimeOnly libs.bcpkix testRuntimeOnly runtimeTestLibs @@ -3493,7 +3545,7 @@ project(':connect:runtime') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3545,7 +3597,7 @@ project(':connect:runtime') { task setVersionInOpenAPISpec(type: Copy) { from "$rootDir/gradle/openapi.template" - into "$buildDir/resources/docs" + into "${layout.buildDirectory.get().asFile.path}/resources/docs" rename ('openapi.template', 'openapi.yaml') expand(kafkaVersion: "$rootProject.version") } @@ -3558,7 +3610,7 @@ project(':connect:runtime') { outputFormat = 'YAML' prettyPrint = 'TRUE' sortOutput = 'TRUE' - openApiFile = file("$buildDir/resources/docs/openapi.yaml") + openApiFile = file("${layout.buildDirectory.get().asFile.path}/resources/docs/openapi.yaml") resourcePackages = ['org.apache.kafka.connect.runtime.rest.resources'] if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() } outputDir = file(generatedDocsDir) @@ -3574,10 +3626,11 @@ project(':connect:file') { dependencies { implementation project(':connect:api') implementation libs.slf4jApi - runtimeOnly libs.reload4j + testImplementation testLog4j2Libs testImplementation libs.junitJupiter testImplementation libs.mockitoCore + testImplementation project(':clients').sourceSets.test.output testImplementation project(':connect:runtime') testImplementation project(':connect:runtime').sourceSets.test.output @@ -3597,7 +3650,7 @@ project(':connect:file') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3613,14 +3666,15 @@ project(':connect:basic-auth-extension') { dependencies { implementation project(':connect:api') + implementation libs.slf4jApi - runtimeOnly libs.reload4j - implementation libs.jaxrsApi + implementation libs.jakartaRsApi implementation libs.jaxAnnotationApi testImplementation libs.bcpkix testImplementation libs.mockitoCore testImplementation libs.junitJupiter + testImplementation testLog4j2Libs testImplementation project(':clients').sourceSets.test.output testRuntimeOnly libs.jerseyContainerServlet @@ -3636,7 +3690,7 @@ project(':connect:basic-auth-extension') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3657,23 +3711,30 @@ project(':connect:mirror') { implementation project(':clients') implementation libs.argparse4j - implementation libs.jacksonAnnotations implementation libs.slf4jApi - runtimeOnly libs.reload4j implementation libs.jacksonAnnotations - implementation libs.jacksonJaxrsJsonProvider + 
implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets - implementation libs.jettyClient + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyClient) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation libs.swaggerAnnotations + testImplementation testLog4j2Libs testImplementation libs.junitJupiter - testImplementation libs.reload4j + testImplementation libs.bndlib testImplementation libs.mockitoCore testImplementation project(':clients').sourceSets.test.output testImplementation project(':connect:runtime').sourceSets.test.output @@ -3682,6 +3743,7 @@ project(':connect:mirror') { testImplementation project(':server') testImplementation project(':server-common').sourceSets.test.output + testRuntimeOnly project(':connect:runtime') testRuntimeOnly libs.bcpkix testRuntimeOnly runtimeTestLibs @@ -3696,7 +3758,7 @@ project(':connect:mirror') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3741,8 +3803,8 @@ project(':connect:mirror-client') { dependencies { implementation project(':clients') implementation libs.slf4jApi - runtimeOnly libs.reload4j + testImplementation testLog4j2Libs testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output @@ -3758,7 +3820,7 @@ project(':connect:mirror-client') { exclude('kafka-clients*') exclude('connect-*') } - into "$buildDir/dependant-libs" + into "${layout.buildDirectory.get().asFile.path}/dependant-libs" duplicatesStrategy 'exclude' } @@ -3778,6 +3840,8 @@ project(':connect:test-plugins') { implementation project(':server-common') implementation libs.slf4jApi implementation libs.jacksonDatabind + + testImplementation testLog4j2Libs } } diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 6b8aedb6e4700..fa1d5873a2cbc 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -39,7 +39,9 @@ - + + + diff --git a/checkstyle/import-control-core.xml b/checkstyle/import-control-core.xml index a8dc78160e3e1..3cfd0ce663cc1 100644 --- a/checkstyle/import-control-core.xml +++ b/checkstyle/import-control-core.xml @@ -114,7 +114,14 @@ - + + + + + + + + @@ -136,7 +143,7 @@ - + @@ -146,6 +153,14 @@ + + + + + + + + diff --git a/checkstyle/import-control-jmh-benchmarks.xml b/checkstyle/import-control-jmh-benchmarks.xml index 6840d786926d3..4469ccf3bbeb9 100644 --- a/checkstyle/import-control-jmh-benchmarks.xml +++ b/checkstyle/import-control-jmh-benchmarks.xml @@ -58,6 +58,7 @@ + diff --git a/checkstyle/import-control-server-common.xml b/checkstyle/import-control-server-common.xml index c576ea76f9a6c..ac8706686617c 100644 --- a/checkstyle/import-control-server-common.xml +++ b/checkstyle/import-control-server-common.xml @@ -98,6 +98,15 @@ + + + + + + + + + diff --git a/checkstyle/import-control-share.xml b/checkstyle/import-control-share.xml 
deleted file mode 100644 index 8abc1b9f7b779..0000000000000 --- a/checkstyle/import-control-share.xml +++ /dev/null @@ -1,62 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/checkstyle/import-control-storage.xml b/checkstyle/import-control-storage.xml index 2792ad844ee77..fecc55f98ea11 100644 --- a/checkstyle/import-control-storage.xml +++ b/checkstyle/import-control-storage.xml @@ -44,6 +44,7 @@ + diff --git a/checkstyle/import-control-test-common.xml b/checkstyle/import-control-test-common.xml index 9fe7f4d4844da..9520c0b21b86e 100644 --- a/checkstyle/import-control-test-common.xml +++ b/checkstyle/import-control-test-common.xml @@ -24,4 +24,5 @@ + diff --git a/checkstyle/import-control-transaction-coordinator.xml b/checkstyle/import-control-transaction-coordinator.xml index 4a61407cfa988..f4045907257a3 100644 --- a/checkstyle/import-control-transaction-coordinator.xml +++ b/checkstyle/import-control-transaction-coordinator.xml @@ -34,6 +34,7 @@ + diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 9465f016c8a6a..921db8162e33a 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -201,7 +201,7 @@ - + @@ -225,6 +225,7 @@ + @@ -289,6 +290,8 @@ + + @@ -307,6 +310,10 @@ + + + + @@ -330,6 +337,7 @@ + @@ -358,8 +366,8 @@ - - + + @@ -368,6 +376,7 @@ + @@ -378,6 +387,7 @@ + @@ -387,7 +397,7 @@ - + @@ -415,7 +425,6 @@ - @@ -442,7 +451,6 @@ - @@ -525,7 +533,7 @@ - + @@ -549,10 +557,11 @@ + - - + + @@ -562,29 +571,30 @@ + - + - - + + - + + - - + @@ -616,8 +626,8 @@ - - + + @@ -629,7 +639,7 @@ - + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index 819b53ddc4a6b..ebff3f9bc8e8a 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -47,6 +47,7 @@ + @@ -57,7 +58,7 @@ + files="(AbstractFetch|Sender|SenderTest|ConsumerCoordinator|KafkaConsumer|KafkaProducer|Utils|TransactionManager|TransactionManagerTest|KafkaAdminClient|NetworkClient|Admin|RaftClientTestContext|TestingMetricsInterceptingAdminClient).java"/> + files="(NetworkClient|FieldSpec|KafkaProducer).java"/> + files="(KafkaConsumer|ConsumerCoordinator|AbstractFetch|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient|MockAdminClient|KafkaNetworkChannelTest).java"/> @@ -101,10 +102,10 @@ files="(AbstractFetch|ClientTelemetryReporter|ConsumerCoordinator|CommitRequestManager|FetchCollector|OffsetFetcherUtils|KafkaProducer|Sender|ConfigDef|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager|AbstractStickyAssignor|DefaultSslEngineFactory|Authorizer|RecordAccumulator|MemoryRecords|FetchSessionHandler|MockAdminClient).java"/> + files="(AbstractRequest|AbstractResponse|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest|KafkaAdminClientTest).java"/> + files="(ConsumerCoordinator|BufferPool|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer|AbstractStickyAssignor|Authorizer|FetchSessionHandler|RecordAccumulator|Shell).java"/> @@ -119,7 +120,7 @@ files="(Sender|Fetcher|FetchRequestManager|OffsetFetcher|KafkaConsumer|Metrics|RequestResponse|TransactionManager|KafkaAdminClient|Message|KafkaProducer)Test.java"/> + 
files="(ConsumerCoordinator|KafkaConsumer|RequestResponse|Fetcher|FetchRequestManager|KafkaAdminClient|Message|KafkaProducer|NetworkClient)Test.java"/> @@ -186,6 +187,9 @@ + + @@ -268,11 +272,11 @@ + files="(AclCommand|ConsoleConsumer|DefaultMessageFormatter|StreamsResetter|ProducerPerformance|Agent).java"/> + files="(AclCommand|DefaultMessageFormatter|ProducerPerformance|StreamsResetter|Agent|TransactionalMessageCopier|ReplicaVerificationTool|LineMessageReader).java"/> + + files="(ConfigurationControlManager|PartitionRegistration|PartitionChangeBuilder|ScramParser).java"/> - + + diff --git a/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java index 64341e1054f7f..e00755e994542 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java +++ b/clients/src/main/java/org/apache/kafka/clients/ApiVersions.java @@ -16,13 +16,8 @@ */ package org.apache.kafka.clients; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.requests.ProduceRequest; - import java.util.HashMap; import java.util.Map; -import java.util.Optional; /** * Maintains node api versions for access outside of NetworkClient (which is where the information is derived). @@ -33,7 +28,6 @@ public class ApiVersions { private final Map nodeApiVersions = new HashMap<>(); - private byte maxUsableProduceMagic = RecordBatch.CURRENT_MAGIC_VALUE; // The maximum finalized feature epoch of all the node api versions. private long maxFinalizedFeaturesEpoch = -1; @@ -50,7 +44,6 @@ public static class FinalizedFeaturesInfo { public synchronized void update(String nodeId, NodeApiVersions nodeApiVersions) { this.nodeApiVersions.put(nodeId, nodeApiVersions); - this.maxUsableProduceMagic = computeMaxUsableProduceMagic(); if (maxFinalizedFeaturesEpoch < nodeApiVersions.finalizedFeaturesEpoch()) { this.maxFinalizedFeaturesEpoch = nodeApiVersions.finalizedFeaturesEpoch(); this.finalizedFeatures = nodeApiVersions.finalizedFeatures(); @@ -59,7 +52,6 @@ public synchronized void update(String nodeId, NodeApiVersions nodeApiVersions) public synchronized void remove(String nodeId) { this.nodeApiVersions.remove(nodeId); - this.maxUsableProduceMagic = computeMaxUsableProduceMagic(); } public synchronized NodeApiVersions get(String nodeId) { @@ -74,19 +66,4 @@ public synchronized FinalizedFeaturesInfo getFinalizedFeaturesInfo() { return new FinalizedFeaturesInfo(maxFinalizedFeaturesEpoch, finalizedFeatures); } - private byte computeMaxUsableProduceMagic() { - // use a magic version which is supported by all brokers to reduce the chance that - // we will need to convert the messages when they are ready to be sent. 
- Optional knownBrokerNodesMinRequiredMagicForProduce = this.nodeApiVersions.values().stream() - .filter(versions -> versions.apiVersion(ApiKeys.PRODUCE) != null) // filter out Raft controller nodes - .map(versions -> ProduceRequest.requiredMagicForVersion(versions.latestUsableVersion(ApiKeys.PRODUCE))) - .min(Byte::compare); - return (byte) Math.min(RecordBatch.CURRENT_MAGIC_VALUE, - knownBrokerNodesMinRequiredMagicForProduce.orElse(RecordBatch.CURRENT_MAGIC_VALUE)); - } - - public synchronized byte maxUsableProduceMagic() { - return maxUsableProduceMagic; - } - } diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index c2c2fca17d1b7..ad4e2d94e6887 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -118,7 +118,7 @@ public static ChannelBuilder createChannelBuilder(AbstractConfig config, Time ti SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM); return ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT, config, null, - clientSaslMechanism, time, true, logContext); + clientSaslMechanism, time, logContext); } static List resolve(String host, HostResolver hostResolver) throws UnknownHostException { @@ -247,6 +247,7 @@ public static NetworkClient createNetworkClient(AbstractConfig config, logContext, hostResolver, clientTelemetrySender, + config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) ); } catch (Throwable t) { @@ -274,4 +275,4 @@ public static ClusterResourceListeners configureClusterResourceListeners(List return clusterResourceListeners; } -} \ No newline at end of file +} diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 1a5bb595d6d39..aa3b5c9d628c9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -130,8 +130,7 @@ public class CommonClientConfigs { public static final String METRICS_CONTEXT_PREFIX = "metrics.context."; public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; - public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Valid values are: " + - String.join(", ", SecurityProtocol.names()) + "."; + public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers."; public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT"; public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = "socket.connection.setup.timeout.ms"; @@ -207,7 +206,8 @@ public class CommonClientConfigs { + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, " + "then the broker will remove this client from the group and initiate a rebalance. Note that the value " + "must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms " - + "and group.max.session.timeout.ms."; + + "and group.max.session.timeout.ms. 
Note that this configuration is not supported when group.protocol " + + "is set to \"consumer\"."; public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms"; public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer " @@ -230,8 +230,15 @@ public class CommonClientConfigs { "Brokers appear unavailable when disconnected and no current retry attempt is in-progress. " + "Consider increasing reconnect.backoff.ms and reconnect.backoff.max.ms and " + "decreasing socket.connection.setup.timeout.ms and socket.connection.setup.timeout.max.ms " + - "for the client."; - public static final String DEFAULT_METADATA_RECOVERY_STRATEGY = MetadataRecoveryStrategy.NONE.name; + "for the client. Rebootstrap is also triggered if connection cannot be established to any of the brokers for " + + "metadata.recovery.rebootstrap.trigger.ms milliseconds or if server requests rebootstrap."; + public static final String DEFAULT_METADATA_RECOVERY_STRATEGY = MetadataRecoveryStrategy.REBOOTSTRAP.name; + + public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG = "metadata.recovery.rebootstrap.trigger.ms"; + public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC = "If a client configured to rebootstrap using " + + "metadata.recovery.strategy=rebootstrap is unable to obtain metadata from any of the brokers in the last known " + + "metadata for this interval, client repeats the bootstrap process using bootstrap.servers configuration."; + public static final long DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS = 300 * 1000; /** * Postprocess the configuration so that exponential backoff is disabled when reconnect backoff diff --git a/clients/src/main/java/org/apache/kafka/clients/Metadata.java b/clients/src/main/java/org/apache/kafka/clients/Metadata.java index ece1a25adca19..b60156aae0066 100644 --- a/clients/src/main/java/org/apache/kafka/clients/Metadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/Metadata.java @@ -294,7 +294,7 @@ public Map topicIds() { public synchronized LeaderAndEpoch currentLeader(TopicPartition topicPartition) { Optional maybeMetadata = partitionMetadataIfCurrent(topicPartition); - if (!maybeMetadata.isPresent()) + if (maybeMetadata.isEmpty()) return new LeaderAndEpoch(Optional.empty(), Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition))); MetadataResponse.PartitionMetadata partitionMetadata = maybeMetadata.get(); @@ -392,7 +392,7 @@ public synchronized Set updatePartitionLeadership(Map updatePartitionLeadership(Map(State.ACTIVE); this.telemetrySender = (clientTelemetrySender != null) ? 
new TelemetrySender(clientTelemetrySender) : null; + this.rebootstrapTriggerMs = rebootstrapTriggerMs; this.metadataRecoveryStrategy = metadataRecoveryStrategy; } @@ -401,6 +448,8 @@ public void close(String nodeId) { long now = time.milliseconds(); cancelInFlightRequests(nodeId, now, null, false); connectionStates.remove(nodeId); + apiVersions.remove(nodeId); + nodesNeedingApiVersionsFetch.remove(nodeId); } /** @@ -608,6 +657,7 @@ public List poll(long timeout, long now) { handleInitiateApiVersionRequests(updatedNow); handleTimedOutConnections(responses, updatedNow); handleTimedOutRequests(responses, updatedNow); + handleRebootstrap(responses, updatedNow); completeResponses(responses); return responses; @@ -997,7 +1047,6 @@ private void handleApiVersionsResponse(List responses, NodeApiVersions nodeVersionInfo = new NodeApiVersions( apiVersionsResponse.data().apiKeys(), apiVersionsResponse.data().supportedFeatures(), - apiVersionsResponse.data().zkMigrationReady(), apiVersionsResponse.data().finalizedFeatures(), apiVersionsResponse.data().finalizedFeaturesEpoch()); apiVersions.update(node, nodeVersionInfo); @@ -1065,6 +1114,20 @@ private void handleInitiateApiVersionRequests(long now) { } } + private void handleRebootstrap(List responses, long now) { + if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP && metadataUpdater.needsRebootstrap(now, rebootstrapTriggerMs)) { + this.metadataUpdater.fetchNodes().forEach(node -> { + String nodeId = node.idString(); + this.selector.close(nodeId); + if (connectionStates.isConnecting(nodeId) || connectionStates.isConnected(nodeId)) { + log.info("Disconnecting from node {} due to client rebootstrap.", nodeId); + processDisconnection(responses, nodeId, now, ChannelState.LOCAL_CLOSE); + } + }); + metadataUpdater.rebootstrap(now); + } + } + /** * Initiate a connection to the given node * @param node the node to connect to @@ -1116,6 +1179,15 @@ class DefaultMetadataUpdater implements MetadataUpdater { // Defined if there is a request in progress, null otherwise private InProgressData inProgress; + /* + * The time in wall-clock milliseconds when we started attempts to fetch metadata. If empty, + * metadata has not been requested. This is the start time based on which rebootstrap is + * triggered if metadata is not obtained for the configured rebootstrap trigger interval. + * Set to Optional.of(0L) to force rebootstrap immediately. + */ + private Optional metadataAttemptStartMs = Optional.empty(); + + DefaultMetadataUpdater(Metadata metadata) { this.metadata = metadata; this.inProgress = null; @@ -1146,6 +1218,9 @@ public long maybeUpdate(long now) { return metadataTimeout; } + if (metadataAttemptStartMs.isEmpty()) + metadataAttemptStartMs = Optional.of(now); + // Beware that the behavior of this method and the computation of timeouts for poll() are // highly dependent on the behavior of leastLoadedNode. LeastLoadedNode leastLoadedNode = leastLoadedNode(now); @@ -1153,7 +1228,7 @@ public long maybeUpdate(long now) { // Rebootstrap if needed and configured. 
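The new metadataAttemptStartMs field carries the timing behind client-side rebootstrap: it records when the current metadata attempt began, is cleared on a successful update, and is set to 0 to force an immediate rebootstrap. A stripped-down, standalone model of that bookkeeping is sketched below; it is not the actual DefaultMetadataUpdater code, and the trigger value comes from metadata.recovery.rebootstrap.trigger.ms.

import java.util.Optional;

// Simplified model of the rebootstrap timing added to DefaultMetadataUpdater.
final class RebootstrapClock {
    private Optional<Long> metadataAttemptStartMs = Optional.empty();

    void onMetadataRequested(long nowMs) {
        if (metadataAttemptStartMs.isEmpty())
            metadataAttemptStartMs = Optional.of(nowMs); // first attempt of this update cycle
    }

    void onMetadataUpdated() {
        metadataAttemptStartMs = Optional.empty();       // a successful update resets the clock
    }

    void forceRebootstrap() {
        metadataAttemptStartMs = Optional.of(0L);        // e.g. when the server requests rebootstrap
    }

    boolean needsRebootstrap(long nowMs, long rebootstrapTriggerMs) {
        return metadataAttemptStartMs.filter(startMs -> nowMs - startMs > rebootstrapTriggerMs).isPresent();
    }
}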
if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { - metadata.rebootstrap(); + rebootstrap(now); leastLoadedNode = leastLoadedNode(now); } @@ -1219,23 +1294,42 @@ public void handleSuccessfulResponse(RequestHeader requestHeader, long now, Meta if (!errors.isEmpty()) log.warn("The metadata response from the cluster reported a recoverable issue with correlation id {} : {}", requestHeader.correlationId(), errors); - // When talking to the startup phase of a broker, it is possible to receive an empty metadata set, which - // we should retry later. - if (response.brokers().isEmpty()) { + if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP && response.topLevelError() == Errors.REBOOTSTRAP_REQUIRED) { + log.info("Rebootstrap requested by server."); + initiateRebootstrap(); + } else if (response.brokers().isEmpty()) { + // When talking to the startup phase of a broker, it is possible to receive an empty metadata set, which + // we should retry later. log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId()); this.metadata.failedUpdate(now); } else { this.metadata.update(inProgress.requestVersion, response, inProgress.isPartialUpdate, now); + metadataAttemptStartMs = Optional.empty(); } inProgress = null; } + @Override + public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) { + return metadataAttemptStartMs.filter(startMs -> now - startMs > rebootstrapTriggerMs).isPresent(); + } + + @Override + public void rebootstrap(long now) { + metadata.rebootstrap(); + metadataAttemptStartMs = Optional.of(now); + } + @Override public void close() { this.metadata.close(); } + private void initiateRebootstrap() { + metadataAttemptStartMs = Optional.of(0L); // to force rebootstrap + } + /** * Add a metadata request to the list of sends if we can make one */ @@ -1317,7 +1411,7 @@ private long maybeUpdate(long now, Node node) { if (canSendRequest(nodeConnectionId, now)) { Optional> requestOpt = clientTelemetrySender.createRequest(); - if (!requestOpt.isPresent()) + if (requestOpt.isEmpty()) return Long.MAX_VALUE; AbstractRequest.Builder request = requestOpt.get(); diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java index e044fd48ee3e1..3e161b1e9939b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java @@ -71,6 +71,17 @@ public static boolean awaitReady(KafkaClient client, Node node, Time time, long throw new IOException("Connection to " + node + " failed."); } long pollTimeout = timeoutMs - (attemptStartTime - startTime); // initialize in this order to avoid overflow + + // If the network client is waiting to send data for some reason (eg. throttling or retry backoff), + // polling longer than that is potentially dangerous as the producer will not attempt to send + // any pending requests. + long waitingTime = client.pollDelayMs(node, startTime); + if (waitingTime > 0 && pollTimeout > waitingTime) { + // Block only until the next-scheduled time that it's okay to send data to the producer, + // wake up, and try again. This is the way. 
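
For illustration, a client relying on the rebootstrap behaviour wired up above could be configured roughly as follows. This is a minimal sketch: the bootstrap addresses are placeholders, and both values shown are simply the defaults introduced by this change.

import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class RebootstrapConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder addresses; rebootstrap re-resolves these when metadata cannot be obtained.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "broker-1:9092,broker-2:9092");
        // "rebootstrap" is now the default metadata.recovery.strategy; set explicitly for clarity.
        props.put(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG, "rebootstrap");
        // Repeat the bootstrap process if no metadata is obtained for 5 minutes (the default).
        props.put(AdminClientConfig.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "300000");
        try (Admin admin = Admin.create(props)) {
            // Use the admin client as usual; rebootstrap is handled internally by the NetworkClient.
        }
    }
}
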
+ pollTimeout = waitingTime; + } + client.poll(pollTimeout, attemptStartTime); if (client.authenticationException(node) != null) throw client.authenticationException(node); diff --git a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java index 43bd9125a1493..b2eaa481f557e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java +++ b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java @@ -49,8 +49,6 @@ public class NodeApiVersions { private final Map supportedFeatures; - private final boolean zkMigrationEnabled; - private final Map finalizedFeatures; private final long finalizedFeaturesEpoch; @@ -83,7 +81,7 @@ public static NodeApiVersions create(Collection overrides) { } if (!exists) apiVersions.add(ApiVersionsResponse.toApiVersion(apiKey)); } - return new NodeApiVersions(apiVersions, Collections.emptyList(), false, Collections.emptyList(), -1); + return new NodeApiVersions(apiVersions, Collections.emptyList(), Collections.emptyList(), -1); } @@ -104,16 +102,14 @@ public static NodeApiVersions create(short apiKey, short minVersion, short maxVe public NodeApiVersions( Collection nodeApiVersions, - Collection nodeSupportedFeatures, - boolean zkMigrationEnabled + Collection nodeSupportedFeatures ) { - this(nodeApiVersions, nodeSupportedFeatures, zkMigrationEnabled, Collections.emptyList(), -1); + this(nodeApiVersions, nodeSupportedFeatures, Collections.emptyList(), -1); } public NodeApiVersions( Collection nodeApiVersions, Collection nodeSupportedFeatures, - boolean zkMigrationEnabled, Collection nodeFinalizedFeatures, long finalizedFeaturesEpoch ) { @@ -133,8 +129,6 @@ public NodeApiVersions( new SupportedVersionRange(supportedFeature.minVersion(), supportedFeature.maxVersion())); } this.supportedFeatures = Collections.unmodifiableMap(supportedFeaturesBuilder); - this.zkMigrationEnabled = zkMigrationEnabled; - this.finalizedFeaturesEpoch = finalizedFeaturesEpoch; this.finalizedFeatures = new HashMap<>(); for (ApiVersionsResponseData.FinalizedFeatureKey finalizedFeature : nodeFinalizedFeatures) { @@ -264,10 +258,6 @@ public Map supportedFeatures() { return supportedFeatures; } - public boolean zkMigrationEnabled() { - return zkMigrationEnabled; - } - public Map finalizedFeatures() { return finalizedFeatures; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java index dd3b92cf44a21..6c743dec048a8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java @@ -486,41 +486,6 @@ default DescribeConfigsResult describeConfigs(Collection resourc */ DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options); - /** - * Update the configuration for the specified resources with the default options. - *

- * This is a convenience method for {@link #alterConfigs(Map, AlterConfigsOptions)} with default options. - * See the overload for more details. - *

- * This operation is supported by brokers with version 0.11.0.0 or higher. - * - * @param configs The resources with their configs (topic is the only resource type with configs that can - * be updated currently) - * @return The AlterConfigsResult - * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map)}. - */ - @Deprecated - default AlterConfigsResult alterConfigs(Map configs) { - return alterConfigs(configs, new AlterConfigsOptions()); - } - - /** - * Update the configuration for the specified resources with the default options. - *

- * Updates are not transactional so they may succeed for some resources while fail for others. The configs for - * a particular resource are updated atomically. - *

- * This operation is supported by brokers with version 0.11.0.0 or higher. - * - * @param configs The resources with their configs (topic is the only resource type with configs that can - * be updated currently) - * @param options The options to use when describing configs - * @return The AlterConfigsResult - * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)}. - */ - @Deprecated - AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options); - /** * Incrementally updates the configuration for the specified resources with default options. *

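
With the deprecated alterConfigs overloads removed, callers move to the incremental API. A rough migration sketch follows; the topic name and config value are illustrative, and admin is assumed to be an existing Admin instance.

import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsSketch {
    // Roughly equivalent to the removed alterConfigs(...) call for a single topic config override.
    static void setRetention(Admin admin, String topic) throws Exception {
        ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
        Collection<AlterConfigOp> ops = List.of(
            new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET));
        admin.incrementalAlterConfigs(Map.of(resource, ops)).all().get();
    }
}
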
@@ -938,9 +903,7 @@ default ListConsumerGroupsResult listConsumerGroups() { * @return The ListConsumerGroupOffsetsResult */ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options) { - @SuppressWarnings("deprecation") - ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec() - .topicPartitions(options.topicPartitions()); + ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec(); // We can use the provided options with the batched API, which uses topic partitions from // the group spec and ignores any topic partitions set in the options. @@ -1480,14 +1443,23 @@ default DescribeFeaturesResult describeFeatures() { * error code for each supplied {@link FeatureUpdate}, and the code indicates if the update * succeeded or failed in the controller. *

    - *
  • Downgrade of feature version level is not a regular operation/intent. It is only allowed - * in the controller if the {@link FeatureUpdate} has the allowDowngrade flag set. Setting this - * flag conveys user intent to attempt downgrade of a feature max version level. Note that - * despite the allowDowngrade flag being set, certain downgrades may be rejected by the - * controller if it is deemed impossible.
  • - *
  • Deletion of a finalized feature version is not a regular operation/intent. It could be - * done by setting the allowDowngrade flag to true in the {@link FeatureUpdate}, and, setting - * the max version level to a value less than 1.
  • + *
  • Downgrading a feature version level is not a common operation and should only be + * performed when necessary. It is permitted only if the {@link FeatureUpdate} specifies the + * {@code upgradeType} as either {@link FeatureUpdate.UpgradeType#SAFE_DOWNGRADE} or + * {@link FeatureUpdate.UpgradeType#UNSAFE_DOWNGRADE}. + *
      + *
    • {@code SAFE_DOWNGRADE}: Allows downgrades that do not lead to metadata loss.
    • + *
    • {@code UNSAFE_DOWNGRADE}: Permits downgrades that might result in metadata loss.
    • + *
    + * Note that even with these settings, certain downgrades may still be rejected by the controller + * if they are considered unsafe or impossible.
  • + *
  • Deleting a finalized feature version is also not a common operation. To delete a feature, + * set the {@code maxVersionLevel} to zero and specify the {@code upgradeType} as either + * {@link FeatureUpdate.UpgradeType#SAFE_DOWNGRADE} or + * {@link FeatureUpdate.UpgradeType#UNSAFE_DOWNGRADE}.
  • + *
  • The {@link FeatureUpdate.UpgradeType#UPGRADE} type cannot be used when the + * {@code maxVersionLevel} is zero. Attempting to do so will result in an + * {@link IllegalArgumentException}.
  • *
*

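
A hedged sketch of the downgrade and deletion cases described in the list above; the feature names are purely illustrative and admin is assumed to be an existing Admin instance.

import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.clients.admin.UpdateFeaturesOptions;

public class FeatureDowngradeSketch {
    static void downgradeAndDelete(Admin admin) throws Exception {
        // Downgrade a hypothetical feature to max version level 1 without metadata loss.
        FeatureUpdate downgrade = new FeatureUpdate((short) 1, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE);
        // Delete another hypothetical feature: max version level 0 plus a downgrade upgrade type.
        FeatureUpdate delete = new FeatureUpdate((short) 0, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE);
        admin.updateFeatures(
            Map.of("example.feature.a", downgrade, "example.feature.b", delete),
            new UpdateFeaturesOptions()).all().get();
    }
}
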
* The following exceptions can be anticipated when calling {@code get()} on the futures @@ -1548,8 +1520,7 @@ default DescribeMetadataQuorumResult describeMetadataQuorum() { /** * Unregister a broker. *

- * This operation does not have any effect on partition assignments. It is supported - * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper. + * This operation does not have any effect on partition assignments. * * This is a convenience method for {@link #unregisterBroker(int, UnregisterBrokerOptions)} * @@ -1565,8 +1536,7 @@ default UnregisterBrokerResult unregisterBroker(int brokerId) { /** * Unregister a broker. *

- * This operation does not have any effect on partition assignments. It is supported - * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper. + * This operation does not have any effect on partition assignments. * * The following exceptions can be anticipated when calling {@code get()} on the future from the * returned {@link UnregisterBrokerResult}: @@ -1574,8 +1544,7 @@ default UnregisterBrokerResult unregisterBroker(int brokerId) { *

  • {@link org.apache.kafka.common.errors.TimeoutException} * If the request timed out before the describe operation could finish.
  • *
  • {@link org.apache.kafka.common.errors.UnsupportedVersionException} - * If the software is too old to support the unregistration API, or if the - * cluster is not using Raft to store metadata. + * If the software is too old to support the unregistration API. * *

    * @@ -1825,23 +1794,25 @@ default DescribeShareGroupsResult describeShareGroups(Collection groupId } /** - * List the share groups available in the cluster. + * List the share group offsets available in the cluster for the specified share groups. * - * @param options The options to use when listing the share groups. - * @return The ListShareGroupsResult. + * @param groupSpecs Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for. + * @param options The options to use when listing the share group offsets. + * @return The ListShareGroupOffsetsResult */ - ListShareGroupsResult listShareGroups(ListShareGroupsOptions options); + ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs, ListShareGroupOffsetsOptions options); /** - * List the share groups available in the cluster with the default options. - *

    - * This is a convenience method for {@link #listShareGroups(ListShareGroupsOptions)} with default options. - * See the overload for more details. + * List the share group offsets available in the cluster for the specified share groups with the default options. + * + *

    This is a convenience method for {@link #listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)} + * to list offsets of all partitions for the specified share groups with default options. * - * @return The ListShareGroupsResult. + * @param groupSpecs Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for. + * @return The ListShareGroupOffsetsResult */ - default ListShareGroupsResult listShareGroups() { - return listShareGroups(new ListShareGroupsOptions()); + default ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs) { + return listShareGroupOffsets(groupSpecs, new ListShareGroupOffsetsOptions()); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java index 2a00f450c5ca7..a87af6be154a5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java @@ -141,6 +141,10 @@ public class AdminClientConfig extends AbstractConfig { public static final String METADATA_RECOVERY_STRATEGY_DOC = CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC; public static final String DEFAULT_METADATA_RECOVERY_STRATEGY = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY; + public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG; + public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC; + public static final long DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS; + /** * security.providers */ @@ -270,7 +274,13 @@ public class AdminClientConfig extends AbstractConfig { ConfigDef.CaseInsensitiveValidString .in(Utils.enumOptions(MetadataRecoveryStrategy.class)), Importance.LOW, - METADATA_RECOVERY_STRATEGY_DOC); + METADATA_RECOVERY_STRATEGY_DOC) + .define(METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, + Type.LONG, + DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, + atLeast(0), + Importance.LOW, + METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsOptions.java index 198a4eab62a70..b2e85b9e813e4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsOptions.java @@ -22,7 +22,7 @@ import java.util.Map; /** - * Options for {@link Admin#incrementalAlterConfigs(Map)} and {@link Admin#alterConfigs(Map)}. + * Options for {@link Admin#incrementalAlterConfigs(Map)}. * * The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java index 29056ce29403d..007b4422b364d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java @@ -24,7 +24,7 @@ import java.util.Map; /** - * The result of the {@link Admin#alterConfigs(Map)} call. + * The result of the {@link Admin#incrementalAlterConfigs(Map, AlterConfigsOptions)} call. 
* * The API of this class is evolving, see {@link Admin} for details. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java index 13ec5965eedbb..b2c44d4fbeb63 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java @@ -18,14 +18,16 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.Node; import org.apache.kafka.common.acl.AclOperation; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -38,10 +40,16 @@ public class ConsumerGroupDescription { private final Collection members; private final String partitionAssignor; private final GroupType type; - private final ConsumerGroupState state; + private final GroupState groupState; private final Node coordinator; private final Set authorizedOperations; + private final Optional groupEpoch; + private final Optional targetAssignmentEpoch; + /** + * @deprecated Since 4.0. Use {@link #ConsumerGroupDescription(String, boolean, Collection, String, GroupType, GroupState, Node, Set, Optional, Optional)} instead. + */ + @Deprecated public ConsumerGroupDescription(String groupId, boolean isSimpleConsumerGroup, Collection members, @@ -51,6 +59,10 @@ public ConsumerGroupDescription(String groupId, this(groupId, isSimpleConsumerGroup, members, partitionAssignor, state, coordinator, Collections.emptySet()); } + /** + * @deprecated Since 4.0. Use {@link #ConsumerGroupDescription(String, boolean, Collection, String, GroupType, GroupState, Node, Set, Optional, Optional)} instead. + */ + @Deprecated public ConsumerGroupDescription(String groupId, boolean isSimpleConsumerGroup, Collection members, @@ -61,6 +73,10 @@ public ConsumerGroupDescription(String groupId, this(groupId, isSimpleConsumerGroup, members, partitionAssignor, GroupType.CLASSIC, state, coordinator, authorizedOperations); } + /** + * @deprecated Since 4.0. Use {@link #ConsumerGroupDescription(String, boolean, Collection, String, GroupType, GroupState, Node, Set, Optional, Optional)} instead. + */ + @Deprecated public ConsumerGroupDescription(String groupId, boolean isSimpleConsumerGroup, Collection members, @@ -71,13 +87,36 @@ public ConsumerGroupDescription(String groupId, Set authorizedOperations) { this.groupId = groupId == null ? "" : groupId; this.isSimpleConsumerGroup = isSimpleConsumerGroup; - this.members = members == null ? Collections.emptyList() : - Collections.unmodifiableList(new ArrayList<>(members)); + this.members = members == null ? Collections.emptyList() : List.copyOf(members); + this.partitionAssignor = partitionAssignor == null ? 
"" : partitionAssignor; + this.type = type; + this.groupState = GroupState.parse(state.toString()); + this.coordinator = coordinator; + this.authorizedOperations = authorizedOperations; + this.groupEpoch = Optional.empty(); + this.targetAssignmentEpoch = Optional.empty(); + } + + public ConsumerGroupDescription(String groupId, + boolean isSimpleConsumerGroup, + Collection members, + String partitionAssignor, + GroupType type, + GroupState groupState, + Node coordinator, + Set authorizedOperations, + Optional groupEpoch, + Optional targetAssignmentEpoch) { + this.groupId = groupId == null ? "" : groupId; + this.isSimpleConsumerGroup = isSimpleConsumerGroup; + this.members = members == null ? Collections.emptyList() : List.copyOf(members); this.partitionAssignor = partitionAssignor == null ? "" : partitionAssignor; this.type = type; - this.state = state; + this.groupState = groupState; this.coordinator = coordinator; this.authorizedOperations = authorizedOperations; + this.groupEpoch = groupEpoch; + this.targetAssignmentEpoch = targetAssignmentEpoch; } @Override @@ -90,14 +129,17 @@ public boolean equals(final Object o) { Objects.equals(members, that.members) && Objects.equals(partitionAssignor, that.partitionAssignor) && type == that.type && - state == that.state && + groupState == that.groupState && Objects.equals(coordinator, that.coordinator) && - Objects.equals(authorizedOperations, that.authorizedOperations); + Objects.equals(authorizedOperations, that.authorizedOperations) && + Objects.equals(groupEpoch, that.groupEpoch) && + Objects.equals(targetAssignmentEpoch, that.targetAssignmentEpoch); } @Override public int hashCode() { - return Objects.hash(groupId, isSimpleConsumerGroup, members, partitionAssignor, type, state, coordinator, authorizedOperations); + return Objects.hash(groupId, isSimpleConsumerGroup, members, partitionAssignor, type, groupState, coordinator, + authorizedOperations, groupEpoch, targetAssignmentEpoch); } /** @@ -138,9 +180,18 @@ public GroupType type() { /** * The consumer group state, or UNKNOWN if the state is too new for us to parse. + * @deprecated Since 4.0. Use {@link #groupState()} instead. */ + @Deprecated public ConsumerGroupState state() { - return state; + return ConsumerGroupState.parse(groupState.toString()); + } + + /** + * The group state, or UNKNOWN if the state is too new for us to parse. + */ + public GroupState groupState() { + return groupState; } /** @@ -157,6 +208,24 @@ public Set authorizedOperations() { return authorizedOperations; } + /** + * The epoch of the consumer group. + * The optional is set to an integer if it is a {@link GroupType#CONSUMER} group, and to empty if it + * is a {@link GroupType#CLASSIC} group. + */ + public Optional groupEpoch() { + return groupEpoch; + } + + /** + * The epoch of the target assignment. + * The optional is set to an integer if it is a {@link GroupType#CONSUMER} group, and to empty if it + * is a {@link GroupType#CLASSIC} group. 
+ */ + public Optional targetAssignmentEpoch() { + return targetAssignmentEpoch; + } + @Override public String toString() { return "(groupId=" + groupId + @@ -164,9 +233,11 @@ public String toString() { ", members=" + members.stream().map(MemberDescription::toString).collect(Collectors.joining(",")) + ", partitionAssignor=" + partitionAssignor + ", type=" + type + - ", state=" + state + + ", groupState=" + groupState + ", coordinator=" + coordinator + ", authorizedOperations=" + authorizedOperations + + ", groupEpoch=" + groupEpoch.orElse(null) + + ", targetAssignmentEpoch=" + targetAssignmentEpoch.orElse(null) + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java index 96a3ecce9d000..34b9f08f10e06 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java @@ -18,6 +18,7 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import java.util.Objects; @@ -29,28 +30,30 @@ public class ConsumerGroupListing { private final String groupId; private final boolean isSimpleConsumerGroup; - private final Optional state; + private final Optional groupState; private final Optional type; /** * Create an instance with the specified parameters. * - * @param groupId Group Id - * @param isSimpleConsumerGroup If consumer group is simple or not. + * @param groupId Group Id. + * @param isSimpleConsumerGroup If consumer group is simple or not. */ public ConsumerGroupListing(String groupId, boolean isSimpleConsumerGroup) { - this(groupId, isSimpleConsumerGroup, Optional.empty(), Optional.empty()); + this(groupId, Optional.empty(), Optional.empty(), isSimpleConsumerGroup); } /** * Create an instance with the specified parameters. * - * @param groupId Group Id - * @param isSimpleConsumerGroup If consumer group is simple or not. - * @param state The state of the consumer group + * @param groupId Group Id. + * @param isSimpleConsumerGroup If consumer group is simple or not. + * @param state The state of the consumer group. + * @deprecated Since 4.0. Use {@link #ConsumerGroupListing(String, Optional, boolean)} instead. */ + @Deprecated public ConsumerGroupListing(String groupId, boolean isSimpleConsumerGroup, Optional state) { - this(groupId, isSimpleConsumerGroup, state, Optional.empty()); + this(groupId, Objects.requireNonNull(state).map(state0 -> GroupState.parse(state0.toString())), Optional.empty(), isSimpleConsumerGroup); } /** @@ -60,17 +63,51 @@ public ConsumerGroupListing(String groupId, boolean isSimpleConsumerGroup, Optio * @param isSimpleConsumerGroup If consumer group is simple or not. * @param state The state of the consumer group. * @param type The type of the consumer group. + * @deprecated Since 4.0. Use {@link #ConsumerGroupListing(String, Optional, Optional, boolean)} instead. */ + @Deprecated public ConsumerGroupListing( String groupId, boolean isSimpleConsumerGroup, Optional state, Optional type + ) { + this(groupId, Objects.requireNonNull(state).map(state0 -> GroupState.parse(state0.toString())), type, isSimpleConsumerGroup); + } + + /** + * Create an instance with the specified parameters. + * + * @param groupId Group Id. + * @param groupState The state of the consumer group. 
+ * @param isSimpleConsumerGroup If consumer group is simple or not. + */ + public ConsumerGroupListing( + String groupId, + Optional groupState, + boolean isSimpleConsumerGroup + ) { + this(groupId, groupState, Optional.empty(), isSimpleConsumerGroup); + } + + /** + * Create an instance with the specified parameters. + * + * @param groupId Group Id. + * @param groupState The state of the consumer group. + * @param type The type of the consumer group. + * @param isSimpleConsumerGroup If consumer group is simple or not. + */ + public ConsumerGroupListing( + String groupId, + Optional groupState, + Optional type, + boolean isSimpleConsumerGroup ) { this.groupId = groupId; - this.isSimpleConsumerGroup = isSimpleConsumerGroup; - this.state = Objects.requireNonNull(state); + this.groupState = Objects.requireNonNull(groupState); this.type = Objects.requireNonNull(type); + this.isSimpleConsumerGroup = isSimpleConsumerGroup; } /** @@ -87,11 +124,20 @@ public boolean isSimpleConsumerGroup() { return isSimpleConsumerGroup; } + /** + * Group state + */ + public Optional groupState() { + return groupState; + } + /** * Consumer Group state + * @deprecated Since 4.0. Use {@link #groupState()} instead. */ + @Deprecated public Optional state() { - return state; + return groupState.map(state0 -> ConsumerGroupState.parse(state0.toString())); } /** @@ -108,14 +154,14 @@ public String toString() { return "(" + "groupId='" + groupId + '\'' + ", isSimpleConsumerGroup=" + isSimpleConsumerGroup + - ", state=" + state + + ", groupState=" + groupState + ", type=" + type + ')'; } @Override public int hashCode() { - return Objects.hash(groupId, isSimpleConsumerGroup(), state, type); + return Objects.hash(groupId, isSimpleConsumerGroup(), groupState, type); } @Override @@ -125,7 +171,7 @@ public boolean equals(Object o) { ConsumerGroupListing that = (ConsumerGroupListing) o; return isSimpleConsumerGroup() == that.isSimpleConsumerGroup() && Objects.equals(groupId, that.groupId) && - Objects.equals(state, that.state) && + Objects.equals(groupState, that.groupState) && Objects.equals(type, that.type); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.java index 13540b0299cfc..8e5c24aa6fc1f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.java @@ -31,7 +31,7 @@ */ @InterfaceStability.Evolving public class CreateDelegationTokenOptions extends AbstractOptions { - private long maxLifeTimeMs = -1; + private long maxLifetimeMs = -1; private List renewers = new LinkedList<>(); private KafkaPrincipal owner = null; @@ -53,12 +53,29 @@ public Optional owner() { return Optional.ofNullable(owner); } - public CreateDelegationTokenOptions maxlifeTimeMs(long maxLifeTimeMs) { - this.maxLifeTimeMs = maxLifeTimeMs; + /** + * @deprecated Since 4.0 and should not be used any longer. + */ + @Deprecated + public CreateDelegationTokenOptions maxlifeTimeMs(long maxLifetimeMs) { + this.maxLifetimeMs = maxLifetimeMs; return this; } + public CreateDelegationTokenOptions maxLifetimeMs(long maxLifetimeMs) { + this.maxLifetimeMs = maxLifetimeMs; + return this; + } + + /** + * @deprecated Since 4.0 and should not be used any longer. 
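
The admin results above replace ConsumerGroupState with the new GroupState and expose the group epochs. A rough usage sketch, with the group id as a placeholder and admin as an existing Admin instance:

import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;

public class GroupStateSketch {
    static void printGroupState(Admin admin, String groupId) throws Exception {
        Map<String, ConsumerGroupDescription> groups =
            admin.describeConsumerGroups(List.of(groupId)).all().get();
        ConsumerGroupDescription description = groups.get(groupId);
        // groupState() replaces the deprecated state() accessor.
        System.out.println(groupId + " -> " + description.groupState());
        // Epochs are only present for groups using the new consumer protocol (GroupType.CONSUMER).
        description.groupEpoch().ifPresent(epoch -> System.out.println("group epoch: " + epoch));

        // ConsumerGroupListing gains the same accessor for listConsumerGroups() results.
        for (ConsumerGroupListing listing : admin.listConsumerGroups().all().get()) {
            System.out.println(listing.groupId() + " -> " + listing.groupState());
        }
    }
}
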
+ */ + @Deprecated public long maxlifeTimeMs() { - return maxLifeTimeMs; + return maxLifetimeMs; + } + + public long maxLifetimeMs() { + return maxLifetimeMs; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java index 90ddbd0582c49..5c1e60b1a6176 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java @@ -42,7 +42,7 @@ public class DeleteConsumerGroupsResult { */ public Map> deletedGroups() { Map> deletedGroups = new HashMap<>(futures.size()); - futures.forEach((key, future) -> deletedGroups.put(key, future)); + deletedGroups.putAll(futures); return deletedGroups; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java index 725b82a78dee8..8ab7cdd801baf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java @@ -70,16 +70,6 @@ public Map> topicNameValues() { return nameFutures; } - /** - * @return a map from topic names to futures which can be used to check the status of - * individual deletions if the deleteTopics request used topic names. Otherwise return null. - * @deprecated Since 3.0 use {@link #topicNameValues} instead - */ - @Deprecated - public Map> values() { - return nameFutures; - } - /** * @return a future which succeeds only if all the topic deletions succeed. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java index 2eac1f055f6e0..a6ef6f7f0f34d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java @@ -29,6 +29,8 @@ public class DescribeClusterOptions extends AbstractOptions>> values() { - return descriptions().entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - entry -> entry.getValue().thenApply(this::convertMapValues))); - } - - @SuppressWarnings("deprecation") - private Map convertMapValues(Map map) { - Stream> stream = map.entrySet().stream(); - return stream.collect(Collectors.toMap( - Map.Entry::getKey, - infoEntry -> { - LogDirDescription logDir = infoEntry.getValue(); - return new DescribeLogDirsResponse.LogDirInfo(logDir.error() == null ? Errors.NONE : Errors.forException(logDir.error()), - logDir.replicaInfos().entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, - replicaEntry -> new DescribeLogDirsResponse.ReplicaInfo( - replicaEntry.getValue().size(), - replicaEntry.getValue().offsetLag(), - replicaEntry.getValue().isFuture()) - ))); - })); - } - /** * Return a map from brokerId to future which can be used to check the information of partitions on each individual broker. * The result of the future is a map from broker log directory path to a description of that log directory. @@ -81,18 +47,6 @@ public Map>> descriptions() return futures; } - /** - * Return a future which succeeds only if all the brokers have responded without error - * @deprecated Deprecated Since Kafka 2.7. Use {@link #allDescriptions()}. 
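
A brief sketch of the renamed delegation token option; admin is assumed to be an existing Admin instance on a cluster configured for delegation tokens.

import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateDelegationTokenOptions;
import org.apache.kafka.common.security.token.delegation.DelegationToken;

public class DelegationTokenSketch {
    static DelegationToken createToken(Admin admin) throws Exception {
        CreateDelegationTokenOptions options = new CreateDelegationTokenOptions()
            // maxLifetimeMs(...) replaces the deprecated maxlifeTimeMs(...).
            .maxLifetimeMs(TimeUnit.HOURS.toMillis(24));
        return admin.createDelegationToken(options).delegationToken().get();
    }
}
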
- */ - @Deprecated - public KafkaFuture>> all() { - return allDescriptions().thenApply(map -> map.entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, - entry -> convertMapValues(entry.getValue()) - ))); - } - /** * Return a future which succeeds only if all the brokers have responded without error. * The result of the future is a map from brokerId to a map from broker log directory path diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java index 1c68d8180b4cb..6673918c7313a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java @@ -37,11 +37,6 @@ public class DescribeTopicsResult { private final Map> topicIdFutures; private final Map> nameFutures; - @Deprecated - protected DescribeTopicsResult(Map> futures) { - this(null, futures); - } - // VisibleForTesting protected DescribeTopicsResult(Map> topicIdFutures, Map> nameFutures) { if (topicIdFutures != null && nameFutures != null) @@ -80,30 +75,6 @@ public Map> topicNameValues() { return nameFutures; } - /** - * @return a map from topic names to futures which can be used to check the status of - * individual topics if the request used topic names, otherwise return null. - * - * @deprecated Since 3.1.0 use {@link #topicNameValues} instead - */ - @Deprecated - public Map> values() { - return nameFutures; - } - - /** - * @return A future map from topic names to descriptions which can be used to check - * the status of individual description if the describe topic request used - * topic names, otherwise return null, this request succeeds only if all the - * topic descriptions succeed - * - * @deprecated Since 3.1.0 use {@link #allTopicNames()} instead - */ - @Deprecated - public KafkaFuture> all() { - return all(nameFutures); - } - /** * @return A future map from topic names to descriptions which can be used to check * the status of individual description if the describe topic request used diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java index 2eddd7ee28c0c..f13fd2a8e7d18 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java @@ -125,7 +125,7 @@ public KafkaFuture description(String userName) // for users 1, 2, and 3 but this is looking for user 4), so explicitly take care of that case Optional optionalUserResult = data.results().stream().filter(result -> result.user().equals(userName)).findFirst(); - if (!optionalUserResult.isPresent()) { + if (optionalUserResult.isEmpty()) { retval.completeExceptionally(new ResourceNotFoundException("No such user: " + userName)); } else { DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult userResult = optionalUserResult.get(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/FeatureUpdate.java b/clients/src/main/java/org/apache/kafka/clients/admin/FeatureUpdate.java index a2a71b528e402..0b7b78ff9a0b8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/FeatureUpdate.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/FeatureUpdate.java @@ -54,21 +54,6 @@ public static UpgradeType fromCode(int code) 
{ } } - /** - * @param maxVersionLevel the new maximum version level for the finalized feature. - * a value of zero is special and indicates that the update is intended to - * delete the finalized feature, and should be accompanied by setting - * the allowDowngrade flag to true. - * @param allowDowngrade - true, if this feature update was meant to downgrade the existing - * maximum version level of the finalized feature. Only "safe" downgrades are - * enabled with this boolean. See {@link FeatureUpdate#FeatureUpdate(short, UpgradeType)} - * - false, otherwise. - */ - @Deprecated - public FeatureUpdate(final short maxVersionLevel, final boolean allowDowngrade) { - this(maxVersionLevel, allowDowngrade ? UpgradeType.SAFE_DOWNGRADE : UpgradeType.UPGRADE); - } - /** * @param maxVersionLevel The new maximum version level for the finalized feature. * a value of zero is special and indicates that the update is intended to @@ -96,11 +81,6 @@ public short maxVersionLevel() { return maxVersionLevel; } - @Deprecated - public boolean allowDowngrade() { - return upgradeType != UpgradeType.UPGRADE; - } - public UpgradeType upgradeType() { return upgradeType; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java index 285acaf695e30..45ed560dffc5f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java @@ -103,12 +103,6 @@ public DescribeConfigsResult describeConfigs(Collection resource return delegate.describeConfigs(resources, options); } - @Deprecated - @Override - public AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options) { - return delegate.alterConfigs(configs, options); - } - @Override public AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { return delegate.incrementalAlterConfigs(configs, options); @@ -305,8 +299,8 @@ public DescribeShareGroupsResult describeShareGroups(Collection groupIds } @Override - public ListShareGroupsResult listShareGroups(ListShareGroupsOptions options) { - return delegate.listShareGroups(options); + public ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs, ListShareGroupOffsetsOptions options) { + return delegate.listShareGroupOffsets(groupSpecs, options); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/GroupListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/GroupListing.java index 0ee2a211e704b..8a0727c284fa6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/GroupListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/GroupListing.java @@ -17,7 +17,9 @@ package org.apache.kafka.clients.admin; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; +import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Objects; import java.util.Optional; @@ -25,22 +27,26 @@ /** * A listing of a group in the cluster. */ +@InterfaceStability.Evolving public class GroupListing { private final String groupId; private final Optional type; private final String protocol; + private final Optional groupState; /** * Create an instance with the specified parameters. 
* - * @param groupId Group Id - * @param type Group type - * @param protocol Protocol + * @param groupId Group Id + * @param type Group type + * @param protocol Protocol + * @param groupState Group state */ - public GroupListing(String groupId, Optional type, String protocol) { + public GroupListing(String groupId, Optional type, String protocol, Optional groupState) { this.groupId = groupId; this.type = Objects.requireNonNull(type); this.protocol = protocol; + this.groupState = groupState; } /** @@ -75,6 +81,19 @@ public String protocol() { return protocol; } + /** + * The group state. + *

    + * If the broker returns a group state which is not recognised, as might + * happen when talking to a broker with a later version, the state will be + * Optional.of(GroupState.UNKNOWN). + * + * @return An Optional containing the state, if available. + */ + public Optional groupState() { + return groupState; + } + /** * If the group is a simple consumer group or not. */ @@ -88,12 +107,13 @@ public String toString() { "groupId='" + groupId + '\'' + ", type=" + type.map(GroupType::toString).orElse("none") + ", protocol='" + protocol + '\'' + + ", groupState=" + groupState.map(GroupState::toString).orElse("none") + ')'; } @Override public int hashCode() { - return Objects.hash(groupId, type, protocol); + return Objects.hash(groupId, type, protocol, groupState); } @Override @@ -103,6 +123,7 @@ public boolean equals(Object o) { GroupListing that = (GroupListing) o; return Objects.equals(groupId, that.groupId) && Objects.equals(type, that.type) && - Objects.equals(protocol, that.protocol); + Objects.equals(protocol, that.protocol) && + Objects.equals(groupState, that.groupState); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index ef46b49db6efc..3f88cd5aa8070 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -58,12 +58,13 @@ import org.apache.kafka.clients.admin.internals.ListConsumerGroupOffsetsHandler; import org.apache.kafka.clients.admin.internals.ListOffsetsHandler; import org.apache.kafka.clients.admin.internals.ListTransactionsHandler; +import org.apache.kafka.clients.admin.internals.PartitionLeaderStrategy; import org.apache.kafka.clients.admin.internals.RemoveMembersFromConsumerGroupHandler; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.ConsumerGroupState; import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.KafkaFuture; @@ -71,7 +72,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicCollection; import org.apache.kafka.common.TopicCollection.TopicIdCollection; import org.apache.kafka.common.TopicCollection.TopicNameCollection; @@ -182,8 +182,6 @@ import org.apache.kafka.common.requests.AddRaftVoterResponse; import org.apache.kafka.common.requests.AlterClientQuotasRequest; import org.apache.kafka.common.requests.AlterClientQuotasResponse; -import org.apache.kafka.common.requests.AlterConfigsRequest; -import org.apache.kafka.common.requests.AlterConfigsResponse; import org.apache.kafka.common.requests.AlterPartitionReassignmentsRequest; import org.apache.kafka.common.requests.AlterPartitionReassignmentsResponse; import org.apache.kafka.common.requests.AlterReplicaLogDirsRequest; @@ -407,6 +405,7 @@ public class KafkaAdminClient extends AdminClient { private final long retryBackoffMaxMs; private final ExponentialBackoff retryBackoff; private final MetadataRecoveryStrategy metadataRecoveryStrategy; + private final Map partitionLeaderCache; private final AdminFetchMetricsManager 
adminFetchMetricsManager; private final Optional clientTelemetryReporter; @@ -633,6 +632,7 @@ private KafkaAdminClient(AdminClientConfig config, this.clientTelemetryReporter = clientTelemetryReporter; this.clientTelemetryReporter.ifPresent(reporters::add); this.metadataRecoveryStrategy = MetadataRecoveryStrategy.forName(config.getString(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG)); + this.partitionLeaderCache = new HashMap<>(); this.adminFetchMetricsManager = new AdminFetchMetricsManager(metrics); config.logUnused(); AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -722,10 +722,11 @@ private interface NodeProvider { private class MetadataUpdateNodeIdProvider implements NodeProvider { @Override public Node provide() { - LeastLoadedNode leastLoadedNode = client.leastLoadedNode(time.milliseconds()); + long now = time.milliseconds(); + LeastLoadedNode leastLoadedNode = client.leastLoadedNode(now); if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { - metadataManager.rebootstrap(time.milliseconds()); + metadataManager.rebootstrap(now); } return leastLoadedNode.node(); @@ -1701,7 +1702,11 @@ public MetadataRequest.Builder createRequest(int timeoutMs) { public void handleResponse(AbstractResponse abstractResponse) { MetadataResponse response = (MetadataResponse) abstractResponse; long now = time.milliseconds(); - metadataManager.update(response.buildCluster(), now); + + if (response.topLevelError() == Errors.REBOOTSTRAP_REQUIRED) + metadataManager.initiateRebootstrap(); + else + metadataManager.update(response.buildCluster(), now); // Unassign all unsent requests after a metadata refresh to allow for a new // destination to be selected from the new metadata @@ -2499,10 +2504,14 @@ public DescribeClusterResult describeCluster(DescribeClusterOptions options) { @Override AbstractRequest.Builder createRequest(int timeoutMs) { if (!useMetadataRequest) { + if (metadataManager.usingBootstrapControllers() && options.includeFencedBrokers()) { + throw new IllegalArgumentException("Cannot request fenced brokers from controller endpoint"); + } return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() .setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()) .setEndpointType(metadataManager.usingBootstrapControllers() ? - EndpointType.CONTROLLER.id() : EndpointType.BROKER.id())); + EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()) + .setIncludeFencedBrokers(options.includeFencedBrokers())); } else { // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it // simplifies communication with older brokers) @@ -2518,7 +2527,6 @@ AbstractRequest.Builder createRequest(int timeoutMs) { void handleResponse(AbstractResponse abstractResponse) { if (!useMetadataRequest) { DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse; - Errors error = Errors.forCode(response.data().errorCode()); if (error != Errors.NONE) { ApiError apiError = new ApiError(error, response.data().errorMessage()); @@ -2566,6 +2574,12 @@ boolean handleUnsupportedVersionException(final UnsupportedVersionException exce return false; } + // If unsupportedVersion exception was caused by the option to include fenced brokers (only supported for version 2+) + // then we should not fall back to the metadataRequest. 
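
A hedged sketch of the fenced-broker option handled by the code above. It assumes the matching includeFencedBrokers(boolean) setter on DescribeClusterOptions, requires brokers that support DescribeCluster v2, and is rejected when bootstrap.controllers is used; admin is an existing Admin instance.

import java.util.Collection;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.common.Node;

public class FencedBrokersSketch {
    static void listBrokersIncludingFenced(Admin admin) throws Exception {
        DescribeClusterOptions options = new DescribeClusterOptions().includeFencedBrokers(true);
        Collection<Node> nodes = admin.describeCluster(options).nodes().get();
        nodes.forEach(node -> System.out.println(node.id() + " @ " + node.host() + ":" + node.port()));
    }
}
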
+ if (options.includeFencedBrokers()) { + return false; + } + useMetadataRequest = true; return true; } @@ -2861,72 +2875,6 @@ private ConfigEntry.ConfigSource configSource(DescribeConfigsResponse.ConfigSour return configSource; } - @Override - @Deprecated - public AlterConfigsResult alterConfigs(Map configs, final AlterConfigsOptions options) { - final Map> allFutures = new HashMap<>(); - // We must make a separate AlterConfigs request for every BROKER resource we want to alter - // and send the request to that specific node. Other resources are grouped together into - // a single request that may be sent to any node. - final Collection unifiedRequestResources = new ArrayList<>(); - - for (ConfigResource resource : configs.keySet()) { - Integer node = nodeFor(resource); - if (node != null) { - NodeProvider nodeProvider = new ConstantBrokerOrActiveKController(node); - allFutures.putAll(alterConfigs(configs, options, Collections.singleton(resource), nodeProvider)); - } else - unifiedRequestResources.add(resource); - } - if (!unifiedRequestResources.isEmpty()) - allFutures.putAll(alterConfigs(configs, options, unifiedRequestResources, new LeastLoadedBrokerOrActiveKController())); - return new AlterConfigsResult(new HashMap<>(allFutures)); - } - - private Map> alterConfigs(Map configs, - final AlterConfigsOptions options, - Collection resources, - NodeProvider nodeProvider) { - final Map> futures = new HashMap<>(); - final Map requestMap = new HashMap<>(resources.size()); - for (ConfigResource resource : resources) { - List configEntries = new ArrayList<>(); - for (ConfigEntry configEntry: configs.get(resource).entries()) - configEntries.add(new AlterConfigsRequest.ConfigEntry(configEntry.name(), configEntry.value())); - requestMap.put(resource, new AlterConfigsRequest.Config(configEntries)); - futures.put(resource, new KafkaFutureImpl<>()); - } - - final long now = time.milliseconds(); - runnable.call(new Call("alterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) { - - @Override - public AlterConfigsRequest.Builder createRequest(int timeoutMs) { - return new AlterConfigsRequest.Builder(requestMap, options.shouldValidateOnly()); - } - - @Override - public void handleResponse(AbstractResponse abstractResponse) { - AlterConfigsResponse response = (AlterConfigsResponse) abstractResponse; - for (Map.Entry> entry : futures.entrySet()) { - KafkaFutureImpl future = entry.getValue(); - ApiException exception = response.errors().get(entry.getKey()).exception(); - if (exception != null) { - future.completeExceptionally(exception); - } else { - future.complete(null); - } - } - } - - @Override - void handleFailure(Throwable throwable) { - completeAllExceptionally(futures.values(), throwable); - } - }, now); - return futures; - } - @Override public AlterConfigsResult incrementalAlterConfigs(Map> configs, final AlterConfigsOptions options) { @@ -3343,7 +3291,8 @@ void handleFailure(Throwable throwable) { @Override public DeleteRecordsResult deleteRecords(final Map recordsToDelete, final DeleteRecordsOptions options) { - SimpleAdminApiFuture future = DeleteRecordsHandler.newFuture(recordsToDelete.keySet()); + PartitionLeaderStrategy.PartitionLeaderFuture future = + DeleteRecordsHandler.newFuture(recordsToDelete.keySet(), partitionLeaderCache); int timeoutMs = defaultApiTimeoutMs; if (options.timeoutMs() != null) { timeoutMs = options.timeoutMs(); @@ -3371,7 +3320,7 @@ public CreateDelegationTokenResult createDelegationToken(final CreateDelegationT CreateDelegationTokenRequest.Builder 
createRequest(int timeoutMs) { CreateDelegationTokenRequestData data = new CreateDelegationTokenRequestData() .setRenewers(renewers) - .setMaxLifetimeMs(options.maxlifeTimeMs()); + .setMaxLifetimeMs(options.maxLifetimeMs()); if (options.owner().isPresent()) { data.setOwnerPrincipalName(options.owner().get().getName()); data.setOwnerPrincipalType(options.owner().get().getPrincipalType()); @@ -3576,8 +3525,13 @@ ListGroupsRequest.Builder createRequest(int timeoutMs) { .stream() .map(GroupType::toString) .collect(Collectors.toList()); + List groupStates = options.groupStates() + .stream() + .map(GroupState::toString) + .collect(Collectors.toList()); return new ListGroupsRequest.Builder(new ListGroupsRequestData() .setTypesFilter(groupTypes) + .setStatesFilter(groupStates) ); } @@ -3590,10 +3544,17 @@ private void maybeAddGroup(ListGroupsResponseData.ListedGroup group) { type = Optional.of(GroupType.parse(group.groupType())); } final String protocolType = group.protocolType(); + final Optional groupState; + if (group.groupState() == null || group.groupState().isEmpty()) { + groupState = Optional.empty(); + } else { + groupState = Optional.of(GroupState.parse(group.groupState())); + } final GroupListing groupListing = new GroupListing( groupId, type, - protocolType + protocolType, + groupState ); results.addListing(groupListing); } @@ -3718,9 +3679,9 @@ void handleResponse(AbstractResponse abstractResponse) { runnable.call(new Call("listConsumerGroups", deadline, new ConstantNodeIdProvider(node.id())) { @Override ListGroupsRequest.Builder createRequest(int timeoutMs) { - List states = options.states() + List states = options.groupStates() .stream() - .map(ConsumerGroupState::toString) + .map(GroupState::toString) .collect(Collectors.toList()); List groupTypes = options.types() .stream() @@ -3736,17 +3697,17 @@ private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) { String protocolType = group.protocolType(); if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) { final String groupId = group.groupId(); - final Optional state = group.groupState().isEmpty() + final Optional groupState = group.groupState().isEmpty() ? Optional.empty() - : Optional.of(ConsumerGroupState.parse(group.groupState())); + : Optional.of(GroupState.parse(group.groupState())); final Optional type = group.groupType().isEmpty() ? 
Optional.empty() : Optional.of(GroupType.parse(group.groupType())); final ConsumerGroupListing groupListing = new ConsumerGroupListing( groupId, - protocolType.isEmpty(), - state, - type + groupState, + type, + protocolType.isEmpty() ); results.addListing(groupListing); } @@ -3824,48 +3785,6 @@ public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets( return new DeleteConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions); } - private static final class ListShareGroupsResults { - private final List errors; - private final HashMap listings; - private final HashSet remaining; - private final KafkaFutureImpl> future; - - ListShareGroupsResults(Collection leaders, - KafkaFutureImpl> future) { - this.errors = new ArrayList<>(); - this.listings = new HashMap<>(); - this.remaining = new HashSet<>(leaders); - this.future = future; - tryComplete(); - } - - synchronized void addError(Throwable throwable, Node node) { - ApiError error = ApiError.fromThrowable(throwable); - if (error.message() == null || error.message().isEmpty()) { - errors.add(error.error().exception("Error listing groups on " + node)); - } else { - errors.add(error.error().exception("Error listing groups on " + node + ": " + error.message())); - } - } - - synchronized void addListing(ShareGroupListing listing) { - listings.put(listing.groupId(), listing); - } - - synchronized void tryComplete(Node leader) { - remaining.remove(leader); - tryComplete(); - } - - private synchronized void tryComplete() { - if (remaining.isEmpty()) { - ArrayList results = new ArrayList<>(listings.values()); - results.addAll(errors); - future.complete(results); - } - } - } - @Override public DescribeShareGroupsResult describeShareGroups(final Collection groupIds, final DescribeShareGroupsOptions options) { @@ -3877,91 +3796,12 @@ public DescribeShareGroupsResult describeShareGroups(final Collection gr .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } + // To do in a follow-up PR @Override - public ListShareGroupsResult listShareGroups(ListShareGroupsOptions options) { - final KafkaFutureImpl> all = new KafkaFutureImpl<>(); - final long nowMetadata = time.milliseconds(); - final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs()); - runnable.call(new Call("findAllBrokers", deadline, new LeastLoadedNodeProvider()) { - @Override - MetadataRequest.Builder createRequest(int timeoutMs) { - return new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(Collections.emptyList()) - .setAllowAutoTopicCreation(true)); - } - - @Override - void handleResponse(AbstractResponse abstractResponse) { - MetadataResponse metadataResponse = (MetadataResponse) abstractResponse; - Collection nodes = metadataResponse.brokers(); - if (nodes.isEmpty()) - throw new StaleMetadataException("Metadata fetch failed due to missing broker list"); - - HashSet allNodes = new HashSet<>(nodes); - final ListShareGroupsResults results = new ListShareGroupsResults(allNodes, all); - - for (final Node node : allNodes) { - final long nowList = time.milliseconds(); - runnable.call(new Call("listShareGroups", deadline, new ConstantNodeIdProvider(node.id())) { - @Override - ListGroupsRequest.Builder createRequest(int timeoutMs) { - List states = options.states() - .stream() - .map(ShareGroupState::toString) - .collect(Collectors.toList()); - List types = Collections.singletonList(GroupType.SHARE.toString()); - return new ListGroupsRequest.Builder(new ListGroupsRequestData() - .setStatesFilter(states) - 
.setTypesFilter(types) - ); - } - - private void maybeAddShareGroup(ListGroupsResponseData.ListedGroup group) { - final String groupId = group.groupId(); - final Optional state = group.groupState().isEmpty() - ? Optional.empty() - : Optional.of(ShareGroupState.parse(group.groupState())); - final ShareGroupListing groupListing = new ShareGroupListing(groupId, state); - results.addListing(groupListing); - } - - @Override - void handleResponse(AbstractResponse abstractResponse) { - final ListGroupsResponse response = (ListGroupsResponse) abstractResponse; - synchronized (results) { - Errors error = Errors.forCode(response.data().errorCode()); - if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.COORDINATOR_NOT_AVAILABLE) { - throw error.exception(); - } else if (error != Errors.NONE) { - results.addError(error.exception(), node); - } else { - for (ListGroupsResponseData.ListedGroup group : response.data().groups()) { - maybeAddShareGroup(group); - } - } - results.tryComplete(node); - } - } - - @Override - void handleFailure(Throwable throwable) { - synchronized (results) { - results.addError(throwable, node); - results.tryComplete(node); - } - } - }, nowList); - } - } - - @Override - void handleFailure(Throwable throwable) { - KafkaException exception = new KafkaException("Failed to find brokers to send ListGroups", throwable); - all.complete(Collections.singletonList(exception)); - } - }, nowMetadata); - - return new ListShareGroupsResult(all); + public ListShareGroupOffsetsResult listShareGroupOffsets(final Map groupSpecs, + final ListShareGroupOffsetsOptions options) { + // To-do + throw new InvalidRequestException("The method is not yet implemented"); } @Override @@ -4356,8 +4196,8 @@ public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets( @Override public ListOffsetsResult listOffsets(Map topicPartitionOffsets, ListOffsetsOptions options) { - AdminApiFuture.SimpleAdminApiFuture future = - ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet()); + PartitionLeaderStrategy.PartitionLeaderFuture future = + ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet(), partitionLeaderCache); Map offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue()))); ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext, defaultApiTimeoutMs); @@ -4902,8 +4742,8 @@ void handleFailure(Throwable throwable) { @Override public DescribeProducersResult describeProducers(Collection topicPartitions, DescribeProducersOptions options) { - AdminApiFuture.SimpleAdminApiFuture future = - DescribeProducersHandler.newFuture(topicPartitions); + PartitionLeaderStrategy.PartitionLeaderFuture future = + DescribeProducersHandler.newFuture(topicPartitions, partitionLeaderCache); DescribeProducersHandler handler = new DescribeProducersHandler(options, logContext); invokeDriver(handler, future, options.timeoutMs); return new DescribeProducersResult(future.all()); @@ -4920,8 +4760,8 @@ public DescribeTransactionsResult describeTransactions(Collection transa @Override public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options) { - AdminApiFuture.SimpleAdminApiFuture future = - AbortTransactionHandler.newFuture(Collections.singleton(spec.topicPartition())); + PartitionLeaderStrategy.PartitionLeaderFuture future = + AbortTransactionHandler.newFuture(Collections.singleton(spec.topicPartition()), partitionLeaderCache); 
AbortTransactionHandler handler = new AbortTransactionHandler(spec, logContext); invokeDriver(handler, future, options.timeoutMs); return new AbortTransactionResult(future.all()); @@ -4999,12 +4839,12 @@ AddRaftVoterRequest.Builder createRequest(int timeoutMs) { setHost(endpoint.host()). setPort(endpoint.port()))); return new AddRaftVoterRequest.Builder( - new AddRaftVoterRequestData(). - setClusterId(options.clusterId().orElse(null)). - setTimeoutMs(timeoutMs). - setVoterId(voterId) . - setVoterDirectoryId(voterDirectoryId). - setListeners(listeners)); + new AddRaftVoterRequestData(). + setClusterId(options.clusterId().orElse(null)). + setTimeoutMs(timeoutMs). + setVoterId(voterId) . + setVoterDirectoryId(voterDirectoryId). + setListeners(listeners)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.java index 44d3a407327e1..936f4a82a9a6c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsOptions.java @@ -17,10 +17,8 @@ package org.apache.kafka.clients.admin; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.annotation.InterfaceStability; -import java.util.List; /** * Options for {@link Admin#listConsumerGroupOffsets(java.util.Map)} and {@link Admin#listConsumerGroupOffsets(String)}. @@ -30,26 +28,8 @@ @InterfaceStability.Evolving public class ListConsumerGroupOffsetsOptions extends AbstractOptions { - private List topicPartitions; private boolean requireStable = false; - /** - * Set the topic partitions to list as part of the result. - * {@code null} includes all topic partitions. - *
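With the deprecated ListConsumerGroupOffsetsOptions.topicPartitions(...) removed here, callers select partitions per group through the Map-based overload instead. A minimal sketch of that replacement pattern, assuming an existing Admin client named admin and illustrative group/topic names:

    ListConsumerGroupOffsetsSpec spec = new ListConsumerGroupOffsetsSpec()
        .topicPartitions(List.of(new TopicPartition("orders", 0)));
    Map<TopicPartition, OffsetAndMetadata> offsets = admin
        .listConsumerGroupOffsets(Map.of("orders-consumer", spec))
        .partitionsToOffsetAndMetadata("orders-consumer")
        .get();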
    - * @deprecated Since 3.3. - * Use {@link Admin#listConsumerGroupOffsets(java.util.Map, ListConsumerGroupOffsetsOptions)} - * to specify topic partitions. - * - * @param topicPartitions List of topic partitions to include - * @return This ListGroupOffsetsOptions - */ - @Deprecated - public ListConsumerGroupOffsetsOptions topicPartitions(List topicPartitions) { - this.topicPartitions = topicPartitions; - return this; - } - /** * Sets an optional requireStable flag. */ @@ -58,18 +38,6 @@ public ListConsumerGroupOffsetsOptions requireStable(final boolean requireStable return this; } - /** - * Returns a list of topic partitions to add as part of the result. - *
    - * @deprecated Since 3.3. - * Use {@link Admin#listConsumerGroupOffsets(java.util.Map, ListConsumerGroupOffsetsOptions)} - * to specify topic partitions. - */ - @Deprecated - public List topicPartitions() { - return topicPartitions; - } - public boolean requireStable() { return requireStable; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java index acbe3f00d600a..b63ad77c59ab4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java @@ -18,12 +18,14 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Collections; import java.util.HashSet; import java.util.Set; +import java.util.stream.Collectors; /** * Options for {@link Admin#listConsumerGroups()}. @@ -33,17 +35,30 @@ @InterfaceStability.Evolving public class ListConsumerGroupsOptions extends AbstractOptions { - private Set states = Collections.emptySet(); - + private Set groupStates = Collections.emptySet(); private Set types = Collections.emptySet(); + /** + * If groupStates is set, only groups in these states will be returned by listGroups(). + * Otherwise, all groups are returned. + * This operation is supported by brokers with version 2.6.0 or later. + */ + public ListConsumerGroupsOptions inGroupStates(Set groupStates) { + this.groupStates = (groupStates == null || groupStates.isEmpty()) ? Collections.emptySet() : Set.copyOf(groupStates); + return this; + } + /** * If states is set, only groups in these states will be returned by listConsumerGroups(). * Otherwise, all groups are returned. * This operation is supported by brokers with version 2.6.0 or later. + * @deprecated Since 4.0. Use {@link #inGroupStates(Set)} instead. */ + @Deprecated public ListConsumerGroupsOptions inStates(Set states) { - this.states = (states == null || states.isEmpty()) ? Collections.emptySet() : new HashSet<>(states); + this.groupStates = (states == null || states.isEmpty()) + ? Collections.emptySet() + : states.stream().map(state -> GroupState.parse(state.toString())).collect(Collectors.toSet()); return this; } @@ -56,11 +71,20 @@ public ListConsumerGroupsOptions withTypes(Set types) { return this; } + /** + * Returns the list of group states that are requested or empty if no states have been specified. + */ + public Set groupStates() { + return groupStates; + } + /** * Returns the list of States that are requested or empty if no states have been specified. + * @deprecated Since 4.0. Use {@link #inGroupStates(Set)} instead. 
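Since inStates(Set<ConsumerGroupState>) is now deprecated in favour of the GroupState-based filter added above, a short migration sketch may help; it assumes an existing Admin client named admin and that GroupState exposes the usual states such as STABLE and EMPTY:

    ListConsumerGroupsOptions options = new ListConsumerGroupsOptions()
        .inGroupStates(Set.of(GroupState.STABLE, GroupState.EMPTY));
    Collection<ConsumerGroupListing> stableOrEmpty =
        admin.listConsumerGroups(options).all().get();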
*/ + @Deprecated public Set states() { - return states; + return groupStates.stream().map(groupState -> ConsumerGroupState.parse(groupState.toString())).collect(Collectors.toSet()); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java index 042c88dc80f05..d1fa2c7b288c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.admin; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.annotation.InterfaceStability; @@ -31,8 +32,19 @@ @InterfaceStability.Evolving public class ListGroupsOptions extends AbstractOptions { + private Set groupStates = Collections.emptySet(); private Set types = Collections.emptySet(); + /** + * If groupStates is set, only groups in these states will be returned by listGroups(). + * Otherwise, all groups are returned. + * This operation is supported by brokers with version 2.6.0 or later. + */ + public ListGroupsOptions inGroupStates(Set groupStates) { + this.groupStates = (groupStates == null || groupStates.isEmpty()) ? Collections.emptySet() : Set.copyOf(groupStates); + return this; + } + /** * If types is set, only groups of these types will be returned by listGroups(). * Otherwise, all groups are returned. @@ -42,6 +54,13 @@ public ListGroupsOptions withTypes(Set types) { return this; } + /** + * Returns the list of group states that are requested or empty if no states have been specified. + */ + public Set groupStates() { + return groupStates; + } + /** * Returns the list of group types that are requested or empty if no types have been specified. */ diff --git a/server-common/src/main/java/org/apache/kafka/security/CipherParamsEncoder.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.java similarity index 67% rename from server-common/src/main/java/org/apache/kafka/security/CipherParamsEncoder.java rename to clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.java index d829e9885faee..bd740c24670db 100644 --- a/server-common/src/main/java/org/apache/kafka/security/CipherParamsEncoder.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsOptions.java @@ -14,16 +14,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.security; -import java.security.AlgorithmParameters; -import java.security.spec.AlgorithmParameterSpec; -import java.security.spec.InvalidParameterSpecException; -import java.util.Map; +package org.apache.kafka.clients.admin; -public interface CipherParamsEncoder { +import org.apache.kafka.common.annotation.InterfaceStability; - Map toMap(AlgorithmParameters cipher) throws InvalidParameterSpecException; +import java.util.Map; - AlgorithmParameterSpec toParameterSpec(Map paramMap); +/** + * Options for {@link Admin#listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)}. + *
    + * The API of this class is evolving, see {@link Admin} for details. + */ +@InterfaceStability.Evolving +public class ListShareGroupOffsetsOptions extends AbstractOptions { } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java new file mode 100644 index 0000000000000..8e28ec015370a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsResult.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.admin; + +import org.apache.kafka.clients.admin.internals.CoordinatorKey; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.annotation.InterfaceStability; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +/** + * The result of the {@link Admin#listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)} call. + *
    + * The API of this class is evolving, see {@link Admin} for details. + */ +@InterfaceStability.Evolving +public class ListShareGroupOffsetsResult { + + private final Map>> futures; + + ListShareGroupOffsetsResult(final Map>> futures) { + this.futures = futures.entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey().idValue, Map.Entry::getValue)); + } + + /** + * Return the future when the requests for all groups succeed. + * + * @return - Future which yields all Map objects, if requests for all the groups succeed. + */ + public KafkaFuture>> all() { + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( + nil -> { + Map> offsets = new HashMap<>(futures.size()); + futures.forEach((groupId, future) -> { + try { + offsets.put(groupId, future.get()); + } catch (InterruptedException | ExecutionException e) { + // This should be unreachable, since the KafkaFuture#allOf already ensured + // that all the futures completed successfully. + throw new RuntimeException(e); + } + }); + return offsets; + }); + } + + /** + * @param groupId - The groupId for which the Map is needed + * @return - Future which yields a map of topic partitions to offsets for the specified group. + */ + public KafkaFuture> partitionsToOffset(String groupId) { + if (!futures.containsKey(groupId)) { + throw new IllegalArgumentException("Group ID not found: " + groupId); + } + return futures.get(groupId); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.java new file mode 100644 index 0000000000000..050781ad5569f --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupOffsetsSpec.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.annotation.InterfaceStability; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Specification of share group offsets to list using {@link Admin#listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)}. + *
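Taken together with ListShareGroupOffsetsOptions and ListShareGroupOffsetsResult above, the spec below is meant to be used roughly as follows. This is only a sketch of the target call pattern: the KafkaAdminClient stub earlier in this change still throws InvalidRequestException, the per-group future is assumed to yield a Map of TopicPartition to Long offset, and the group/topic names and the Admin client named admin are illustrative:

    ListShareGroupOffsetsSpec spec = new ListShareGroupOffsetsSpec()
        .topicPartitions(List.of(new TopicPartition("payments", 0)));
    ListShareGroupOffsetsResult result = admin.listShareGroupOffsets(
        Map.of("share-group-A", spec), new ListShareGroupOffsetsOptions());
    Map<TopicPartition, Long> offsets = result.partitionsToOffset("share-group-A").get();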
    + * The API of this class is evolving, see {@link Admin} for details. + */ +@InterfaceStability.Evolving +public class ListShareGroupOffsetsSpec { + + private Collection topicPartitions; + + /** + * Set the topic partitions whose offsets are to be listed for a share group. + */ + public ListShareGroupOffsetsSpec topicPartitions(Collection topicPartitions) { + this.topicPartitions = topicPartitions; + return this; + } + + /** + * Returns the topic partitions whose offsets are to be listed for a share group. + */ + public Collection topicPartitions() { + return topicPartitions == null ? List.of() : topicPartitions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ListShareGroupOffsetsSpec)) { + return false; + } + ListShareGroupOffsetsSpec that = (ListShareGroupOffsetsSpec) o; + return Objects.equals(topicPartitions, that.topicPartitions); + } + + @Override + public int hashCode() { + return Objects.hash(topicPartitions); + } + + @Override + public String toString() { + return "ListShareGroupOffsetsSpec(" + + "topicPartitions=" + (topicPartitions != null ? topicPartitions : "null") + + ')'; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsOptions.java deleted file mode 100644 index 61f0aa40eb2b4..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsOptions.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.ShareGroupState; -import org.apache.kafka.common.annotation.InterfaceStability; - -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -/** - * Options for {@link Admin#listShareGroups(ListShareGroupsOptions)}. - * - * The API of this class is evolving, see {@link Admin} for details. - */ -@InterfaceStability.Evolving -public class ListShareGroupsOptions extends AbstractOptions { - - private Set states = Collections.emptySet(); - - /** - * If states is set, only groups in these states will be returned. Otherwise, all groups are returned. - */ - public ListShareGroupsOptions inStates(Set states) { - this.states = (states == null) ? Collections.emptySet() : new HashSet<>(states); - return this; - } - - /** - * Return the list of States that are requested or empty if no states have been specified. 
- */ - public Set states() { - return states; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsResult.java deleted file mode 100644 index 70d46fbe01bf1..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListShareGroupsResult.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.KafkaFuture; -import org.apache.kafka.common.annotation.InterfaceStability; -import org.apache.kafka.common.internals.KafkaFutureImpl; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * The result of the {@link Admin#listShareGroups(ListShareGroupsOptions)} call. - *
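With ListShareGroupsOptions and ListShareGroupsResult deleted, the replacement is presumably the generic listGroups() API filtered by group type and, now, by GroupState. A hedged sketch, assuming ListGroupsResult and GroupListing mirror the consumer-group equivalents and an Admin client named admin:

    ListGroupsOptions options = new ListGroupsOptions()
        .withTypes(Set.of(GroupType.SHARE))
        .inGroupStates(Set.of(GroupState.STABLE));
    Collection<GroupListing> shareGroups = admin.listGroups(options).all().get();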
    - * The API of this class is evolving, see {@link Admin} for details. - */ -@InterfaceStability.Evolving -public class ListShareGroupsResult { - - private final KafkaFutureImpl> all; - private final KafkaFutureImpl> valid; - private final KafkaFutureImpl> errors; - - ListShareGroupsResult(KafkaFuture> future) { - this.all = new KafkaFutureImpl<>(); - this.valid = new KafkaFutureImpl<>(); - this.errors = new KafkaFutureImpl<>(); - future.thenApply(results -> { - ArrayList curErrors = new ArrayList<>(); - ArrayList curValid = new ArrayList<>(); - for (Object resultObject : results) { - if (resultObject instanceof Throwable) { - curErrors.add((Throwable) resultObject); - } else { - curValid.add((ShareGroupListing) resultObject); - } - } - if (!curErrors.isEmpty()) { - all.completeExceptionally(curErrors.get(0)); - } else { - all.complete(curValid); - } - valid.complete(curValid); - errors.complete(curErrors); - return null; - }); - } - - /** - * Returns a future that yields either an exception, or the full set of share group listings. - */ - public KafkaFuture> all() { - return all; - } - - /** - * Returns a future which yields just the valid listings. - */ - public KafkaFuture> valid() { - return valid; - } - - /** - * Returns a future which yields just the errors which occurred. - */ - public KafkaFuture> errors() { - return errors; - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java b/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java index 495ddb09745a1..ec30b83baf771 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.TopicPartition; import java.util.Collections; -import java.util.HashSet; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -36,8 +35,7 @@ public class MemberAssignment { * @param topicPartitions List of topic partitions */ public MemberAssignment(Set topicPartitions) { - this.topicPartitions = topicPartitions == null ? Collections.emptySet() : - Collections.unmodifiableSet(new HashSet<>(topicPartitions)); + this.topicPartitions = topicPartitions == null ? Collections.emptySet() : Set.copyOf(topicPartitions); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/MemberDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/MemberDescription.java index 5320b7f711dc7..e72a48d8a626c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/MemberDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/MemberDescription.java @@ -16,12 +16,14 @@ */ package org.apache.kafka.clients.admin; +import org.apache.kafka.common.GroupType; + import java.util.Collections; import java.util.Objects; import java.util.Optional; /** - * A detailed description of a single group instance in the cluster. + * A detailed description of a single group member in the cluster. 
*/ public class MemberDescription { private final String memberId; @@ -30,13 +32,18 @@ public class MemberDescription { private final String host; private final MemberAssignment assignment; private final Optional targetAssignment; + private final Optional memberEpoch; + private final Optional upgraded; - public MemberDescription(String memberId, + public MemberDescription( + String memberId, Optional groupInstanceId, String clientId, String host, MemberAssignment assignment, - Optional targetAssignment + Optional targetAssignment, + Optional memberEpoch, + Optional upgraded ) { this.memberId = memberId == null ? "" : memberId; this.groupInstanceId = groupInstanceId; @@ -45,8 +52,38 @@ public MemberDescription(String memberId, this.assignment = assignment == null ? new MemberAssignment(Collections.emptySet()) : assignment; this.targetAssignment = targetAssignment; + this.memberEpoch = memberEpoch; + this.upgraded = upgraded; } + /** + * @deprecated Since 4.0. Use {@link #MemberDescription(String, Optional, String, String, MemberAssignment, Optional, Optional, Optional)} instead. + */ + @Deprecated + public MemberDescription( + String memberId, + Optional groupInstanceId, + String clientId, + String host, + MemberAssignment assignment, + Optional targetAssignment + ) { + this( + memberId, + groupInstanceId, + clientId, + host, + assignment, + targetAssignment, + Optional.empty(), + Optional.empty() + ); + } + + /** + * @deprecated Since 4.0. Use {@link #MemberDescription(String, Optional, String, String, MemberAssignment, Optional, Optional, Optional)} instead. + */ + @Deprecated public MemberDescription( String memberId, Optional groupInstanceId, @@ -64,6 +101,10 @@ public MemberDescription( ); } + /** + * @deprecated Since 4.0. Use {@link #MemberDescription(String, Optional, String, String, MemberAssignment, Optional, Optional, Optional)} instead. + */ + @Deprecated public MemberDescription(String memberId, String clientId, String host, @@ -81,12 +122,14 @@ public boolean equals(Object o) { clientId.equals(that.clientId) && host.equals(that.host) && assignment.equals(that.assignment) && - targetAssignment.equals(that.targetAssignment); + targetAssignment.equals(that.targetAssignment) && + memberEpoch.equals(that.memberEpoch) && + upgraded.equals(that.upgraded); } @Override public int hashCode() { - return Objects.hash(memberId, groupInstanceId, clientId, host, assignment, targetAssignment); + return Objects.hash(memberId, groupInstanceId, clientId, host, assignment, targetAssignment, memberEpoch, upgraded); } /** @@ -131,6 +174,25 @@ public Optional targetAssignment() { return targetAssignment; } + /** + * The epoch of the group member. + * The optional is set to an integer if the member is in a {@link GroupType#CONSUMER} group, and to empty if it + * is in a {@link GroupType#CLASSIC} group. + */ + public Optional memberEpoch() { + return memberEpoch; + } + + /** + * The flag indicating whether a member within a {@link GroupType#CONSUMER} group uses the + * {@link GroupType#CONSUMER} protocol. + * The optional is set to true if it does, to false if it does not, and to empty if it is unknown or if the group + * is a {@link GroupType#CLASSIC} group. 
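A short sketch of how the two new MemberDescription fields might be read from a describe call; the group id is illustrative, admin is an existing Admin client, and per the javadoc both Optionals are empty for classic-protocol members:

    ConsumerGroupDescription description = admin.describeConsumerGroups(List.of("my-group"))
        .describedGroups().get("my-group").get();
    for (MemberDescription member : description.members()) {
        int epoch = member.memberEpoch().orElse(-1);
        boolean usesConsumerProtocol = member.upgraded().orElse(false);
        System.out.printf("%s epoch=%d upgraded=%b%n", member.consumerId(), epoch, usesConsumerProtocol);
    }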
+ */ + public Optional upgraded() { + return upgraded; + } + @Override public String toString() { return "(memberId=" + memberId + @@ -138,6 +200,9 @@ public String toString() { ", clientId=" + clientId + ", host=" + host + ", assignment=" + assignment + - ", targetAssignment=" + targetAssignment + ")"; + ", targetAssignment=" + targetAssignment + + ", memberEpoch=" + memberEpoch.orElse(null) + + ", upgraded=" + upgraded.orElse(null) + + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java index 02bce5e98c4e4..0a37c012cc79a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.admin; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -34,7 +32,7 @@ public class NewPartitionReassignment { public NewPartitionReassignment(List targetReplicas) { if (targetReplicas == null || targetReplicas.isEmpty()) throw new IllegalArgumentException("Cannot create a new partition reassignment without any replicas"); - this.targetReplicas = Collections.unmodifiableList(new ArrayList<>(targetReplicas)); + this.targetReplicas = List.copyOf(targetReplicas); } public List targetReplicas() { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java index aabb535c94901..0f1107c91c9be 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java @@ -66,6 +66,7 @@ public NewTopic(String name, Optional numPartitions, Optional re * @param name the topic name. * @param replicasAssignments a map from partition id to replica ids (i.e. broker ids). Although not enforced, it is * generally a good idea for all partitions to have the same number of replicas. + * The first replica will be treated as the preferred leader. 
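Since the javadoc above now states that the first replica of each assignment acts as the preferred leader, a small sketch of a manual assignment may be useful; the broker ids and topic name are illustrative and admin is an existing Admin client:

    Map<Integer, List<Integer>> replicasAssignments = Map.of(
        0, List.of(3, 1, 2),   // broker 3 is the preferred leader of partition 0
        1, List.of(1, 2, 3));  // broker 1 is the preferred leader of partition 1
    admin.createTopics(List.of(new NewTopic("clickstream", replicasAssignments))).all().get();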
*/ public NewTopic(String name, Map> replicasAssignments) { this.name = name; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java index 30d88e3119276..469c23428eb9b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java @@ -17,14 +17,14 @@ package org.apache.kafka.clients.admin; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.annotation.InterfaceStability; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -35,28 +35,35 @@ @InterfaceStability.Evolving public class ShareGroupDescription { private final String groupId; - private final Collection members; - private final ShareGroupState state; + private final Collection members; + private final GroupState groupState; private final Node coordinator; + private final int groupEpoch; + private final int targetAssignmentEpoch; private final Set authorizedOperations; public ShareGroupDescription(String groupId, - Collection members, - ShareGroupState state, - Node coordinator) { - this(groupId, members, state, coordinator, Collections.emptySet()); + Collection members, + GroupState groupState, + Node coordinator, + int groupEpoch, + int targetAssignmentEpoch) { + this(groupId, members, groupState, coordinator, groupEpoch, targetAssignmentEpoch, Collections.emptySet()); } public ShareGroupDescription(String groupId, - Collection members, - ShareGroupState state, + Collection members, + GroupState groupState, Node coordinator, + int groupEpoch, + int targetAssignmentEpoch, Set authorizedOperations) { this.groupId = groupId == null ? "" : groupId; - this.members = members == null ? Collections.emptyList() : - Collections.unmodifiableList(new ArrayList<>(members)); - this.state = state; + this.members = members == null ? Collections.emptyList() : List.copyOf(members); + this.groupState = groupState; this.coordinator = coordinator; + this.groupEpoch = groupEpoch; + this.targetAssignmentEpoch = targetAssignmentEpoch; this.authorizedOperations = authorizedOperations; } @@ -67,14 +74,16 @@ public boolean equals(final Object o) { final ShareGroupDescription that = (ShareGroupDescription) o; return Objects.equals(groupId, that.groupId) && Objects.equals(members, that.members) && - state == that.state && + groupState == that.groupState && Objects.equals(coordinator, that.coordinator) && + groupEpoch == that.groupEpoch && + targetAssignmentEpoch == that.targetAssignmentEpoch && Objects.equals(authorizedOperations, that.authorizedOperations); } @Override public int hashCode() { - return Objects.hash(groupId, members, state, coordinator, authorizedOperations); + return Objects.hash(groupId, members, groupState, coordinator, groupEpoch, targetAssignmentEpoch, authorizedOperations); } /** @@ -87,15 +96,15 @@ public String groupId() { /** * A list of the members of the share group. */ - public Collection members() { + public Collection members() { return members; } /** - * The share group state, or UNKNOWN if the state is too new for us to parse. + * The group state, or UNKNOWN if the state is too new for us to parse. 
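A sketch of reading the reshaped ShareGroupDescription, including the epoch accessors added below; it assumes DescribeShareGroupsResult exposes a per-group future map like describeConsumerGroups does, plus an illustrative group id and an Admin client named admin:

    ShareGroupDescription shareGroup = admin.describeShareGroups(List.of("share-group-A"))
        .describedGroups().get("share-group-A").get();
    System.out.printf("%s state=%s epoch=%d targetAssignmentEpoch=%d%n",
        shareGroup.groupId(), shareGroup.groupState(),
        shareGroup.groupEpoch(), shareGroup.targetAssignmentEpoch());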
*/ - public ShareGroupState state() { - return state; + public GroupState groupState() { + return groupState; } /** @@ -112,12 +121,28 @@ public Set authorizedOperations() { return authorizedOperations; } + /** + * The epoch of the share group. + */ + public int groupEpoch() { + return groupEpoch; + } + + /** + * The epoch of the target assignment. + */ + public int targetAssignmentEpoch() { + return targetAssignmentEpoch; + } + @Override public String toString() { return "(groupId=" + groupId + - ", members=" + members.stream().map(MemberDescription::toString).collect(Collectors.joining(",")) + - ", state=" + state + + ", members=" + members.stream().map(ShareMemberDescription::toString).collect(Collectors.joining(",")) + + ", groupState=" + groupState + ", coordinator=" + coordinator + + ", groupEpoch=" + groupEpoch + + ", targetAssignmentEpoch=" + targetAssignmentEpoch + ", authorizedOperations=" + authorizedOperations + ")"; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupListing.java deleted file mode 100644 index 05e605a82775c..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupListing.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.clients.admin; - -import org.apache.kafka.common.ShareGroupState; -import org.apache.kafka.common.annotation.InterfaceStability; - -import java.util.Objects; -import java.util.Optional; - -/** - * A listing of a share group in the cluster. - *
    - * The API of this class is evolving, see {@link Admin} for details. - */ -@InterfaceStability.Evolving -public class ShareGroupListing { - private final String groupId; - private final Optional state; - - /** - * Create an instance with the specified parameters. - * - * @param groupId Group Id - */ - public ShareGroupListing(String groupId) { - this(groupId, Optional.empty()); - } - - /** - * Create an instance with the specified parameters. - * - * @param groupId Group Id - * @param state The state of the share group - */ - public ShareGroupListing(String groupId, Optional state) { - this.groupId = groupId; - this.state = Objects.requireNonNull(state); - } - - /** - * The id of the share group. - */ - public String groupId() { - return groupId; - } - - /** - * The share group state. - */ - public Optional state() { - return state; - } - - @Override - public String toString() { - return "(" + - "groupId='" + groupId + '\'' + - ", state=" + state + - ')'; - } - - @Override - public int hashCode() { - return Objects.hash(groupId, state); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof ShareGroupListing)) return false; - ShareGroupListing that = (ShareGroupListing) o; - return Objects.equals(groupId, that.groupId) && - Objects.equals(state, that.state); - } -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberAssignment.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberAssignment.java new file mode 100644 index 0000000000000..de3be9d73f4dc --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberAssignment.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.annotation.InterfaceStability; + +import java.util.Collections; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * A description of the assignments of a specific share group member. + */ +@InterfaceStability.Evolving +public class ShareMemberAssignment { + private final Set topicPartitions; + + /** + * Creates an instance with the specified parameters. + * + * @param topicPartitions List of topic partitions + */ + public ShareMemberAssignment(Set topicPartitions) { + this.topicPartitions = topicPartitions == null ? 
Collections.emptySet() : Set.copyOf(topicPartitions); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ShareMemberAssignment that = (ShareMemberAssignment) o; + + return Objects.equals(topicPartitions, that.topicPartitions); + } + + @Override + public int hashCode() { + return topicPartitions != null ? topicPartitions.hashCode() : 0; + } + + /** + * The topic partitions assigned to a group member. + */ + public Set topicPartitions() { + return topicPartitions; + } + + @Override + public String toString() { + return "(topicPartitions=" + topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(",")) + ")"; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java new file mode 100644 index 0000000000000..5fb74d8b24276 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.annotation.InterfaceStability; + +import java.util.Collections; +import java.util.Objects; + +/** + * A detailed description of a single share group member in the cluster. + */ +@InterfaceStability.Evolving +public class ShareMemberDescription { + private final String memberId; + private final String clientId; + private final String host; + private final ShareMemberAssignment assignment; + private final int memberEpoch; + + public ShareMemberDescription( + String memberId, + String clientId, + String host, + ShareMemberAssignment assignment, + int memberEpoch + ) { + this.memberId = memberId == null ? "" : memberId; + this.clientId = clientId == null ? "" : clientId; + this.host = host == null ? "" : host; + this.assignment = assignment == null ? + new ShareMemberAssignment(Collections.emptySet()) : assignment; + this.memberEpoch = memberEpoch; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShareMemberDescription that = (ShareMemberDescription) o; + return memberId.equals(that.memberId) && + clientId.equals(that.clientId) && + host.equals(that.host) && + assignment.equals(that.assignment) && + memberEpoch == that.memberEpoch; + } + + @Override + public int hashCode() { + return Objects.hash(memberId, clientId, host, assignment, memberEpoch); + } + + /** + * The consumer id of the group member. + */ + public String consumerId() { + return memberId; + } + + /** + * The client id of the group member. 
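Continuing the sketch above, the accessors of the new ShareMemberDescription (clientId above, host, assignment and memberEpoch below) could be walked like this, under the same assumptions:

    for (ShareMemberDescription member : shareGroup.members()) {
        System.out.printf("%s on %s epoch=%d partitions=%s%n",
            member.consumerId(), member.host(), member.memberEpoch(),
            member.assignment().topicPartitions());
    }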
+ */ + public String clientId() { + return clientId; + } + + /** + * The host where the group member is running. + */ + public String host() { + return host; + } + + /** + * The assignment of the group member. + */ + public ShareMemberAssignment assignment() { + return assignment; + } + + /** + * The epoch of the group member. + */ + public int memberEpoch() { + return memberEpoch; + } + + @Override + public String toString() { + return "(memberId=" + memberId + + ", clientId=" + clientId + + ", host=" + host + + ", assignment=" + assignment + + ", memberEpoch=" + memberEpoch + + ")"; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/TopicListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/TopicListing.java index 42ceeff20bb93..f402fa298ae2d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/TopicListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/TopicListing.java @@ -27,20 +27,6 @@ public class TopicListing { private final Uuid topicId; private final boolean internal; - /** - * Create an instance with the specified parameters. - * - * @param name The topic name - * @param internal Whether the topic is internal to Kafka - * @deprecated Since 3.0 use {@link #TopicListing(String, Uuid, boolean)} instead - */ - @Deprecated - public TopicListing(String name, boolean internal) { - this.name = name; - this.internal = internal; - this.topicId = Uuid.ZERO_UUID; - } - /** * Create an instance with the specified parameters. * diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesOptions.java index d9a7be887802b..b4819ce77a318 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesOptions.java @@ -29,20 +29,10 @@ public class UpdateFeaturesOptions extends AbstractOptions { private boolean validateOnly = false; - @Deprecated - public boolean dryRun() { - return validateOnly; - } - public boolean validateOnly() { return validateOnly; } - @Deprecated - public UpdateFeaturesOptions dryRun(boolean dryRun) { - return validateOnly(dryRun); - } - public UpdateFeaturesOptions validateOnly(boolean validateOnly) { this.validateOnly = validateOnly; return this; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/UserScramCredentialsDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/UserScramCredentialsDescription.java index 97bc3588af6aa..03a713149be47 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/UserScramCredentialsDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/UserScramCredentialsDescription.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.admin; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -61,7 +59,7 @@ public String toString() { */ public UserScramCredentialsDescription(String name, List credentialInfos) { this.name = Objects.requireNonNull(name); - this.credentialInfos = Collections.unmodifiableList(new ArrayList<>(credentialInfos)); + this.credentialInfos = List.copyOf(credentialInfos); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AbortTransactionHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AbortTransactionHandler.java index 0f5f4781080b2..f0b6d28be6b3d 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AbortTransactionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AbortTransactionHandler.java @@ -34,6 +34,7 @@ import org.slf4j.Logger; import java.util.List; +import java.util.Map; import java.util.Set; import static java.util.Collections.singleton; @@ -53,10 +54,11 @@ public AbortTransactionHandler( this.lookupStrategy = new PartitionLeaderStrategy(logContext); } - public static AdminApiFuture.SimpleAdminApiFuture newFuture( - Set topicPartitions + public static PartitionLeaderStrategy.PartitionLeaderFuture newFuture( + Set topicPartitions, + Map partitionLeaderCache ) { - return AdminApiFuture.forKeys(topicPartitions); + return new PartitionLeaderStrategy.PartitionLeaderFuture<>(topicPartitions, partitionLeaderCache); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java index 92e724e74f5b9..6286f59ed7163 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java @@ -110,7 +110,13 @@ public AdminApiDriver( retryBackoffMaxMs, CommonClientConfigs.RETRY_BACKOFF_JITTER); this.log = logContext.logger(AdminApiDriver.class); - retryLookup(future.lookupKeys()); + + // For any lookup keys for which we do not have cached information, we will need to look up + // metadata. For all cached keys, they can proceed straight to the fulfillment map. + // Note that the cache is only used on the initial calls, and any errors that result + // in additional lookups use the full set of lookup keys. + retryLookup(future.uncachedLookupKeys()); + future.cachedKeyBrokerIdMapping().forEach((key, brokerId) -> fulfillmentMap.put(new FulfillmentScope(brokerId), key)); } /** @@ -333,7 +339,7 @@ private void collectRequests( } // Copy the keys to avoid exposing the underlying mutable set - Set copyKeys = Collections.unmodifiableSet(new HashSet<>(keys)); + Set copyKeys = Set.copyOf(keys); Collection> newRequests = buildRequest.apply(copyKeys, scope); if (newRequests.isEmpty()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiFuture.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiFuture.java index b0294d8616649..322d116a3dfcf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiFuture.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiFuture.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.internals.KafkaFutureImpl; +import java.util.Collections; import java.util.Map; import java.util.Set; import java.util.function.Function; @@ -37,6 +38,25 @@ public interface AdminApiFuture { */ Set lookupKeys(); + /** + * The set of request keys that do not have cached key-broker id mappings. If there + * is no cached key mapping, this will be the same as the lookup keys. + * Can be empty, but only if the cached key mapping is not empty. + */ + default Set uncachedLookupKeys() { + return lookupKeys(); + } + + /** + * The cached key-broker id mapping. For lookup strategies that do not make use of a + * cache of metadata, this will be empty. 
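To illustrate the contract of uncachedLookupKeys() and cachedKeyBrokerIdMapping() used by the driver change above, here is a small, deliberately non-Kafka sketch of the split being described: keys whose leader broker is already cached bypass the metadata lookup stage, while the rest are looked up as before (java.util and java.util.stream.Collectors imports assumed):

    // Not Kafka code: partitions the lookup keys by whether a leader broker id is cached.
    static <K> Map<Boolean, Set<K>> splitByLeaderCache(Set<K> lookupKeys, Map<K, Integer> leaderCache) {
        return lookupKeys.stream()
            .collect(Collectors.partitioningBy(leaderCache::containsKey, Collectors.toSet()));
    }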
+ * + * @return mapping of keys to broker ids + */ + default Map cachedKeyBrokerIdMapping() { + return Collections.emptyMap(); + } + /** * Complete the futures associated with the given keys. * diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java index 90b237aa749eb..0ac5419991e1b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java @@ -23,11 +23,10 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.AuthenticationException; -import org.apache.kafka.common.errors.MismatchedEndpointTypeException; -import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestHeader; +import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; @@ -83,6 +82,15 @@ public class AdminMetadataManager { */ private long lastMetadataFetchAttemptMs = 0; + /** + * The time in wall-clock milliseconds when we started attempts to fetch metadata. If empty, + * metadata has not been requested. This is the start time based on which rebootstrap is + * triggered if metadata is not obtained for the configured rebootstrap trigger interval. + * Set to Optional.of(0L) to force rebootstrap immediately. + */ + private Optional metadataAttemptStartMs = Optional.empty(); + + /** * The current cluster information. */ @@ -130,6 +138,16 @@ public void handleSuccessfulResponse(RequestHeader requestHeader, long now, Meta // Do nothing } + @Override + public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) { + return AdminMetadataManager.this.needsRebootstrap(now, rebootstrapTriggerMs); + } + + @Override + public void rebootstrap(long now) { + AdminMetadataManager.this.rebootstrap(now); + } + @Override public void close() { } @@ -240,35 +258,39 @@ private long delayBeforeNextAttemptMs(long now) { return Math.max(0, refreshBackoffMs - timeSinceAttempt); } + public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) { + return metadataAttemptStartMs.filter(startMs -> now - startMs > rebootstrapTriggerMs).isPresent(); + } + /** * Transition into the UPDATE_PENDING state. Updates lastMetadataFetchAttemptMs. 
*/ public void transitionToUpdatePending(long now) { this.state = State.UPDATE_PENDING; this.lastMetadataFetchAttemptMs = now; + if (metadataAttemptStartMs.isEmpty()) + metadataAttemptStartMs = Optional.of(now); } public void updateFailed(Throwable exception) { // We depend on pending calls to request another metadata update this.state = State.QUIESCENT; - if (exception instanceof AuthenticationException) { - log.warn("Metadata update failed due to authentication error", exception); - this.fatalException = (ApiException) exception; - } else if (exception instanceof MismatchedEndpointTypeException) { - log.warn("Metadata update failed due to mismatched endpoint type error", exception); - this.fatalException = (ApiException) exception; - } else if (exception instanceof UnsupportedEndpointTypeException) { - log.warn("Metadata update failed due to unsupported endpoint type error", exception); - this.fatalException = (ApiException) exception; - } else if (exception instanceof UnsupportedVersionException) { - if (usingBootstrapControllers) { - log.warn("The remote node is not a CONTROLLER that supports the KIP-919 " + - "DESCRIBE_CLUSTER api.", exception); - } else { - log.warn("The remote node is not a BROKER that supports the METADATA api.", exception); + if (RequestUtils.isFatalException(exception)) { + log.warn("Fatal error during metadata update", exception); + // avoid unchecked/unconfirmed cast to ApiException + if (exception instanceof ApiException) { + this.fatalException = (ApiException) exception; + } + + if (exception instanceof UnsupportedVersionException) { + if (usingBootstrapControllers) { + log.warn("The remote node is not a CONTROLLER that supports the KIP-919 " + + "DESCRIBE_CLUSTER api.", exception); + } else { + log.warn("The remote node is not a BROKER that supports the METADATA api.", exception); + } } - this.fatalException = (ApiException) exception; } else { log.info("Metadata update failed", exception); } @@ -289,17 +311,23 @@ public void update(Cluster cluster, long now) { this.state = State.QUIESCENT; this.fatalException = null; + this.metadataAttemptStartMs = Optional.empty(); if (!cluster.nodes().isEmpty()) { this.cluster = cluster; } } + public void initiateRebootstrap() { + this.metadataAttemptStartMs = Optional.of(0L); + } + /** * Rebootstrap metadata with the cluster previously used for bootstrapping. 
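The rebootstrap bookkeeping above reduces to a single check on the caller's side; a minimal sketch, assuming a manager instance named metadataManager, a Time named time, and a trigger interval taken from client configuration (the value here is illustrative):

    long rebootstrapTriggerMs = 300_000L; // illustrative trigger interval
    long now = time.milliseconds();
    if (metadataManager.needsRebootstrap(now, rebootstrapTriggerMs)) {
        metadataManager.rebootstrap(now);
    }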
*/ public void rebootstrap(long now) { log.info("Rebootstrapping with {}", this.bootstrapCluster); update(bootstrapCluster, now); + this.metadataAttemptStartMs = Optional.of(now); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategy.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategy.java index 433d25e8e54ca..ed5f513e020c0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategy.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategy.java @@ -195,7 +195,7 @@ public KafkaFutureImpl>> all() { } private KafkaFutureImpl futureOrThrow(BrokerKey key) { - if (!key.brokerId.isPresent()) { + if (key.brokerId.isEmpty()) { throw new IllegalArgumentException("Attempt to complete with invalid key: " + key); } else { int brokerId = key.brokerId.getAsInt(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandler.java index 836f2bc2a5910..4afef617cb2e9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandler.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.admin.DeletedRecords; import org.apache.kafka.clients.admin.RecordsToDelete; -import org.apache.kafka.clients.admin.internals.AdminApiFuture.SimpleAdminApiFuture; import org.apache.kafka.clients.admin.internals.AdminApiHandler.Batched; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -72,10 +71,11 @@ public AdminApiLookupStrategy lookupStrategy() { return this.lookupStrategy; } - public static SimpleAdminApiFuture newFuture( - Collection topicPartitions + public static PartitionLeaderStrategy.PartitionLeaderFuture newFuture( + Collection topicPartitions, + Map partitionLeaderCache ) { - return AdminApiFuture.forKeys(new HashSet<>(topicPartitions)); + return new PartitionLeaderStrategy.PartitionLeaderFuture<>(new HashSet<>(topicPartitions), partitionLeaderCache); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeClassicGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeClassicGroupsHandler.java index 77c04c5d5f02e..686ee43a44b2b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeClassicGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeClassicGroupsHandler.java @@ -136,7 +136,10 @@ public ApiResult handleResponse( Optional.ofNullable(groupMember.groupInstanceId()), groupMember.clientId(), groupMember.clientHost(), - new MemberAssignment(partitions))); + new MemberAssignment(partitions), + Optional.empty(), + Optional.empty(), + Optional.empty())); }); final ClassicGroupDescription classicGroupDescription = diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java index 6233c4082e608..457675e92675a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandler.java @@ -21,7 +21,7 @@ import org.apache.kafka.clients.admin.MemberDescription; 
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; -import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -64,6 +64,7 @@ public class DescribeConsumerGroupsHandler implements AdminApiHandler lookupStrategy; private final Set useClassicGroupApi; + private final Map groupIdNotFoundErrorMessages; public DescribeConsumerGroupsHandler( boolean includeAuthorizedOperations, @@ -73,6 +74,7 @@ public DescribeConsumerGroupsHandler( this.log = logContext.logger(DescribeConsumerGroupsHandler.class); this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext); this.useClassicGroupApi = new HashSet<>(); + this.groupIdNotFoundErrorMessages = new HashMap<>(); } private static Set buildKeySet(Collection groupIds) { @@ -220,7 +222,9 @@ private ApiResult handledConsumerGroup groupMember.clientId(), groupMember.clientHost(), new MemberAssignment(convertAssignment(groupMember.assignment())), - Optional.of(new MemberAssignment(convertAssignment(groupMember.targetAssignment()))) + Optional.of(new MemberAssignment(convertAssignment(groupMember.targetAssignment()))), + Optional.of(groupMember.memberEpoch()), + groupMember.memberType() == -1 ? Optional.empty() : Optional.of(groupMember.memberType() == 1) )) ); @@ -231,9 +235,11 @@ private ApiResult handledConsumerGroup memberDescriptions, describedGroup.assignorName(), GroupType.CONSUMER, - ConsumerGroupState.parse(describedGroup.groupState()), + GroupState.parse(describedGroup.groupState()), coordinator, - authorizedOperations + authorizedOperations, + Optional.of(describedGroup.groupEpoch()), + Optional.of(describedGroup.assignmentEpoch()) ); completed.put(groupIdKey, consumerGroupDescription); } @@ -255,7 +261,7 @@ private ApiResult handledClassicGroupR handleError( groupIdKey, error, - null, + describedGroup.errorMessage(), failed, groupsToUnmap, false @@ -279,16 +285,21 @@ private ApiResult handledClassicGroupR Optional.ofNullable(groupMember.groupInstanceId()), groupMember.clientId(), groupMember.clientHost(), - new MemberAssignment(partitions))); + new MemberAssignment(partitions), + Optional.empty(), + Optional.empty(), + Optional.empty())); } final ConsumerGroupDescription consumerGroupDescription = new ConsumerGroupDescription(groupIdKey.idValue, protocolType.isEmpty(), memberDescriptions, describedGroup.protocolData(), GroupType.CLASSIC, - ConsumerGroupState.parse(describedGroup.groupState()), + GroupState.parse(describedGroup.groupState()), coordinator, - authorizedOperations); + authorizedOperations, + Optional.empty(), + Optional.empty()); completed.put(groupIdKey, consumerGroupDescription); } else { failed.put(groupIdKey, new IllegalArgumentException( @@ -354,11 +365,18 @@ private void handleError( case GROUP_ID_NOT_FOUND: if (isConsumerGroupResponse) { log.debug("`{}` request for group id {} failed because the group is not " + - "a new consumer group. Will retry with `DescribeGroups` API.", apiName, groupId.idValue); + "a new consumer group. Will retry with `DescribeGroups` API. {}", + apiName, groupId.idValue, errorMsg != null ? errorMsg : ""); useClassicGroupApi.add(groupId.idValue); + + // The error message from the ConsumerGroupDescribe API is more informative to the user + // than the error message from the classic group API. 
Capture it and use it if we get the + // same error code for the classic group API also. + groupIdNotFoundErrorMessages.put(groupId.idValue, errorMsg); } else { - log.error("`{}` request for group id {} failed because the group does not exist.", apiName, groupId.idValue); - failed.put(groupId, error.exception(errorMsg)); + log.debug("`{}` request for group id {} failed because the group does not exist. {}", + apiName, groupId.idValue, errorMsg != null ? errorMsg : ""); + failed.put(groupId, error.exception(groupIdNotFoundErrorMessages.getOrDefault(groupId.idValue, errorMsg))); } break; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeProducersHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeProducersHandler.java index e4b203545bdae..84338feb9e4c3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeProducersHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeProducersHandler.java @@ -66,10 +66,11 @@ public DescribeProducersHandler( } } - public static AdminApiFuture.SimpleAdminApiFuture newFuture( - Collection topicPartitions + public static PartitionLeaderStrategy.PartitionLeaderFuture newFuture( + Collection topicPartitions, + Map partitionLeaderCache ) { - return AdminApiFuture.forKeys(new HashSet<>(topicPartitions)); + return new PartitionLeaderStrategy.PartitionLeaderFuture<>(new HashSet<>(topicPartitions), partitionLeaderCache); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java index 80a112a5ed553..1c79225b0837c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java @@ -16,11 +16,11 @@ */ package org.apache.kafka.clients.admin.internals; -import org.apache.kafka.clients.admin.MemberAssignment; -import org.apache.kafka.clients.admin.MemberDescription; import org.apache.kafka.clients.admin.ShareGroupDescription; +import org.apache.kafka.clients.admin.ShareMemberAssignment; +import org.apache.kafka.clients.admin.ShareMemberDescription; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.message.ShareGroupDescribeRequestData; @@ -37,7 +37,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -114,23 +113,26 @@ public ApiResult handleResponse( continue; } - final List memberDescriptions = new ArrayList<>(describedGroup.members().size()); + final List memberDescriptions = new ArrayList<>(describedGroup.members().size()); final Set authorizedOperations = validAclOperations(describedGroup.authorizedOperations()); describedGroup.members().forEach(groupMember -> - memberDescriptions.add(new MemberDescription( + memberDescriptions.add(new ShareMemberDescription( groupMember.memberId(), groupMember.clientId(), groupMember.clientHost(), - new MemberAssignment(convertAssignment(groupMember.assignment())) + new ShareMemberAssignment(convertAssignment(groupMember.assignment())), + groupMember.memberEpoch() )) ); final 
ShareGroupDescription shareGroupDescription = new ShareGroupDescription(groupIdKey.idValue, memberDescriptions, - ShareGroupState.parse(describedGroup.groupState()), + GroupState.parse(describedGroup.groupState()), coordinator, + describedGroup.groupEpoch(), + describedGroup.assignmentEpoch(), authorizedOperations); completed.put(groupIdKey, shareGroupDescription); } @@ -177,17 +179,9 @@ private void handleError( break; case GROUP_ID_NOT_FOUND: - // In order to maintain compatibility with describeConsumerGroups, an unknown group ID is - // reported as a DEAD share group, and the admin client operation did not fail log.debug("`DescribeShareGroups` request for group id {} failed because the group does not exist. {}", groupId.idValue, errorMsg != null ? errorMsg : ""); - final ShareGroupDescription shareGroupDescription = - new ShareGroupDescription(groupId.idValue, - Collections.emptySet(), - ShareGroupState.DEAD, - coordinator, - validAclOperations(describedGroup.authorizedOperations())); - completed.put(groupId, shareGroupDescription); + failed.put(groupId, error.exception(errorMsg)); break; default: diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java index e36330a992432..f7c495d7fd8aa 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.admin.ListOffsetsOptions; import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; -import org.apache.kafka.clients.admin.internals.AdminApiFuture.SimpleAdminApiFuture; import org.apache.kafka.clients.admin.internals.AdminApiHandler.Batched; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -217,9 +216,10 @@ public Map handleUnsupportedVersionException( } } - public static SimpleAdminApiFuture newFuture( - Collection topicPartitions + public static PartitionLeaderStrategy.PartitionLeaderFuture newFuture( + Collection topicPartitions, + Map partitionLeaderCache ) { - return AdminApiFuture.forKeys(new HashSet<>(topicPartitions)); + return new PartitionLeaderStrategy.PartitionLeaderFuture<>(new HashSet<>(topicPartitions), partitionLeaderCache); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java index 56318fc0acc04..71b8e1a7c5607 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java @@ -124,7 +124,7 @@ private AllBrokersStrategy.BrokerKey requireSingleton( } AllBrokersStrategy.BrokerKey key = keys.iterator().next(); - if (!key.brokerId.isPresent() || key.brokerId.getAsInt() != brokerId) { + if (key.brokerId.isEmpty() || key.brokerId.getAsInt() != brokerId) { throw new IllegalArgumentException("Unexpected broker key: " + key); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java index 9d52327b3c4f4..ff7dff2db8e22 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java @@ -16,9 +16,11 @@ */ package org.apache.kafka.clients.admin.internals; +import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.message.MetadataRequestData; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.protocol.Errors; @@ -31,9 +33,11 @@ import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; /** * Base driver implementation for APIs which target partition leaders. @@ -195,4 +199,92 @@ public LookupResult handleResponse( return new LookupResult<>(failed, mapped); } + /** + * This subclass of {@link AdminApiFuture} starts with a pre-fetched map for keys to broker ids which can be + * used to optimise the request. The map is kept up to date as metadata is fetching as this request is processed. + * This is useful for situations in which {@link PartitionLeaderStrategy} is used + * repeatedly, such as a sequence of identical calls to + * {@link org.apache.kafka.clients.admin.Admin#listOffsets(Map, org.apache.kafka.clients.admin.ListOffsetsOptions)}. + */ + public static class PartitionLeaderFuture implements AdminApiFuture { + private final Set requestKeys; + private final Map partitionLeaderCache; + private final Map> futures; + + public PartitionLeaderFuture(Set requestKeys, Map partitionLeaderCache) { + this.requestKeys = requestKeys; + this.partitionLeaderCache = partitionLeaderCache; + this.futures = requestKeys.stream().collect(Collectors.toUnmodifiableMap( + Function.identity(), + k -> new KafkaFutureImpl<>() + )); + } + + @Override + public Set lookupKeys() { + return futures.keySet(); + } + + @Override + public Set uncachedLookupKeys() { + Set keys = new HashSet<>(); + requestKeys.forEach(tp -> { + if (!partitionLeaderCache.containsKey(tp)) { + keys.add(tp); + } + }); + return keys; + } + + @Override + public Map cachedKeyBrokerIdMapping() { + Map mapping = new HashMap<>(); + requestKeys.forEach(tp -> { + Integer brokerId = partitionLeaderCache.get(tp); + if (brokerId != null) { + mapping.put(tp, brokerId); + } + }); + return mapping; + } + + public Map> all() { + return futures; + } + + @Override + public void complete(Map values) { + values.forEach(this::complete); + } + + private void complete(TopicPartition key, V value) { + futureOrThrow(key).complete(value); + } + + @Override + public void completeLookup(Map brokerIdMapping) { + partitionLeaderCache.putAll(brokerIdMapping); + } + + @Override + public void completeExceptionally(Map errors) { + errors.forEach(this::completeExceptionally); + } + + private void completeExceptionally(TopicPartition key, Throwable t) { + partitionLeaderCache.remove(key); + futureOrThrow(key).completeExceptionally(t); + } + + private KafkaFutureImpl futureOrThrow(TopicPartition key) { + // The below typecast is safe because we initialise futures using only KafkaFutureImpl. 
+ KafkaFutureImpl future = (KafkaFutureImpl) futures.get(key); + if (future == null) { + throw new IllegalArgumentException("Attempt to complete future for " + key + + ", which was not requested"); + } else { + return future; + } + } + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java index 4201395578390..2c8376e5ccd8a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java @@ -73,6 +73,16 @@ public interface Consumer extends Closeable { */ void subscribe(Pattern pattern); + /** + * @see KafkaConsumer#subscribe(SubscriptionPattern, ConsumerRebalanceListener) + */ + void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener callback); + + /** + * @see KafkaConsumer#subscribe(SubscriptionPattern) + */ + void subscribe(SubscriptionPattern pattern); + /** * @see KafkaConsumer#unsubscribe() */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index cee249c280fce..b0b3927407593 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.ClientDnsLookup; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.MetadataRecoveryStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; @@ -34,7 +35,7 @@ import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.Utils; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -60,13 +61,12 @@ public class ConsumerConfig extends AbstractConfig { // a list contains all the assignor names that only assign subscribed topics to consumer. Should be updated when new assignor added. // This is to help optimize ConsumerCoordinator#performAssignment method - public static final List ASSIGN_FROM_SUBSCRIBED_ASSIGNORS = - Collections.unmodifiableList(Arrays.asList( - RANGE_ASSIGNOR_NAME, - ROUNDROBIN_ASSIGNOR_NAME, - STICKY_ASSIGNOR_NAME, + public static final List ASSIGN_FROM_SUBSCRIBED_ASSIGNORS = List.of( + RANGE_ASSIGNOR_NAME, + ROUNDROBIN_ASSIGNOR_NAME, + STICKY_ASSIGNOR_NAME, COOPERATIVE_STICKY_ASSIGNOR_NAME - )); + ); /* * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS @@ -171,8 +171,10 @@ public class ConsumerConfig extends AbstractConfig { public static final String AUTO_OFFSET_RESET_CONFIG = "auto.offset.reset"; public static final String AUTO_OFFSET_RESET_DOC = "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server " + "(e.g. because that data has been deleted): " + - "
<ul><li>earliest: automatically reset the offset to the earliest offset" +
+ "<ul><li>earliest: automatically reset the offset to the earliest offset</li>" +
"<li>latest: automatically reset the offset to the latest offset</li>" +
+ "<li>by_duration:&lt;duration&gt;: automatically reset the offset to a configured &lt;duration&gt; from the current timestamp. &lt;duration&gt; must be specified in ISO8601 format (PnDTnHnMn.nS). " +
+ "Negative duration is not allowed.</li>" +
"<li>none: throw exception to the consumer if no previous offset is found for the consumer's group</li>" +
"<li>anything else: throw exception to the consumer.</li></ul>" +
"<p>
      Note that altering partition numbers while setting this config to latest may cause message delivery loss since " + @@ -380,6 +382,22 @@ public class ConsumerConfig extends AbstractConfig { private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); + /** + * A list of configuration keys not supported for CLASSIC protocol. + */ + private static final List CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS = Collections.singletonList( + GROUP_REMOTE_ASSIGNOR_CONFIG + ); + + /** + * A list of configuration keys not supported for CONSUMER protocol. + */ + private static final List CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS = List.of( + PARTITION_ASSIGNMENT_STRATEGY_CONFIG, + HEARTBEAT_INTERVAL_MS_CONFIG, + SESSION_TIMEOUT_MS_CONFIG + ); + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, @@ -413,7 +431,7 @@ public class ConsumerConfig extends AbstractConfig { HEARTBEAT_INTERVAL_MS_DOC) .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, Type.LIST, - Arrays.asList(RangeAssignor.class, CooperativeStickyAssignor.class), + List.of(RangeAssignor.class, CooperativeStickyAssignor.class), new ConfigDef.NonNullValidator(), Importance.MEDIUM, PARTITION_ASSIGNMENT_STRATEGY_DOC) @@ -511,8 +529,8 @@ public class ConsumerConfig extends AbstractConfig { ENABLE_METRICS_PUSH_DOC) .define(AUTO_OFFSET_RESET_CONFIG, Type.STRING, - OffsetResetStrategy.LATEST.toString(), - in(Utils.enumOptions(OffsetResetStrategy.class)), + AutoOffsetResetStrategy.LATEST.name(), + new AutoOffsetResetStrategy.Validator(), Importance.MEDIUM, AUTO_OFFSET_RESET_DOC) .define(CHECK_CRCS_CONFIG, @@ -654,7 +672,14 @@ public class ConsumerConfig extends AbstractConfig { ConfigDef.CaseInsensitiveValidString .in(Utils.enumOptions(MetadataRecoveryStrategy.class)), Importance.LOW, - CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC); + CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC) + .define(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, + atLeast(0), + Importance.LOW, + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); + } @Override @@ -664,7 +689,7 @@ protected Map postProcessParsedConfig(final Map Map refinedConfigs = CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues); maybeOverrideClientId(refinedConfigs); maybeOverrideEnableAutoCommit(refinedConfigs); - checkGroupRemoteAssignor(); + checkUnsupportedConfigs(); return refinedConfigs; } @@ -702,7 +727,7 @@ private void maybeOverrideEnableAutoCommit(Map configs) { Optional groupId = Optional.ofNullable(getString(CommonClientConfigs.GROUP_ID_CONFIG)); Map originals = originals(); boolean enableAutoCommit = originals.containsKey(ENABLE_AUTO_COMMIT_CONFIG) ? 
getBoolean(ENABLE_AUTO_COMMIT_CONFIG) : false; - if (!groupId.isPresent()) { // overwrite in case of default group id where the config is not explicitly provided + if (groupId.isEmpty()) { // overwrite in case of default group id where the config is not explicitly provided if (!originals.containsKey(ENABLE_AUTO_COMMIT_CONFIG)) { configs.put(ENABLE_AUTO_COMMIT_CONFIG, false); } else if (enableAutoCommit) { @@ -711,9 +736,28 @@ private void maybeOverrideEnableAutoCommit(Map configs) { } } - private void checkGroupRemoteAssignor() { - if (getString(GROUP_PROTOCOL_CONFIG).equalsIgnoreCase(GroupProtocol.CLASSIC.name()) && getString(GROUP_REMOTE_ASSIGNOR_CONFIG) != null && !getString(GROUP_REMOTE_ASSIGNOR_CONFIG).isEmpty()) { - throw new ConfigException(GROUP_REMOTE_ASSIGNOR_CONFIG + " cannot be set when " + GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CLASSIC.name()); + private void checkUnsupportedConfigs() { + String groupProtocol = getString(GROUP_PROTOCOL_CONFIG); + if (GroupProtocol.CLASSIC.name().equalsIgnoreCase(groupProtocol)) { + checkUnsupportedConfigs(GroupProtocol.CLASSIC, CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS); + } else if (GroupProtocol.CONSUMER.name().equalsIgnoreCase(groupProtocol)) { + checkUnsupportedConfigs(GroupProtocol.CONSUMER, CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS); + } + } + + private void checkUnsupportedConfigs(GroupProtocol groupProtocol, List unsupportedConfigs) { + if (getString(GROUP_PROTOCOL_CONFIG).equalsIgnoreCase(groupProtocol.name())) { + List invalidConfigs = new ArrayList<>(); + unsupportedConfigs.forEach(configName -> { + Object config = originals().get(configName); + if (config != null && !Utils.isBlank(config.toString())) { + invalidConfigs.add(configName); + } + }); + if (!invalidConfigs.isEmpty()) { + throw new ConfigException(String.join(", ", invalidConfigs) + + " cannot be set when " + GROUP_PROTOCOL_CONFIG + "=" + groupProtocol.name()); + } } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java index f36f4cdc7b8ee..0cb3b8f967799 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java @@ -37,6 +37,9 @@ public class ConsumerRecords implements Iterable> { private final Map>> records; private final Map nextOffsets; + /** + * @deprecated Since 4.0. Use {@link #ConsumerRecords(Map, Map)} instead. 
+ */ @Deprecated public ConsumerRecords(Map>> records) { this(records, Map.of()); @@ -115,7 +118,7 @@ public ConcatenatedIterable(Iterable>> i @Override public Iterator> iterator() { - return new AbstractIterator>() { + return new AbstractIterator<>() { final Iterator>> iters = iterables.iterator(); Iterator> current; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 8ba5cff7c0874..8c9ea180f84fb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -29,6 +29,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidRegularExpression; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; @@ -69,8 +70,12 @@ *
<h3>Offsets and Consumer Position</h3>
      * Kafka maintains a numerical offset for each record in a partition. This offset acts as a unique identifier of * a record within that partition, and also denotes the position of the consumer in the partition. For example, a consumer - * which is at position 5 has consumed records with offsets 0 through 4 and will next receive the record with offset 5. There - * are actually two notions of position relevant to the user of the consumer: + * which is at position 5 has consumed records with offsets 0 through 4 and will next receive the record with offset 5. + * Note that offsets are not guaranteed to be consecutive (such as compacted topic or when records have been produced + * using transactions). For example, if the consumer did read a record with offset 4, but 5 is not an offset + * with a record, its position might advance to 6 (or higher) directly. Similarly, if the consumer's position is 5, + * but there is no record with offset 5, the consumer will return the record with the next higher offset. + * There are actually two notions of position relevant to the user of the consumer: *
<p>
      * The {@link #position(TopicPartition) position} of the consumer gives the offset of the next record that will be given * out. It will be one larger than the highest offset the consumer has seen in that partition. It automatically advances @@ -265,8 +270,7 @@ * for (ConsumerRecord<String, String> record : partitionRecords) { * System.out.println(record.offset() + ": " + record.value()); * } - * long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset(); - * consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1))); + * consumer.commitSync(Collections.singletonMap(partition, records.nextOffsets().get(partition))); * } * } * } finally { @@ -275,7 +279,10 @@ * * * Note: The committed offset should always be the offset of the next message that your application will read. - * Thus, when calling {@link #commitSync(Map) commitSync(offsets)} you should add one to the offset of the last message processed. + * Thus, when calling {@link #commitSync(Map) commitSync(offsets)} you should use {@code nextRecordToBeProcessed.offset()} + * or if {@link ConsumerRecords} is exhausted already {@link ConsumerRecords#nextOffsets()} instead. + * You should also add the leader epoch as commit metadata, which can be obtained from + * {@link ConsumerRecord#leaderEpoch()} or {@link ConsumerRecords#nextOffsets()}. * *
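The revised usage example above commits via the new ConsumerRecords#nextOffsets() accessor instead of adding one to the last processed offset by hand. A minimal sketch of that commit pattern follows; the bootstrap address, group id, topic name and processing step are illustrative and not taken from this patch:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class NextOffsetsCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // illustrative address
        props.put("group.id", "next-offsets-example");      // illustrative group id
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("my-topic"));         // illustrative topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (TopicPartition partition : records.partitions()) {
                    for (ConsumerRecord<String, String> record : records.records(partition)) {
                        System.out.println(record.offset() + ": " + record.value());
                    }
                    // nextOffsets() already holds the offset of the next record to consume,
                    // including the leader epoch, so no "+ 1" arithmetic is needed.
                    OffsetAndMetadata next = records.nextOffsets().get(partition);
                    consumer.commitSync(Map.of(partition, next));
                }
            }
        }
    }
}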
<h4>Manual Partition Assignment</h4>
      * @@ -755,6 +762,55 @@ public void subscribe(Pattern pattern) { delegate.subscribe(pattern); } + /** + * Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions. + * The pattern matching will be done periodically against all topics. This is only supported under the + * CONSUMER group protocol (see {@link ConsumerConfig#GROUP_PROTOCOL_CONFIG}). + *
<p>
      + * If the provided pattern is not compatible with Google RE2/J, an {@link InvalidRegularExpression} will be + * eventually thrown on a call to {@link #poll(Duration)} following this call to subscribe. + *
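A short usage sketch for the new RE2/J-based subscription overloads described above, assuming a consumer already configured with the CONSUMER group protocol and a broker that supports regex subscriptions; the pattern string and listener bodies are illustrative:

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.SubscriptionPattern;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;

public class Re2jSubscribeExample {
    public static void subscribeToOrders(KafkaConsumer<String, String> consumer) {
        // Validation of the regex happens broker-side; an incompatible pattern surfaces as an
        // InvalidRegularExpression from a later poll(), as the javadoc above explains.
        SubscriptionPattern pattern = new SubscriptionPattern("orders-.*");

        consumer.subscribe(pattern, new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // commit offsets or flush application state for the revoked partitions here
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // optionally seek to externally stored offsets for newly assigned partitions here
            }
        });
    }
}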
<p>
      + * See {@link #subscribe(Collection, ConsumerRebalanceListener)} for details on the + * use of the {@link ConsumerRebalanceListener}. Generally, rebalances are triggered when there + * is a change to the topics matching the provided pattern and when consumer group membership changes. + * Group rebalances only take place during an active call to {@link #poll(Duration)}. + * + * @param pattern Pattern to subscribe to, that must be compatible with Google RE2/J. + * @param listener Non-null listener instance to get notifications on partition assignment/revocation for the + * subscribed topics. + * @throws IllegalArgumentException If pattern is null or empty, or if the listener is null. + * @throws IllegalStateException If {@code subscribe()} is called previously with topics, or assign is called + * previously (without a subsequent call to {@link #unsubscribe()}). + */ + @Override + public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener listener) { + delegate.subscribe(pattern, listener); + } + + /** + * Subscribe to all topics matching the specified pattern, to get dynamically assigned partitions. + * The pattern matching will be done periodically against topics. This is only supported under the + * CONSUMER group protocol (see {@link ConsumerConfig#GROUP_PROTOCOL_CONFIG}) + *
<p>
      + * If the provided pattern is not compatible with Google RE2/J, an {@link InvalidRegularExpression} will be + * eventually thrown on a call to {@link #poll(Duration)} following this call to subscribe. + *
<p>
      + * This is a short-hand for {@link #subscribe(Pattern, ConsumerRebalanceListener)}, which + * uses a no-op listener. If you need the ability to seek to particular offsets, you should prefer + * {@link #subscribe(Pattern, ConsumerRebalanceListener)}, since group rebalances will cause partition offsets + * to be reset. You should also provide your own listener if you are doing your own offset + * management since the listener gives you an opportunity to commit offsets before a rebalance finishes. + * + * @param pattern Pattern to subscribe to, that must be compatible with Google RE2/J. + * @throws IllegalArgumentException If pattern is null or empty. + * @throws IllegalStateException If {@code subscribe()} is called previously with topics, or assign is called + * previously (without a subsequent call to {@link #unsubscribe()}). + */ + @Override + public void subscribe(SubscriptionPattern pattern) { + delegate.subscribe(pattern); + } + /** * Unsubscribe from topics currently subscribed with {@link #subscribe(Collection)} or {@link #subscribe(Pattern)}. * This also clears any partitions directly assigned through {@link #assign(Collection)}. @@ -828,7 +884,10 @@ public void assign(Collection partitions) { * @throws org.apache.kafka.common.errors.InvalidTopicException if the current subscription contains any invalid * topic (per {@link org.apache.kafka.common.internals.Topic#validate(String)}) * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the consumer attempts to fetch stable offsets - * when the broker doesn't support this feature + * when the broker doesn't support this feature. Also, if the consumer attempts to subscribe to a + * SubscriptionPattern via {@link #subscribe(SubscriptionPattern)} or + * {@link #subscribe(SubscriptionPattern, ConsumerRebalanceListener)} and the broker doesn't + * support this feature. * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer instance gets fenced by broker. */ @Override @@ -931,7 +990,10 @@ public void commitSync(Duration timeout) { * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API * should not be used. The committed offset should be the next message your application will consume, - * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used, + * i.e. {@code nextRecordToBeProcessed.offset()} (or {@link ConsumerRecords#nextOffsets()}). + * You should also add the leader epoch as commit metadata, which can be obtained from + * {@link ConsumerRecord#leaderEpoch()} or {@link ConsumerRecords#nextOffsets()}. + * If automatic group management with {@link #subscribe(Collection)} is used, * then the committed offsets must belong to the currently auto-assigned partitions. *
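The guidance above recommends committing the offset of the next record to process and attaching its leader epoch as commit metadata. A sketch of that for a single record, with error handling omitted (the class and method names are hypothetical):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

public class LeaderEpochCommitExample {
    public static void commitBeforeProcessing(KafkaConsumer<String, String> consumer,
                                              ConsumerRecord<String, String> nextRecordToBeProcessed) {
        TopicPartition tp = new TopicPartition(nextRecordToBeProcessed.topic(),
                                               nextRecordToBeProcessed.partition());
        OffsetAndMetadata offset = new OffsetAndMetadata(
                nextRecordToBeProcessed.offset(),       // offset of the next record to consume, no "+ 1"
                nextRecordToBeProcessed.leaderEpoch(),  // Optional<Integer> leader epoch as commit metadata
                "");                                    // no application metadata in this sketch
        consumer.commitSync(Map.of(tp, offset));
    }
}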
<p>
      * This is a synchronous commit and will block until either the commit succeeds or an unrecoverable error is @@ -980,7 +1042,10 @@ public void commitSync(final Map offsets) { * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API * should not be used. The committed offset should be the next message your application will consume, - * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used, + * i.e. {@code nextRecordToBeProcessed.offset()} (or {@link ConsumerRecords#nextOffsets()}). + * You should also add the leader epoch as commit metadata, which can be obtained from + * {@link ConsumerRecord#leaderEpoch()} or {@link ConsumerRecords#nextOffsets()}. + * If automatic group management with {@link #subscribe(Collection)} is used, * then the committed offsets must belong to the currently auto-assigned partitions. *
<p>
      * This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is @@ -1064,7 +1129,10 @@ public void commitAsync(OffsetCommitCallback callback) { * This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API * should not be used. The committed offset should be the next message your application will consume, - * i.e. lastProcessedMessageOffset + 1. If automatic group management with {@link #subscribe(Collection)} is used, + * i.e. {@code nextRecordToBeProcessed.offset()} (or {@link ConsumerRecords#nextOffsets()}). + * You should also add the leader epoch as commit metadata, which can be obtained from + * {@link ConsumerRecord#leaderEpoch()} or {@link ConsumerRecords#nextOffsets()}. + * If automatic group management with {@link #subscribe(Collection)} is used, * then the committed offsets must belong to the currently auto-assigned partitions. *
<p>
      * This is an asynchronous call and will not block. Any errors encountered are either passed to the callback diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java index dd2efb7057c88..19036875d11f2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java @@ -33,6 +33,7 @@ import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.LogContext; @@ -406,7 +407,7 @@ public KafkaShareConsumer(Map configs, keyDeserializer, valueDeserializer); } - public KafkaShareConsumer(ConsumerConfig config, + KafkaShareConsumer(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer) { delegate = CREATOR.create(config, keyDeserializer, valueDeserializer); @@ -645,6 +646,41 @@ public Uuid clientInstanceId(Duration timeout) { return delegate.metrics(); } + /** + * Add the provided application metric for subscription. This metric will be added to this client's metrics + * that are available for subscription and sent as telemetry data to the broker. + * The provided metric must map to an OTLP metric data point type in the OpenTelemetry v1 metrics protobuf message types. + * Specifically, the metric should be one of the following: + *
<ul>
+ * <li>
+ * Sum: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent.
+ * </li>
+ * <li>
+ * Gauge: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count.
+ * </li>
+ * </ul>
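As a sketch of how an application might register one of these metrics for telemetry, assuming the application owns an org.apache.kafka.common.metrics.Metrics registry and a share consumer; the metric name, group and measurement are illustrative, and the registry-based construction is an assumption rather than part of this patch:

import org.apache.kafka.clients.consumer.KafkaShareConsumer;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;

import java.util.concurrent.atomic.AtomicInteger;

public class TelemetryMetricExample {
    public static void registerQueueDepth(KafkaShareConsumer<String, String> consumer) {
        AtomicInteger queueDepth = new AtomicInteger();

        // Application-owned registry; its lifecycle is managed by the application, not the consumer.
        Metrics metrics = new Metrics();
        MetricName name = metrics.metricName("queue.depth", "app-metrics",
                "Current number of records waiting to be processed");

        // A non-monotonic current-value measurement, matching the Gauge case described above.
        Measurable currentDepth = (config, nowMs) -> queueDepth.get();
        metrics.addMetric(name, currentDepth);

        KafkaMetric metric = metrics.metric(name);
        consumer.registerMetricForSubscription(metric);
    }
}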
      + * Metrics not matching these types are silently ignored. Executing this method for a previously registered metric + * is a benign operation and results in updating that metric's entry. + * + * @param metric The application metric to register + */ + @Override + public void registerMetricForSubscription(KafkaMetric metric) { + delegate.registerMetricForSubscription(metric); + } + + /** + * Remove the provided application metric for subscription. This metric is removed from this client's metrics + * and will not be available for subscription any longer. Executing this method with a metric that has not been registered is a + * benign operation and does not result in any action taken (no-op). + * + * @param metric The application metric to remove + */ + @Override + public void unregisterMetricFromSubscription(KafkaMetric metric) { + delegate.unregisterMetricFromSubscription(metric); + } + /** * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. * This will commit acknowledgements if possible within the default timeout. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java index 3c29c749acfaa..b9e69806694dd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer; import org.apache.kafka.clients.Metadata; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; @@ -61,6 +62,7 @@ public class MockConsumer implements Consumer { private final SubscriptionState subscriptions; private final Map beginningOffsets; private final Map endOffsets; + private final Map durationResetOffsets; private final Map committed; private final Queue pollTasks; private final Set paused; @@ -79,7 +81,23 @@ public class MockConsumer implements Consumer { private final List addedMetrics = new ArrayList<>(); + /** + * @deprecated Since 4.0. Use {@link #MockConsumer(String)} instead. + */ + @Deprecated public MockConsumer(OffsetResetStrategy offsetResetStrategy) { + this(AutoOffsetResetStrategy.fromString(offsetResetStrategy.toString())); + } + + /** + * A mock consumer is instantiated by providing ConsumerConfig.AUTO_OFFSET_RESET_CONFIG value as the input. 
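A brief test-style sketch of the new string-based MockConsumer constructor that replaces the deprecated OffsetResetStrategy variant; the topic, offsets and record are illustrative:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.List;
import java.util.Map;

public class MockConsumerExample {
    public static void main(String[] args) {
        // Accepts the same values as ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
        // e.g. "earliest", "latest", "none" or "by_duration:<ISO8601 duration>".
        MockConsumer<String, String> consumer = new MockConsumer<>("earliest");

        TopicPartition tp = new TopicPartition("test-topic", 0);
        consumer.assign(List.of(tp));
        consumer.updateBeginningOffsets(Map.of(tp, 0L));
        consumer.addRecord(new ConsumerRecord<>("test-topic", 0, 0L, "key", "value"));

        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10));
        System.out.println("polled " + records.count() + " record(s)");
    }
}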
+ * @param offsetResetStrategy the offset reset strategy to use + */ + public MockConsumer(String offsetResetStrategy) { + this(AutoOffsetResetStrategy.fromString(offsetResetStrategy)); + } + + private MockConsumer(AutoOffsetResetStrategy offsetResetStrategy) { this.subscriptions = new SubscriptionState(new LogContext(), offsetResetStrategy); this.partitions = new HashMap<>(); this.records = new HashMap<>(); @@ -87,6 +105,7 @@ public MockConsumer(OffsetResetStrategy offsetResetStrategy) { this.closed = false; this.beginningOffsets = new HashMap<>(); this.endOffsets = new HashMap<>(); + this.durationResetOffsets = new HashMap<>(); this.pollTasks = new LinkedList<>(); this.pollException = null; this.wakeup = new AtomicBoolean(false); @@ -143,6 +162,26 @@ public synchronized void subscribe(Pattern pattern) { subscribe(pattern, Optional.empty()); } + @Override + public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener listener) { + if (listener == null) + throw new IllegalArgumentException("RebalanceListener cannot be null"); + subscribe(pattern, Optional.of(listener)); + } + + @Override + public void subscribe(SubscriptionPattern pattern) { + subscribe(pattern, Optional.empty()); + } + + private void subscribe(SubscriptionPattern pattern, Optional listener) { + if (pattern == null || pattern.toString().isEmpty()) + throw new IllegalArgumentException("Topic pattern cannot be " + (pattern == null ? "null" : "empty")); + ensureNotClosed(); + committed.clear(); + this.subscriptions.subscribe(pattern, listener); + } + @Override public void subscribe(Collection topics, final ConsumerRebalanceListener listener) { if (listener == null) @@ -275,14 +314,6 @@ public synchronized void addRecord(ConsumerRecord record) { recs.add(record); } - /** - * @deprecated Use {@link #setPollException(KafkaException)} instead - */ - @Deprecated - public synchronized void setException(KafkaException exception) { - setPollException(exception); - } - public synchronized void setPollException(KafkaException exception) { this.pollException = exception; } @@ -379,7 +410,7 @@ public synchronized long position(TopicPartition partition, final Duration timeo @Override public synchronized void seekToBeginning(Collection partitions) { ensureNotClosed(); - subscriptions.requestOffsetReset(partitions, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(partitions, AutoOffsetResetStrategy.EARLIEST); } public synchronized void updateBeginningOffsets(Map newOffsets) { @@ -389,13 +420,17 @@ public synchronized void updateBeginningOffsets(Map newOff @Override public synchronized void seekToEnd(Collection partitions) { ensureNotClosed(); - subscriptions.requestOffsetReset(partitions, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(partitions, AutoOffsetResetStrategy.LATEST); } public synchronized void updateEndOffsets(final Map newOffsets) { endOffsets.putAll(newOffsets); } + public synchronized void updateDurationOffsets(final Map newOffsets) { + durationResetOffsets.putAll(newOffsets); + } + public void disableTelemetry() { telemetryDisabled = true; } @@ -543,7 +578,7 @@ public synchronized void scheduleNopPollTask() { } public synchronized Set paused() { - return Collections.unmodifiableSet(new HashSet<>(paused)); + return Set.copyOf(paused); } private void ensureNotClosed() { @@ -563,16 +598,20 @@ private void updateFetchPosition(TopicPartition tp) { } private void resetOffsetPosition(TopicPartition tp) { - OffsetResetStrategy strategy = subscriptions.resetStrategy(tp); + 
AutoOffsetResetStrategy strategy = subscriptions.resetStrategy(tp); Long offset; - if (strategy == OffsetResetStrategy.EARLIEST) { + if (strategy == AutoOffsetResetStrategy.EARLIEST) { offset = beginningOffsets.get(tp); if (offset == null) throw new IllegalStateException("MockConsumer didn't have beginning offset specified, but tried to seek to beginning"); - } else if (strategy == OffsetResetStrategy.LATEST) { + } else if (strategy == AutoOffsetResetStrategy.LATEST) { offset = endOffsets.get(tp); if (offset == null) throw new IllegalStateException("MockConsumer didn't have end offset specified, but tried to seek to end"); + } else if (strategy.type() == AutoOffsetResetStrategy.StrategyType.BY_DURATION) { + offset = durationResetOffsets.get(tp); + if (offset == null) + throw new IllegalStateException("MockConsumer didn't have duration offset specified, but tried to seek to timestamp"); } else { throw new NoOffsetForPartitionException(tp); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java index bf8404f45597d..81cb2eeec0046 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.consumer; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; @@ -23,6 +24,7 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.utils.LogContext; import java.time.Duration; @@ -54,7 +56,7 @@ public class MockShareConsumer implements ShareConsumer { private Uuid clientInstanceId; public MockShareConsumer() { - this.subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + this.subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); this.records = new HashMap<>(); this.closed = false; this.wakeup = new AtomicBoolean(false); @@ -139,6 +141,14 @@ public synchronized Uuid clientInstanceId(Duration timeout) { return Collections.emptyMap(); } + @Override + public void registerMetricForSubscription(KafkaMetric metric) { + } + + @Override + public void unregisterMetricFromSubscription(KafkaMetric metric) { + } + @Override public synchronized void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java b/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java index 2890f8e333250..93b6094fcfcab 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/NoOffsetForPartitionException.java @@ -20,7 +20,6 @@ import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Set; /** @@ -40,7 +39,7 @@ public NoOffsetForPartitionException(TopicPartition partition) { public NoOffsetForPartitionException(Collection partitions) { super("Undefined offset with no reset policy for partitions: " + partitions); - this.partitions = Collections.unmodifiableSet(new 
HashSet<>(partitions)); + this.partitions = Set.copyOf(partitions); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java index 8b2297c96865e..42a9c268de8b4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java @@ -18,6 +18,10 @@ import java.util.Locale; +/** + * @deprecated Since 4.0. Use {@link org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy} instead. + */ +@Deprecated public enum OffsetResetStrategy { LATEST, EARLIEST, NONE; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java index 6bc064006fe6f..f5be90b712ee3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java @@ -269,7 +269,7 @@ public TopicAssignmentState(String topic, List partitionInfos, Li boolean racksMatch(String consumer, TopicPartition tp) { Optional consumerRack = consumers.get(consumer); Set replicaRacks = partitionRacks.get(tp); - return !consumerRack.isPresent() || (replicaRacks != null && replicaRacks.contains(consumerRack.get())); + return consumerRack.isEmpty() || (replicaRacks != null && replicaRacks.contains(consumerRack.get())); } int maxAssignable(String consumer) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java index 8ac4198c70df3..2d926b2cad547 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.annotation.InterfaceStability; +import org.apache.kafka.common.metrics.KafkaMetric; import java.io.Closeable; import java.time.Duration; @@ -88,7 +89,7 @@ public interface ShareConsumer extends Closeable { void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback); /** - * See {@link KafkaShareConsumer#clientInstanceId(Duration)}} + * @see KafkaShareConsumer#clientInstanceId(Duration) */ Uuid clientInstanceId(Duration timeout); @@ -97,6 +98,16 @@ public interface ShareConsumer extends Closeable { */ Map metrics(); + /** + * @see KafkaShareConsumer#registerMetricForSubscription(KafkaMetric) + */ + void registerMetricForSubscription(KafkaMetric metric); + + /** + * @see KafkaShareConsumer#unregisterMetricFromSubscription(KafkaMetric) + */ + void unregisterMetricFromSubscription(KafkaMetric metric); + /** * @see KafkaShareConsumer#close() */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/SubscriptionPattern.java b/clients/src/main/java/org/apache/kafka/clients/consumer/SubscriptionPattern.java new file mode 100644 index 0000000000000..d6e168b8da179 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/SubscriptionPattern.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +import java.util.Objects; + +/** + * Represents a regular expression compatible with Google RE2/J, used to subscribe to topics. + * This just keeps the String representation of the pattern, and all validations to ensure + * it is RE2/J compatible are delegated to the broker. + */ +public class SubscriptionPattern { + + /** + * String representation the regular expression, compatible with RE2/J. + */ + private final String pattern; + + public SubscriptionPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return Regular expression pattern compatible with RE2/J. + */ + public String pattern() { + return this.pattern; + } + + @Override + public String toString() { + return pattern; + } + + @Override + public int hashCode() { + return pattern.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof SubscriptionPattern && + Objects.equals(pattern, ((SubscriptionPattern) obj).pattern); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java index f177dad62b97c..9860d2f58901f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java @@ -568,7 +568,7 @@ private synchronized RequestFuture initiateJoinGroup() { if (lastRebalanceStartMs == -1L) lastRebalanceStartMs = time.milliseconds(); joinFuture = sendJoinGroupRequest(); - joinFuture.addListener(new RequestFutureListener() { + joinFuture.addListener(new RequestFutureListener<>() { @Override public void onSuccess(ByteBuffer value) { // do nothing since all the handler logic are in SyncGroupResponseHandler already @@ -1188,7 +1188,7 @@ public synchronized RequestFuture maybeLeaveGroup(String leaveReason) { } protected boolean isDynamicMember() { - return !rebalanceConfig.groupInstanceId.isPresent(); + return rebalanceConfig.groupInstanceId.isEmpty(); } private class LeaveGroupResponseHandler extends CoordinatorResponseHandler { @@ -1528,7 +1528,7 @@ public void run() { } else { heartbeat.sentHeartbeat(now); final RequestFuture heartbeatFuture = sendHeartbeatRequest(); - heartbeatFuture.addListener(new RequestFutureListener() { + heartbeatFuture.addListener(new RequestFutureListener<>() { @Override public void onSuccess(Void value) { synchronized (AbstractCoordinator.this) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java index 4802eb7a120d2..e3d4eb58af457 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java @@ -416,7 
+416,7 @@ protected Map prepareFetchRequests() Optional leaderOpt = position.currentLeader.leader; - if (!leaderOpt.isPresent()) { + if (leaderOpt.isEmpty()) { log.debug("Requesting metadata update for partition {} since the position {} is missing the current leader node", partition, position); metadata.requestUpdate(false); continue; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java index 55b4f65c848af..1f8ddc725b58d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java @@ -94,6 +94,10 @@ public abstract class AbstractHeartbeatRequestManager impl * requests in cases where a currently assigned topic is in the target assignment (new * partition assigned, or revoked), but it is not present the Metadata cache at that moment. * The cache is cleared when the subscription changes ({@link #transitionToJoining()}, the - * member fails ({@link #transitionToFatal()} or leaves the group ({@link #leaveGroup()}). + * member fails ({@link #transitionToFatal()} or leaves the group + * ({@link #leaveGroup()}/{@link #leaveGroupOnClose()}). */ private final Map assignedTopicNamesCache; @@ -157,9 +157,9 @@ public abstract class AbstractMembershipManager impl private boolean rejoinedWhileReconciliationInProgress; /** - * If the member is currently leaving the group after a call to {@link #leaveGroup()}}, this - * will have a future that will complete when the ongoing leave operation completes - * (callbacks executed and heartbeat request to leave is sent out). This will be empty is the + * If the member is currently leaving the group after a call to {@link #leaveGroup()} or + * {@link #leaveGroupOnClose()}, this will have a future that will complete when the ongoing leave operation + * completes (callbacks executed and heartbeat request to leave is sent out). This will be empty is the * member is not leaving. */ private Optional> leaveGroupInProgress = Optional.empty(); @@ -481,6 +481,7 @@ public void onConsumerPoll() { private void clearAssignment() { if (subscriptions.hasAutoAssignedPartitions()) { subscriptions.assignFromSubscribed(Collections.emptySet()); + notifyAssignmentChange(Collections.emptySet()); } currentAssignment = LocalAssignment.NONE; clearPendingAssignmentsAndLocalNamesCache(); @@ -496,8 +497,9 @@ private void clearAssignment() { */ private void updateSubscriptionAwaitingCallback(SortedSet assignedPartitions, SortedSet addedPartitions) { - Collection assignedTopicPartitions = toTopicPartitionSet(assignedPartitions); + Set assignedTopicPartitions = toTopicPartitionSet(assignedPartitions); subscriptions.assignFromSubscribedAwaitingCallback(assignedTopicPartitions, addedPartitions); + notifyAssignmentChange(assignedTopicPartitions); } /** @@ -523,18 +525,45 @@ public void transitionToJoining() { /** * Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed, * transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group. - * This is expected to be invoked when the user calls the unsubscribe API. + * This is expected to be invoked when the user calls the {@link Consumer#close()} API. + * + * @return Future that will complete when the heartbeat to leave the group has been sent out. 
+ */ + public CompletableFuture leaveGroupOnClose() { + return leaveGroup(false); + } + + /** + * Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed, + * transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group. + * This is expected to be invoked when the user calls the {@link Consumer#unsubscribe()} API. * * @return Future that will complete when the callback execution completes and the heartbeat * to leave the group has been sent out. */ public CompletableFuture leaveGroup() { + return leaveGroup(true); + } + + /** + * Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed, + * transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group. + * This is expected to be invoked when the user calls the unsubscribe API or is closing the consumer. + * + * @param runCallbacks {@code true} to insert the step to execute the {@link ConsumerRebalanceListener} callback, + * {@code false} to skip + * + * @return Future that will complete when the callback execution completes and the heartbeat + * to leave the group has been sent out. + */ + protected CompletableFuture leaveGroup(boolean runCallbacks) { if (isNotInGroup()) { if (state == MemberState.FENCED) { clearAssignment(); transitionTo(MemberState.UNSUBSCRIBED); } subscriptions.unsubscribe(); + notifyAssignmentChange(Collections.emptySet()); return CompletableFuture.completedFuture(null); } @@ -549,31 +578,39 @@ public CompletableFuture leaveGroup() { CompletableFuture leaveResult = new CompletableFuture<>(); leaveGroupInProgress = Optional.of(leaveResult); - CompletableFuture callbackResult = signalMemberLeavingGroup(); - callbackResult.whenComplete((result, error) -> { - if (error != null) { - log.error("Member {} callback to release assignment failed. It will proceed " + - "to clear its assignment and send a leave group heartbeat", memberId, error); - } else { - log.info("Member {} completed callback to release assignment. It will proceed " + - "to clear its assignment and send a leave group heartbeat", memberId); - } - - // Clear the subscription, no matter if the callback execution failed or succeeded. - subscriptions.unsubscribe(); - clearAssignment(); + if (runCallbacks) { + CompletableFuture callbackResult = signalMemberLeavingGroup(); + callbackResult.whenComplete((result, error) -> { + if (error != null) { + log.error("Member {} callback to release assignment failed. It will proceed " + + "to clear its assignment and send a leave group heartbeat", memberId, error); + } else { + log.info("Member {} completed callback to release assignment. It will proceed " + + "to clear its assignment and send a leave group heartbeat", memberId); + } - // Transition to ensure that a heartbeat request is sent out to effectively leave the - // group (even in the case where the member had no assignment to release or when the - // callback execution failed.) - transitionToSendingLeaveGroup(false); - }); + // Clear the assignment, no matter if the callback execution failed or succeeded. + clearAssignmentAndLeaveGroup(); + }); + } else { + clearAssignmentAndLeaveGroup(); + } // Return future to indicate that the leave group is done when the callbacks // complete, and the transition to send the heartbeat has been made. 
return leaveResult; } + private void clearAssignmentAndLeaveGroup() { + subscriptions.unsubscribe(); + clearAssignment(); + + // Transition to ensure that a heartbeat request is sent out to effectively leave the + // group (even in the case where the member had no assignment to release or when the + // callback execution failed.) + transitionToSendingLeaveGroup(false); + } + /** * Reset member epoch to the value required for the leave the group heartbeat request, and * transition to the {@link MemberState#LEAVING} state so that a heartbeat request is sent @@ -616,6 +653,15 @@ void notifyEpochChange(Optional epoch) { stateUpdatesListeners.forEach(stateListener -> stateListener.onMemberEpochUpdated(epoch, memberId)); } + /** + * Invokes the {@link MemberStateListener#onGroupAssignmentUpdated(Set)} callback for each listener when the + * set of assigned partitions changes. This includes on assignment changes, unsubscribe, and when leaving + * the group. + */ + void notifyAssignmentChange(Set partitions) { + stateUpdatesListeners.forEach(stateListener -> stateListener.onGroupAssignmentUpdated(partitions)); + } + /** * @return True if the member should send heartbeat to the coordinator without waiting for * the interval. @@ -805,6 +851,10 @@ void maybeReconcile() { revokedPartitions ); + // Mark partitions as pending revocation to stop fetching from the partitions (no new + // fetches sent out, and no in-flight fetches responses processed). + markPendingRevocationToPauseFetching(revokedPartitions); + // Commit offsets if auto-commit enabled before reconciling a new assignment. Request will // be retried until it succeeds, fails with non-retriable error, or timer expires. CompletableFuture commitResult; @@ -1125,10 +1175,16 @@ private CompletableFuture assignPartitions( // Invoke user call back. CompletableFuture result = signalPartitionsAssigned(addedPartitions); + // Enable newly added partitions to start fetching and updating positions for them. result.whenComplete((__, exception) -> { if (exception == null) { - // Enable newly added partitions to start fetching and updating positions for them. - subscriptions.enablePartitionsAwaitingCallback(addedPartitions); + // Enable assigned partitions to start fetching and updating positions for them. + // We use assignedPartitions here instead of addedPartitions because there's a chance that the callback + // might throw an exception, leaving addedPartitions empty. This would result in the poll operation + // returning no records, as no topic partitions are marked as fetchable. In contrast, with the classic consumer, + // if the first callback fails but the next one succeeds, polling can still retrieve data. To align with + // this behavior, we rely on assignedPartitions to avoid such scenarios. + subscriptions.enablePartitionsAwaitingCallback(toTopicPartitionSet(assignedPartitions)); } else { // Keeping newly added partitions as non-fetchable after the callback failure. 
// They will be retried on the next reconciliation loop, until it succeeds or the diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java index 966b44b59a6f5..4ac1513ede52d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java @@ -892,7 +892,7 @@ private List getUnassignedPartitions(List sorted List unassignedPartitions = new ArrayList<>(totalPartitionsCount - sortedAssignedPartitions.size()); - Collections.sort(sortedAssignedPartitions, Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition)); + sortedAssignedPartitions.sort(Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition)); boolean shouldAddDirectly = false; Iterator sortedAssignedPartitionsIter = sortedAssignedPartitions.iterator(); @@ -991,7 +991,7 @@ private class GeneralAssignmentBuilder extends AbstractAssignmentBuilder { currentPartitionConsumer.put(topicPartition, entry.getKey()); List sortedAllTopics = new ArrayList<>(topic2AllPotentialConsumers.keySet()); - Collections.sort(sortedAllTopics, new TopicComparator(topic2AllPotentialConsumers)); + sortedAllTopics.sort(new TopicComparator(topic2AllPotentialConsumers)); sortedAllPartitions = getAllTopicPartitions(sortedAllTopics); sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); @@ -1084,7 +1084,7 @@ private List getUnassignedPartitions(List sorted List unassignedPartitions = new ArrayList<>(); - Collections.sort(sortedAssignedPartitions, new PartitionComparator(topic2AllPotentialConsumers)); + sortedAssignedPartitions.sort(new PartitionComparator(topic2AllPotentialConsumers)); boolean shouldAddDirectly = false; Iterator sortedAssignedPartitionsIter = sortedAssignedPartitions.iterator(); @@ -1154,7 +1154,7 @@ private void prepopulateCurrentAssignments(Map DEFAULT_GENERATION) { + } else if (memberData.generation.isEmpty() && maxGeneration > DEFAULT_GENERATION) { // if maxGeneration is larger than DEFAULT_GENERATION // put all (no generation) partitions as DEFAULT_GENERATION into prevAssignment if needed updatePrevAssignment(prevAssignment, memberData.partitions, consumer, DEFAULT_GENERATION); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncClient.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncClient.java index d4265e72c04ab..05f04cd66592f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncClient.java @@ -37,7 +37,7 @@ public abstract class AsyncClient sendAsyncRequest(Node node, T1 requestData) { AbstractRequest.Builder requestBuilder = prepareRequest(node, requestData); - return client.send(node, requestBuilder).compose(new RequestFutureAdapter() { + return client.send(node, requestBuilder).compose(new RequestFutureAdapter<>() { @Override @SuppressWarnings("unchecked") public void onSuccess(ClientResponse value, RequestFuture future) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index 7fda9a20c0567..f5e12407be52e 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -33,7 +33,7 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.events.AllTopicsMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; @@ -51,20 +51,25 @@ import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.CreateFetchRequestsEvent; +import org.apache.kafka.clients.consumer.internals.events.CurrentLagEvent; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.clients.consumer.internals.events.EventProcessor; import org.apache.kafka.clients.consumer.internals.events.FetchCommittedOffsetsEvent; +import org.apache.kafka.clients.consumer.internals.events.LeaveGroupOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; +import org.apache.kafka.clients.consumer.internals.events.PausePartitionsEvent; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.events.ResetOffsetEvent; +import org.apache.kafka.clients.consumer.internals.events.ResumePartitionsEvent; import org.apache.kafka.clients.consumer.internals.events.SeekUnvalidatedEvent; import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.TopicPatternSubscriptionChangeEvent; +import org.apache.kafka.clients.consumer.internals.events.TopicRe2JPatternSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.TopicSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent; import org.apache.kafka.clients.consumer.internals.events.UpdatePatternSubscriptionEvent; -import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceCallbackMetricsManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; @@ -75,10 +80,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetric; import 
org.apache.kafka.common.metrics.Metrics; @@ -104,7 +110,6 @@ import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -112,6 +117,7 @@ import java.util.OptionalLong; import java.util.Set; import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; @@ -126,6 +132,7 @@ import java.util.stream.Collectors; import static java.util.Objects.requireNonNull; +import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; @@ -170,12 +177,6 @@ public class AsyncKafkaConsumer implements ConsumerDelegate { */ private class BackgroundEventProcessor implements EventProcessor { - private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker; - - public BackgroundEventProcessor(final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker) { - this.rebalanceListenerInvoker = rebalanceListenerInvoker; - } - @Override public void process(final BackgroundEvent event) { switch (event.type()) { @@ -214,10 +215,11 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private final ApplicationEventHandler applicationEventHandler; private final Time time; private final AtomicReference> groupMetadata = new AtomicReference<>(Optional.empty()); - private final KafkaConsumerMetrics kafkaConsumerMetrics; + private final AsyncConsumerMetrics kafkaConsumerMetrics; private Logger log; private final String clientId; private final BlockingQueue backgroundEventQueue; + private final BackgroundEventHandler backgroundEventHandler; private final BackgroundEventProcessor backgroundEventProcessor; private final CompletableEventReaper backgroundEventReaper; private final Deserializers deserializers; @@ -234,10 +236,16 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private final IsolationLevel isolationLevel; private final SubscriptionState subscriptions; + + /** + * This is a snapshot of the partitions assigned to this consumer. HOWEVER, this is only populated and used in + * the case where this consumer is in a consumer group. Self-assigned partitions do not appear here. + */ + private final AtomicReference> groupAssignmentSnapshot = new AtomicReference<>(Collections.emptySet()); private final ConsumerMetadata metadata; private final Metrics metrics; private final long retryBackoffMs; - private final int defaultApiTimeoutMs; + private final Duration defaultApiTimeoutMs; private final boolean autoCommitEnabled; private volatile boolean closed = false; // Init value is needed to avoid NPE in case of exception raised in the constructor @@ -247,6 +255,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private boolean cachedSubscriptionHasAllFetchPositions; private final WakeupTrigger wakeupTrigger = new WakeupTrigger(); private final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; + private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker; // Last triggered async commit future. 
Used to wait until all previous async commits are completed. // We only need to keep track of the last one, since they are guaranteed to complete in order. private CompletableFuture> lastPendingAsyncCommit = null; @@ -256,6 +265,18 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private final AtomicLong currentThread = new AtomicLong(NO_CURRENT_THREAD); private final AtomicInteger refCount = new AtomicInteger(0); + private final MemberStateListener memberStateListener = new MemberStateListener() { + @Override + public void onMemberEpochUpdated(Optional memberEpoch, String memberId) { + updateGroupMetadata(memberEpoch, memberId); + } + + @Override + public void onGroupAssignmentUpdated(Set partitions) { + setGroupAssignmentSnapshot(partitions); + } + }; + AsyncKafkaConsumer(final ConsumerConfig config, final Deserializer keyDeserializer, final Deserializer valueDeserializer) { @@ -294,12 +315,13 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.log = logContext.logger(getClass()); log.debug("Initializing the Kafka consumer"); - this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); + this.defaultApiTimeoutMs = Duration.ofMillis(config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)); this.time = time; List reporters = CommonClientConfigs.metricsReporters(clientId, config); this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); List> interceptorList = configuredConsumerInterceptors(config); @@ -319,7 +341,11 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { ApiVersions apiVersions = new ApiVersions(); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); + this.backgroundEventHandler = new BackgroundEventHandler( + backgroundEventQueue, + time, + kafkaConsumerMetrics + ); // This FetchBuffer is shared between the application and network threads. 
this.fetchBuffer = new FetchBuffer(logContext); @@ -331,7 +357,10 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { metrics, fetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - backgroundEventHandler); + backgroundEventHandler, + false, + kafkaConsumerMetrics + ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); final Supplier requestManagersSupplier = RequestManagers.supplier(time, @@ -348,7 +377,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { clientTelemetryReporter, metrics, offsetCommitCallbackInvoker, - this::updateGroupMetadata + memberStateListener ); final Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier(logContext, metadata, @@ -361,17 +390,17 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, - requestManagersSupplier); + requestManagersSupplier, + kafkaConsumerMetrics + ); - ConsumerRebalanceListenerInvoker rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( + this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, subscriptions, time, new RebalanceCallbackMetricsManager(metrics) ); - this.backgroundEventProcessor = new BackgroundEventProcessor( - rebalanceListenerInvoker - ); + this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); // The FetchCollector is only used on the application thread. 
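// Illustrative sketch (not additional patch content): the memberStateListener wired up above publishes
// the background thread's view of the assignment into groupAssignmentSnapshot, and close() later reads
// that snapshot on the application thread. A minimal, self-contained analogue of the hand-off, with a
// hypothetical class name; only the pattern is taken from the patch:

import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

class AssignmentSnapshotSketch {
    private final AtomicReference<Set<TopicPartition>> snapshot =
            new AtomicReference<>(Collections.emptySet());

    // Called from the background thread whenever the membership manager changes the assignment
    // (AbstractMembershipManager#notifyAssignmentChange in this patch).
    void onGroupAssignmentUpdated(Set<TopicPartition> partitions) {
        snapshot.set(Collections.unmodifiableSet(partitions));
    }

    // Read from the application thread during close() to decide which partitions to hand to
    // onPartitionsRevoked()/onPartitionsLost().
    Set<TopicPartition> assignedAtClose() {
        return snapshot.get();
    }
}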
@@ -383,8 +412,6 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { fetchMetricsManager, time); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); - if (groupMetadata.get().isPresent() && GroupProtocol.of(config.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)) == GroupProtocol.CONSUMER) { config.ignore(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG); // Used by background thread @@ -431,19 +458,25 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.interceptors = Objects.requireNonNull(interceptors); this.time = time; this.backgroundEventQueue = backgroundEventQueue; - this.backgroundEventProcessor = new BackgroundEventProcessor(rebalanceListenerInvoker); + this.rebalanceListenerInvoker = rebalanceListenerInvoker; + this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaper; this.metrics = metrics; this.groupMetadata.set(initializeGroupMetadata(groupId, Optional.empty())); this.metadata = metadata; this.retryBackoffMs = retryBackoffMs; - this.defaultApiTimeoutMs = defaultApiTimeoutMs; + this.defaultApiTimeoutMs = Duration.ofMillis(defaultApiTimeoutMs); this.deserializers = deserializers; this.applicationEventHandler = applicationEventHandler; - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, "consumer"); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); this.clientTelemetryReporter = Optional.empty(); this.autoCommitEnabled = autoCommitEnabled; this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); + this.backgroundEventHandler = new BackgroundEventHandler( + backgroundEventQueue, + time, + kafkaConsumerMetrics + ); } AsyncKafkaConsumer(LogContext logContext, @@ -465,7 +498,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.metrics = new Metrics(time); this.metadata = metadata; this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); - this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); + this.defaultApiTimeoutMs = Duration.ofMillis(config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)); this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); this.clientTelemetryReporter = Optional.empty(); @@ -478,7 +511,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { deserializers, fetchMetricsManager, time); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, "consumer"); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, @@ -489,8 +522,12 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); this.backgroundEventQueue = new LinkedBlockingQueue<>(); - BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); - ConsumerRebalanceListenerInvoker rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( + this.backgroundEventHandler = new BackgroundEventHandler( + backgroundEventQueue, + time, + kafkaConsumerMetrics + ); + this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, subscriptions, time, @@ -503,7 +540,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { logContext, client, metadata, - backgroundEventHandler + 
backgroundEventHandler, + false, + kafkaConsumerMetrics ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); Supplier requestManagersSupplier = RequestManagers.supplier( @@ -521,7 +560,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { clientTelemetryReporter, metrics, offsetCommitCallbackInvoker, - this::updateGroupMetadata + memberStateListener ); Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier( logContext, @@ -535,8 +574,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, - requestManagersSupplier); - this.backgroundEventProcessor = new BackgroundEventProcessor(rebalanceListenerInvoker); + requestManagersSupplier, + kafkaConsumerMetrics); + this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = new CompletableEventReaper(logContext); } @@ -550,7 +590,8 @@ ApplicationEventHandler build( final CompletableEventReaper applicationEventReaper, final Supplier applicationEventProcessorSupplier, final Supplier networkClientDelegateSupplier, - final Supplier requestManagersSupplier + final Supplier requestManagersSupplier, + final AsyncConsumerMetrics asyncConsumerMetrics ); } @@ -595,7 +636,7 @@ private Optional initializeGroupMetadata(final ConsumerCo groupRebalanceConfig.groupId, groupRebalanceConfig.groupInstanceId ); - if (!groupMetadata.isPresent()) { + if (groupMetadata.isEmpty()) { config.ignore(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG); config.ignore(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED); } @@ -639,19 +680,25 @@ private void updateGroupMetadata(final Optional memberEpoch, final Stri ); } + void setGroupAssignmentSnapshot(final Set partitions) { + groupAssignmentSnapshot.set(Collections.unmodifiableSet(partitions)); + } + @Override public void registerMetricForSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricChange(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric)); + } else { + log.debug("Skipping registration for metric {}. Existing consumer metrics cannot be overwritten.", metric.metricName()); } } @Override public void unregisterMetricFromSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricRemoval(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric)); + } else { + log.debug("Skipping unregistration for metric {}. 
Existing consumer metrics cannot be removed.", metric.metricName()); } } @@ -739,7 +786,7 @@ public ConsumerRecords poll(final Duration timeout) { */ @Override public void commitSync() { - commitSync(Duration.ofMillis(defaultApiTimeoutMs)); + commitSync(defaultApiTimeoutMs); } /** @@ -804,9 +851,12 @@ public void seek(TopicPartition partition, long offset) { acquireAndEnsureOpen(); try { log.info("Seeking to offset {} for partition {}", offset, partition); - Timer timer = time.timer(defaultApiTimeoutMs); SeekUnvalidatedEvent seekUnvalidatedEventEvent = new SeekUnvalidatedEvent( - calculateDeadlineMs(timer), partition, offset, Optional.empty()); + defaultApiTimeoutDeadlineMs(), + partition, + offset, + Optional.empty() + ); applicationEventHandler.addAndGet(seekUnvalidatedEventEvent); } finally { release(); @@ -829,10 +879,12 @@ public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) log.info("Seeking to offset {} for partition {}", offset, partition); } - Timer timer = time.timer(defaultApiTimeoutMs); - SeekUnvalidatedEvent seekUnvalidatedEventEvent = new SeekUnvalidatedEvent( - calculateDeadlineMs(timer), partition, offsetAndMetadata.offset(), offsetAndMetadata.leaderEpoch()); - applicationEventHandler.addAndGet(seekUnvalidatedEventEvent); + applicationEventHandler.addAndGet(new SeekUnvalidatedEvent( + defaultApiTimeoutDeadlineMs(), + partition, + offsetAndMetadata.offset(), + offsetAndMetadata.leaderEpoch() + )); } finally { release(); } @@ -840,23 +892,25 @@ public void seek(TopicPartition partition, OffsetAndMetadata offsetAndMetadata) @Override public void seekToBeginning(Collection partitions) { - seek(partitions, OffsetResetStrategy.EARLIEST); + seek(partitions, AutoOffsetResetStrategy.EARLIEST); } @Override public void seekToEnd(Collection partitions) { - seek(partitions, OffsetResetStrategy.LATEST); + seek(partitions, AutoOffsetResetStrategy.LATEST); } - private void seek(Collection partitions, OffsetResetStrategy offsetResetStrategy) { + private void seek(Collection partitions, AutoOffsetResetStrategy offsetResetStrategy) { if (partitions == null) throw new IllegalArgumentException("Partitions collection cannot be null"); acquireAndEnsureOpen(); try { - Timer timer = time.timer(defaultApiTimeoutMs); - ResetOffsetEvent event = new ResetOffsetEvent(partitions, offsetResetStrategy, calculateDeadlineMs(timer)); - applicationEventHandler.addAndGet(event); + applicationEventHandler.addAndGet(new ResetOffsetEvent( + partitions, + offsetResetStrategy, + defaultApiTimeoutDeadlineMs()) + ); } finally { release(); } @@ -864,7 +918,7 @@ private void seek(Collection partitions, OffsetResetStrategy off @Override public long position(TopicPartition partition) { - return position(partition, Duration.ofMillis(defaultApiTimeoutMs)); + return position(partition, defaultApiTimeoutMs); } @Override @@ -894,7 +948,7 @@ public long position(TopicPartition partition, Duration timeout) { @Override public Map committed(final Set partitions) { - return committed(partitions, Duration.ofMillis(defaultApiTimeoutMs)); + return committed(partitions, defaultApiTimeoutMs); } @Override @@ -928,7 +982,7 @@ public Map committed(final Set partitionsFor(String topic) { - return partitionsFor(topic, Duration.ofMillis(defaultApiTimeoutMs)); + return partitionsFor(topic, defaultApiTimeoutMs); } @Override @@ -974,7 +1028,7 @@ public List partitionsFor(String topic, Duration timeout) { @Override public Map> listTopics() { - return listTopics(Duration.ofMillis(defaultApiTimeoutMs)); + return 
listTopics(defaultApiTimeoutMs); } @Override @@ -1011,10 +1065,10 @@ public Set paused() { public void pause(Collection partitions) { acquireAndEnsureOpen(); try { - log.debug("Pausing partitions {}", partitions); - for (TopicPartition partition : partitions) { - subscriptions.pause(partition); - } + Objects.requireNonNull(partitions, "The partitions to pause must be nonnull"); + + if (!partitions.isEmpty()) + applicationEventHandler.addAndGet(new PausePartitionsEvent(partitions, defaultApiTimeoutDeadlineMs())); } finally { release(); } @@ -1024,10 +1078,10 @@ public void pause(Collection partitions) { public void resume(Collection partitions) { acquireAndEnsureOpen(); try { - log.debug("Resuming partitions {}", partitions); - for (TopicPartition partition : partitions) { - subscriptions.resume(partition); - } + Objects.requireNonNull(partitions, "The partitions to resume must be nonnull"); + + if (!partitions.isEmpty()) + applicationEventHandler.addAndGet(new ResumePartitionsEvent(partitions, defaultApiTimeoutDeadlineMs())); } finally { release(); } @@ -1035,7 +1089,7 @@ public void resume(Collection partitions) { @Override public Map offsetsForTimes(Map timestampsToSearch) { - return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs)); + return offsetsForTimes(timestampsToSearch, defaultApiTimeoutMs); } @Override @@ -1083,7 +1137,7 @@ public Map offsetsForTimes(Map beginningOffsets(Collection partitions) { - return beginningOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs)); + return beginningOffsets(partitions, defaultApiTimeoutMs); } @Override @@ -1093,7 +1147,7 @@ public Map beginningOffsets(Collection par @Override public Map endOffsets(Collection partitions) { - return endOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs)); + return endOffsets(partitions, defaultApiTimeoutMs); } @Override @@ -1149,25 +1203,11 @@ private Map beginningOrEndOffset(Collection + *
+ * <ul>
+ *     <li>
+ *         The execution of the {@link ConsumerRebalanceListener} callback (if applicable) must be performed on
+ *         the application thread to ensure it does not interfere with the network I/O on the background thread.
+ *     </li>
+ *     <li>
+ *         The {@link ConsumerRebalanceListener} callback execution must complete before an attempt to leave
+ *         the consumer group is performed. In this context, "complete" does not necessarily imply
+ *         success; execution is "complete" even if the execution fails with an error.
+ *     </li>
+ *     <li>
+ *         Any error thrown during the {@link ConsumerRebalanceListener} callback execution will be caught to
+ *         ensure it does not prevent execution of the remaining {@link #close()} logic.
+ *     </li>
+ *     <li>
+ *         The application thread will be blocked during the entire duration of the execution of the
+ *         {@link ConsumerRebalanceListener}. The consumer does not employ a mechanism to short-circuit the
+ *         callback execution, so execution is not bound by the timeout in {@link #close(Duration)}.
+ *     </li>
+ *     <li>
+ *         A given {@link ConsumerRebalanceListener} implementation may be affected by the application thread's
+ *         interrupt state. If the callback implementation performs any blocking operations, it may result in
+ *         an error. An implementation may choose to preemptively check the thread's interrupt flag via
+ *         {@link Thread#isInterrupted()} or {@link Thread#interrupted()} and alter its behavior.
+ *     </li>
+ *     <li>
+ *         If the application thread was interrupted prior to the execution of the
+ *         {@link ConsumerRebalanceListener} callback, the thread's interrupt state will be preserved for the
+ *         {@link ConsumerRebalanceListener} execution.
+ *     </li>
+ *     <li>
+ *         If the application thread was interrupted prior to the execution of the
+ *         {@link ConsumerRebalanceListener} callback but the callback cleared out the interrupt state,
+ *         the {@link #close()} method will not make any effort to restore the application thread's interrupt
+ *         state for the remainder of the execution of {@link #close()}.
+ *     </li>
+ *     <li>
+ *         Leaving the consumer group is achieved by issuing a "leave group" network request. The consumer will
+ *         attempt to leave the group on a "best-case" basis. There is no stated guarantee that the consumer will
+ *         have successfully left the group before the {@link #close()} method completes processing.
+ *     </li>
+ *     <li>
+ *         The consumer will attempt to leave the group regardless of the timeout elapsing or the application
+ *         thread receiving an {@link InterruptException} or {@link InterruptedException}.
+ *     </li>
+ *     <li>
+ *         The application thread will wait for confirmation that the consumer left the group until one of the
+ *         following occurs:
+ *
+ *         <ol>
+ *             <li>Confirmation that the "leave group" response was received from the group coordinator</li>
+ *             <li>The timeout provided by the user elapses</li>
+ *             <li>An {@link InterruptException} or {@link InterruptedException} is thrown</li>
+ *         </ol>
+ *     </li>
+ * </ul>
+ */ private void close(Duration timeout, boolean swallowException) { log.trace("Closing the Kafka consumer"); AtomicReference firstException = new AtomicReference<>(); @@ -1227,9 +1329,15 @@ private void close(Duration timeout, boolean swallowException) { clientTelemetryReporter.ifPresent(ClientTelemetryReporter::initiateClose); closeTimer.update(); // Prepare shutting down the network thread - swallow(log, Level.ERROR, "Failed to release assignment before closing consumer", - () -> releaseAssignmentAndLeaveGroup(closeTimer), firstException); - swallow(log, Level.ERROR, "Failed invoking asynchronous commit callback.", + // Prior to closing the network thread, we need to make sure the following operations happen in the right + // sequence... + swallow(log, Level.ERROR, "Failed to auto-commit offsets", + () -> autoCommitOnClose(closeTimer), firstException); + swallow(log, Level.ERROR, "Failed to release group assignment", + () -> runRebalanceCallbacksOnClose(closeTimer), firstException); + swallow(log, Level.ERROR, "Failed to leave group while closing consumer", + () -> leaveGroupOnClose(closeTimer), firstException); + swallow(log, Level.ERROR, "Failed invoking asynchronous commit callbacks while closing consumer", + () -> awaitPendingAsyncCommitsAndExecuteCommitCallbacks(closeTimer, false), firstException); if (applicationEventHandler != null) closeQuietly(() -> applicationEventHandler.close(Duration.ofMillis(closeTimer.remainingMs())), "Failed shutting down network thread", firstException); @@ -1257,32 +1365,56 @@ private void close(Duration timeout, boolean swallowException) { } } - /** - * Prior to closing the network thread, we need to make sure the following operations happen in the right sequence: - * 1. autocommit offsets - * 2. release assignment. This is done via a background unsubscribe event that will - * trigger the callbacks, clear the assignment on the subscription state and send the leave group request to the broker - */ - private void releaseAssignmentAndLeaveGroup(final Timer timer) { - if (!groupMetadata.get().isPresent()) + private void autoCommitOnClose(final Timer timer) { + if (groupMetadata.get().isEmpty()) return; if (autoCommitEnabled) commitSyncAllConsumed(timer); applicationEventHandler.add(new CommitOnCloseEvent()); + } + + private void runRebalanceCallbacksOnClose(final Timer timer) { + if (groupMetadata.get().isEmpty()) + return; + + int memberEpoch = groupMetadata.get().get().generationId(); + + Set assignedPartitions = groupAssignmentSnapshot.get(); + + if (assignedPartitions.isEmpty()) + // Nothing to revoke.
+ return; + + SortedSet droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + droppedPartitions.addAll(assignedPartitions); + + try { + final Exception error; + + if (memberEpoch > 0) + error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions); + else + error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions); + + if (error != null) + throw ConsumerUtils.maybeWrapAsKafkaException(error); + } finally { + timer.update(); + } + } - log.info("Releasing assignment and leaving group before closing consumer"); - UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(calculateDeadlineMs(timer)); - applicationEventHandler.add(unsubscribeEvent); + private void leaveGroupOnClose(final Timer timer) { + if (groupMetadata.get().isEmpty()) + return; + + log.debug("Leaving the consumer group during consumer close"); try { - // If users subscribe to an invalid topic name, they will get InvalidTopicException in error events, - // because network thread keeps trying to send MetadataRequest in the background. - // Ignore it to avoid unsubscribe failed. - processBackgroundEvents(unsubscribeEvent.future(), timer, e -> e instanceof InvalidTopicException); - log.info("Completed releasing assignment and sending leave group to close consumer"); + applicationEventHandler.addAndGet(new LeaveGroupOnCloseEvent(calculateDeadlineMs(timer))); + log.info("Completed leaving the group"); } catch (TimeoutException e) { - log.warn("Consumer triggered an unsubscribe event to leave the group but couldn't " + + log.warn("Consumer attempted to leave the group but couldn't " + "complete it within {} ms. It will proceed to close.", timer.timeoutMs()); } finally { timer.update(); @@ -1319,7 +1451,7 @@ public void commitSync(final Duration timeout) { @Override public void commitSync(Map offsets) { - commitSync(Optional.of(offsets), Duration.ofMillis(defaultApiTimeoutMs)); + commitSync(Optional.of(offsets), defaultApiTimeoutMs); } @Override @@ -1374,7 +1506,7 @@ private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, bool @Override public Uuid clientInstanceId(Duration timeout) { - if (!clientTelemetryReporter.isPresent()) { + if (clientTelemetryReporter.isEmpty()) { throw new IllegalStateException("Telemetry is not enabled. Set config `" + ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`."); } @@ -1440,9 +1572,11 @@ public void assign(Collection partitions) { // be no following rebalance. // // See the ApplicationEventProcessor.process() method that handles this event for more detail. - Timer timer = time.timer(defaultApiTimeoutMs); - AssignmentChangeEvent assignmentChangeEvent = new AssignmentChangeEvent(timer.currentTimeMs(), calculateDeadlineMs(timer), partitions); - applicationEventHandler.addAndGet(assignmentChangeEvent); + applicationEventHandler.addAndGet(new AssignmentChangeEvent( + time.milliseconds(), + defaultApiTimeoutDeadlineMs(), + partitions + )); } finally { release(); } @@ -1460,10 +1594,9 @@ public void unsubscribe() { subscriptions.assignedPartitions()); try { - // If users subscribe to an invalid topic name, they will get InvalidTopicException in error events, - // because network thread keeps trying to send MetadataRequest in the background. - // Ignore it to avoid unsubscribe failed. - processBackgroundEvents(unsubscribeEvent.future(), timer, e -> e instanceof InvalidTopicException); + // If users have fatal error, they will get some exceptions in the background queue. 
+ // When running unsubscribe, these exceptions should be ignored, or users can't unsubscribe successfully. + processBackgroundEvents(unsubscribeEvent.future(), timer, e -> e instanceof GroupAuthorizationException); log.info("Unsubscribed all topics or patterns and assigned partitions"); } catch (TimeoutException e) { log.error("Failed while waiting for the unsubscribe event to complete"); @@ -1648,12 +1781,14 @@ private void sendPrefetches(Timer timer) { @Override public boolean updateAssignmentMetadataIfNeeded(Timer timer) { offsetCommitCallbackInvoker.executeCallbacks(); - try { - applicationEventHandler.addAndGet(new UpdatePatternSubscriptionEvent(calculateDeadlineMs(timer))); - } catch (TimeoutException e) { - return false; - } finally { - timer.update(); + if (subscriptions.hasPatternSubscription()) { + try { + applicationEventHandler.addAndGet(new UpdatePatternSubscriptionEvent(calculateDeadlineMs(timer))); + } catch (TimeoutException e) { + return false; + } finally { + timer.update(); + } } processBackgroundEvents(); @@ -1678,6 +1813,18 @@ public void subscribe(Pattern pattern) { subscribeInternal(pattern, Optional.empty()); } + @Override + public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener listener) { + if (listener == null) + throw new IllegalArgumentException("RebalanceListener cannot be null"); + subscribeToRegex(pattern, Optional.of(listener)); + } + + @Override + public void subscribe(SubscriptionPattern pattern) { + subscribeToRegex(pattern, Optional.empty()); + } + @Override public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) { if (listener == null) @@ -1734,12 +1881,45 @@ private void subscribeInternal(Pattern pattern, Optional listener) { + acquireAndEnsureOpen(); + try { + maybeThrowInvalidGroupIdException(); + throwIfSubscriptionPatternIsInvalid(pattern); + log.info("Subscribing to regular expression {}", pattern); + applicationEventHandler.addAndGet(new TopicRe2JPatternSubscriptionChangeEvent( + pattern, + listener, + calculateDeadlineMs(time.timer(defaultApiTimeoutMs)))); + } finally { + release(); + } + } + + private void throwIfSubscriptionPatternIsInvalid(SubscriptionPattern subscriptionPattern) { + if (subscriptionPattern == null) { + throw new IllegalArgumentException("Topic pattern to subscribe to cannot be null"); + } + if (subscriptionPattern.pattern().isEmpty()) { + throw new IllegalArgumentException("Topic pattern to subscribe to cannot be empty"); + } + } + private void subscribeInternal(Collection topics, Optional listener) { acquireAndEnsureOpen(); try { @@ -1766,7 +1946,10 @@ private void subscribeInternal(Collection topics, Optional(topics), listener, calculateDeadlineMs(time.timer(defaultApiTimeoutMs)))); + new HashSet<>(topics), + listener, + defaultApiTimeoutDeadlineMs() + )); } } finally { release(); @@ -1778,25 +1961,30 @@ private void subscribeInternal(Collection topics, Optional firstError = new AtomicReference<>(); - LinkedList events = new LinkedList<>(); - backgroundEventQueue.drainTo(events); - - for (BackgroundEvent event : events) { - try { - if (event instanceof CompletableEvent) - backgroundEventReaper.add((CompletableEvent) event); - - backgroundEventProcessor.process(event); - } catch (Throwable t) { - KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); - - if (!firstError.compareAndSet(null, e)) - log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); + List events = backgroundEventHandler.drainEvents(); + if (!events.isEmpty()) { + long 
startMs = time.milliseconds(); + for (BackgroundEvent event : events) { + kafkaConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); + try { + if (event instanceof CompletableEvent) + backgroundEventReaper.add((CompletableEvent) event); + + backgroundEventProcessor.process(event); + } catch (Throwable t) { + KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); + + if (!firstError.compareAndSet(null, e)) + log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); + } } + kafkaConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); @@ -1885,23 +2073,27 @@ static ConsumerRebalanceListenerCallbackCompletedEvent invokeRebalanceCallbacks( ConsumerRebalanceListenerMethodName methodName, SortedSet partitions, CompletableFuture future) { - final Exception e; + Exception e; - switch (methodName) { - case ON_PARTITIONS_REVOKED: - e = rebalanceListenerInvoker.invokePartitionsRevoked(partitions); - break; + try { + switch (methodName) { + case ON_PARTITIONS_REVOKED: + e = rebalanceListenerInvoker.invokePartitionsRevoked(partitions); + break; - case ON_PARTITIONS_ASSIGNED: - e = rebalanceListenerInvoker.invokePartitionsAssigned(partitions); - break; + case ON_PARTITIONS_ASSIGNED: + e = rebalanceListenerInvoker.invokePartitionsAssigned(partitions); + break; - case ON_PARTITIONS_LOST: - e = rebalanceListenerInvoker.invokePartitionsLost(partitions); - break; + case ON_PARTITIONS_LOST: + e = rebalanceListenerInvoker.invokePartitionsLost(partitions); + break; - default: - throw new IllegalArgumentException("The method " + methodName.fullyQualifiedMethodName() + " to invoke was not expected"); + default: + throw new IllegalArgumentException("The method " + methodName.fullyQualifiedMethodName() + " to invoke was not expected"); + } + } catch (WakeupException | InterruptException ex) { + e = ex; } final Optional error; @@ -1925,7 +2117,7 @@ public Metrics metricsRegistry() { } @Override - public KafkaConsumerMetrics kafkaConsumerMetrics() { + public AsyncConsumerMetrics kafkaConsumerMetrics() { return kafkaConsumerMetrics; } @@ -1933,4 +2125,8 @@ public KafkaConsumerMetrics kafkaConsumerMetrics() { SubscriptionState subscriptions() { return subscriptions; } + + private long defaultApiTimeoutDeadlineMs() { + return calculateDeadlineMs(time, defaultApiTimeoutMs); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategy.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategy.java new file mode 100644 index 0000000000000..e904ca3d5d63a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategy.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.requests.ListOffsetsRequest; +import org.apache.kafka.common.utils.Utils; + +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.Locale; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +public class AutoOffsetResetStrategy { + public enum StrategyType { + LATEST, EARLIEST, NONE, BY_DURATION; + + @Override + public String toString() { + return super.toString().toLowerCase(Locale.ROOT); + } + } + + public static final AutoOffsetResetStrategy EARLIEST = new AutoOffsetResetStrategy(StrategyType.EARLIEST); + public static final AutoOffsetResetStrategy LATEST = new AutoOffsetResetStrategy(StrategyType.LATEST); + public static final AutoOffsetResetStrategy NONE = new AutoOffsetResetStrategy(StrategyType.NONE); + + private final StrategyType type; + private final Optional duration; + + private AutoOffsetResetStrategy(StrategyType type) { + this.type = type; + this.duration = Optional.empty(); + } + + private AutoOffsetResetStrategy(Duration duration) { + this.type = StrategyType.BY_DURATION; + this.duration = Optional.of(duration); + } + + /** + * Returns the AutoOffsetResetStrategy from the given string. + */ + public static AutoOffsetResetStrategy fromString(String offsetStrategy) { + if (offsetStrategy == null) { + throw new IllegalArgumentException("Auto offset reset strategy is null"); + } + + if (StrategyType.BY_DURATION.toString().equals(offsetStrategy)) { + throw new IllegalArgumentException("<:duration> part is missing in by_duration auto offset reset strategy."); + } + + if (Arrays.asList(Utils.enumOptions(StrategyType.class)).contains(offsetStrategy)) { + StrategyType type = StrategyType.valueOf(offsetStrategy.toUpperCase(Locale.ROOT)); + switch (type) { + case EARLIEST: + return EARLIEST; + case LATEST: + return LATEST; + case NONE: + return NONE; + default: + throw new IllegalArgumentException("Unknown auto offset reset strategy: " + offsetStrategy); + } + } + + if (offsetStrategy.startsWith(StrategyType.BY_DURATION + ":")) { + String isoDuration = offsetStrategy.substring(StrategyType.BY_DURATION.toString().length() + 1); + try { + Duration duration = Duration.parse(isoDuration); + if (duration.isNegative()) { + throw new IllegalArgumentException("Negative duration is not supported in by_duration offset reset strategy."); + } + return new AutoOffsetResetStrategy(duration); + } catch (Exception e) { + throw new IllegalArgumentException("Unable to parse duration string in by_duration offset reset strategy.", e); + } + } + + throw new IllegalArgumentException("Unknown auto offset reset strategy: " + offsetStrategy); + } + + /** + * Returns the offset reset strategy type. + */ + public StrategyType type() { + return type; + } + + /** + * Returns the name of the offset reset strategy. + */ + public String name() { + return type.toString(); + } + + /** + * Return the timestamp to be used for the ListOffsetsRequest. 
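// Illustrative usage of the new strategy class (sketch only, not additional patch content; the class
// lives in an internal package, so this is for understanding the accepted config formats rather than
// a supported public API):

import java.util.Optional;

class AutoOffsetResetStrategyExample {
    public static void main(String[] args) {
        // Named strategies parse to the shared constants.
        AutoOffsetResetStrategy earliest = AutoOffsetResetStrategy.fromString("earliest");
        System.out.println(earliest == AutoOffsetResetStrategy.EARLIEST);   // true

        // by_duration takes an ISO-8601 duration; timestamp() resolves to "now minus duration" in epoch millis.
        AutoOffsetResetStrategy lastWeek = AutoOffsetResetStrategy.fromString("by_duration:P7D");
        Optional<Long> resetTimestamp = lastWeek.timestamp();
        System.out.println(lastWeek + " -> " + resetTimestamp);

        // Malformed values are rejected, e.g. by_duration without the duration part.
        try {
            AutoOffsetResetStrategy.fromString("by_duration");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}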
+ * @return the timestamp for the OffsetResetStrategy, + * if the strategy is EARLIEST or LATEST or duration is provided + * else return Optional.empty() + */ + public Optional timestamp() { + if (type == StrategyType.EARLIEST) + return Optional.of(ListOffsetsRequest.EARLIEST_TIMESTAMP); + else if (type == StrategyType.LATEST) + return Optional.of(ListOffsetsRequest.LATEST_TIMESTAMP); + else if (type == StrategyType.BY_DURATION && duration.isPresent()) { + Instant now = Instant.now(); + return Optional.of(now.minus(duration.get()).toEpochMilli()); + } else + return Optional.empty(); + } + + public Optional duration() { + return duration; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AutoOffsetResetStrategy that = (AutoOffsetResetStrategy) o; + return type == that.type && Objects.equals(duration, that.duration); + } + + @Override + public int hashCode() { + return Objects.hash(type, duration); + } + + @Override + public String toString() { + return "AutoOffsetResetStrategy{" + + "type=" + type + + (duration.map(value -> ", duration=" + value).orElse("")) + + '}'; + } + + public static class Validator implements ConfigDef.Validator { + @Override + public void ensureValid(String name, Object value) { + String offsetStrategy = (String) value; + try { + fromString(offsetStrategy); + } catch (Exception e) { + throw new ConfigException(name, value, "Invalid value `" + offsetStrategy + "` for configuration " + + name + ". The value must be either 'earliest', 'latest', 'none' or of the format 'by_duration:'."); + } + } + + @Override + public String toString() { + String values = Arrays.stream(StrategyType.values()) + .map(strategyType -> { + if (strategyType == StrategyType.BY_DURATION) { + return "by_duration:PnDTnHnMn.nS"; + } + return strategyType.toString(); + }).collect(Collectors.joining(", ")); + return "[" + values + "]"; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java index 82a9bd2a53bfc..28b82f3b3adc3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java @@ -34,7 +34,7 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; @@ -211,7 +211,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { ); // no coordinator will be constructed for the default (null) group id - if (!groupId.isPresent()) { + if (groupId.isEmpty()) { config.ignore(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG); config.ignore(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED); this.coordinator = null; @@ -413,7 +413,7 @@ public Set assignment() { public Set subscription() { acquireAndEnsureOpen(); try { - return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); + return Set.copyOf(this.subscriptions.subscription()); } finally { release(); } @@ -430,17 +430,19 @@ 
public void subscribe(Collection topics, ConsumerRebalanceListener liste @Override public void registerMetricForSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricChange(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric)); + } else { + log.debug("Skipping registration for metric {}. Existing consumer metrics cannot be overwritten.", metric.metricName()); } } @Override public void unregisterMetricFromSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricRemoval(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric)); + } else { + log.debug("Skipping unregistration for metric {}. Existing consumer metrics cannot be removed.", metric.metricName()); } } @@ -518,6 +520,18 @@ public void subscribe(Pattern pattern) { subscribeInternal(pattern, Optional.empty()); } + @Override + public void subscribe(SubscriptionPattern pattern, ConsumerRebalanceListener callback) { + throw new UnsupportedOperationException(String.format("Subscribe to RE2/J pattern is not supported when using" + + "the %s protocol defined in config %s", GroupProtocol.CLASSIC, ConsumerConfig.GROUP_PROTOCOL_CONFIG)); + } + + @Override + public void subscribe(SubscriptionPattern pattern) { + throw new UnsupportedOperationException(String.format("Subscribe to RE2/J pattern is not supported when using" + + "the %s protocol defined in config %s", GroupProtocol.CLASSIC, ConsumerConfig.GROUP_PROTOCOL_CONFIG)); + } + /** * Internal helper method for {@link #subscribe(Pattern)} and * {@link #subscribe(Pattern, ConsumerRebalanceListener)} @@ -813,7 +827,7 @@ public void seekToBeginning(Collection partitions) { acquireAndEnsureOpen(); try { Collection parts = partitions.isEmpty() ? this.subscriptions.assignedPartitions() : partitions; - subscriptions.requestOffsetReset(parts, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(parts, AutoOffsetResetStrategy.EARLIEST); } finally { release(); } @@ -827,7 +841,7 @@ public void seekToEnd(Collection partitions) { acquireAndEnsureOpen(); try { Collection parts = partitions.isEmpty() ? this.subscriptions.assignedPartitions() : partitions; - subscriptions.requestOffsetReset(parts, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(parts, AutoOffsetResetStrategy.LATEST); } finally { release(); } @@ -891,7 +905,7 @@ public Map committed(final Set commitOffsetsAsync(final Map() { + lookupCoordinator().addListener(new RequestFutureListener<>() { @Override public void onSuccess(Void value) { pendingAsyncCommits.decrementAndGet(); @@ -1059,7 +1059,7 @@ private RequestFuture doCommitOffsetsAsync(final Map future = sendOffsetCommitRequest(offsets); inFlightAsyncCommits.incrementAndGet(); final OffsetCommitCallback cb = callback == null ? 
defaultOffsetCommitCallback : callback; - future.addListener(new RequestFutureListener() { + future.addListener(new RequestFutureListener<>() { @Override public void onSuccess(Void value) { inFlightAsyncCommits.decrementAndGet(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java index a87ffcd07eb3a..09bc3796b34ba 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java @@ -17,9 +17,11 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.metrics.HeartbeatMetricsManager; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; @@ -32,10 +34,13 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.REGEX_RESOLUTION_NOT_SUPPORTED_MSG; + /** * This is the heartbeat request manager for consumer groups. * @@ -89,12 +94,43 @@ public ConsumerHeartbeatRequestManager( * {@inheritDoc} */ @Override - public boolean handleSpecificError(final ConsumerGroupHeartbeatResponse response, final long currentTimeMs) { + public boolean handleSpecificFailure(Throwable exception) { + boolean errorHandled = false; + String errorMessage = exception.getMessage(); + if (exception instanceof UnsupportedVersionException) { + String message = CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG; + if (errorMessage.equals(REGEX_RESOLUTION_NOT_SUPPORTED_MSG)) { + message = REGEX_RESOLUTION_NOT_SUPPORTED_MSG; + logger.error("{} regex resolution not supported: {}", heartbeatRequestName(), message); + } else { + logger.error("{} failed due to unsupported version while sending request: {}", heartbeatRequestName(), errorMessage); + } + handleFatalFailure(new UnsupportedVersionException(message, exception)); + errorHandled = true; + } + return errorHandled; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean handleSpecificExceptionInResponse(final ConsumerGroupHeartbeatResponse response, final long currentTimeMs) { Errors error = errorForResponse(response); String errorMessage = errorMessageForResponse(response); boolean errorHandled; switch (error) { + // Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate + // custom message for it. Note that the case where the protocol is not supported at all should fail + // on the client side when building the request and checking supporting APIs (handled on onFailure). 
+ case UNSUPPORTED_VERSION: + logger.error("{} failed due to unsupported version response on broker side: {}", + heartbeatRequestName(), CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG); + handleFatalFailure(error.exception(CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG)); + errorHandled = true; + break; + case UNRELEASED_INSTANCE_ID: logger.error("{} failed due to unreleased instance id {}: {}", heartbeatRequestName(), membershipManager.groupInstanceId().orElse("null"), errorMessage); @@ -231,6 +267,15 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { sentFields.subscribedTopicNames = subscribedTopicNames; } + // SubscribedTopicRegex - only sent if it has changed since the last heartbeat. + // Send empty string to indicate that a subscribed pattern needs to be removed. + SubscriptionPattern pattern = subscriptions.subscriptionPattern(); + boolean patternUpdated = !Objects.equals(pattern, sentFields.pattern); + if ((sendAllFields && pattern != null) || patternUpdated) { + data.setSubscribedTopicRegex((pattern != null) ? pattern.pattern() : ""); + sentFields.pattern = pattern; + } + // ServerAssignor - sent when joining or if it has changed since the last heartbeat this.membershipManager.serverAssignor().ifPresent(serverAssignor -> { if (sendAllFields || !serverAssignor.equals(sentFields.serverAssignor)) { @@ -239,8 +284,6 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { } }); - // ClientAssignors - not supported yet - // TopicPartitions - sent when joining or with the first heartbeat after a new assignment from // the server was reconciled. This is ensured by resending the topic partitions whenever the // local assignment, including its local epoch is changed (although the local epoch is not sent @@ -268,6 +311,7 @@ private List buildTopicPartit static class SentFields { private int rebalanceTimeoutMs = -1; private TreeSet subscribedTopicNames = null; + private SubscriptionPattern pattern = null; private String serverAssignor = null; private AbstractMembershipManager.LocalAssignment localAssignment = null; @@ -278,6 +322,7 @@ void reset() { rebalanceTimeoutMs = -1; serverAssignor = null; localAssignment = null; + pattern = null; } } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java index bf81027d8ea48..cb4c7dde6f8c5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java @@ -68,7 +68,7 @@ public boolean allowAutoTopicCreation() { @Override public synchronized MetadataRequest.Builder newMetadataRequestBuilder() { - if (subscription.hasPatternSubscription()) + if (subscription.hasPatternSubscription() || subscription.hasRe2JPatternSubscription()) return MetadataRequest.Builder.allTopics(); List topics = new ArrayList<>(); topics.addAll(subscription.metadataTopics()); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 4f7d256104bb8..0e7b58acc2158 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -20,8 +20,10 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import 
org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.internals.IdempotentCloser; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.utils.KafkaThread; @@ -40,6 +42,7 @@ import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.function.Supplier; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.common.utils.Utils.closeQuietly; @@ -60,6 +63,7 @@ public class ConsumerNetworkThread extends KafkaThread implements Closeable { private final Supplier applicationEventProcessorSupplier; private final Supplier networkClientDelegateSupplier; private final Supplier requestManagersSupplier; + private final AsyncConsumerMetrics asyncConsumerMetrics; private ApplicationEventProcessor applicationEventProcessor; private NetworkClientDelegate networkClientDelegate; private RequestManagers requestManagers; @@ -67,6 +71,7 @@ public class ConsumerNetworkThread extends KafkaThread implements Closeable { private final IdempotentCloser closer = new IdempotentCloser(); private volatile Duration closeTimeout = Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS); private volatile long cachedMaximumTimeToWait = MAX_POLL_TIMEOUT_MS; + private long lastPollTimeMs = 0L; public ConsumerNetworkThread(LogContext logContext, Time time, @@ -74,7 +79,8 @@ public ConsumerNetworkThread(LogContext logContext, CompletableEventReaper applicationEventReaper, Supplier applicationEventProcessorSupplier, Supplier networkClientDelegateSupplier, - Supplier requestManagersSupplier) { + Supplier requestManagersSupplier, + AsyncConsumerMetrics asyncConsumerMetrics) { super(BACKGROUND_THREAD_NAME, true); this.time = time; this.log = logContext.logger(getClass()); @@ -84,6 +90,7 @@ public ConsumerNetworkThread(LogContext logContext, this.networkClientDelegateSupplier = networkClientDelegateSupplier; this.requestManagersSupplier = requestManagersSupplier; this.running = true; + this.asyncConsumerMetrics = asyncConsumerMetrics; } @Override @@ -139,6 +146,11 @@ void runOnce() { processApplicationEvents(); final long currentTimeMs = time.milliseconds(); + if (lastPollTimeMs != 0L) { + asyncConsumerMetrics.recordTimeBetweenNetworkThreadPoll(currentTimeMs - lastPollTimeMs); + } + lastPollTimeMs = currentTimeMs; + final long pollWaitTimeMs = requestManagers.entries().stream() .filter(Optional::isPresent) .map(Optional::get) @@ -154,6 +166,8 @@ void runOnce() { .reduce(Long.MAX_VALUE, Math::min); reapExpiredApplicationEvents(currentTimeMs); + List> uncompletedEvents = applicationEventReaper.uncompletedEvents(); + maybeFailOnMetadataError(uncompletedEvents); } /** @@ -162,17 +176,27 @@ void runOnce() { private void processApplicationEvents() { LinkedList events = new LinkedList<>(); applicationEventQueue.drainTo(events); + if (events.isEmpty()) + return; + asyncConsumerMetrics.recordApplicationEventQueueSize(0); + long startMs = time.milliseconds(); for (ApplicationEvent event : events) { + 
asyncConsumerMetrics.recordApplicationEventQueueTime(time.milliseconds() - event.enqueuedMs()); try { - if (event instanceof CompletableEvent) + if (event instanceof CompletableEvent) { applicationEventReaper.add((CompletableEvent) event); - + // Check if there are any metadata errors and fail the CompletableEvent if an error is present. + // This call is meant to handle "immediately completed events" which may not enter the awaiting state, + // so metadata errors need to be checked and handled right away. + maybeFailOnMetadataError(List.of((CompletableEvent) event)); + } applicationEventProcessor.process(event); } catch (Throwable t) { log.warn("Error processing event {}", t.getMessage(), t); } } + asyncConsumerMetrics.recordApplicationEventQueueProcessingTime(time.milliseconds() - startMs); } /** @@ -181,7 +205,7 @@ private void processApplicationEvents() { * is given least one attempt to satisfy any network requests before checking if a timeout has expired. */ private void reapExpiredApplicationEvents(long currentTimeMs) { - applicationEventReaper.reap(currentTimeMs); + asyncConsumerMetrics.recordApplicationEventExpiredSize(applicationEventReaper.reap(currentTimeMs)); } /** @@ -318,11 +342,28 @@ void cleanup() { log.error("Unexpected error during shutdown. Proceed with closing.", e); } finally { sendUnsentRequests(timer); - applicationEventReaper.reap(applicationEventQueue); + asyncConsumerMetrics.recordApplicationEventExpiredSize(applicationEventReaper.reap(applicationEventQueue)); closeQuietly(requestManagers, "request managers"); closeQuietly(networkClientDelegate, "network client delegate"); log.debug("Closed the consumer network thread"); } } + + /** + * If there is a metadata error, complete all uncompleted events that require subscription metadata. 
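The timings recorded here through AsyncConsumerMetrics (time between network-thread polls, application event queue size, queue time, processing time, expired events) are ordinary Kafka metrics, so they can be read back through the consumer's metric registry. A small inspection sketch, assuming they are registered under the "consumer-metrics" group that ConsumerUtils.CONSUMER_METRIC_GROUP introduces further down; the exact metric names are not spelled out in this diff:

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

final class ConsumerMetricsDump {
    // Dump whatever is registered under the (assumed) "consumer-metrics" group, which should include
    // the new async-consumer metrics recorded by ConsumerNetworkThread.
    static void dump(Consumer<?, ?> consumer) {
        Map<MetricName, ? extends Metric> metrics = consumer.metrics();
        metrics.forEach((name, metric) -> {
            if ("consumer-metrics".equals(name.group())) {
                System.out.println(name.name() + " " + name.tags() + " = " + metric.metricValue());
            }
        });
    }
}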
+ */ + private void maybeFailOnMetadataError(List> events) { + List> subscriptionMetadataEvent = events.stream() + .filter(e -> e instanceof CompletableApplicationEvent) + .map(e -> (CompletableApplicationEvent) e) + .filter(CompletableApplicationEvent::requireSubscriptionMetadata) + .collect(Collectors.toList()); + + if (subscriptionMetadataEvent.isEmpty()) + return; + networkClientDelegate.getAndClearMetadataError().ifPresent(metadataError -> + subscriptionMetadataEvent.forEach(event -> event.future().completeExceptionally(metadataError)) + ); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index 113b8e0b9d6df..e4b0fa924c0d2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -25,7 +25,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; @@ -69,6 +68,7 @@ public final class ConsumerUtils { public static final String CONSUMER_SHARE_METRIC_GROUP_PREFIX = "consumer-share"; public static final String COORDINATOR_METRICS_SUFFIX = "-coordinator-metrics"; public static final String CONSUMER_METRICS_SUFFIX = "-metrics"; + public static final String CONSUMER_METRIC_GROUP = CONSUMER_METRIC_GROUP_PREFIX + CONSUMER_METRICS_SUFFIX; /** * A fixed, large enough value will suffice for max. @@ -130,8 +130,8 @@ public static IsolationLevel configuredIsolationLevel(ConsumerConfig config) { } public static SubscriptionState createSubscriptionState(ConsumerConfig config, LogContext logContext) { - String s = config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT); - OffsetResetStrategy strategy = OffsetResetStrategy.valueOf(s); + String s = config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG); + AutoOffsetResetStrategy strategy = AutoOffsetResetStrategy.fromString(s); return new SubscriptionState(logContext, strategy); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java index a9e1bf46bedd9..4664267a0e858 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java @@ -139,22 +139,34 @@ public void handleCoordinatorDisconnect(Throwable exception, long currentTimeMs) } /** - * Mark the current coordinator null. + * Mark the coordinator as "unknown" (i.e. {@code null}) when a disconnect is detected. This detection can occur + * in one of two paths: * - * @param cause why the coordinator is marked unknown. - * @param currentTimeMs the current time in ms. + *
+ * <ol>
+ *     <li>The coordinator was discovered, but then later disconnected</li>
+ *     <li>The coordinator has not yet been discovered and/or connected</li>
+ * </ol>
      + * + * @param cause String explanation of why the coordinator is marked unknown + * @param currentTimeMs Current time in milliseconds */ public void markCoordinatorUnknown(final String cause, final long currentTimeMs) { - if (this.coordinator != null) { - log.info("Group coordinator {} is unavailable or invalid due to cause: {}. " - + "Rediscovery will be attempted.", this.coordinator, cause); - this.coordinator = null; + if (coordinator != null || timeMarkedUnknownMs == -1) { timeMarkedUnknownMs = currentTimeMs; totalDisconnectedMin = 0; + } + + if (coordinator != null) { + log.info( + "Group coordinator {} is unavailable or invalid due to cause: {}. Rediscovery will be attempted.", + coordinator, + cause + ); + coordinator = null; } else { long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs); long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS; - if (currDisconnectMin > this.totalDisconnectedMin) { + if (currDisconnectMin > totalDisconnectedMin) { log.debug("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs); totalDisconnectedMin = currDisconnectMin; } @@ -210,7 +222,7 @@ private void onResponse( ) { // handles Runtime exception Optional coordinator = response.coordinatorByKey(this.groupId); - if (!coordinator.isPresent()) { + if (coordinator.isEmpty()) { String msg = String.format("Response did not contain expected coordinator section for groupId: %s", this.groupId); onFailedResponse(currentTimeMs, new IllegalStateException(msg)); return; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java index 4127603372d03..fa45de7e2cb4e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java @@ -32,9 +32,9 @@ public class Fetch { private final Map>> records; + private final Map nextOffsetAndMetadata; private boolean positionAdvanced; private int numRecords; - private Map nextOffsetAndMetadata; public static Fetch empty() { return new Fetch<>(new HashMap<>(), false, 0, new HashMap<>()); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java index 794ff3acebabe..94e76edd0a578 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java @@ -347,7 +347,7 @@ private void handleInitializeErrors(final CompletedFetch completedFetch, final E } else if (error == Errors.OFFSET_OUT_OF_RANGE) { Optional clearedReplicaId = subscriptions.clearPreferredReadReplica(tp); - if (!clearedReplicaId.isPresent()) { + if (clearedReplicaId.isEmpty()) { // If there's no preferred replica to clear, we're fetching from the leader so handle this error normally SubscriptionState.FetchPosition position = subscriptions.positionOrNull(tp); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java index 153279162bc09..98644180e8b0b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java @@ -24,10 +24,12 @@ import org.apache.kafka.common.metrics.stats.WindowedCount; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Set; +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; + /** * The {@link FetchMetricsManager} class provides wrapper methods to record lag, lead, latency, and fetch metrics. * It keeps an internal ID of the assigned set of partitions which is updated to ensure the set of metrics it @@ -101,20 +103,24 @@ void recordRecordsFetched(int records) { void recordBytesFetched(String topic, int bytes) { String name = topicBytesFetchedMetricName(topic); - Sensor bytesFetched = new SensorBuilder(metrics, name, () -> topicTags(topic)) - .withAvg(metricsRegistry.topicFetchSizeAvg) - .withMax(metricsRegistry.topicFetchSizeMax) - .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) - .build(); + maybeRecordDeprecatedBytesFetched(name, topic, bytes); + + Sensor bytesFetched = new SensorBuilder(metrics, name, () -> Map.of("topic", topic)) + .withAvg(metricsRegistry.topicFetchSizeAvg) + .withMax(metricsRegistry.topicFetchSizeMax) + .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) + .build(); bytesFetched.record(bytes); } void recordRecordsFetched(String topic, int records) { String name = topicRecordsFetchedMetricName(topic); - Sensor recordsFetched = new SensorBuilder(metrics, name, () -> topicTags(topic)) - .withAvg(metricsRegistry.topicRecordsPerRequestAvg) - .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) - .build(); + maybeRecordDeprecatedRecordsFetched(name, topic, records); + + Sensor recordsFetched = new SensorBuilder(metrics, name, () -> Map.of("topic", topic)) + .withAvg(metricsRegistry.topicRecordsPerRequestAvg) + .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) + .build(); recordsFetched.record(records); } @@ -122,11 +128,13 @@ void recordPartitionLag(TopicPartition tp, long lag) { this.recordsLag.record(lag); String name = partitionRecordsLagMetricName(tp); - Sensor recordsLag = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp)) - .withValue(metricsRegistry.partitionRecordsLag) - .withMax(metricsRegistry.partitionRecordsLagMax) - .withAvg(metricsRegistry.partitionRecordsLagAvg) - .build(); + maybeRecordDeprecatedPartitionLag(name, tp, lag); + + Sensor recordsLag = new SensorBuilder(metrics, name, () -> mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition())))) + .withValue(metricsRegistry.partitionRecordsLag) + .withMax(metricsRegistry.partitionRecordsLagMax) + .withAvg(metricsRegistry.partitionRecordsLagAvg) + .build(); recordsLag.record(lag); } @@ -135,11 +143,13 @@ void recordPartitionLead(TopicPartition tp, long lead) { this.recordsLead.record(lead); String name = partitionRecordsLeadMetricName(tp); - Sensor recordsLead = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp)) - .withValue(metricsRegistry.partitionRecordsLead) - .withMin(metricsRegistry.partitionRecordsLeadMin) - .withAvg(metricsRegistry.partitionRecordsLeadAvg) - .build(); + maybeRecordDeprecatedPartitionLead(name, tp, lead); + + Sensor recordsLead = new SensorBuilder(metrics, name, () -> mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", 
String.valueOf(tp.partition())))) + .withValue(metricsRegistry.partitionRecordsLead) + .withMin(metricsRegistry.partitionRecordsLeadMin) + .withAvg(metricsRegistry.partitionRecordsLeadAvg) + .build(); recordsLead.record(lead); } @@ -162,16 +172,22 @@ void maybeUpdateAssignment(SubscriptionState subscription) { metrics.removeSensor(partitionRecordsLagMetricName(tp)); metrics.removeSensor(partitionRecordsLeadMetricName(tp)); metrics.removeMetric(partitionPreferredReadReplicaMetricName(tp)); + // Remove deprecated metrics. + metrics.removeSensor(deprecatedMetricName(partitionRecordsLagMetricName(tp))); + metrics.removeSensor(deprecatedMetricName(partitionRecordsLeadMetricName(tp))); + metrics.removeMetric(deprecatedPartitionPreferredReadReplicaMetricName(tp)); } } for (TopicPartition tp : newAssignedPartitions) { if (!this.assignedPartitions.contains(tp)) { + maybeRecordDeprecatedPreferredReadReplica(tp, subscription); + MetricName metricName = partitionPreferredReadReplicaMetricName(tp); metrics.addMetricIfAbsent( - metricName, - null, - (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) + metricName, + null, + (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) ); } } @@ -181,6 +197,67 @@ void maybeUpdateAssignment(SubscriptionState subscription) { } } + @Deprecated // To be removed in Kafka 5.0 release. + private void maybeRecordDeprecatedBytesFetched(String name, String topic, int bytes) { + if (shouldReportDeprecatedMetric(topic)) { + Sensor deprecatedBytesFetched = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicTags(topic)) + .withAvg(metricsRegistry.topicFetchSizeAvg) + .withMax(metricsRegistry.topicFetchSizeMax) + .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) + .build(); + deprecatedBytesFetched.record(bytes); + } + } + + @Deprecated // To be removed in Kafka 5.0 release. + private void maybeRecordDeprecatedRecordsFetched(String name, String topic, int records) { + if (shouldReportDeprecatedMetric(topic)) { + Sensor deprecatedRecordsFetched = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicTags(topic)) + .withAvg(metricsRegistry.topicRecordsPerRequestAvg) + .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) + .build(); + deprecatedRecordsFetched.record(records); + } + } + + @Deprecated // To be removed in Kafka 5.0 release. + private void maybeRecordDeprecatedPartitionLag(String name, TopicPartition tp, long lag) { + if (shouldReportDeprecatedMetric(tp.topic())) { + Sensor deprecatedRecordsLag = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicPartitionTags(tp)) + .withValue(metricsRegistry.partitionRecordsLag) + .withMax(metricsRegistry.partitionRecordsLagMax) + .withAvg(metricsRegistry.partitionRecordsLagAvg) + .build(); + + deprecatedRecordsLag.record(lag); + } + } + + @Deprecated // To be removed in Kafka 5.0 release. + private void maybeRecordDeprecatedPartitionLead(String name, TopicPartition tp, double lead) { + if (shouldReportDeprecatedMetric(tp.topic())) { + Sensor deprecatedRecordsLead = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicPartitionTags(tp)) + .withValue(metricsRegistry.partitionRecordsLead) + .withMin(metricsRegistry.partitionRecordsLeadMin) + .withAvg(metricsRegistry.partitionRecordsLeadAvg) + .build(); + + deprecatedRecordsLead.record(lead); + } + } + + @Deprecated // To be removed in Kafka 5.0 release. 
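To make the intent of these deprecated helpers concrete: for a topic whose name contains a period, the fetch metrics are now tagged with the real topic name, while the old underscore-substituted tag keeps being emitted in parallel until the deprecated form is dropped in 5.0. A hedged reading sketch, with the topic name "orders.eu" purely as an example:

import org.apache.kafka.clients.consumer.Consumer;

final class DottedTopicMetricsSketch {
    // Both tag spellings exist during the deprecation window; new dashboards should key on the real name.
    static void printBytesConsumedRate(Consumer<?, ?> consumer) {
        consumer.metrics().forEach((name, metric) -> {
            if (!"bytes-consumed-rate".equals(name.name())) {
                return;
            }
            String topicTag = name.tags().get("topic");
            if ("orders.eu".equals(topicTag)) {
                System.out.println("current metric:    " + metric.metricValue());
            } else if ("orders_eu".equals(topicTag)) {
                System.out.println("deprecated metric: " + metric.metricValue());
            }
        });
    }
}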
+ private void maybeRecordDeprecatedPreferredReadReplica(TopicPartition tp, SubscriptionState subscription) { + if (shouldReportDeprecatedMetric(tp.topic())) { + MetricName metricName = deprecatedPartitionPreferredReadReplicaMetricName(tp); + metrics.addMetricIfAbsent( + metricName, + null, + (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) + ); + } + } + private static String topicBytesFetchedMetricName(String topic) { return "topic." + topic + ".bytes-fetched"; } @@ -197,22 +274,34 @@ private static String partitionRecordsLagMetricName(TopicPartition tp) { return tp + ".records-lag"; } + private static String deprecatedMetricName(String name) { + return name + ".deprecated"; + } + + private static boolean shouldReportDeprecatedMetric(String topic) { + return topic.contains("."); + } + private MetricName partitionPreferredReadReplicaMetricName(TopicPartition tp) { + Map metricTags = mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition()))); + return this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, metricTags); + } + + @Deprecated + private MetricName deprecatedPartitionPreferredReadReplicaMetricName(TopicPartition tp) { Map metricTags = topicPartitionTags(tp); return this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, metricTags); } + @Deprecated static Map topicTags(String topic) { - Map metricTags = new HashMap<>(1); - metricTags.put("topic", topic.replace('.', '_')); - return metricTags; + return Map.of("topic", topic.replace('.', '_')); } + @Deprecated static Map topicPartitionTags(TopicPartition tp) { - Map metricTags = new HashMap<>(2); - metricTags.put("topic", tp.topic().replace('.', '_')); - metricTags.put("partition", String.valueOf(tp.partition())); - return metricTags; + return mkMap(mkEntry("topic", tp.topic().replace('.', '_')), + mkEntry("partition", String.valueOf(tp.partition()))); } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java index b0e69bb22a389..589cb6736b367 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java @@ -26,6 +26,9 @@ public class FetchMetricsRegistry { + private static final String DEPRECATED_TOPIC_METRICS_MESSAGE = "Note: For topic names with periods (.), an additional " + + "metric with underscores is emitted. However, the periods replaced metric is deprecated. Please use the metric with actual topic name instead."; + public MetricNameTemplate fetchSizeAvg; public MetricNameTemplate fetchSizeMax; public MetricNameTemplate bytesConsumedRate; @@ -110,39 +113,39 @@ public FetchMetricsRegistry(Set tags, String metricGrpPrefix) { topicTags.add("topic"); this.topicFetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName, - "The average number of bytes fetched per request for a topic", topicTags); + "The average number of bytes fetched per request for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicFetchSizeMax = new MetricNameTemplate("fetch-size-max", groupName, - "The maximum number of bytes fetched per request for a topic", topicTags); + "The maximum number of bytes fetched per request for a topic. 
" + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicBytesConsumedRate = new MetricNameTemplate("bytes-consumed-rate", groupName, - "The average number of bytes consumed per second for a topic", topicTags); + "The average number of bytes consumed per second for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicBytesConsumedTotal = new MetricNameTemplate("bytes-consumed-total", groupName, - "The total number of bytes consumed for a topic", topicTags); + "The total number of bytes consumed for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicRecordsPerRequestAvg = new MetricNameTemplate("records-per-request-avg", groupName, - "The average number of records in each request for a topic", topicTags); + "The average number of records in each request for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicRecordsConsumedRate = new MetricNameTemplate("records-consumed-rate", groupName, - "The average number of records consumed per second for a topic", topicTags); + "The average number of records consumed per second for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); this.topicRecordsConsumedTotal = new MetricNameTemplate("records-consumed-total", groupName, - "The total number of records consumed for a topic", topicTags); + "The total number of records consumed for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); /* Partition level */ Set partitionTags = new HashSet<>(topicTags); partitionTags.add("partition"); this.partitionRecordsLag = new MetricNameTemplate("records-lag", groupName, - "The latest lag of the partition", partitionTags); + "The latest lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionRecordsLagMax = new MetricNameTemplate("records-lag-max", groupName, - "The max lag of the partition", partitionTags); + "The max lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionRecordsLagAvg = new MetricNameTemplate("records-lag-avg", groupName, - "The average lag of the partition", partitionTags); + "The average lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionRecordsLead = new MetricNameTemplate("records-lead", groupName, - "The latest lead of the partition", partitionTags); + "The latest lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionRecordsLeadMin = new MetricNameTemplate("records-lead-min", groupName, - "The min lead of the partition", partitionTags); + "The min lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionRecordsLeadAvg = new MetricNameTemplate("records-lead-avg", groupName, - "The average lead of the partition", partitionTags); + "The average lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); this.partitionPreferredReadReplica = new MetricNameTemplate( "preferred-read-replica", groupName, - "The current read replica for the partition, or -1 if reading from leader", partitionTags); + "The current read replica for the partition, or -1 if reading from leader. 
" + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); } public List getAllTemplates() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java index 745bbfde99289..ac86d1ebeaab0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java @@ -192,7 +192,7 @@ private List> sendFetchesInternal(Map responseFuture = client.send(fetchTarget, request); - responseFuture.addListener(new RequestFutureListener() { + responseFuture.addListener(new RequestFutureListener<>() { @Override public void onSuccess(ClientResponse resp) { successHandler.handle(fetchTarget, data, resp); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberStateListener.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberStateListener.java index 8b977eb5c35ee..98b6271fcc0a5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberStateListener.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MemberStateListener.java @@ -17,10 +17,13 @@ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.common.TopicPartition; + import java.util.Optional; +import java.util.Set; /** - * Listener for getting notified of member epoch changes. + * Listener for getting notified of membership state changes. */ public interface MemberStateListener { @@ -34,4 +37,14 @@ public interface MemberStateListener { * @param memberId Current member ID. It won't change until the process is terminated. */ void onMemberEpochUpdated(Optional memberEpoch, String memberId); + + /** + * This callback is invoked when a group member's assigned set of partitions changes. Assignments can change via + * group coordinator partition assignment changes, unsubscribing, and when leaving the group. 
+ * + * @param partitions New assignment, can be empty, but not {@code null} + */ + default void onGroupAssignmentUpdated(Set partitions) { + + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index 56e4d6977480a..3c280e39d0279 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -27,6 +27,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; @@ -69,6 +70,9 @@ public class NetworkClientDelegate implements AutoCloseable { private final int requestTimeoutMs; private final Queue unsentRequests; private final long retryBackoffMs; + private Optional metadataError; + private final boolean notifyMetadataErrorsViaErrorQueue; + private final AsyncConsumerMetrics asyncConsumerMetrics; public NetworkClientDelegate( final Time time, @@ -76,7 +80,9 @@ public NetworkClientDelegate( final LogContext logContext, final KafkaClient client, final Metadata metadata, - final BackgroundEventHandler backgroundEventHandler) { + final BackgroundEventHandler backgroundEventHandler, + final boolean notifyMetadataErrorsViaErrorQueue, + final AsyncConsumerMetrics asyncConsumerMetrics) { this.time = time; this.client = client; this.metadata = metadata; @@ -85,6 +91,9 @@ public NetworkClientDelegate( this.unsentRequests = new ArrayDeque<>(); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); + this.metadataError = Optional.empty(); + this.notifyMetadataErrorsViaErrorQueue = notifyMetadataErrorsViaErrorQueue; + this.asyncConsumerMetrics = asyncConsumerMetrics; } // Visible for testing @@ -144,13 +153,18 @@ public void poll(final long timeoutMs, final long currentTimeMs) { this.client.poll(pollTimeoutMs, currentTimeMs); maybePropagateMetadataError(); checkDisconnects(currentTimeMs); + asyncConsumerMetrics.recordUnsentRequestsQueueSize(unsentRequests.size(), currentTimeMs); } private void maybePropagateMetadataError() { try { metadata.maybeThrowAnyException(); } catch (Exception e) { - backgroundEventHandler.add(new ErrorEvent(e)); + if (notifyMetadataErrorsViaErrorQueue) { + backgroundEventHandler.add(new ErrorEvent(e)); + } else { + metadataError = Optional.of(e); + } } } @@ -173,6 +187,7 @@ private void trySend(final long currentTimeMs) { unsent.timer.update(currentTimeMs); if (unsent.timer.isExpired()) { iterator.remove(); + asyncConsumerMetrics.recordUnsentRequestsQueueTime(time.milliseconds() - unsent.enqueueTimeMs()); unsent.handler.onFailure(currentTimeMs, new TimeoutException( "Failed to send request after " + unsent.timer.timeoutMs() + " ms.")); continue; @@ -183,6 +198,7 @@ private void trySend(final long currentTimeMs) { continue; } iterator.remove(); + asyncConsumerMetrics.recordUnsentRequestsQueueTime(time.milliseconds() - unsent.enqueueTimeMs()); } } @@ -210,6 +226,7 @@ protected void 
checkDisconnects(final long currentTimeMs) { UnsentRequest u = iter.next(); if (u.node.isPresent() && client.connectionFailed(u.node.get())) { iter.remove(); + asyncConsumerMetrics.recordUnsentRequestsQueueTime(time.milliseconds() - u.enqueueTimeMs()); AuthenticationException authenticationException = client.authenticationException(u.node.get()); u.handler.onFailure(currentTimeMs, authenticationException); } @@ -230,6 +247,12 @@ private ClientRequest makeClientRequest( unsent.handler ); } + + public Optional getAndClearMetadataError() { + Optional metadataError = this.metadataError; + this.metadataError = Optional.empty(); + return metadataError; + } public Node leastLoadedNode() { return this.client.leastLoadedNode(time.milliseconds()).node(); @@ -267,6 +290,7 @@ public void addAll(final List requests) { public void add(final UnsentRequest r) { Objects.requireNonNull(r); r.setTimer(this.time, this.requestTimeoutMs); + r.setEnqueueTimeMs(time.milliseconds()); unsentRequests.add(r); } @@ -300,6 +324,7 @@ public static class UnsentRequest { private final Optional node; // empty if random node can be chosen private Timer timer; + private long enqueueTimeMs; // time when the request was enqueued to unsentRequests, not duration in the queue. public UnsentRequest(final AbstractRequest.Builder requestBuilder, final Optional node) { @@ -317,6 +342,20 @@ Timer timer() { return timer; } + /** + * Set the time when the request was enqueued to {@link NetworkClientDelegate#unsentRequests}. + */ + private void setEnqueueTimeMs(final long enqueueTimeMs) { + this.enqueueTimeMs = enqueueTimeMs; + } + + /** + * Return the time when the request was enqueued to {@link NetworkClientDelegate#unsentRequests}. + */ + private long enqueueTimeMs() { + return enqueueTimeMs; + } + CompletableFuture future() { return handler.future; } @@ -412,8 +451,10 @@ public static Supplier supplier(final Time time, final Metrics metrics, final Sensor throttleTimeSensor, final ClientTelemetrySender clientTelemetrySender, - final BackgroundEventHandler backgroundEventHandler) { - return new CachedSupplier() { + final BackgroundEventHandler backgroundEventHandler, + final boolean notifyMetadataErrorsViaErrorQueue, + final AsyncConsumerMetrics asyncConsumerMetrics) { + return new CachedSupplier<>() { @Override protected NetworkClientDelegate create() { KafkaClient client = ClientUtils.createNetworkClient(config, @@ -426,7 +467,7 @@ protected NetworkClientDelegate create() { metadata, throttleTimeSensor, clientTelemetrySender); - return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler); + return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, asyncConsumerMetrics); } }; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java index e5a8ba197a110..bb01510e906be 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java @@ -101,12 +101,13 @@ public OffsetFetcher(LogContext logContext, * and one or more partitions aren't awaiting a seekToBeginning() or seekToEnd(). 
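The OffsetFetcher and OffsetsRequestManager hunks in this area stop passing raw reset timestamps around and instead keep the per-partition AutoOffsetResetStrategy, whose timestamp() drives the ListOffsets request; a strategy without a timestamp is the case OffsetFetcherUtils turns into NoOffsetForPartitionException below. A compact sketch of that mapping (partition names are illustrative; AutoOffsetResetStrategy lives in the consumer internals package):

import java.util.Map;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

final class ResetTimestampSketch {
    // EARLIEST and LATEST carry the sentinel ListOffsets timestamps (EARLIEST_TIMESTAMP/LATEST_TIMESTAMP)
    // that the removed offsetResetStrategyTimestamp() used to compute by hand.
    static Map<TopicPartition, Long> toTimestamps(Map<TopicPartition, AutoOffsetResetStrategy> strategies) {
        return strategies.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().timestamp().get()));
    }

    public static void main(String[] args) {
        Map<TopicPartition, AutoOffsetResetStrategy> strategies = Map.of(
            new TopicPartition("orders", 0), AutoOffsetResetStrategy.EARLIEST,
            new TopicPartition("orders", 1), AutoOffsetResetStrategy.LATEST);
        System.out.println(toTimestamps(strategies));
    }
}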
*/ public void resetPositionsIfNeeded() { - Map offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp(); + Map partitionAutoOffsetResetStrategyMap = + offsetFetcherUtils.getOffsetResetStrategyForPartitions(); - if (offsetResetTimestamps.isEmpty()) + if (partitionAutoOffsetResetStrategyMap.isEmpty()) return; - resetPositionsAsync(offsetResetTimestamps); + resetPositionsAsync(partitionAutoOffsetResetStrategyMap); } /** @@ -144,7 +145,7 @@ private ListOffsetResult fetchOffsetsByTimes(Map timestamp do { RequestFuture future = sendListOffsetsRequests(remainingToSearch, requireTimestamps); - future.addListener(new RequestFutureListener() { + future.addListener(new RequestFutureListener<>() { @Override public void onSuccess(ListOffsetResult value) { synchronized (future) { @@ -209,7 +210,9 @@ private Map beginningOrEndOffset(Collection partitionResetTimestamps) { + private void resetPositionsAsync(Map partitionAutoOffsetResetStrategyMap) { + Map partitionResetTimestamps = partitionAutoOffsetResetStrategyMap.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().timestamp().get())); Map> timestampsToSearchByNode = groupListOffsetRequests(partitionResetTimestamps, new HashSet<>()); for (Map.Entry> entry : timestampsToSearchByNode.entrySet()) { @@ -218,10 +221,10 @@ private void resetPositionsAsync(Map partitionResetTimesta subscriptions.setNextAllowedRetry(resetTimestamps.keySet(), time.milliseconds() + requestTimeoutMs); RequestFuture future = sendListOffsetRequest(node, resetTimestamps, false); - future.addListener(new RequestFutureListener() { + future.addListener(new RequestFutureListener<>() { @Override public void onSuccess(ListOffsetResult result) { - offsetFetcherUtils.onSuccessfulResponseForResettingPositions(resetTimestamps, result); + offsetFetcherUtils.onSuccessfulResponseForResettingPositions(result, partitionAutoOffsetResetStrategyMap); } @Override @@ -271,7 +274,7 @@ private void validatePositionsAsync(Map partition RequestFuture future = offsetsForLeaderEpochClient.sendAsyncRequest(node, fetchPositions); - future.addListener(new RequestFutureListener() { + future.addListener(new RequestFutureListener<>() { @Override public void onSuccess(OffsetForEpochResult offsetsResult) { offsetFetcherUtils.onSuccessfulResponseForValidatingPositions(fetchPositions, @@ -308,7 +311,7 @@ private RequestFuture sendListOffsetsRequests(final Map> entry : timestampsToSearchByNode.entrySet()) { RequestFuture future = sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps); - future.addListener(new RequestFutureListener() { + future.addListener(new RequestFutureListener<>() { @Override public void onSuccess(ListOffsetResult partialResult) { synchronized (listOffsetRequestsFuture) { @@ -352,7 +355,7 @@ private Map> groupListOffsetRequ Long offset = entry.getValue(); Metadata.LeaderAndEpoch leaderAndEpoch = metadata.currentLeader(tp); - if (!leaderAndEpoch.leader.isPresent()) { + if (leaderAndEpoch.leader.isEmpty()) { log.debug("Leader for partition {} is unknown for fetching offset {}", tp, offset); metadata.requestUpdate(true); partitionsToRetry.add(tp); @@ -397,7 +400,7 @@ private RequestFuture sendListOffsetRequest(final Node node, log.debug("Sending ListOffsetRequest {} to broker {}", builder, node); return client.send(node, builder) - .compose(new RequestFutureAdapter() { + .compose(new RequestFutureAdapter<>() { @Override public void onSuccess(ClientResponse response, RequestFuture future) { ListOffsetsResponse lor = (ListOffsetsResponse) 
response.responseBody(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java index 89940087611f4..0b7813eaad6b7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java @@ -22,7 +22,6 @@ import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -33,7 +32,6 @@ import org.apache.kafka.common.message.ListOffsetsResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.common.requests.ListOffsetsResponse; import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest; import org.apache.kafka.common.utils.LogContext; @@ -113,33 +111,15 @@ OffsetFetcherUtils.ListOffsetResult handleListOffsetResponse(ListOffsetsResponse Errors error = Errors.forCode(partition.errorCode()); switch (error) { case NONE: - if (!partition.oldStyleOffsets().isEmpty()) { - // Handle v0 response with offsets - long offset; - if (partition.oldStyleOffsets().size() > 1) { - throw new IllegalStateException("Unexpected partitionData response of length " + - partition.oldStyleOffsets().size()); - } else { - offset = partition.oldStyleOffsets().get(0); - } - log.debug("Handling v0 ListOffsetResponse response for {}. Fetched offset {}", - topicPartition, offset); - if (offset != ListOffsetsResponse.UNKNOWN_OFFSET) { - OffsetFetcherUtils.ListOffsetData offsetData = new OffsetFetcherUtils.ListOffsetData(offset, null, Optional.empty()); - fetchedOffsets.put(topicPartition, offsetData); - } - } else { - // Handle v1 and later response or v0 without offsets - log.debug("Handling ListOffsetResponse response for {}. Fetched offset {}, timestamp {}", - topicPartition, partition.offset(), partition.timestamp()); - if (partition.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) { - Optional leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH) - ? Optional.empty() - : Optional.of(partition.leaderEpoch()); - OffsetFetcherUtils.ListOffsetData offsetData = new OffsetFetcherUtils.ListOffsetData(partition.offset(), partition.timestamp(), - leaderEpoch); - fetchedOffsets.put(topicPartition, offsetData); - } + log.debug("Handling ListOffsetResponse response for {}. Fetched offset {}, timestamp {}", + topicPartition, partition.offset(), partition.timestamp()); + if (partition.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) { + Optional leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH) + ? 
Optional.empty() + : Optional.of(partition.leaderEpoch()); + OffsetFetcherUtils.ListOffsetData offsetData = new OffsetFetcherUtils.ListOffsetData(partition.offset(), partition.timestamp(), + leaderEpoch); + fetchedOffsets.put(topicPartition, offsetData); } break; case UNSUPPORTED_FOR_MESSAGE_FORMAT: @@ -225,19 +205,22 @@ void validatePositionsOnMetadataChange() { } } - Map getOffsetResetTimestamp() { + /** + * get OffsetResetStrategy for all assigned partitions + */ + Map getOffsetResetStrategyForPartitions() { // Raise exception from previous offset fetch if there is one RuntimeException exception = cachedResetPositionsException.getAndSet(null); if (exception != null) throw exception; Set partitions = subscriptionState.partitionsNeedingReset(time.milliseconds()); - final Map offsetResetTimestamps = new HashMap<>(); + final Map partitionAutoOffsetResetStrategyMap = new HashMap<>(); for (final TopicPartition partition : partitions) { - offsetResetTimestamps.put(partition, offsetResetStrategyTimestamp(partition)); + partitionAutoOffsetResetStrategyMap.put(partition, offsetResetStrategyWithValidTimestamp(partition)); } - return offsetResetTimestamps; + return partitionAutoOffsetResetStrategyMap; } static Map buildListOffsetsResult( @@ -284,14 +267,13 @@ static Map buildOffsetsForTimeIntern return offsetsResults; } - private long offsetResetStrategyTimestamp(final TopicPartition partition) { - OffsetResetStrategy strategy = subscriptionState.resetStrategy(partition); - if (strategy == OffsetResetStrategy.EARLIEST) - return ListOffsetsRequest.EARLIEST_TIMESTAMP; - else if (strategy == OffsetResetStrategy.LATEST) - return ListOffsetsRequest.LATEST_TIMESTAMP; - else + private AutoOffsetResetStrategy offsetResetStrategyWithValidTimestamp(final TopicPartition partition) { + AutoOffsetResetStrategy strategy = subscriptionState.resetStrategy(partition); + if (strategy.timestamp().isPresent()) { + return strategy; + } else { throw new NoOffsetForPartitionException(partition); + } } static Set topicsForPartitions(Collection partitions) { @@ -320,18 +302,9 @@ void updateSubscriptionState(Map resetTimestamps, - final ListOffsetResult result) { + final ListOffsetResult result, + final Map partitionAutoOffsetResetStrategyMap) { if (!result.partitionsToRetry.isEmpty()) { subscriptionState.requestFailed(result.partitionsToRetry, time.milliseconds() + retryBackoffMs); metadata.requestUpdate(false); @@ -340,10 +313,9 @@ void onSuccessfulResponseForResettingPositions( for (Map.Entry fetchedOffset : result.fetchedOffsets.entrySet()) { TopicPartition partition = fetchedOffset.getKey(); ListOffsetData offsetData = fetchedOffset.getValue(); - ListOffsetsRequestData.ListOffsetsPartition requestedReset = resetTimestamps.get(partition); resetPositionIfNeeded( partition, - timestampToOffsetResetStrategy(requestedReset.timestamp()), + partitionAutoOffsetResetStrategyMap.get(partition), offsetData); } } @@ -411,7 +383,7 @@ private LogTruncationException buildLogTruncationException(List prepareRequest( Map requestData) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java index 6d296149b704c..4c8d10ad323ac 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java @@ -472,20 +472,20 @@ private boolean 
canReusePendingOffsetFetchEvent(Set partitions) * this function (ex. {@link org.apache.kafka.common.errors.TopicAuthorizationException}) */ CompletableFuture resetPositionsIfNeeded() { - Map offsetResetTimestamps; + Map partitionAutoOffsetResetStrategyMap; try { - offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp(); + partitionAutoOffsetResetStrategyMap = offsetFetcherUtils.getOffsetResetStrategyForPartitions(); } catch (Exception e) { CompletableFuture result = new CompletableFuture<>(); result.completeExceptionally(e); return result; } - if (offsetResetTimestamps.isEmpty()) + if (partitionAutoOffsetResetStrategyMap.isEmpty()) return CompletableFuture.completedFuture(null); - return sendListOffsetsRequestsAndResetPositions(offsetResetTimestamps); + return sendListOffsetsRequestsAndResetPositions(partitionAutoOffsetResetStrategyMap); } /** @@ -578,6 +578,7 @@ private List buildListOffsetsRequests( listOffsetsRequestState.globalResult.complete(listOffsetResult); } else { requestsToRetry.add(listOffsetsRequestState); + metadata.requestUpdate(false); } } else { log.debug("ListOffsets request failed with error", error); @@ -652,12 +653,14 @@ private CompletableFuture buildListOffsetRequestToNode( * partitions. Use the retrieved offsets to reset positions in the subscription state. * This also adds the request to the list of unsentRequests. * - * @param timestampsToSearch the mapping between partitions and target time + * @param partitionAutoOffsetResetStrategyMap the mapping between partitions and AutoOffsetResetStrategy * @return A {@link CompletableFuture} which completes when the requests are * complete. */ private CompletableFuture sendListOffsetsRequestsAndResetPositions( - final Map timestampsToSearch) { + final Map partitionAutoOffsetResetStrategyMap) { + Map timestampsToSearch = partitionAutoOffsetResetStrategyMap.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().timestamp().get())); Map> timestampsToSearchByNode = groupListOffsetRequests(timestampsToSearch, Optional.empty()); @@ -677,8 +680,8 @@ private CompletableFuture sendListOffsetsRequestsAndResetPositions( partialResult.whenComplete((result, error) -> { if (error == null) { - offsetFetcherUtils.onSuccessfulResponseForResettingPositions(resetTimestamps, - result); + offsetFetcherUtils.onSuccessfulResponseForResettingPositions(result, + partitionAutoOffsetResetStrategyMap); } else { RuntimeException e; if (error instanceof RuntimeException) { @@ -894,7 +897,7 @@ private Map offsetsRequestState.remainingToSearch.put(tp, offset)); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java index 034efd4dba850..ed75524ac8129 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java @@ -200,7 +200,7 @@ else if (succeeded()) */ public RequestFuture compose(final RequestFutureAdapter adapter) { final RequestFuture adapted = new RequestFuture<>(); - addListener(new RequestFutureListener() { + addListener(new RequestFutureListener<>() { @Override public void onSuccess(T value) { adapter.onSuccess(value, adapted); @@ -215,7 +215,7 @@ public void onFailure(RuntimeException e) { } public void chain(final RequestFuture future) { - addListener(new RequestFutureListener() { + addListener(new RequestFutureListener<>() { @Override public void 
onSuccess(T value) { future.complete(value); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java index 7f682c81fc4d9..304f0fffd4ad5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java @@ -160,7 +160,7 @@ public static Supplier supplier(final Time time, final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker, final MemberStateListener applicationThreadMemberStateListener ) { - return new CachedSupplier() { + return new CachedSupplier<>() { @Override protected RequestManagers create() { final NetworkClientDelegate networkClientDelegate = networkClientDelegateSupplier.get(); @@ -284,7 +284,7 @@ public static Supplier supplier(final Time time, final Optional clientTelemetryReporter, final Metrics metrics ) { - return new CachedSupplier() { + return new CachedSupplier<>() { @Override protected RequestManagers create() { long retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java index cfaad3667fa40..20a022cb6ca84 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java @@ -87,14 +87,15 @@ public class ShareConsumeRequestManager implements RequestManager, MemberStateLi private final IdempotentCloser idempotentCloser = new IdempotentCloser(); private Uuid memberId; private boolean fetchMoreRecords = false; - private final Map fetchAcknowledgementsMap; + private final Map fetchAcknowledgementsToSend; + private final Map fetchAcknowledgementsInFlight; private final Map> acknowledgeRequestStates; private final long retryBackoffMs; private final long retryBackoffMaxMs; private boolean closing = false; private final CompletableFuture closeFuture; private boolean isAcknowledgementCommitCallbackRegistered = false; - private final Map forgottenTopicNames = new HashMap<>(); + private final Map topicNamesMap = new HashMap<>(); ShareConsumeRequestManager(final Time time, final LogContext logContext, @@ -122,7 +123,8 @@ public class ShareConsumeRequestManager implements RequestManager, MemberStateLi this.sessionHandlers = new HashMap<>(); this.nodesWithPendingRequests = new HashSet<>(); this.acknowledgeRequestStates = new HashMap<>(); - this.fetchAcknowledgementsMap = new HashMap<>(); + this.fetchAcknowledgementsToSend = new HashMap<>(); + this.fetchAcknowledgementsInFlight = new HashMap<>(); this.closeFuture = new CompletableFuture<>(); } @@ -148,7 +150,7 @@ public PollResult poll(long currentTimeMs) { for (TopicPartition partition : partitionsToFetch()) { Optional leaderOpt = metadata.currentLeader(partition).leader; - if (!leaderOpt.isPresent()) { + if (leaderOpt.isEmpty()) { log.debug("Requesting metadata update for partition {} since current leader node is missing", partition); metadata.requestUpdate(false); continue; @@ -170,12 +172,14 @@ public PollResult poll(long currentTimeMs) { k -> sessionHandlers.computeIfAbsent(node.id(), n -> new ShareSessionHandler(logContext, n, memberId))); TopicIdPartition tip = new TopicIdPartition(topicId, 
partition); - Acknowledgements acknowledgementsToSend = fetchAcknowledgementsMap.get(tip); + Acknowledgements acknowledgementsToSend = fetchAcknowledgementsToSend.remove(tip); if (acknowledgementsToSend != null) { metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); + fetchAcknowledgementsInFlight.put(tip, acknowledgementsToSend); } handler.addPartitionToFetch(tip, acknowledgementsToSend); fetchedPartitions.add(tip); + topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); log.debug("Added fetch request for partition {} to node {}", tip, node.id()); } @@ -194,17 +198,22 @@ public PollResult poll(long currentTimeMs) { } else { for (TopicIdPartition tip : sessionHandler.sessionPartitions()) { if (!fetchedPartitions.contains(tip)) { - Acknowledgements acknowledgementsToSend = fetchAcknowledgementsMap.get(tip); + Acknowledgements acknowledgementsToSend = fetchAcknowledgementsToSend.remove(tip); + if (acknowledgementsToSend != null) { metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); - } - sessionHandler.addPartitionToFetch(tip, acknowledgementsToSend); - partitionsToForgetMap.putIfAbsent(node, new ArrayList<>()); - partitionsToForgetMap.get(node).add(tip); + fetchAcknowledgementsInFlight.put(tip, acknowledgementsToSend); + + sessionHandler.addPartitionToFetch(tip, acknowledgementsToSend); + handlerMap.put(node, sessionHandler); - forgottenTopicNames.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); - fetchedPartitions.add(tip); - log.debug("Added fetch request for previously subscribed partition {} to node {}", tip, node.id()); + partitionsToForgetMap.putIfAbsent(node, new ArrayList<>()); + partitionsToForgetMap.get(node).add(tip); + + topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); + fetchedPartitions.add(tip); + log.debug("Added fetch request for previously subscribed partition {} to node {}", tip, node.id()); + } } } } @@ -253,7 +262,7 @@ public void fetch(Map acknowledgementsMap) { } // The acknowledgements sent via ShareFetch are stored in this map. 
- acknowledgementsMap.forEach((tip, acks) -> fetchAcknowledgementsMap.merge(tip, acks, Acknowledgements::merge)); + acknowledgementsMap.forEach((tip, acks) -> fetchAcknowledgementsToSend.merge(tip, acks, Acknowledgements::merge)); } /** @@ -566,8 +575,10 @@ public CompletableFuture acknowledgeOnClose(final Map partitions = responseData.keySet().stream().map(TopicIdPartition::topicPartition).collect(Collectors.toSet()); @@ -653,7 +664,7 @@ private void handleShareFetchSuccess(Node fetchTarget, log.debug("ShareFetch for partition {} returned fetch data {}", tip, partitionData); - Acknowledgements acks = fetchAcknowledgementsMap.remove(tip); + Acknowledgements acks = fetchAcknowledgementsInFlight.remove(tip); if (acks != null) { if (partitionData.acknowledgeErrorCode() != Errors.NONE.code()) { metricsManager.recordFailedAcknowledgements(acks.size()); @@ -716,7 +727,7 @@ private void handleShareFetchFailure(Node fetchTarget, partition.partitionIndex(), metadata.topicNames().get(topic.topicId())); - Acknowledgements acks = fetchAcknowledgementsMap.remove(tip); + Acknowledgements acks = fetchAcknowledgementsInFlight.remove(tip); if (acks != null) { metricsManager.recordFailedAcknowledgements(acks.size()); acks.setAcknowledgeErrorCode(Errors.forException(error)); @@ -981,32 +992,37 @@ UnsentRequest buildRequest() { } ShareAcknowledgeRequest.Builder requestBuilder = sessionHandler.newShareAcknowledgeBuilder(groupId, fetchConfig); - Node nodeToSend = metadata.fetch().nodeById(nodeId); - log.trace("Building acknowledgements to send : {}", finalAcknowledgementsToSend); - nodesWithPendingRequests.add(nodeId); isProcessed = false; - - BiConsumer responseHandler = (clientResponse, error) -> { - if (error != null) { - handleShareAcknowledgeFailure(nodeToSend, requestBuilder.data(), this, error, clientResponse.receivedTimeMs()); - } else { - handleShareAcknowledgeSuccess(nodeToSend, requestBuilder.data(), this, clientResponse, clientResponse.receivedTimeMs()); - } - }; + Node nodeToSend = metadata.fetch().nodeById(nodeId); if (requestBuilder == null) { handleSessionErrorCode(Errors.SHARE_SESSION_NOT_FOUND); return null; - } else { + } else if (nodeToSend != null) { + nodesWithPendingRequests.add(nodeId); + + log.trace("Building acknowledgements to send : {}", finalAcknowledgementsToSend); + inFlightAcknowledgements.putAll(finalAcknowledgementsToSend); if (incompleteAcknowledgements.isEmpty()) { acknowledgementsToSend.clear(); } else { incompleteAcknowledgements.clear(); } - return new UnsentRequest(requestBuilder, Optional.of(nodeToSend)).whenComplete(responseHandler); + + UnsentRequest unsentRequest = new UnsentRequest(requestBuilder, Optional.of(nodeToSend)); + BiConsumer responseHandler = (clientResponse, error) -> { + if (error != null) { + handleShareAcknowledgeFailure(nodeToSend, requestBuilder.data(), this, error, unsentRequest.handler().completionTimeMs()); + } else { + handleShareAcknowledgeSuccess(nodeToSend, requestBuilder.data(), this, clientResponse, unsentRequest.handler().completionTimeMs()); + } + }; + return unsentRequest.whenComplete(responseHandler); } + + return null; } int getInFlightAcknowledgementsCount(TopicIdPartition tip) { @@ -1072,12 +1088,16 @@ void handleAcknowledgeTimedOut(TopicIdPartition tip) { * being sent. */ void handleSessionErrorCode(Errors errorCode) { - inFlightAcknowledgements.forEach((tip, acks) -> { + Map acknowledgementsMapToClear = + incompleteAcknowledgements.isEmpty() ? 
acknowledgementsToSend : incompleteAcknowledgements; + + acknowledgementsMapToClear.forEach((tip, acks) -> { if (acks != null) { acks.setAcknowledgeErrorCode(errorCode); } resultHandler.complete(tip, acks, onCommitAsync()); }); + acknowledgementsMapToClear.clear(); processingComplete(); } @@ -1151,12 +1171,17 @@ class ResultHandler { * signal the completion when all results are known. */ public void complete(TopicIdPartition partition, Acknowledgements acknowledgements, boolean isCommitAsync) { - if (acknowledgements != null) { + if (!isCommitAsync && acknowledgements != null) { result.put(partition, acknowledgements); } // For commitAsync, we do not wait for other results to complete, we prepare a background event // for every ShareAcknowledgeResponse. - if (isCommitAsync || (remainingResults != null && remainingResults.decrementAndGet() == 0)) { + // For commitAsync, we send out a background event for every TopicIdPartition, so we use a singletonMap each time. + if (isCommitAsync) { + if (acknowledgements != null) { + maybeSendShareAcknowledgeCommitCallbackEvent(Collections.singletonMap(partition, acknowledgements)); + } + } else if (remainingResults != null && remainingResults.decrementAndGet() == 0) { maybeSendShareAcknowledgeCommitCallbackEvent(result); future.ifPresent(future -> future.complete(result)); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java index aeaaec7add70c..625f6abf0cd38 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java @@ -25,6 +25,8 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; +import org.slf4j.Logger; + /** * {@code ShareConsumerDelegateCreator} implements a quasi-factory pattern to allow the caller to remain unaware of the * underlying {@link ShareConsumer} implementation that is created. 
This provides the means by which @@ -41,6 +43,9 @@ public ShareConsumerDelegate create(final ConsumerConfig config, final Deserializer keyDeserializer, final Deserializer valueDeserializer) { try { + LogContext logContext = new LogContext(); + Logger log = logContext.logger(getClass()); + log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); return new ShareConsumerImpl<>(config, keyDeserializer, valueDeserializer); } catch (KafkaException e) { throw e; @@ -60,6 +65,8 @@ public ShareConsumerDelegate create(final LogContext logContext, final SubscriptionState subscriptions, final ConsumerMetadata metadata) { try { + Logger log = logContext.logger(getClass()); + log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); return new ShareConsumerImpl<>( logContext, clientId, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index e209ec00b0d18..de737dde3bf92 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -47,6 +47,7 @@ import org.apache.kafka.clients.consumer.internals.events.ShareFetchEvent; import org.apache.kafka.clients.consumer.internals.events.ShareSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.ShareUnsubscribeEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.KafkaShareConsumerMetrics; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; @@ -58,6 +59,7 @@ import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.protocol.Errors; @@ -160,6 +162,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { private final ApplicationEventHandler applicationEventHandler; private final Time time; private final KafkaShareConsumerMetrics kafkaShareConsumerMetrics; + private final AsyncConsumerMetrics asyncConsumerMetrics; private Logger log; private final String clientId; private final String groupId; @@ -252,6 +255,7 @@ private enum AcknowledgementMode { this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); this.currentFetch = ShareFetch.empty(); @@ -266,7 +270,8 @@ private enum AcknowledgementMode { ShareFetchMetricsManager shareFetchMetricsManager = createShareFetchMetricsManager(metrics); ApiVersions apiVersions = new ApiVersions(); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); + final BackgroundEventHandler backgroundEventHandler = new 
BackgroundEventHandler( + backgroundEventQueue, time, asyncConsumerMetrics); // This FetchBuffer is shared between the application and network threads. this.fetchBuffer = new ShareFetchBuffer(logContext); @@ -279,7 +284,9 @@ private enum AcknowledgementMode { metrics, shareFetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - backgroundEventHandler + backgroundEventHandler, + true, + asyncConsumerMetrics ); this.completedAcknowledgements = new LinkedList<>(); @@ -310,7 +317,8 @@ private enum AcknowledgementMode { new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, - requestManagersSupplier); + requestManagersSupplier, + asyncConsumerMetrics); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); @@ -372,13 +380,15 @@ private enum AcknowledgementMode { new FetchConfig(config), deserializers); this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); final BlockingQueue backgroundEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); + final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( + backgroundEventQueue, time, asyncConsumerMetrics); final Supplier networkClientDelegateSupplier = - () -> new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler); + () -> new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true, asyncConsumerMetrics); GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, @@ -411,7 +421,8 @@ private enum AcknowledgementMode { new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, - requestManagersSupplier); + requestManagersSupplier, + asyncConsumerMetrics); this.backgroundEventQueue = new LinkedBlockingQueue<>(); this.backgroundEventProcessor = new BackgroundEventProcessor(); @@ -457,6 +468,7 @@ private enum AcknowledgementMode { this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); this.clientTelemetryReporter = Optional.empty(); this.completedAcknowledgements = Collections.emptyList(); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); } // auxiliary interface for testing @@ -469,7 +481,8 @@ ApplicationEventHandler build( final CompletableEventReaper applicationEventReaper, final Supplier applicationEventProcessorSupplier, final Supplier networkClientDelegateSupplier, - final Supplier requestManagersSupplier + final Supplier requestManagersSupplier, + final AsyncConsumerMetrics asyncConsumerMetrics ); } @@ -785,7 +798,7 @@ public void setAcknowledgementCommitCallback(final AcknowledgementCommitCallback */ @Override public Uuid clientInstanceId(final Duration timeout) { - if (!clientTelemetryReporter.isPresent()) { + if (clientTelemetryReporter.isEmpty()) { throw new IllegalStateException("Telemetry is not enabled. 
Set config `" + ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`."); } @@ -800,6 +813,30 @@ public Uuid clientInstanceId(final Duration timeout) { return Collections.unmodifiableMap(metrics.metrics()); } + /** + * {@inheritDoc} + */ + @Override + public void registerMetricForSubscription(KafkaMetric metric) { + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric)); + } else { + log.debug("Skipping registration for metric {}. Existing consumer metrics cannot be overwritten.", metric.metricName()); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void unregisterMetricFromSubscription(KafkaMetric metric) { + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric)); + } else { + log.debug("Skipping unregistration for metric {}. Existing consumer metrics cannot be removed.", metric.metricName()); + } + } + /** * {@inheritDoc} */ @@ -854,6 +891,7 @@ private void close(final Duration timeout, final boolean swallowException) { backgroundEventReaper.reap(backgroundEventQueue); closeQuietly(kafkaShareConsumerMetrics, "kafka share consumer metrics", firstException); + closeQuietly(asyncConsumerMetrics, "kafka async consumer metrics", firstException); closeQuietly(metrics, "consumer metrics", firstException); closeQuietly(deserializers, "consumer deserializers", firstException); clientTelemetryReporter.ifPresent(reporter -> closeQuietly(reporter, "consumer telemetry reporter", firstException)); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java index 6e37fa0ed3878..34878d239e625 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.metrics.HeartbeatMetricsManager; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; @@ -50,6 +51,9 @@ public class ShareHeartbeatRequestManager extends AbstractHeartbeatRequestManage */ private final HeartbeatState heartbeatState; + public static final String SHARE_PROTOCOL_NOT_SUPPORTED_MSG = "The cluster does not support the share group protocol. 
" + + "To use share groups, the cluster must have the share group protocol enabled."; + public ShareHeartbeatRequestManager( final LogContext logContext, final Time time, @@ -82,6 +86,45 @@ public ShareHeartbeatRequestManager( this.heartbeatState = heartbeatState; } + /** + * {@inheritDoc} + */ + @Override + public boolean handleSpecificFailure(Throwable exception) { + boolean errorHandled = false; + if (exception instanceof UnsupportedVersionException) { + logger.error("{} failed due to {}: {}", heartbeatRequestName(), exception.getMessage(), SHARE_PROTOCOL_NOT_SUPPORTED_MSG); + handleFatalFailure(new UnsupportedVersionException(SHARE_PROTOCOL_NOT_SUPPORTED_MSG, exception)); + errorHandled = true; + } + return errorHandled; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean handleSpecificExceptionInResponse(final ShareGroupHeartbeatResponse response, final long currentTimeMs) { + Errors error = errorForResponse(response); + boolean errorHandled; + + switch (error) { + // Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate + // custom message for it. Note that the case where the protocol is not supported at all should fail + // on the client side when building the request and checking supporting APIs (handled on onFailure). + case UNSUPPORTED_VERSION: + logger.error("{} failed due to unsupported version: {}", + heartbeatRequestName(), SHARE_PROTOCOL_NOT_SUPPORTED_MSG); + handleFatalFailure(error.exception(SHARE_PROTOCOL_NOT_SUPPORTED_MSG)); + errorHandled = true; + break; + + default: + errorHandled = false; + } + return errorHandled; + } + /** * {@inheritDoc} */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java index d7469afdc9e70..27cfdc5981e24 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java @@ -171,13 +171,15 @@ public ShareFetchRequest.Builder newShareFetchBuilder(String groupId, FetchConfi return ShareFetchRequest.Builder.forConsumer( groupId, nextMetadata, fetchConfig.maxWaitMs, - fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.fetchSize, + fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.fetchSize, fetchConfig.maxPollRecords, added, removed, acknowledgementBatches); } public ShareAcknowledgeRequest.Builder newShareAcknowledgeBuilder(String groupId, FetchConfig fetchConfig) { if (nextMetadata.isNewSession()) { // A share session cannot be started with a ShareAcknowledge request + nextPartitions.clear(); + nextAcknowledgements.clear(); return null; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java index 310c7a3b8b169..bd45e71c884d9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java @@ -22,7 +22,7 @@ import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import 
org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.internals.PartitionStates; @@ -76,7 +76,7 @@ public class SubscriptionState { private final Logger log; private enum SubscriptionType { - NONE, AUTO_TOPICS, AUTO_PATTERN, USER_ASSIGNED, AUTO_TOPICS_SHARE + NONE, AUTO_TOPICS, AUTO_PATTERN, AUTO_PATTERN_RE2J, USER_ASSIGNED, AUTO_TOPICS_SHARE } /* the type of subscription */ @@ -85,6 +85,9 @@ private enum SubscriptionType { /* the pattern user has requested */ private Pattern subscribedPattern; + /* the Re2J pattern user has requested */ + private SubscriptionPattern subscribedRe2JPattern; + /* the list of topics the user has requested */ private Set subscription; @@ -97,10 +100,10 @@ private enum SubscriptionType { private final PartitionStates assignment; /* Default offset reset strategy */ - private final OffsetResetStrategy defaultResetStrategy; + private final AutoOffsetResetStrategy defaultResetStrategy; /* User-provided listener to be invoked when assignment changes */ - private Optional rebalanceListener; + private Optional rebalanceListener = Optional.empty(); private int assignmentId = 0; @@ -108,13 +111,21 @@ private enum SubscriptionType { public synchronized String toString() { return "SubscriptionState{" + "type=" + subscriptionType + - ", subscribedPattern=" + subscribedPattern + + ", subscribedPattern=" + subscribedPatternInUse() + ", subscription=" + String.join(",", subscription) + ", groupSubscription=" + String.join(",", groupSubscription) + ", defaultResetStrategy=" + defaultResetStrategy + ", assignment=" + assignment.partitionStateValues() + " (id=" + assignmentId + ")}"; } + private Object subscribedPatternInUse() { + if (subscriptionType == SubscriptionType.AUTO_PATTERN_RE2J) + return subscribedRe2JPattern; + if (subscriptionType == SubscriptionType.AUTO_PATTERN) + return subscribedPattern; + return null; + } + public synchronized String prettyString() { switch (subscriptionType) { case NONE: @@ -123,6 +134,8 @@ public synchronized String prettyString() { return "Subscribe(" + String.join(",", subscription) + ")"; case AUTO_PATTERN: return "Subscribe(" + subscribedPattern + ")"; + case AUTO_PATTERN_RE2J: + return "Subscribe(" + subscribedRe2JPattern + ")"; case USER_ASSIGNED: return "Assign(" + assignedPartitions() + " , id=" + assignmentId + ")"; case AUTO_TOPICS_SHARE: @@ -132,13 +145,14 @@ public synchronized String prettyString() { } } - public SubscriptionState(LogContext logContext, OffsetResetStrategy defaultResetStrategy) { + public SubscriptionState(LogContext logContext, AutoOffsetResetStrategy defaultResetStrategy) { this.log = logContext.logger(this.getClass()); this.defaultResetStrategy = defaultResetStrategy; this.subscription = new TreeSet<>(); // use a sorted set for better logging this.assignment = new PartitionStates<>(); this.groupSubscription = new HashSet<>(); this.subscribedPattern = null; + this.subscribedRe2JPattern = null; this.subscriptionType = SubscriptionType.NONE; } @@ -177,6 +191,12 @@ public synchronized void subscribe(Pattern pattern, Optional listener) { + registerRebalanceListener(listener); + setSubscriptionType(SubscriptionType.AUTO_PATTERN_RE2J); + this.subscribedRe2JPattern = pattern; + } + public synchronized boolean subscribeFromPattern(Set topics) { if (subscriptionType != SubscriptionType.AUTO_PATTERN) throw new IllegalArgumentException("Attempt to subscribe from pattern while subscription type set to " + @@ -250,7 +270,11 @@ public synchronized boolean 
assignFromUser(Set partitions) { } /** - * @return true if assignments matches subscription, otherwise false + * Check if an assignment received while using the classic group protocol matches the subscription. + * Note that this only considers the subscribedPattern because this functionality is only used under the + * classic protocol, where subscribedRe2JPattern is not supported. + * + * @return true if assignments matches subscription, otherwise false. */ public synchronized boolean checkAssignmentMatchedSubscription(Collection assignments) { for (TopicPartition topicPartition : assignments) { @@ -337,6 +361,24 @@ public synchronized Set subscription() { return Collections.emptySet(); } + /** + * @return The RE2J compatible pattern in use, provided via a call to + * {@link #subscribe(SubscriptionPattern, Optional)}. + * Null if there is no SubscriptionPattern in use. + */ + public synchronized SubscriptionPattern subscriptionPattern() { + if (hasRe2JPatternSubscription()) + return this.subscribedRe2JPattern; + return null; + } + + /** + * @return True if subscribed using RE2J pattern. False otherwise. + */ + public synchronized boolean hasRe2JPatternSubscription() { + return this.subscriptionType == SubscriptionType.AUTO_PATTERN_RE2J; + } + public synchronized Set pausedPartitions() { return collectPartitions(TopicPartitionState::isPaused); } @@ -393,13 +435,13 @@ public void seekUnvalidated(TopicPartition tp, FetchPosition position) { assignedState(tp).seekUnvalidated(position); } - synchronized void maybeSeekUnvalidated(TopicPartition tp, FetchPosition position, OffsetResetStrategy requestedResetStrategy) { + synchronized void maybeSeekUnvalidated(TopicPartition tp, FetchPosition position, AutoOffsetResetStrategy requestedResetStrategy) { TopicPartitionState state = assignedStateOrNull(tp); if (state == null) { log.debug("Skipping reset of partition {} since it is no longer assigned", tp); } else if (!state.awaitingReset()) { log.debug("Skipping reset of partition {} since reset is no longer needed", tp); - } else if (requestedResetStrategy != state.resetStrategy) { + } else if (requestedResetStrategy != null && !requestedResetStrategy.equals(state.resetStrategy)) { log.debug("Skipping reset of partition {} since an alternative reset has been requested", tp); } else { log.info("Resetting offset for partition {} to position {}.", tp, position); @@ -445,7 +487,7 @@ public synchronized List fetchablePartitions(Predicate allConsumed() { return allConsumed; } - public synchronized void requestOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy) { + public synchronized void requestOffsetReset(TopicPartition partition, AutoOffsetResetStrategy offsetResetStrategy) { assignedState(partition).reset(offsetResetStrategy); } - public synchronized void requestOffsetReset(Collection partitions, OffsetResetStrategy offsetResetStrategy) { + public synchronized void requestOffsetReset(Collection partitions, AutoOffsetResetStrategy offsetResetStrategy) { partitions.forEach(tp -> { log.info("Seeking to {} offset of partition {}", offsetResetStrategy, tp); assignedState(tp).reset(offsetResetStrategy); @@ -734,14 +776,14 @@ synchronized void setNextAllowedRetry(Set partitions, long nextA } boolean hasDefaultOffsetResetPolicy() { - return defaultResetStrategy != OffsetResetStrategy.NONE; + return defaultResetStrategy != AutoOffsetResetStrategy.NONE; } public synchronized boolean isOffsetResetNeeded(TopicPartition partition) { return assignedState(partition).awaitingReset(); } - 
public synchronized OffsetResetStrategy resetStrategy(TopicPartition partition) { + public synchronized AutoOffsetResetStrategy resetStrategy(TopicPartition partition) { return assignedState(partition).resetStrategy(); } @@ -782,7 +824,7 @@ public synchronized void resetInitializingPositions(Predicate in final Set partitionsWithNoOffsets = new HashSet<>(); assignment.forEach((tp, partitionState) -> { if (partitionState.shouldInitialize() && initPartitionsToInclude.test(tp)) { - if (defaultResetStrategy == OffsetResetStrategy.NONE) + if (defaultResetStrategy == AutoOffsetResetStrategy.NONE) partitionsWithNoOffsets.add(tp); else requestOffsetReset(tp); @@ -856,8 +898,8 @@ public synchronized void assignFromSubscribedAwaitingCallback(Collection partitions) { @@ -897,7 +939,7 @@ private static class TopicPartitionState { private boolean paused; // whether this partition has been paused by the user private boolean pendingRevocation; private boolean pendingOnAssignedCallback; - private OffsetResetStrategy resetStrategy; // the strategy to use if the offset needs resetting + private AutoOffsetResetStrategy resetStrategy; // the strategy to use if the offset needs resetting private Long nextRetryTimeMs; private Integer preferredReadReplica; private Long preferredReadReplicaExpireTimeMs; @@ -966,7 +1008,7 @@ private Optional clearPreferredReadReplica() { } } - private void reset(OffsetResetStrategy strategy) { + private void reset(AutoOffsetResetStrategy strategy) { transitionState(FetchStates.AWAIT_RESET, () -> { this.resetStrategy = strategy; this.nextRetryTimeMs = null; @@ -985,7 +1027,7 @@ private boolean maybeValidatePosition(Metadata.LeaderAndEpoch currentLeaderAndEp return false; } - if (!currentLeaderAndEpoch.leader.isPresent()) { + if (currentLeaderAndEpoch.leader.isEmpty()) { return false; } @@ -1137,7 +1179,7 @@ private void lastStableOffset(Long lastStableOffset) { this.endOffsetRequested = false; } - private OffsetResetStrategy resetStrategy() { + private AutoOffsetResetStrategy resetStrategy() { return resetStrategy; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java index e11e702388ca5..dfb775f8947c1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java @@ -31,9 +31,11 @@ public abstract class ApplicationEvent { public enum Type { COMMIT_ASYNC, COMMIT_SYNC, POLL, FETCH_COMMITTED_OFFSETS, NEW_TOPICS_METADATA_UPDATE, ASSIGNMENT_CHANGE, LIST_OFFSETS, CHECK_AND_UPDATE_POSITIONS, RESET_OFFSET, TOPIC_METADATA, ALL_TOPICS_METADATA, - TOPIC_SUBSCRIPTION_CHANGE, TOPIC_PATTERN_SUBSCRIPTION_CHANGE, UPDATE_SUBSCRIPTION_METADATA, - UNSUBSCRIBE, CONSUMER_REBALANCE_LISTENER_CALLBACK_COMPLETED, - COMMIT_ON_CLOSE, CREATE_FETCH_REQUESTS, + TOPIC_SUBSCRIPTION_CHANGE, TOPIC_PATTERN_SUBSCRIPTION_CHANGE, TOPIC_RE2J_PATTERN_SUBSCRIPTION_CHANGE, + UPDATE_SUBSCRIPTION_METADATA, UNSUBSCRIBE, + CONSUMER_REBALANCE_LISTENER_CALLBACK_COMPLETED, + COMMIT_ON_CLOSE, CREATE_FETCH_REQUESTS, LEAVE_GROUP_ON_CLOSE, + PAUSE_PARTITIONS, RESUME_PARTITIONS, CURRENT_LAG, SHARE_FETCH, SHARE_ACKNOWLEDGE_ASYNC, SHARE_ACKNOWLEDGE_SYNC, SHARE_SUBSCRIPTION_CHANGE, SHARE_UNSUBSCRIBE, SHARE_ACKNOWLEDGE_ON_CLOSE, @@ -49,6 +51,12 @@ public enum Type { */ private final Uuid id; + /** + * The time in milliseconds when this event 
was enqueued. + * This field can be changed after the event is created, so it should not be used in hashCode or equals. + */ + private long enqueuedMs; + protected ApplicationEvent(Type type) { this.type = Objects.requireNonNull(type); this.id = Uuid.randomUuid(); @@ -62,6 +70,14 @@ public Uuid id() { return id; } + public void setEnqueuedMs(long enqueuedMs) { + this.enqueuedMs = enqueuedMs; + } + + public long enqueuedMs() { + return enqueuedMs; + } + @Override public final boolean equals(Object o) { if (this == o) return true; @@ -76,7 +92,7 @@ public final int hashCode() { } protected String toStringBase() { - return "type=" + type + ", id=" + id; + return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java index 0baafcd3038d1..6ab827b617c19 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.consumer.internals.ConsumerUtils; import org.apache.kafka.clients.consumer.internals.NetworkClientDelegate; import org.apache.kafka.clients.consumer.internals.RequestManagers; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.internals.IdempotentCloser; import org.apache.kafka.common.utils.LogContext; @@ -42,9 +43,11 @@ public class ApplicationEventHandler implements Closeable { private final Logger log; + private final Time time; private final BlockingQueue applicationEventQueue; private final ConsumerNetworkThread networkThread; private final IdempotentCloser closer = new IdempotentCloser(); + private final AsyncConsumerMetrics asyncConsumerMetrics; public ApplicationEventHandler(final LogContext logContext, final Time time, @@ -52,16 +55,20 @@ public ApplicationEventHandler(final LogContext logContext, final CompletableEventReaper applicationEventReaper, final Supplier applicationEventProcessorSupplier, final Supplier networkClientDelegateSupplier, - final Supplier requestManagersSupplier) { + final Supplier requestManagersSupplier, + final AsyncConsumerMetrics asyncConsumerMetrics) { this.log = logContext.logger(ApplicationEventHandler.class); + this.time = time; this.applicationEventQueue = applicationEventQueue; + this.asyncConsumerMetrics = asyncConsumerMetrics; this.networkThread = new ConsumerNetworkThread(logContext, time, applicationEventQueue, applicationEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, - requestManagersSupplier); + requestManagersSupplier, + asyncConsumerMetrics); this.networkThread.start(); } @@ -73,6 +80,10 @@ public ApplicationEventHandler(final LogContext logContext, */ public void add(final ApplicationEvent event) { Objects.requireNonNull(event, "ApplicationEvent provided to add must be non-null"); + event.setEnqueuedMs(time.milliseconds()); + // Record the updated queue size before actually adding the event to the queue + // to avoid race conditions (the background thread is continuously removing from this queue) + asyncConsumerMetrics.recordApplicationEventQueueSize(applicationEventQueue.size() + 1); applicationEventQueue.add(event); wakeupNetworkThread(); } 
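The ApplicationEventHandler diff above stamps every event with its enqueue time and records the application-event queue size before the add, since the background thread drains the queue concurrently and a size taken after the add could under-count. Below is a minimal sketch of that enqueue-and-drain instrumentation pattern using only JDK types; TimedEvent, QueueMetrics and InstrumentedEventQueue are hypothetical names for illustration, not classes from this patch.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.LongSupplier;

// Hypothetical stand-ins for ApplicationEvent/BackgroundEvent and AsyncConsumerMetrics.
class TimedEvent {
    private long enqueuedMs;                      // mutable, so excluded from equals/hashCode
    void setEnqueuedMs(long enqueuedMs) { this.enqueuedMs = enqueuedMs; }
    long enqueuedMs() { return enqueuedMs; }
}

interface QueueMetrics {
    void recordQueueSize(int size);               // gauge of queue depth
    void recordQueueTime(long timeInQueueMs);     // how long an event waited before processing
}

class InstrumentedEventQueue {
    private final BlockingQueue<TimedEvent> queue = new LinkedBlockingQueue<>();
    private final QueueMetrics metrics;
    private final LongSupplier clock;             // e.g. Time::milliseconds in the real code

    InstrumentedEventQueue(QueueMetrics metrics, LongSupplier clock) {
        this.metrics = metrics;
        this.clock = clock;
    }

    // Enqueue side (application thread): stamp the enqueue time and record the size
    // before add(), because the consumer of the queue drains concurrently and a
    // size() taken afterwards could miss this event entirely.
    void add(TimedEvent event) {
        event.setEnqueuedMs(clock.getAsLong());
        metrics.recordQueueSize(queue.size() + 1);
        queue.add(event);
    }

    // Dequeue side (background thread): drain everything, reset the gauge to zero,
    // and record how long each event sat in the queue.
    List<TimedEvent> drain() {
        List<TimedEvent> events = new ArrayList<>();
        queue.drainTo(events);
        metrics.recordQueueSize(0);
        long now = clock.getAsLong();
        events.forEach(e -> metrics.recordQueueTime(now - e.enqueuedMs()));
        return events;
    }
}

BackgroundEventHandler.drainEvents() further down in this patch follows the same drain-and-reset shape: drainTo into a list, then record a queue size of zero.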
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java index 642e5e0cca2d3..9c119e28b7b10 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java @@ -27,18 +27,22 @@ import org.apache.kafka.clients.consumer.internals.ShareConsumeRequestManager; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; @@ -120,6 +124,10 @@ public void process(ApplicationEvent event) { process((TopicPatternSubscriptionChangeEvent) event); return; + case TOPIC_RE2J_PATTERN_SUBSCRIPTION_CHANGE: + process((TopicRe2JPatternSubscriptionChangeEvent) event); + return; + case UPDATE_SUBSCRIPTION_METADATA: process((UpdatePatternSubscriptionEvent) event); return; @@ -136,6 +144,10 @@ public void process(ApplicationEvent event) { process((CommitOnCloseEvent) event); return; + case LEAVE_GROUP_ON_CLOSE: + process((LeaveGroupOnCloseEvent) event); + return; + case CREATE_FETCH_REQUESTS: process((CreateFetchRequestsEvent) event); return; @@ -172,6 +184,18 @@ public void process(ApplicationEvent event) { process((SeekUnvalidatedEvent) event); return; + case PAUSE_PARTITIONS: + process((PausePartitionsEvent) event); + return; + + case RESUME_PARTITIONS: + process((ResumePartitionsEvent) event); + return; + + case CURRENT_LAG: + process((CurrentLagEvent) event); + return; + default: log.warn("Application event type {} was not expected", event.type()); } @@ -277,7 +301,7 @@ private void process(final ListOffsetsEvent event) { * it is already a member on the next poll. */ private void process(final TopicSubscriptionChangeEvent event) { - if (!requestManagers.consumerHeartbeatRequestManager.isPresent()) { + if (requestManagers.consumerHeartbeatRequestManager.isEmpty()) { log.warn("Group membership manager not present when processing a subscribe event"); event.future().complete(null); return; @@ -296,9 +320,11 @@ private void process(final TopicSubscriptionChangeEvent event) { } /** - * Process event that indicates that the subscription topic pattern changed. This will make the - * consumer join the group if it is not part of it yet, or send the updated subscription if - * it is already a member on the next poll. + * Process event that indicates that the subscription java pattern changed. + * This will update the subscription state in the client to persist the new pattern. + * It will also evaluate the pattern against the latest metadata to find the matching topics, + * and send an updated subscription to the broker on the next poll + * (joining the group if it's not already part of it). 
*/ private void process(final TopicPatternSubscriptionChangeEvent event) { try { @@ -311,16 +337,39 @@ private void process(final TopicPatternSubscriptionChangeEvent event) { } } + /** + * Process event that indicates that the subscription RE2J pattern changed. + * This will update the subscription state in the client to persist the new pattern. + * It will also make the consumer send the updated pattern on the next poll, + * joining the group if it's not already part of it. + * Note that this does not evaluate the pattern, it just passes it to the broker. + */ + private void process(final TopicRe2JPatternSubscriptionChangeEvent event) { + if (requestManagers.consumerMembershipManager.isEmpty()) { + event.future().completeExceptionally( + new KafkaException("MembershipManager is not available when processing a subscribe event")); + return; + } + try { + subscriptions.subscribe(event.pattern(), event.listener()); + requestManagers.consumerMembershipManager.get().onSubscriptionUpdated(); + event.future().complete(null); + } catch (Exception e) { + event.future().completeExceptionally(e); + } + } + /** * Process event that re-evaluates the subscribed regular expression using the latest topics from metadata, only if metadata changed. * This will make the consumer send the updated subscription on the next poll. */ private void process(final UpdatePatternSubscriptionEvent event) { + if (!subscriptions.hasPatternSubscription()) { + return; + } if (this.metadataVersionSnapshot < metadata.updateVersion()) { this.metadataVersionSnapshot = metadata.updateVersion(); - if (subscriptions.hasPatternSubscription()) { - updatePatternSubscription(metadata.fetch()); - } + updatePatternSubscription(metadata.fetch()); } event.future().complete(null); } @@ -377,7 +426,7 @@ private void process(final AllTopicsMetadataEvent event) { } private void process(final ConsumerRebalanceListenerCallbackCompletedEvent event) { - if (!requestManagers.consumerHeartbeatRequestManager.isPresent()) { + if (requestManagers.consumerHeartbeatRequestManager.isEmpty()) { log.warn( "An internal error occurred; the group membership manager was not present, so the notification of the {} callback execution could not be sent", event.methodName() @@ -388,12 +437,21 @@ private void process(final ConsumerRebalanceListenerCallbackCompletedEvent event } private void process(@SuppressWarnings("unused") final CommitOnCloseEvent event) { - if (!requestManagers.commitRequestManager.isPresent()) + if (requestManagers.commitRequestManager.isEmpty()) return; log.debug("Signal CommitRequestManager closing"); requestManagers.commitRequestManager.get().signalClose(); } + private void process(final LeaveGroupOnCloseEvent event) { + if (requestManagers.consumerMembershipManager.isEmpty()) + return; + + log.debug("Signal the ConsumerMembershipManager to leave the consumer group since the consumer is closing"); + CompletableFuture future = requestManagers.consumerMembershipManager.get().leaveGroupOnClose(); + future.whenComplete(complete(event.future())); + } + /** * Process event that tells the share consume request manager to fetch more records. */ @@ -405,7 +463,7 @@ private void process(final ShareFetchEvent event) { * Process event that indicates the consumer acknowledged delivery of records synchronously. 
*/ private void process(final ShareAcknowledgeSyncEvent event) { - if (!requestManagers.shareConsumeRequestManager.isPresent()) { + if (requestManagers.shareConsumeRequestManager.isEmpty()) { return; } @@ -419,7 +477,7 @@ private void process(final ShareAcknowledgeSyncEvent event) { * Process event that indicates the consumer acknowledged delivery of records asynchronously. */ private void process(final ShareAcknowledgeAsyncEvent event) { - if (!requestManagers.shareConsumeRequestManager.isPresent()) { + if (requestManagers.shareConsumeRequestManager.isEmpty()) { return; } @@ -433,7 +491,7 @@ private void process(final ShareAcknowledgeAsyncEvent event) { * it is already a member. */ private void process(final ShareSubscriptionChangeEvent event) { - if (!requestManagers.shareHeartbeatRequestManager.isPresent()) { + if (requestManagers.shareHeartbeatRequestManager.isEmpty()) { KafkaException error = new KafkaException("Group membership manager not present when processing a subscribe event"); event.future().completeExceptionally(error); return; @@ -456,7 +514,7 @@ private void process(final ShareSubscriptionChangeEvent event) { * the group is sent out. */ private void process(final ShareUnsubscribeEvent event) { - if (!requestManagers.shareHeartbeatRequestManager.isPresent()) { + if (requestManagers.shareHeartbeatRequestManager.isEmpty()) { KafkaException error = new KafkaException("Group membership manager not present when processing an unsubscribe event"); event.future().completeExceptionally(error); return; @@ -477,7 +535,7 @@ private void process(final ShareUnsubscribeEvent event) { * the acknowledgements have responses. */ private void process(final ShareAcknowledgeOnCloseEvent event) { - if (!requestManagers.shareConsumeRequestManager.isPresent()) { + if (requestManagers.shareConsumeRequestManager.isEmpty()) { KafkaException error = new KafkaException("Group membership manager not present when processing an acknowledge-on-close event"); event.future().completeExceptionally(error); return; @@ -494,7 +552,7 @@ private void process(final ShareAcknowledgeOnCloseEvent event) { * @param event Event containing a boolean to indicate if the callback handler is configured or not. 
*/ private void process(final ShareAcknowledgementCommitCallbackRegistrationEvent event) { - if (!requestManagers.shareConsumeRequestManager.isPresent()) { + if (requestManagers.shareConsumeRequestManager.isEmpty()) { return; } @@ -502,6 +560,87 @@ private void process(final ShareAcknowledgementCommitCallbackRegistrationEvent e manager.setAcknowledgementCommitCallbackRegistered(event.isCallbackRegistered()); } + private void process(final SeekUnvalidatedEvent event) { + try { + event.offsetEpoch().ifPresent(epoch -> metadata.updateLastSeenEpochIfNewer(event.partition(), epoch)); + SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( + event.offset(), + event.offsetEpoch(), + metadata.currentLeader(event.partition()) + ); + subscriptions.seekUnvalidated(event.partition(), newPosition); + event.future().complete(null); + } catch (Exception e) { + event.future().completeExceptionally(e); + } + } + + private void process(final PausePartitionsEvent event) { + try { + Collection partitions = event.partitions(); + log.debug("Pausing partitions {}", partitions); + + for (TopicPartition partition : partitions) { + subscriptions.pause(partition); + } + + event.future().complete(null); + } catch (Exception e) { + event.future().completeExceptionally(e); + } + } + + private void process(final ResumePartitionsEvent event) { + try { + Collection partitions = event.partitions(); + log.debug("Resuming partitions {}", partitions); + + for (TopicPartition partition : partitions) { + subscriptions.resume(partition); + } + + event.future().complete(null); + } catch (Exception e) { + event.future().completeExceptionally(e); + } + } + + private void process(final CurrentLagEvent event) { + try { + final TopicPartition topicPartition = event.partition(); + final IsolationLevel isolationLevel = event.isolationLevel(); + final Long lag = subscriptions.partitionLag(topicPartition, isolationLevel); + + final OptionalLong lagOpt; + if (lag == null) { + if (subscriptions.partitionEndOffset(topicPartition, isolationLevel) == null && + !subscriptions.partitionEndOffsetRequested(topicPartition)) { + // If the log end offset is unknown and there isn't already an in-flight list offset + // request, issue one with the goal that the lag will be available the next time the + // user calls currentLag(). + log.info("Requesting the log end offset for {} in order to compute lag", topicPartition); + subscriptions.requestPartitionEndOffset(topicPartition); + + // Emulates the Consumer.endOffsets() logic... 
+ Map timestampToSearch = Collections.singletonMap( + topicPartition, + ListOffsetsRequest.LATEST_TIMESTAMP + ); + + requestManagers.offsetsRequestManager.fetchOffsets(timestampToSearch, false); + } + + lagOpt = OptionalLong.empty(); + } else { + lagOpt = OptionalLong.of(lag); + } + + event.future().complete(lagOpt); + } catch (Exception e) { + event.future().completeExceptionally(e); + } + } + private BiConsumer complete(final CompletableFuture b) { return (value, exception) -> { if (exception != null) @@ -519,7 +658,7 @@ public static Supplier supplier(final LogContext logC final ConsumerMetadata metadata, final SubscriptionState subscriptions, final Supplier requestManagersSupplier) { - return new CachedSupplier() { + return new CachedSupplier<>() { @Override protected ApplicationEventProcessor create() { RequestManagers requestManagers = requestManagersSupplier.get(); @@ -533,21 +672,6 @@ protected ApplicationEventProcessor create() { }; } - private void process(final SeekUnvalidatedEvent event) { - try { - event.offsetEpoch().ifPresent(epoch -> metadata.updateLastSeenEpochIfNewer(event.partition(), epoch)); - SubscriptionState.FetchPosition newPosition = new SubscriptionState.FetchPosition( - event.offset(), - event.offsetEpoch(), - metadata.currentLeader(event.partition()) - ); - subscriptions.seekUnvalidated(event.partition(), newPosition); - event.future().complete(null); - } catch (Exception e) { - event.future().completeExceptionally(e); - } - } - /** * This function evaluates the regex that the consumer subscribed to * against the list of topic names from metadata, and updates @@ -566,9 +690,11 @@ private void updatePatternSubscription(Cluster cluster) { if (subscriptions.subscribeFromPattern(topicsToSubscribe)) { this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); - // Join the group if not already part of it, or just send the new subscription to the broker on the next poll. - requestManagers.consumerHeartbeatRequestManager.get().membershipManager().onSubscriptionUpdated(); } + // Join the group if not already part of it, or just send the updated subscription + // to the broker on the next poll. Note that this is done even if no topics matched + // the regex, to ensure the member joins the group if needed (with empty subscription). 
+ requestManagers.consumerHeartbeatRequestManager.get().membershipManager().onSubscriptionUpdated(); } // Visible for testing diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AssignmentChangeEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AssignmentChangeEvent.java index 68e1bbc5e6d4e..7f48ee644c7ed 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AssignmentChangeEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AssignmentChangeEvent.java @@ -19,8 +19,7 @@ import org.apache.kafka.common.TopicPartition; import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; +import java.util.Set; public class AssignmentChangeEvent extends CompletableApplicationEvent { @@ -30,7 +29,7 @@ public class AssignmentChangeEvent extends CompletableApplicationEvent { public AssignmentChangeEvent(final long currentTimeMs, final long deadlineMs, final Collection partitions) { super(Type.ASSIGNMENT_CHANGE, deadlineMs); this.currentTimeMs = currentTimeMs; - this.partitions = Collections.unmodifiableSet(new HashSet<>(partitions)); + this.partitions = Set.copyOf(partitions); } public long currentTimeMs() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java index 7e9fdaed2d837..02fc4b4a29ba4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java @@ -38,6 +38,12 @@ public enum Type { */ private final Uuid id; + /** + * The time in milliseconds when this event was enqueued. + * This field can be changed after the event is created, so it should not be used in hashCode or equals. 
+ */ + private long enqueuedMs; + protected BackgroundEvent(Type type) { this.type = Objects.requireNonNull(type); this.id = Uuid.randomUuid(); @@ -51,6 +57,14 @@ public Uuid id() { return id; } + public void setEnqueuedMs(long enqueuedMs) { + this.enqueuedMs = enqueuedMs; + } + + public long enqueuedMs() { + return enqueuedMs; + } + @Override public final boolean equals(Object o) { if (this == o) return true; @@ -65,7 +79,7 @@ public final int hashCode() { } protected String toStringBase() { - return "type=" + type + ", id=" + id; + return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java index f6ded0bf735e0..3e83908f3df42 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java @@ -17,9 +17,13 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.common.utils.Time; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; -import java.util.Queue; +import java.util.concurrent.BlockingQueue; /** * An event handler that receives {@link BackgroundEvent background events} from the @@ -29,10 +33,16 @@ public class BackgroundEventHandler { - private final Queue backgroundEventQueue; + private final BlockingQueue backgroundEventQueue; + private final Time time; + private final AsyncConsumerMetrics asyncConsumerMetrics; - public BackgroundEventHandler(final Queue backgroundEventQueue) { + public BackgroundEventHandler(final BlockingQueue backgroundEventQueue, + final Time time, + final AsyncConsumerMetrics asyncConsumerMetrics) { this.backgroundEventQueue = backgroundEventQueue; + this.time = time; + this.asyncConsumerMetrics = asyncConsumerMetrics; } /** @@ -42,6 +52,20 @@ public BackgroundEventHandler(final Queue backgroundEventQueue) */ public void add(BackgroundEvent event) { Objects.requireNonNull(event, "BackgroundEvent provided to add must be non-null"); + event.setEnqueuedMs(time.milliseconds()); + asyncConsumerMetrics.recordBackgroundEventQueueSize(backgroundEventQueue.size() + 1); backgroundEventQueue.add(event); } + + /** + * Drain all the {@link BackgroundEvent events} from the handler. 
+ * + * @return A list of {@link BackgroundEvent events} that were drained + */ + public List drainEvents() { + List events = new ArrayList<>(); + backgroundEventQueue.drainTo(events); + asyncConsumerMetrics.recordBackgroundEventQueueSize(0); + return events; + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java index 2c7fdd7464283..5f1ced33e3a09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java @@ -18,6 +18,9 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.SubscriptionState; +import org.apache.kafka.common.TopicPartition; + +import java.time.Duration; /** * Event to check if all assigned partitions have fetch positions. If there are positions missing, it will fetch @@ -32,4 +35,15 @@ public class CheckAndUpdatePositionsEvent extends CompletableApplicationEvent> validate(final Optional> offsets) { - if (!offsets.isPresent()) { + if (offsets.isEmpty()) { return Optional.empty(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java index 8cd17d19feb66..51b2d1ffbdb90 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java @@ -52,4 +52,8 @@ public long deadlineMs() { protected String toStringBase() { return super.toStringBase() + ", future=" + future + ", deadlineMs=" + deadlineMs; } + + public boolean requireSubscriptionMetadata() { + return false; + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java index 9f91617c73579..5a0358df8964f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java @@ -29,6 +29,7 @@ import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; +import java.util.stream.Collectors; /** * {@code CompletableEventReaper} is responsible for tracking {@link CompletableEvent time-bound events} and removing @@ -81,8 +82,9 @@ public void add(CompletableEvent event) { * * @param currentTimeMs Current time with which to compare against the * {@link CompletableEvent#deadlineMs() expiration time} + * @return The number of events that were expired */ - public void reap(long currentTimeMs) { + public long reap(long currentTimeMs) { Consumer> expireEvent = event -> { long pastDueMs = currentTimeMs - event.deadlineMs(); TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, event.deadlineMs())); @@ -95,13 +97,16 @@ public void reap(long currentTimeMs) { }; // First, complete (exceptionally) any events that have passed their deadline AND aren't already 
complete. - tracked.stream() + long count = tracked.stream() .filter(e -> !e.future().isDone()) .filter(e -> currentTimeMs >= e.deadlineMs()) - .forEach(expireEvent); + .peek(expireEvent) + .count(); // Second, remove any events that are already complete, just to make sure we don't hold references. This will // include any events that finished successfully as well as any events we just completed exceptionally above. tracked.removeIf(e -> e.future().isDone()); + + return count; } /** @@ -121,8 +126,9 @@ public void reap(long currentTimeMs) { * don't take the deadline into consideration, just close it regardless. * * @param events Events from a queue that have not yet been tracked that also need to be reviewed + * @return The number of events that were expired */ - public void reap(Collection events) { + public long reap(Collection events) { Objects.requireNonNull(events, "Event queue to reap must be non-null"); Consumer> expireEvent = event -> { @@ -135,17 +141,20 @@ public void reap(Collection events) { } }; - tracked.stream() + long trackedExpiredCount = tracked.stream() .filter(e -> !e.future().isDone()) - .forEach(expireEvent); + .peek(expireEvent) + .count(); tracked.clear(); - events.stream() + long eventExpiredCount = events.stream() .filter(e -> e instanceof CompletableEvent) .map(e -> (CompletableEvent) e) .filter(e -> !e.future().isDone()) - .forEach(expireEvent); + .peek(expireEvent) + .count(); events.clear(); + return trackedExpiredCount + eventExpiredCount; } public int size() { @@ -155,4 +164,11 @@ public int size() { public boolean contains(CompletableEvent event) { return event != null && tracked.contains(event); } + + public List> uncompletedEvents() { + return tracked.stream() + .filter(e -> !e.future().isDone()) + .collect(Collectors.toList()); + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CurrentLagEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CurrentLagEvent.java new file mode 100644 index 0000000000000..95bd23fde6791 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CurrentLagEvent.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.common.IsolationLevel; +import org.apache.kafka.common.TopicPartition; + +import java.util.Objects; +import java.util.OptionalLong; + +public class CurrentLagEvent extends CompletableApplicationEvent { + + private final TopicPartition partition; + + private final IsolationLevel isolationLevel; + + public CurrentLagEvent(final TopicPartition partition, final IsolationLevel isolationLevel, final long deadlineMs) { + super(Type.CURRENT_LAG, deadlineMs); + this.partition = Objects.requireNonNull(partition); + this.isolationLevel = Objects.requireNonNull(isolationLevel); + } + + public TopicPartition partition() { + return partition; + } + + public IsolationLevel isolationLevel() { + return isolationLevel; + } + + @Override + public String toStringBase() { + return super.toStringBase() + ", partition=" + partition + ", isolationLevel=" + isolationLevel; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java new file mode 100644 index 0000000000000..4afc00390d449 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.internals.ConsumerMembershipManager; +import org.apache.kafka.clients.consumer.internals.ConsumerUtils; + +import java.time.Duration; + +/** + * When the user calls {@link Consumer#close()}, this event is sent to signal the {@link ConsumerMembershipManager} + * to perform the necessary steps to leave the consumer group cleanly, if possible. The event's timeout is based on + * either the user-provided value to {@link Consumer#close(Duration)} or + * {@link ConsumerUtils#DEFAULT_CLOSE_TIMEOUT_MS} if {@link Consumer#close()} was called. The event is considered + * complete when the membership manager receives the heartbeat response that it has left the group. 
+ */ +public class LeaveGroupOnCloseEvent extends CompletableApplicationEvent { + + public LeaveGroupOnCloseEvent(final long deadlineMs) { + super(Type.LEAVE_GROUP_ON_CLOSE, deadlineMs); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java index 8ae2f1ea57612..605a2ff30c24a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java @@ -64,6 +64,11 @@ public boolean requireTimestamps() { return requireTimestamps; } + @Override + public boolean requireSubscriptionMetadata() { + return true; + } + @Override public String toStringBase() { return super.toStringBase() + diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/PausePartitionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/PausePartitionsEvent.java new file mode 100644 index 0000000000000..14c729e1d4ab7 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/PausePartitionsEvent.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
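A sketch of the close path described in the LeaveGroupOnCloseEvent javadoc above, assuming an applicationEventHandler field that enqueues events for the background thread and a Kafka Time instance named time; exception handling is omitted.

    long closeTimeoutMs = ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS;      // or timeout.toMillis() when close(Duration) was called
    long deadlineMs = time.milliseconds() + closeTimeoutMs;            // absolute deadline used by the event reaper
    LeaveGroupOnCloseEvent leaveEvent = new LeaveGroupOnCloseEvent(deadlineMs);
    applicationEventHandler.add(leaveEvent);                           // background thread drives the leave-group heartbeat
    leaveEvent.future().get(closeTimeoutMs, TimeUnit.MILLISECONDS);    // completes once the heartbeat response confirms the leave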
+ */ + +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.common.TopicPartition; + +import java.util.Collection; +import java.util.Collections; + +public class PausePartitionsEvent extends CompletableApplicationEvent { + + private final Collection partitions; + + public PausePartitionsEvent(final Collection partitions, final long deadlineMs) { + super(Type.PAUSE_PARTITIONS, deadlineMs); + this.partitions = Collections.unmodifiableCollection(partitions); + } + + public Collection partitions() { + return partitions; + } + + @Override + public String toStringBase() { + return super.toStringBase() + ", partitions=" + partitions; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetOffsetEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetOffsetEvent.java index 145b8643d356f..c7b4ff9641db1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetOffsetEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetOffsetEvent.java @@ -18,8 +18,8 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import java.time.Duration; @@ -37,9 +37,9 @@ public class ResetOffsetEvent extends CompletableApplicationEvent { private final Collection topicPartitions; - private final OffsetResetStrategy offsetResetStrategy; + private final AutoOffsetResetStrategy offsetResetStrategy; - public ResetOffsetEvent(Collection topicPartitions, OffsetResetStrategy offsetResetStrategy, long deadline) { + public ResetOffsetEvent(Collection topicPartitions, AutoOffsetResetStrategy offsetResetStrategy, long deadline) { super(Type.RESET_OFFSET, deadline); this.topicPartitions = Collections.unmodifiableCollection(topicPartitions); this.offsetResetStrategy = Objects.requireNonNull(offsetResetStrategy); @@ -49,7 +49,7 @@ public Collection topicPartitions() { return topicPartitions; } - public OffsetResetStrategy offsetResetStrategy() { + public AutoOffsetResetStrategy offsetResetStrategy() { return offsetResetStrategy; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResumePartitionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResumePartitionsEvent.java new file mode 100644 index 0000000000000..02a49a057ad54 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResumePartitionsEvent.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
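A sketch of how the application thread might route pause() through this event (and resume() through the matching ResumePartitionsEvent added next); the applicationEventHandler, time and defaultApiTimeoutMs names are assumptions.

    long deadlineMs = time.milliseconds() + defaultApiTimeoutMs;
    applicationEventHandler.add(new PausePartitionsEvent(partitions, deadlineMs));    // background thread marks the partitions paused
    // later, to let fetching continue:
    applicationEventHandler.add(new ResumePartitionsEvent(partitions, deadlineMs));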
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.common.TopicPartition; + +import java.util.Collection; +import java.util.Collections; + +public class ResumePartitionsEvent extends CompletableApplicationEvent { + + private final Collection partitions; + + public ResumePartitionsEvent(final Collection partitions, final long deadlineMs) { + super(Type.RESUME_PARTITIONS, deadlineMs); + this.partitions = Collections.unmodifiableCollection(partitions); + } + + public Collection partitions() { + return partitions; + } + + @Override + public String toStringBase() { + return super.toStringBase() + ", partitions=" + partitions; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java index 23d4410d07bc2..0916ab8666c09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java @@ -23,7 +23,7 @@ public class ShareAcknowledgeOnCloseEvent extends CompletableApplicationEvent { - private Map acknowledgementsMap; + private final Map acknowledgementsMap; public ShareAcknowledgeOnCloseEvent(final Map acknowledgementsMap, final long deadlineMs) { super(Type.SHARE_ACKNOWLEDGE_ON_CLOSE, deadlineMs); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java index db3259a677933..49cb422e63325 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java @@ -23,7 +23,7 @@ public class ShareAcknowledgeSyncEvent extends CompletableApplicationEvent> { - private Map acknowledgementsMap; + private final Map acknowledgementsMap; public ShareAcknowledgeSyncEvent(final Map acknowledgementsMap, final long deadlineMs) { super(Type.SHARE_ACKNOWLEDGE_SYNC, deadlineMs); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java index d5ce57b947ad7..2a2b56e87cd78 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java @@ -23,7 +23,7 @@ public class ShareFetchEvent extends ApplicationEvent { - private Map acknowledgementsMap; + private final Map acknowledgementsMap; public ShareFetchEvent(Map acknowledgementsMap) { super(Type.SHARE_FETCH); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicRe2JPatternSubscriptionChangeEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicRe2JPatternSubscriptionChangeEvent.java new file mode 100644 index 0000000000000..37e707c7b8b13 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicRe2JPatternSubscriptionChangeEvent.java @@ -0,0 +1,48 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.SubscriptionPattern; + +import java.util.Optional; + +/** + * Application event triggered by a call to the subscribe API + * providing a {@link SubscriptionPattern} (RE2J-compatible pattern). + * This will make the consumer send the updated subscription to the + * broker on the next poll, joining the group if it is not already part of it. + */ +public class TopicRe2JPatternSubscriptionChangeEvent extends SubscriptionChangeEvent { + private final SubscriptionPattern pattern; + + public TopicRe2JPatternSubscriptionChangeEvent(final SubscriptionPattern pattern, + final Optional listener, + final long deadlineMs) { + super(Type.TOPIC_RE2J_PATTERN_SUBSCRIPTION_CHANGE, listener, deadlineMs); + this.pattern = pattern; + } + + public SubscriptionPattern pattern() { + return pattern; + } + + @Override + public String toStringBase() { + return super.toStringBase() + ", subscriptionPattern=" + pattern; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java new file mode 100644 index 0000000000000..09e84cbe985cc --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java @@ -0,0 +1,262 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
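A sketch of the subscribe(SubscriptionPattern) path that produces the event above; the pattern string, the empty listener, and the applicationEventHandler/deadlineMs surroundings are illustrative assumptions.

    SubscriptionPattern pattern = new SubscriptionPattern("orders-.*");   // RE2J syntax, evaluated broker side
    TopicRe2JPatternSubscriptionChangeEvent event =
        new TopicRe2JPatternSubscriptionChangeEvent(pattern, Optional.empty(), deadlineMs);
    applicationEventHandler.add(event);   // the next poll sends the updated subscription, joining the group if needed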
+ */ +package org.apache.kafka.clients.consumer.internals.metrics; + +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Value; + +import java.util.Arrays; + +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; + +public class AsyncConsumerMetrics extends KafkaConsumerMetrics implements AutoCloseable { + private final Metrics metrics; + + public static final String TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME = "time-between-network-thread-poll"; + public static final String APPLICATION_EVENT_QUEUE_SIZE_SENSOR_NAME = "application-event-queue-size"; + public static final String APPLICATION_EVENT_QUEUE_TIME_SENSOR_NAME = "application-event-queue-time"; + public static final String APPLICATION_EVENT_QUEUE_PROCESSING_TIME_SENSOR_NAME = "application-event-queue-processing-time"; + public static final String APPLICATION_EVENT_EXPIRED_SIZE_SENSOR_NAME = "application-events-expired-count"; + public static final String BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME = "background-event-queue-size"; + public static final String BACKGROUND_EVENT_QUEUE_TIME_SENSOR_NAME = "background-event-queue-time"; + public static final String BACKGROUND_EVENT_QUEUE_PROCESSING_TIME_SENSOR_NAME = "background-event-queue-processing-time"; + public static final String UNSENT_REQUESTS_QUEUE_SIZE_SENSOR_NAME = "unsent-requests-queue-size"; + public static final String UNSENT_REQUESTS_QUEUE_TIME_SENSOR_NAME = "unsent-requests-queue-time"; + private final Sensor timeBetweenNetworkThreadPollSensor; + private final Sensor applicationEventQueueSizeSensor; + private final Sensor applicationEventQueueTimeSensor; + private final Sensor applicationEventQueueProcessingTimeSensor; + private final Sensor applicationEventExpiredSizeSensor; + private final Sensor backgroundEventQueueSizeSensor; + private final Sensor backgroundEventQueueTimeSensor; + private final Sensor backgroundEventQueueProcessingTimeSensor; + private final Sensor unsentRequestsQueueSizeSensor; + private final Sensor unsentRequestsQueueTimeSensor; + + public AsyncConsumerMetrics(Metrics metrics) { + super(metrics, CONSUMER_METRIC_GROUP_PREFIX); + + this.metrics = metrics; + this.timeBetweenNetworkThreadPollSensor = metrics.sensor(TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME); + this.timeBetweenNetworkThreadPollSensor.add( + metrics.metricName( + "time-between-network-thread-poll-avg", + CONSUMER_METRIC_GROUP, + "The average time taken, in milliseconds, between each poll in the network thread." + ), + new Avg() + ); + this.timeBetweenNetworkThreadPollSensor.add( + metrics.metricName( + "time-between-network-thread-poll-max", + CONSUMER_METRIC_GROUP, + "The maximum time taken, in milliseconds, between each poll in the network thread." + ), + new Max() + ); + + this.applicationEventQueueSizeSensor = metrics.sensor(APPLICATION_EVENT_QUEUE_SIZE_SENSOR_NAME); + this.applicationEventQueueSizeSensor.add( + metrics.metricName( + APPLICATION_EVENT_QUEUE_SIZE_SENSOR_NAME, + CONSUMER_METRIC_GROUP, + "The current number of events in the queue to send from the application thread to the background thread." 
+ ), + new Value() + ); + + this.applicationEventQueueTimeSensor = metrics.sensor(APPLICATION_EVENT_QUEUE_TIME_SENSOR_NAME); + this.applicationEventQueueTimeSensor.add( + metrics.metricName( + "application-event-queue-time-avg", + CONSUMER_METRIC_GROUP, + "The average time, in milliseconds, that application events are taking to be dequeued." + ), + new Avg() + ); + this.applicationEventQueueTimeSensor.add( + metrics.metricName( + "application-event-queue-time-max", + CONSUMER_METRIC_GROUP, + "The maximum time, in milliseconds, that an application event took to be dequeued." + ), + new Max() + ); + + this.applicationEventQueueProcessingTimeSensor = metrics.sensor(APPLICATION_EVENT_QUEUE_PROCESSING_TIME_SENSOR_NAME); + this.applicationEventQueueProcessingTimeSensor.add( + metrics.metricName( + "application-event-queue-processing-time-avg", + CONSUMER_METRIC_GROUP, + "The average time, in milliseconds, that the background thread takes to process all available application events." + ), + new Avg() + ); + this.applicationEventQueueProcessingTimeSensor.add( + metrics.metricName("application-event-queue-processing-time-max", + CONSUMER_METRIC_GROUP, + "The maximum time, in milliseconds, that the background thread took to process all available application events." + ), + new Max() + ); + + this.applicationEventExpiredSizeSensor = metrics.sensor(APPLICATION_EVENT_EXPIRED_SIZE_SENSOR_NAME); + this.applicationEventExpiredSizeSensor.add( + metrics.metricName( + APPLICATION_EVENT_EXPIRED_SIZE_SENSOR_NAME, + CONSUMER_METRIC_GROUP, + "The current number of expired application events." + ), + new Value() + ); + + this.unsentRequestsQueueSizeSensor = metrics.sensor(UNSENT_REQUESTS_QUEUE_SIZE_SENSOR_NAME); + this.unsentRequestsQueueSizeSensor.add( + metrics.metricName( + UNSENT_REQUESTS_QUEUE_SIZE_SENSOR_NAME, + CONSUMER_METRIC_GROUP, + "The current number of unsent requests in the background thread." + ), + new Value() + ); + + this.unsentRequestsQueueTimeSensor = metrics.sensor(UNSENT_REQUESTS_QUEUE_TIME_SENSOR_NAME); + this.unsentRequestsQueueTimeSensor.add( + metrics.metricName( + "unsent-requests-queue-time-avg", + CONSUMER_METRIC_GROUP, + "The average time, in milliseconds, that requests are taking to be sent in the background thread." + ), + new Avg() + ); + this.unsentRequestsQueueTimeSensor.add( + metrics.metricName( + "unsent-requests-queue-time-max", + CONSUMER_METRIC_GROUP, + "The maximum time, in milliseconds, that a request remained unsent in the background thread." + ), + new Max() + ); + + this.backgroundEventQueueSizeSensor = metrics.sensor(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME); + this.backgroundEventQueueSizeSensor.add( + metrics.metricName( + BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, + CONSUMER_METRIC_GROUP, + "The current number of events in the queue to send from the background thread to the application thread." + ), + new Value() + ); + + this.backgroundEventQueueTimeSensor = metrics.sensor(BACKGROUND_EVENT_QUEUE_TIME_SENSOR_NAME); + this.backgroundEventQueueTimeSensor.add( + metrics.metricName( + "background-event-queue-time-avg", + CONSUMER_METRIC_GROUP, + "The average time, in milliseconds, that background events are taking to be dequeued." + ), + new Avg() + ); + this.backgroundEventQueueTimeSensor.add( + metrics.metricName( + "background-event-queue-time-max", + CONSUMER_METRIC_GROUP, + "The maximum time, in milliseconds, that background events are taking to be dequeued." 
+ ), + new Max() + ); + + this.backgroundEventQueueProcessingTimeSensor = metrics.sensor(BACKGROUND_EVENT_QUEUE_PROCESSING_TIME_SENSOR_NAME); + this.backgroundEventQueueProcessingTimeSensor.add( + metrics.metricName( + "background-event-queue-processing-time-avg", + CONSUMER_METRIC_GROUP, + "The average time, in milliseconds, that the consumer took to process all available background events." + ), + new Avg() + ); + this.backgroundEventQueueProcessingTimeSensor.add( + metrics.metricName( + "background-event-queue-processing-time-max", + CONSUMER_METRIC_GROUP, + "The maximum time, in milliseconds, that the consumer took to process all available background events." + ), + new Max() + ); + } + + public void recordTimeBetweenNetworkThreadPoll(long timeBetweenNetworkThreadPoll) { + this.timeBetweenNetworkThreadPollSensor.record(timeBetweenNetworkThreadPoll); + } + + public void recordApplicationEventQueueSize(int size) { + this.applicationEventQueueSizeSensor.record(size); + } + + public void recordApplicationEventQueueTime(long time) { + this.applicationEventQueueTimeSensor.record(time); + } + + public void recordApplicationEventQueueProcessingTime(long processingTime) { + this.applicationEventQueueProcessingTimeSensor.record(processingTime); + } + + public void recordApplicationEventExpiredSize(long size) { + this.applicationEventExpiredSizeSensor.record(size); + } + + public void recordUnsentRequestsQueueSize(int size, long timeMs) { + this.unsentRequestsQueueSizeSensor.record(size, timeMs); + } + + public void recordUnsentRequestsQueueTime(long time) { + this.unsentRequestsQueueTimeSensor.record(time); + } + + public void recordBackgroundEventQueueSize(int size) { + this.backgroundEventQueueSizeSensor.record(size); + } + + public void recordBackgroundEventQueueTime(long time) { + this.backgroundEventQueueTimeSensor.record(time); + } + + public void recordBackgroundEventQueueProcessingTime(long processingTime) { + this.backgroundEventQueueProcessingTimeSensor.record(processingTime); + } + + @Override + public void close() { + Arrays.asList( + timeBetweenNetworkThreadPollSensor.name(), + applicationEventQueueSizeSensor.name(), + applicationEventQueueTimeSensor.name(), + applicationEventQueueProcessingTimeSensor.name(), + applicationEventExpiredSizeSensor.name(), + backgroundEventQueueSizeSensor.name(), + backgroundEventQueueTimeSensor.name(), + backgroundEventQueueProcessingTimeSensor.name(), + unsentRequestsQueueSizeSensor.name(), + unsentRequestsQueueTimeSensor.name() + ).forEach(metrics::removeSensor); + super.close(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index e1bd7f03aca80..608bde98b6d46 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -21,6 +21,8 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; @@ -297,7 +299,7 @@ public KafkaProducer(final Map configs) { */ public KafkaProducer(Map configs, Serializer keySerializer, Serializer 
valueSerializer) { this(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)), - keySerializer, valueSerializer, null, null, null, Time.SYSTEM); + keySerializer, valueSerializer, null, null, null, new ApiVersions(), Time.SYSTEM); } /** @@ -326,23 +328,6 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this(Utils.propsToMap(properties), keySerializer, valueSerializer); } - /** - * Check if partitioner is deprecated and log a warning if it is. - */ - @SuppressWarnings("deprecation") - private void warnIfPartitionerDeprecated() { - // Using DefaultPartitioner and UniformStickyPartitioner is deprecated, see KIP-794. - if (partitioner instanceof org.apache.kafka.clients.producer.internals.DefaultPartitioner) { - log.warn("DefaultPartitioner is deprecated. Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG - + " configuration setting to get the default partitioning behavior"); - } - if (partitioner instanceof org.apache.kafka.clients.producer.UniformStickyPartitioner) { - log.warn("UniformStickyPartitioner is deprecated. Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG - + " configuration setting and set " + ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG - + " to 'true' to get the uniform sticky partitioning behavior"); - } - } - // visible for testing @SuppressWarnings({"unchecked", "this-escape"}) KafkaProducer(ProducerConfig config, @@ -351,6 +336,7 @@ private void warnIfPartitionerDeprecated() { ProducerMetadata metadata, KafkaClient kafkaClient, ProducerInterceptors interceptors, + ApiVersions apiVersions, Time time) { try { this.producerConfig = config; @@ -384,7 +370,6 @@ private void warnIfPartitionerDeprecated() { ProducerConfig.PARTITIONER_CLASS_CONFIG, Partitioner.class, Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); - warnIfPartitionerDeprecated(); this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG); long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); long retryBackoffMaxMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); @@ -423,7 +408,7 @@ private void warnIfPartitionerDeprecated() { this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); int deliveryTimeoutMs = configureDeliveryTimeout(config, log); - this.apiVersions = new ApiVersions(); + this.apiVersions = apiVersions; this.transactionManager = configureTransactionState(config, logContext); // There is no need to do work required for adaptive partitioning, if we use a custom partitioner. boolean enableAdaptivePartitioning = partitioner == null && @@ -664,6 +649,7 @@ public void initTransactions() { sender.wakeup(); result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS); producerMetrics.recordInit(time.nanoseconds() - now); + transactionManager.maybeUpdateTransactionV2Enabled(true); } /** @@ -693,47 +679,9 @@ public void beginTransaction() throws ProducerFencedException { * Sends a list of specified offsets to the consumer group coordinator, and also marks * those offsets as part of the current transaction. These offsets will be considered * committed only if the transaction is committed successfully. The committed offset should - * be the next message your application will consume, i.e. lastProcessedMessageOffset + 1. - *

      - * This method should be used when you need to batch consumed and produced messages - * together, typically in a consume-transform-produce pattern. Thus, the specified - * {@code consumerGroupId} should be the same as config parameter {@code group.id} of the used - * {@link KafkaConsumer consumer}. Note, that the consumer should have {@code enable.auto.commit=false} - * and should also not commit offsets manually (via {@link KafkaConsumer#commitSync(Map) sync} or - * {@link KafkaConsumer#commitAsync(Map, OffsetCommitCallback) async} commits). - * - *

      - * This method is a blocking call that waits until the request has been received and acknowledged by the consumer group - * coordinator; but the offsets are not considered as committed until the transaction itself is successfully committed later (via - * the {@link #commitTransaction()} call). - * - * @throws IllegalStateException if no transactional.id has been configured, no transaction has been started - * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active - * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker - * does not support transactions (i.e. if its version is lower than 0.11.0.0) - * @throws org.apache.kafka.common.errors.UnsupportedForMessageFormatException fatal error indicating the message - * format used for the offsets topic on the broker does not support transactions - * @throws org.apache.kafka.common.errors.AuthorizationException fatal error indicating that the configured - * transactional.id is not authorized, or the consumer group id is not authorized. - * @throws org.apache.kafka.common.errors.InvalidProducerEpochException if the producer has attempted to produce with an old epoch - * to the partition leader. See the exception for more details - * @throws TimeoutException if the time taken for sending the offsets has surpassed max.block.ms. - * @throws KafkaException if the producer has encountered a previous fatal or abortable error, or for any - * other unexpected error - * - * @deprecated Since 3.0.0, please use {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} instead. - */ - @Deprecated - public void sendOffsetsToTransaction(Map offsets, - String consumerGroupId) throws ProducerFencedException { - sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId)); - } - - /** - * Sends a list of specified offsets to the consumer group coordinator, and also marks - * those offsets as part of the current transaction. These offsets will be considered - * committed only if the transaction is committed successfully. The committed offset should - * be the next message your application will consume, i.e. lastProcessedMessageOffset + 1. + * be the next message your application will consume, i.e. {@code nextRecordToBeProcessed.offset()} + * (or {@link ConsumerRecords#nextOffsets()}). You should also add the leader epoch as commit metadata, + * which can be obtained from {@link ConsumerRecord#leaderEpoch()} or {@link ConsumerRecords#nextOffsets()}. *
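A hedged consume-transform-produce sketch following the updated guidance, using ConsumerRecords.nextOffsets() so the committed offsets and leader epochs point at the next records to process; the topic name and transform() are placeholders.

    producer.beginTransaction();
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<String, String> record : records) {
        producer.send(new ProducerRecord<>("output-topic", record.key(), transform(record.value())));
    }
    producer.sendOffsetsToTransaction(records.nextOffsets(), consumer.groupMetadata());
    producer.commitTransaction();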

      * This method should be used when you need to batch consumed and produced messages * together, typically in a consume-transform-produce pattern. Thus, the specified @@ -1002,15 +950,6 @@ private void throwIfProducerClosed() { throw new IllegalStateException("Cannot perform operation after producer has been closed"); } - /** - * Call deprecated {@link Partitioner#onNewBatch} - */ - @SuppressWarnings("deprecation") - private void onNewBatch(String topic, Cluster cluster, int prevPartition) { - assert partitioner != null; - partitioner.onNewBatch(topic, cluster, prevPartition); - } - /** * Implementation of asynchronously send a record to a topic. */ @@ -1060,35 +999,20 @@ private Future doSend(ProducerRecord record, Callback call setReadOnly(record.headers()); Header[] headers = record.headers().toArray(); - int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), + int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(RecordBatch.CURRENT_MAGIC_VALUE, compression.type(), serializedKey, serializedValue, headers); ensureValidRecordSize(serializedSize); long timestamp = record.timestamp() == null ? nowMs : record.timestamp(); - // A custom partitioner may take advantage on the onNewBatch callback. - boolean abortOnNewBatch = partitioner != null; - // Append the record to the accumulator. Note, that the actual partition may be // calculated there and can be accessed via appendCallbacks.topicPartition. RecordAccumulator.RecordAppendResult result = accumulator.append(record.topic(), partition, timestamp, serializedKey, - serializedValue, headers, appendCallbacks, remainingWaitMs, abortOnNewBatch, nowMs, cluster); + serializedValue, headers, appendCallbacks, remainingWaitMs, nowMs, cluster); assert appendCallbacks.getPartition() != RecordMetadata.UNKNOWN_PARTITION; - if (result.abortForNewBatch) { - int prevPartition = partition; - onNewBatch(record.topic(), cluster, prevPartition); - partition = partition(record, serializedKey, serializedValue, cluster); - if (log.isTraceEnabled()) { - log.trace("Retrying append due to new batch creation for topic {} partition {}. The old partition was {}", record.topic(), partition, prevPartition); - } - result = accumulator.append(record.topic(), partition, timestamp, serializedKey, - serializedValue, headers, appendCallbacks, remainingWaitMs, false, nowMs, cluster); - } - // Add the partition to the transaction (if in progress) after it has been successfully // appended to the accumulator. We cannot do it before because the partition may be - // unknown or the initially selected partition may be changed when the batch is closed - // (as indicated by `abortForNewBatch`). Note that the `Sender` will refuse to dequeue + // unknown. Note that the `Sender` will refuse to dequeue // batches from the accumulator until they have been added to the transaction. if (transactionManager != null) { transactionManager.maybeAddPartition(appendCallbacks.topicPartition()); @@ -1256,11 +1180,22 @@ private void ensureValidRecordSize(int size) { * flush all buffered records before performing the commit. This ensures that all the {@link #send(ProducerRecord)} * calls made since the previous {@link #beginTransaction()} are completed before the commit. *

      + *

      + * Important: This method must not be called from within the callback provided to + * {@link #send(ProducerRecord, Callback)}. Invoking flush() in this context will result in a + * {@link KafkaException} being thrown, as it will cause a deadlock. + *
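For illustration, the pattern that now fails fast and the safe alternative; producer and record are assumed to exist in scope.

    producer.send(record, (metadata, exception) -> {
        producer.flush();   // callback runs on the producer I/O thread; this now throws KafkaException instead of deadlocking
    });

    // Safe: flush from the application thread after handing records to send().
    producer.send(record);
    producer.flush();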

      * * @throws InterruptException If the thread is interrupted while blocked + * @throws KafkaException If the method is invoked inside a {@link #send(ProducerRecord, Callback)} callback */ @Override public void flush() { + if (Thread.currentThread() == this.ioThread) { + log.error("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock."); + throw new KafkaException("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock."); + } + log.trace("Flushing accumulated records in producer."); long start = time.nanoseconds(); @@ -1325,9 +1260,10 @@ public List partitionsFor(String topic) { */ @Override public void registerMetricForSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricChange(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric)); + } else { + log.debug("Skipping registration for metric {}. Existing producer metrics cannot be overwritten.", metric.metricName()); } } @@ -1342,9 +1278,10 @@ public void registerMetricForSubscription(KafkaMetric metric) { */ @Override public void unregisterMetricFromSubscription(KafkaMetric metric) { - if (clientTelemetryReporter.isPresent()) { - ClientTelemetryReporter reporter = clientTelemetryReporter.get(); - reporter.metricRemoval(metric); + if (!metrics().containsKey(metric.metricName())) { + clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric)); + } else { + log.debug("Skipping unregistration for metric {}. Existing producer metrics cannot be removed.", metric.metricName()); } } @@ -1375,7 +1312,7 @@ public void unregisterMetricFromSubscription(KafkaMetric metric) { */ @Override public Uuid clientInstanceId(Duration timeout) { - if (!clientTelemetryReporter.isPresent()) { + if (clientTelemetryReporter.isEmpty()) { throw new IllegalStateException("Telemetry is not enabled. 
Set config `" + ProducerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`."); } @@ -1530,7 +1467,7 @@ private void throwIfInvalidGroupMetadata(ConsumerGroupMetadata groupMetadata) { throw new IllegalArgumentException("Consumer group metadata could not be null"); } else if (groupMetadata.generationId() > 0 && JoinGroupRequest.UNKNOWN_MEMBER_ID.equals(groupMetadata.memberId())) { - throw new IllegalArgumentException("Passed in group metadata " + groupMetadata + " has generationId > 0 but member.id "); + throw new IllegalArgumentException("Passed in group metadata " + groupMetadata + " has generationId > 0 but the member.id is unknown"); } } @@ -1545,6 +1482,11 @@ String getClientId() { return clientId; } + // Visible for testing + TransactionManager getTransactionManager() { + return transactionManager; + } + private static class ClusterAndWaitTime { final Cluster cluster; final long waitedOnMetadataMs; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java index 3d278c40cb067..564171608568a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java @@ -119,31 +119,6 @@ public MockProducer(final Cluster cluster, this.mockMetrics = new HashMap<>(); } - /** - * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers. - * - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(Cluster.empty(), autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} - */ - @SuppressWarnings("deprecation") - public MockProducer(final boolean autoComplete, - final Serializer keySerializer, - final Serializer valueSerializer) { - this(Cluster.empty(), autoComplete, new org.apache.kafka.clients.producer.internals.DefaultPartitioner(), keySerializer, valueSerializer); - } - - /** - * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers. - * - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(cluster, autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} - */ - @SuppressWarnings("deprecation") - public MockProducer(final Cluster cluster, - final boolean autoComplete, - final Serializer keySerializer, - final Serializer valueSerializer) { - this(cluster, autoComplete, new org.apache.kafka.clients.producer.internals.DefaultPartitioner(), keySerializer, valueSerializer); - } - /** * Create a new mock producer with invented metadata the given autoComplete setting, partitioner and key\value serializers. 
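With the deprecated convenience constructors removed, tests can call the remaining Cluster-based constructor directly. In this sketch RoundRobinPartitioner stands in for the deleted DefaultPartitioner; a null partitioner is also accepted now that partition() falls back to the topic's first partition (see the hunk below).

    MockProducer<String, String> mockProducer = new MockProducer<>(
        Cluster.empty(), true, new RoundRobinPartitioner(), new StringSerializer(), new StringSerializer());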
* @@ -202,14 +177,6 @@ public void beginTransaction() throws ProducerFencedException { this.sentOffsets = false; } - @Deprecated - @Override - public void sendOffsetsToTransaction(Map offsets, - String consumerGroupId) throws ProducerFencedException { - Objects.requireNonNull(consumerGroupId); - sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId)); - } - @Override public void sendOffsetsToTransaction(Map offsets, ConsumerGroupMetadata groupMetadata) throws ProducerFencedException { @@ -571,6 +538,9 @@ private int partition(ProducerRecord record, Cluster cluster) { } byte[] keyBytes = keySerializer.serialize(topic, record.headers(), record.key()); byte[] valueBytes = valueSerializer.serialize(topic, record.headers(), record.value()); + if (partitioner == null) { + return this.cluster.partitionsForTopic(record.topic()).get(0).partition(); + } return this.partitioner.partition(topic, record.key(), keyBytes, record.value(), valueBytes, cluster); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java index bcfcb2db6468e..96345d8f8b041 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java @@ -42,19 +42,4 @@ public interface Partitioner extends Configurable, Closeable { * This is called when partitioner is closed. */ void close(); - - /** - * Note this method is only implemented in DefaultPartitioner and {@link UniformStickyPartitioner} which - * are now deprecated. See KIP-794 for more info. - *

      - * Notifies the partitioner a new batch is about to be created. When using the sticky partitioner, - * this method can change the chosen sticky partition for the new batch. - * @param topic The topic name - * @param cluster The current cluster metadata - * @param prevPartition The partition previously selected for the record that triggered a new batch - * @deprecated Since 3.3.0 - */ - @Deprecated - default void onNewBatch(String topic, Cluster cluster, int prevPartition) { - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java index 87e9d6042eeb8..798034dda6de2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java @@ -49,13 +49,6 @@ public interface Producer extends Closeable { */ void beginTransaction() throws ProducerFencedException; - /** - * See {@link KafkaProducer#sendOffsetsToTransaction(Map, String)} - */ - @Deprecated - void sendOffsetsToTransaction(Map offsets, - String consumerGroupId) throws ProducerFencedException; - /** * See {@link KafkaProducer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} */ diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 8b360d4d839ab..23dd02bda98f3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -93,8 +93,11 @@ public class ProducerConfig extends AbstractConfig { + "
<p>
      " + "Note: This setting gives the upper bound of the batch size to be sent. If we have fewer than this many bytes accumulated " + "for this partition, we will 'linger' for the linger.ms time waiting for more records to show up. " - + "This linger.ms setting defaults to 0, which means we'll immediately send out a record even the accumulated " - + "batch size is under this batch.size setting."; + + "This linger.ms setting defaults to 5, which means the producer will wait for 5ms or until the record batch is " + + "of batch.size(whichever happens first) before sending the record batch. Note that broker backpressure can " + + " result in a higher effective linger time than this setting." + + "The default changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in " + + "similar or lower producer latency despite the increased linger."; /** partitioner.adaptive.partitioning.enable */ public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; @@ -147,8 +150,10 @@ public class ProducerConfig extends AbstractConfig { + "of as analogous to Nagle's algorithm in TCP. This setting gives the upper bound on the delay for batching: once " + "we get " + BATCH_SIZE_CONFIG + " worth of records for a partition it will be sent immediately regardless of this " + "setting, however if we have fewer than this many bytes accumulated for this partition we will 'linger' for the " - + "specified time waiting for more records to show up. This setting defaults to 0 (i.e. no delay). Setting " + LINGER_MS_CONFIG + "=5, " - + "for example, would have the effect of reducing the number of requests sent but would add up to 5ms of latency to records sent in the absence of load."; + + "specified time waiting for more records to show up. This setting defaults to 5 (i.e. 5ms delay). Increasing " + LINGER_MS_CONFIG + "=50, " + + "for example, would have the effect of reducing the number of requests sent but would add up to 50ms of latency to records sent in the absence of load." 
+ + "The default changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in " + + "similar or lower producer latency despite the increased linger."; /** request.timeout.ms */ public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG; @@ -383,7 +388,7 @@ public class ProducerConfig extends AbstractConfig { .define(PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC) .define(PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.LOW, PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC) .define(PARTITIONER_IGNORE_KEYS_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, PARTITIONER_IGNORE_KEYS_DOC) - .define(LINGER_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.MEDIUM, LINGER_MS_DOC) + .define(LINGER_MS_CONFIG, Type.LONG, 5, atLeast(0), Importance.MEDIUM, LINGER_MS_DOC) .define(DELIVERY_TIMEOUT_MS_CONFIG, Type.INT, 120 * 1000, atLeast(0), Importance.MEDIUM, DELIVERY_TIMEOUT_MS_DOC) .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CommonClientConfigs.CLIENT_ID_DOC) .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(CommonClientConfigs.SEND_BUFFER_LOWER_BOUND), Importance.MEDIUM, CommonClientConfigs.SEND_BUFFER_DOC) @@ -527,7 +532,13 @@ public class ProducerConfig extends AbstractConfig { ConfigDef.CaseInsensitiveValidString .in(Utils.enumOptions(MetadataRecoveryStrategy.class)), Importance.LOW, - CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC); + CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC) + .define(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, + atLeast(0), + Importance.LOW, + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); } @Override @@ -583,13 +594,8 @@ private void postProcessAndValidateIdempotenceConfigs(final Map final int inFlightConnection = this.getInt(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION); if (MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_FOR_IDEMPOTENCE < inFlightConnection) { - if (userConfiguredIdempotence) { - throw new ConfigException("Must set " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " to at most 5" + - " to use the idempotent producer."); - } - log.warn("Idempotence will be disabled because {} is set to {}, which is greater than 5. " + - "Please note that in v4.0.0 and onward, this will become an error.", MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, inFlightConnection); - shouldDisableIdempotence = true; + throw new ConfigException("To use the idempotent producer, " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + + " must be set to at most 5. 
Current value is " + inFlightConnection + "."); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/RoundRobinPartitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/RoundRobinPartitioner.java index be2bc24a509c5..c736756ab4231 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/RoundRobinPartitioner.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/RoundRobinPartitioner.java @@ -69,5 +69,4 @@ private int nextValue(String topic) { } public void close() {} - } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/UniformStickyPartitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/UniformStickyPartitioner.java deleted file mode 100644 index d9faa5f162427..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/producer/UniformStickyPartitioner.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.producer; - -import org.apache.kafka.clients.producer.internals.StickyPartitionCache; -import org.apache.kafka.common.Cluster; - -import java.util.Map; - - -/** - * @deprecated Since 3.3.0, in order to use default partitioning logic - * remove the {@code partitioner.class} configuration setting and set {@code partitioner.ignore.keys=true}. - * See KIP-794 for more info. - * - * The partitioning strategy: - *

        - *
      • If a partition is specified in the record, use it - *
      • Otherwise choose the sticky partition that changes when the batch is full. - * - * NOTE: In contrast to the DefaultPartitioner, the record key is NOT used as part of the partitioning strategy in this - * partitioner. Records with the same key are not guaranteed to be sent to the same partition. - * - * See KIP-480 for details about sticky partitioning. - */ -@Deprecated -public class UniformStickyPartitioner implements Partitioner { - - private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache(); - - public void configure(Map configs) {} - - /** - * Compute the partition for the given record. - * - * @param topic The topic name - * @param key The key to partition on (or null if no key) - * @param keyBytes serialized key to partition on (or null if no key) - * @param value The value to partition on or null - * @param valueBytes serialized value to partition on or null - * @param cluster The current cluster metadata - */ - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { - return stickyPartitionCache.partition(topic, cluster); - } - - public void close() {} - - /** - * If a batch completed for the current sticky partition, change the sticky partition. - * Alternately, if no sticky partition has been determined, set one. - */ - @SuppressWarnings("deprecation") - public void onNewBatch(String topic, Cluster cluster, int prevPartition) { - stickyPartitionCache.nextPartition(topic, cluster, prevPartition); - } -} diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java deleted file mode 100644 index 716773626c8c1..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.producer.internals; - -import org.apache.kafka.clients.producer.Partitioner; -import org.apache.kafka.common.Cluster; - -import java.util.Map; - -/** - * NOTE this partitioner is deprecated and shouldn't be used. To use default partitioning logic - * remove partitioner.class configuration setting. See KIP-794 for more info. - * - * The default partitioning strategy: - *
          - *
        • If a partition is specified in the record, use it - *
        • If no partition is specified but a key is present choose a partition based on a hash of the key - *
        • If no partition or key is present choose the sticky partition that changes when the batch is full. - * - * See KIP-480 for details about sticky partitioning. - */ -@Deprecated -public class DefaultPartitioner implements Partitioner { - - private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache(); - - public void configure(Map configs) {} - - /** - * Compute the partition for the given record. - * - * @param topic The topic name - * @param key The key to partition on (or null if no key) - * @param keyBytes serialized key to partition on (or null if no key) - * @param value The value to partition on or null - * @param valueBytes serialized value to partition on or null - * @param cluster The current cluster metadata - */ - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { - return partition(topic, key, keyBytes, value, valueBytes, cluster, cluster.partitionsForTopic(topic).size()); - } - - /** - * Compute the partition for the given record. - * - * @param topic The topic name - * @param numPartitions The number of partitions of the given {@code topic} - * @param key The key to partition on (or null if no key) - * @param keyBytes serialized key to partition on (or null if no key) - * @param value The value to partition on or null - * @param valueBytes serialized value to partition on or null - * @param cluster The current cluster metadata - */ - public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster, - int numPartitions) { - if (keyBytes == null) { - return stickyPartitionCache.partition(topic, cluster); - } - return BuiltInPartitioner.partitionForKey(keyBytes, numPartitions); - } - - public void close() {} - - /** - * If a batch completed for the current sticky partition, change the sticky partition. - * Alternately, if no sticky partition has been determined, set one. 
- */ - @SuppressWarnings("deprecation") - public void onNewBatch(String topic, Cluster cluster, int prevPartition) { - stickyPartitionCache.nextPartition(topic, cluster, prevPartition); - } -} diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java index f70c1a338148f..5619819dde72e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java @@ -112,7 +112,7 @@ public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, lon */ void maybeUpdateLeaderEpoch(OptionalInt latestLeaderEpoch) { if (latestLeaderEpoch.isPresent() - && (!currentLeaderEpoch.isPresent() || currentLeaderEpoch.getAsInt() < latestLeaderEpoch.getAsInt())) { + && (currentLeaderEpoch.isEmpty() || currentLeaderEpoch.getAsInt() < latestLeaderEpoch.getAsInt())) { log.trace("For {}, leader will be updated, currentLeaderEpoch: {}, attemptsWhenLeaderLastChanged:{}, latestLeaderEpoch: {}, current attempt: {}", this, currentLeaderEpoch, attemptsWhenLeaderLastChanged, latestLeaderEpoch, attempts); attemptsWhenLeaderLastChanged = attempts(); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index ec50dc9bc26d1..64256d040a08c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -27,7 +27,6 @@ import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.AbstractRecords; @@ -278,8 +277,6 @@ private boolean partitionChanged(String topic, * @param headers the Headers for the record * @param callbacks The callbacks to execute * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available - * @param abortOnNewBatch A boolean that indicates returning before a new batch is created and - * running the partitioner's onNewBatch method before trying to append again * @param nowMs The current time, in milliseconds * @param cluster The cluster metadata */ @@ -291,7 +288,6 @@ public RecordAppendResult append(String topic, Header[] headers, AppendCallbacks callbacks, long maxTimeToBlock, - boolean abortOnNewBatch, long nowMs, Cluster cluster) throws InterruptedException { TopicInfo topicInfo = topicInfoMap.computeIfAbsent(topic, k -> new TopicInfo(createBuiltInPartitioner(logContext, k, batchSize))); @@ -337,15 +333,9 @@ public RecordAppendResult append(String topic, } } - // we don't have an in-progress record batch try to allocate a new batch - if (abortOnNewBatch) { - // Return a result that will cause another call to append. 
- return new RecordAppendResult(null, false, false, true, 0); - } - if (buffer == null) { - byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); - int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression.type(), key, value, headers)); + int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound( + RecordBatch.CURRENT_MAGIC_VALUE, compression.type(), key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, effectivePartition, maxTimeToBlock); // This call may block if we exhausted buffer space. buffer = free.allocate(size, maxTimeToBlock); @@ -408,7 +398,7 @@ private RecordAppendResult appendNewBatch(String topic, return appendResult; } - MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, apiVersions.maxUsableProduceMagic()); + MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer); ProducerBatch batch = new ProducerBatch(new TopicPartition(topic, partition), recordsBuilder, nowMs); FutureRecordMetadata future = Objects.requireNonNull(batch.tryAppend(timestamp, key, value, headers, callbacks, nowMs)); @@ -416,15 +406,11 @@ private RecordAppendResult appendNewBatch(String topic, dq.addLast(batch); incomplete.add(batch); - return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, false, batch.estimatedSizeInBytes()); + return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, batch.estimatedSizeInBytes()); } - private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMagic) { - if (transactionManager != null && maxUsableMagic < RecordBatch.MAGIC_VALUE_V2) { - throw new UnsupportedVersionException("Attempting to use idempotence with a broker which does not " + - "support the required message format (v2). 
The broker must be version 0.11 or later."); - } - return MemoryRecords.builder(buffer, maxUsableMagic, compression, TimestampType.CREATE_TIME, 0L); + private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer) { + return MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, 0L); } /** @@ -456,7 +442,7 @@ private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, H last.closeForRecordAppends(); } else { int appendedBytes = last.estimatedSizeInBytes() - initialBytes; - return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false, false, appendedBytes); + return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false, appendedBytes); } } return null; @@ -1218,18 +1204,15 @@ public static final class RecordAppendResult { public final FutureRecordMetadata future; public final boolean batchIsFull; public final boolean newBatchCreated; - public final boolean abortForNewBatch; public final int appendedBytes; public RecordAppendResult(FutureRecordMetadata future, boolean batchIsFull, boolean newBatchCreated, - boolean abortForNewBatch, int appendedBytes) { this.future = future; this.batchIsFull = batchIsFull; this.newBatchCreated = newBatchCreated; - this.abortForNewBatch = abortForNewBatch; this.appendedBytes = appendedBytes; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java index 39f5616465154..9190281a66080 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java @@ -462,17 +462,10 @@ private boolean maybeSendAndPollTransactionalRequest() { return true; } - if (transactionManager.hasAbortableError() || transactionManager.isAborting()) { - if (accumulator.hasIncomplete()) { - // Attempt to get the last error that caused this abort. - RuntimeException exception = transactionManager.lastError(); - // If there was no error, but we are still aborting, - // then this is most likely a case where there was no fatal error. - if (exception == null) { - exception = new TransactionAbortedException(); - } - accumulator.abortUndrainedBatches(exception); - } + if (transactionManager.hasAbortableError()) { + accumulator.abortUndrainedBatches(transactionManager.lastError()); + } else if (transactionManager.isAborting()) { + accumulator.abortUndrainedBatches(new TransactionAbortedException()); } TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete()); @@ -871,27 +864,10 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo return; final Map recordsByPartition = new HashMap<>(batches.size()); - - // find the minimum magic version used when creating the record sets - byte minUsedMagic = apiVersions.maxUsableProduceMagic(); - for (ProducerBatch batch : batches) { - if (batch.magic() < minUsedMagic) - minUsedMagic = batch.magic(); - } ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection(); for (ProducerBatch batch : batches) { TopicPartition tp = batch.topicPartition; MemoryRecords records = batch.records(); - - // down convert if necessary to the minimum magic used. 
In general, there can be a delay between the time - // that the producer starts building the batch and the time that we send the request, and we may have - // chosen the message format based on out-dated metadata. In the worst case, we optimistically chose to use - // the new message format, but found that the broker didn't support it, so we need to down-convert on the - // client before sending. This is intended to handle edge cases around cluster upgrades where brokers may - // not all support the same message format version. For example, if a partition migrates from a broker - // which is supporting the new magic version to one which doesn't, then we will need to convert. - if (!records.hasMatchingMagic(minUsedMagic)) - records = batch.records().downConvert(minUsedMagic, 0, time).records(); ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic()); if (tpData == null) { tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic()); @@ -904,16 +880,20 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo } String transactionalId = null; + boolean useTransactionV1Version = false; if (transactionManager != null && transactionManager.isTransactional()) { transactionalId = transactionManager.transactionalId(); + useTransactionV1Version = !transactionManager.isTransactionV2Enabled(); } - ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, + ProduceRequest.Builder requestBuilder = ProduceRequest.builder( new ProduceRequestData() .setAcks(acks) .setTimeoutMs(timeout) .setTransactionalId(transactionalId) - .setTopicData(tpd)); + .setTopicData(tpd), + useTransactionV1Version + ); RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds()); String nodeId = Integer.toString(destination); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java deleted file mode 100644 index 1a00473f06e8a..0000000000000 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.producer.internals; - -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.PartitionInfo; -import org.apache.kafka.common.utils.Utils; - -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; - -/** - * An internal class that implements a cache used for sticky partitioning behavior. 
The cache tracks the current sticky - * partition for any given topic. This class should not be used externally. - */ -public class StickyPartitionCache { - private final ConcurrentMap indexCache; - public StickyPartitionCache() { - this.indexCache = new ConcurrentHashMap<>(); - } - - public int partition(String topic, Cluster cluster) { - Integer part = indexCache.get(topic); - if (part == null) { - return nextPartition(topic, cluster, -1); - } - return part; - } - - public int nextPartition(String topic, Cluster cluster, int prevPartition) { - List partitions = cluster.partitionsForTopic(topic); - Integer oldPart = indexCache.get(topic); - Integer newPart = oldPart; - // Check that the current sticky partition for the topic is either not set or that the partition that - // triggered the new batch matches the sticky partition that needs to be changed. - if (oldPart == null || oldPart == prevPartition) { - List availablePartitions = cluster.availablePartitionsForTopic(topic); - if (availablePartitions.isEmpty()) { - int random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); - newPart = random % partitions.size(); - } else if (availablePartitions.size() == 1) { - newPart = availablePartitions.get(0).partition(); - } else { - while (newPart == null || newPart.equals(oldPart)) { - int random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); - newPart = availablePartitions.get(random % availablePartitions.size()).partition(); - } - } - // Only change the sticky partition if it is null or prevPartition matches the current sticky partition. - if (oldPart == null) { - indexCache.putIfAbsent(topic, newPart); - } else { - indexCache.replace(topic, prevPartition, newPart); - } - return indexCache.get(topic); - } - return indexCache.get(topic); - } - -} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java index 8057a82c5687b..4ddf8a13de029 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java @@ -192,7 +192,7 @@ public class TransactionManager { private volatile RuntimeException lastError = null; private volatile ProducerIdAndEpoch producerIdAndEpoch; private volatile boolean transactionStarted = false; - private volatile boolean epochBumpRequired = false; + private volatile boolean clientSideEpochBumpRequired = false; private volatile long latestFinalizedFeaturesEpoch = -1; private volatile boolean isTransactionV2Enabled = false; @@ -211,7 +211,7 @@ private boolean isTransitionValid(State source, State target) { case UNINITIALIZED: return source == READY || source == ABORTABLE_ERROR; case INITIALIZING: - return source == UNINITIALIZED || source == ABORTING_TRANSACTION; + return source == UNINITIALIZED || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION; case READY: return source == INITIALIZING || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION; case IN_TRANSACTION: @@ -312,7 +312,6 @@ public synchronized void beginTransaction() { throwIfPendingState("beginTransaction"); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); - maybeUpdateTransactionV2Enabled(); } public synchronized TransactionalRequestResult beginCommit() { @@ -339,27 +338,31 @@ private TransactionalRequestResult beginCompletingTransaction(TransactionResult if 
(!newPartitionsInTransaction.isEmpty()) enqueueRequest(addPartitionsToTransactionHandler()); - // If the error is an INVALID_PRODUCER_ID_MAPPING error, the server will not accept an EndTxnRequest, so skip - // directly to InitProducerId. Otherwise, we must first abort the transaction, because the producer will be - // fenced if we directly call InitProducerId. - if (!(lastError instanceof InvalidPidMappingException)) { - EndTxnRequest.Builder builder = new EndTxnRequest.Builder( - new EndTxnRequestData() - .setTransactionalId(transactionalId) - .setProducerId(producerIdAndEpoch.producerId) - .setProducerEpoch(producerIdAndEpoch.epoch) - .setCommitted(transactionResult.id), - isTransactionV2Enabled - ); + EndTxnRequest.Builder builder = new EndTxnRequest.Builder( + new EndTxnRequestData() + .setTransactionalId(transactionalId) + .setProducerId(producerIdAndEpoch.producerId) + .setProducerEpoch(producerIdAndEpoch.epoch) + .setCommitted(transactionResult.id), + isTransactionV2Enabled + ); - EndTxnHandler handler = new EndTxnHandler(builder); - enqueueRequest(handler); - if (!epochBumpRequired) { - return handler.result; - } + // Maybe update the transaction version here before we enqueue the EndTxn request so there are no races with + // completion of the EndTxn request. Since this method may update clientSideEpochBumpRequired, we want to update + // before the check below, but we also want to call it after the EndTxnRequest.Builder so we complete the transaction + // with the same version as it started. + maybeUpdateTransactionV2Enabled(false); + + EndTxnHandler handler = new EndTxnHandler(builder); + enqueueRequest(handler); + + // If an epoch bump is required for recovery, initialize the transaction after completing the EndTxn request. + // If we are upgrading to TV2 transactions on the next transaction, also bump the epoch. + if (clientSideEpochBumpRequired) { + return initializeTransactions(this.producerIdAndEpoch); } - return initializeTransactions(this.producerIdAndEpoch); + return handler.result; } public synchronized TransactionalRequestResult sendOffsetsToTransaction(final Map offsets, @@ -373,15 +376,23 @@ public synchronized TransactionalRequestResult sendOffsetsToTransaction(final Ma "(currentState= " + currentState + ")"); } - log.debug("Begin adding offsets {} for consumer group {} to transaction", offsets, groupMetadata); - AddOffsetsToTxnRequest.Builder builder = new AddOffsetsToTxnRequest.Builder( - new AddOffsetsToTxnRequestData() - .setTransactionalId(transactionalId) - .setProducerId(producerIdAndEpoch.producerId) - .setProducerEpoch(producerIdAndEpoch.epoch) - .setGroupId(groupMetadata.groupId()) - ); - AddOffsetsToTxnHandler handler = new AddOffsetsToTxnHandler(builder, offsets, groupMetadata); + // In transaction V2, the client will skip sending AddOffsetsToTxn before sending txnOffsetCommit. 
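The new TV2 branch below (and the comment just above) only changes what goes on the wire: AddOffsetsToTxn is skipped and the offsets ride on TxnOffsetCommit alone. The application-facing flow is unchanged; a sketch of the consume-transform-produce pattern this serves, where the producer, consumer, batch, offsets map, and "output-topic" are placeholders supplied by the application and initTransactions() is assumed to have been called once at startup:

import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

static void processBatch(KafkaProducer<String, String> producer,
                         KafkaConsumer<String, String> consumer,
                         ConsumerRecords<String, String> batch,
                         Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) {
    producer.beginTransaction();
    try {
        for (ConsumerRecord<String, String> record : batch) {
            producer.send(new ProducerRecord<>("output-topic", record.key(), record.value()));
        }
        // With TV2 this becomes a TxnOffsetCommit only; with TV1 an AddOffsetsToTxn request precedes it.
        producer.sendOffsetsToTransaction(offsetsToCommit, consumer.groupMetadata());
        producer.commitTransaction();
    } catch (KafkaException e) {
        producer.abortTransaction();
    }
}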
+ TxnRequestHandler handler; + if (isTransactionV2Enabled()) { + log.debug("Begin adding offsets {} for consumer group {} to transaction with transaction protocol V2", offsets, groupMetadata); + handler = txnOffsetCommitHandler(null, offsets, groupMetadata); + transactionStarted = true; + } else { + log.debug("Begin adding offsets {} for consumer group {} to transaction", offsets, groupMetadata); + AddOffsetsToTxnRequest.Builder builder = new AddOffsetsToTxnRequest.Builder( + new AddOffsetsToTxnRequestData() + .setTransactionalId(transactionalId) + .setProducerId(producerIdAndEpoch.producerId) + .setProducerEpoch(producerIdAndEpoch.epoch) + .setGroupId(groupMetadata.groupId()) + ); + handler = new AddOffsetsToTxnHandler(builder, offsets, groupMetadata); + } enqueueRequest(handler); return handler.result; @@ -398,7 +409,11 @@ public synchronized void maybeAddPartition(TopicPartition topicPartition) { } else if (currentState != State.IN_TRANSACTION) { throw new IllegalStateException("Cannot add partition " + topicPartition + " to transaction while in state " + currentState); - } else if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) { + } else if (isTransactionV2Enabled()) { + txnPartitionMap.getOrCreate(topicPartition); + partitionsInTransaction.add(topicPartition); + transactionStarted = true; + } else if (transactionContainsPartition(topicPartition) || isPartitionPendingAdd(topicPartition)) { return; } else { log.debug("Begin adding new partition {} to transaction", topicPartition); @@ -430,15 +445,26 @@ public boolean isTransactional() { return transactionalId != null; } - // Check all the finalized features from apiVersions to whether the transaction V2 is enabled. - public synchronized void maybeUpdateTransactionV2Enabled() { + /** + * Check all the finalized features from apiVersions to verify whether the transaction V2 is enabled. + * Sets clientSideEpochBumpRequired if upgrading to V2 since we need to bump the epoch. + * This is because V2 no longer adds partitions explicitly and there are some edge cases on upgrade + * that can be avoided by fencing the old V1 transaction epoch. For example, we won't consider + * partitions from the previous transaction as already added to the new V2 transaction if the epoch is fenced. + */ + + public synchronized void maybeUpdateTransactionV2Enabled(boolean onInitiatialization) { if (latestFinalizedFeaturesEpoch >= apiVersions.getMaxFinalizedFeaturesEpoch()) { return; } ApiVersions.FinalizedFeaturesInfo info = apiVersions.getFinalizedFeaturesInfo(); latestFinalizedFeaturesEpoch = info.finalizedFeaturesEpoch; Short transactionVersion = info.finalizedFeatures.get("transaction.version"); + boolean wasTransactionV2Enabled = isTransactionV2Enabled; isTransactionV2Enabled = transactionVersion != null && transactionVersion >= 2; + log.debug("Updating isTV2 enabled to {} with FinalizedFeaturesEpoch {}", isTransactionV2Enabled, latestFinalizedFeaturesEpoch); + if (!onInitiatialization && !wasTransactionV2Enabled && isTransactionV2Enabled) + clientSideEpochBumpRequired = true; } public boolean isTransactionV2Enabled() { @@ -481,9 +507,24 @@ synchronized void transitionToFatalError(RuntimeException exception) { } } - // visible for testing - synchronized boolean isPartitionAdded(TopicPartition partition) { - return partitionsInTransaction.contains(partition); + /** + * Transitions to an abortable error state if the coordinator can handle an abortable error or + * to a fatal error if not. 
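maybeUpdateTransactionV2Enabled above derives TV2 support from the finalized transaction.version feature carried in the ApiVersions data. The same signal can be read from outside the client with the Admin API, which is a convenient way to confirm what a producer against this cluster will negotiate; a sketch, with exception handling reduced to a throws clause:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.FinalizedVersionRange;

// Returns true if the cluster has finalized transaction.version >= 2, i.e. transactional
// producers connecting to it will run with transaction protocol V2 semantics.
static boolean clusterSupportsTransactionV2(Admin admin) throws Exception {
    FinalizedVersionRange range = admin.describeFeatures()
            .featureMetadata().get()
            .finalizedFeatures()
            .get("transaction.version");
    return range != null && range.maxVersionLevel() >= 2;
}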
+ * + * @param abortableException The exception in case of an abortable error. + * @param fatalException The exception in case of a fatal error. + */ + private void transitionToAbortableErrorOrFatalError( + RuntimeException abortableException, + RuntimeException fatalException + ) { + if (canHandleAbortableError()) { + if (needToTriggerEpochBumpFromClient()) + clientSideEpochBumpRequired = true; + transitionToAbortableError(abortableException); + } else { + transitionToFatalError(fatalException); + } } // visible for testing @@ -548,8 +589,11 @@ private void resetSequenceNumbers() { this.partitionsWithUnresolvedSequences.clear(); } - synchronized void requestEpochBumpForPartition(TopicPartition tp) { - epochBumpRequired = true; + /** + * This method is used to trigger an epoch bump for non-transactional idempotent producers. + */ + synchronized void requestIdempotentEpochBumpForPartition(TopicPartition tp) { + clientSideEpochBumpRequired = true; this.partitionsToRewriteSequences.add(tp); } @@ -568,12 +612,12 @@ private void bumpIdempotentProducerEpoch() { } this.partitionsToRewriteSequences.clear(); - epochBumpRequired = false; + clientSideEpochBumpRequired = false; } synchronized void bumpIdempotentEpochAndResetIdIfNeeded() { if (!isTransactional()) { - if (epochBumpRequired) { + if (clientSideEpochBumpRequired) { bumpIdempotentProducerEpoch(); } if (currentState != State.INITIALIZING && !hasProducerId()) { @@ -676,11 +720,12 @@ public synchronized void maybeTransitionToErrorState(RuntimeException exception) if (exception instanceof ClusterAuthorizationException || exception instanceof TransactionalIdAuthorizationException || exception instanceof ProducerFencedException - || exception instanceof UnsupportedVersionException) { + || exception instanceof UnsupportedVersionException + || exception instanceof InvalidPidMappingException) { transitionToFatalError(exception); } else if (isTransactional()) { - if (canBumpEpoch() && !isCompleting()) { - epochBumpRequired = true; + if (needToTriggerEpochBumpFromClient() && !isCompleting()) { + clientSideEpochBumpRequired = true; } transitionToAbortableError(exception); } @@ -703,7 +748,7 @@ synchronized void handleFailedBatch(ProducerBatch batch, RuntimeException except // If we fail with an OutOfOrderSequenceException, we have a gap in the log. Bump the epoch for this // partition, which will reset the sequence number to 0 and allow us to continue - requestEpochBumpForPartition(batch.topicPartition); + requestIdempotentEpochBumpForPartition(batch.topicPartition); } else if (exception instanceof UnknownProducerIdException) { // If we get an UnknownProducerId for a partition, then the broker has no state for that producer. It will // therefore accept a write with sequence number 0. We reset the sequence number for the partition here so @@ -714,7 +759,7 @@ synchronized void handleFailedBatch(ProducerBatch batch, RuntimeException except } else { if (adjustSequenceNumbers) { if (!isTransactional()) { - requestEpochBumpForPartition(batch.topicPartition); + requestIdempotentEpochBumpForPartition(batch.topicPartition); } else { txnPartitionMap.adjustSequencesDueToFailedBatch(batch); } @@ -764,21 +809,17 @@ synchronized void maybeResolveSequences() { // For the transactional producer, we bump the epoch if possible, otherwise we transition to a fatal error String unackedMessagesErr = "The client hasn't received acknowledgment for some previously " + "sent messages and can no longer retry them. 
"; - if (canBumpEpoch()) { - epochBumpRequired = true; - KafkaException exception = new KafkaException(unackedMessagesErr + "It is safe to abort " + - "the transaction and continue."); - transitionToAbortableError(exception); - } else { - KafkaException exception = new KafkaException(unackedMessagesErr + "It isn't safe to continue."); - transitionToFatalError(exception); - } + KafkaException abortableException = new KafkaException(unackedMessagesErr + "It is safe to abort " + + "the transaction and continue."); + KafkaException fatalException = new KafkaException(unackedMessagesErr + "It isn't safe to continue."); + + transitionToAbortableErrorOrFatalError(abortableException, fatalException); } else { // For the idempotent producer, bump the epoch log.info("No inflight batches remaining for {}, last ack'd sequence for partition is {}, next sequence is {}. " + "Going to bump epoch and reset sequence numbers.", topicPartition, lastAckedSequence(topicPartition).orElse(TxnPartitionEntry.NO_LAST_ACKED_SEQUENCE_NUMBER), sequenceNumber(topicPartition)); - requestEpochBumpForPartition(topicPartition); + requestIdempotentEpochBumpForPartition(topicPartition); } iter.remove(); @@ -818,8 +859,13 @@ synchronized TxnRequestHandler nextRequest(boolean hasIncompleteBatches) { if (nextRequestHandler.isEndTxn() && !transactionStarted) { nextRequestHandler.result.done(); if (currentState != State.FATAL_ERROR) { - log.debug("Not sending EndTxn for completed transaction since no partitions " + - "or offsets were successfully added"); + if (isTransactionV2Enabled) { + log.debug("Not sending EndTxn for completed transaction since no send " + + "or sendOffsetsToTransaction were triggered"); + } else { + log.debug("Not sending EndTxn for completed transaction since no partitions " + + "or offsets were successfully added"); + } completeTransaction(); } nextRequestHandler = pendingRequests.poll(); @@ -893,7 +939,7 @@ boolean hasAbortableError() { } // visible for testing - synchronized boolean transactionContainsPartition(TopicPartition topicPartition) { + public synchronized boolean transactionContainsPartition(TopicPartition topicPartition) { return partitionsInTransaction.contains(topicPartition); } @@ -947,7 +993,7 @@ synchronized boolean canRetry(ProduceResponse.PartitionResponse response, Produc if (isTransactional()) { txnPartitionMap.startSequencesAtBeginning(batch.topicPartition, this.producerIdAndEpoch); } else { - requestEpochBumpForPartition(batch.topicPartition); + requestIdempotentEpochBumpForPartition(batch.topicPartition); } return true; } @@ -955,7 +1001,7 @@ synchronized boolean canRetry(ProduceResponse.PartitionResponse response, Produc if (!isTransactional()) { // For the idempotent producer, always retry UNKNOWN_PRODUCER_ID errors. If the batch has the current // producer ID and epoch, request a bump of the epoch. Otherwise just retry the produce. 
- requestEpochBumpForPartition(batch.topicPartition); + requestIdempotentEpochBumpForPartition(batch.topicPartition); return true; } } else if (error == Errors.OUT_OF_ORDER_SEQUENCE_NUMBER) { @@ -971,7 +1017,7 @@ synchronized boolean canRetry(ProduceResponse.PartitionResponse response, Produc // and wait to see if the sequence resolves if (!hasUnresolvedSequence(batch.topicPartition) || isNextSequenceForUnresolvedPartition(batch.topicPartition, batch.baseSequence())) { - requestEpochBumpForPartition(batch.topicPartition); + requestIdempotentEpochBumpForPartition(batch.topicPartition); } return true; } @@ -1127,8 +1173,13 @@ private TxnOffsetCommitHandler txnOffsetCommitHandler(TransactionalRequestResult pendingTxnOffsetCommits, groupMetadata.memberId(), groupMetadata.generationId(), - groupMetadata.groupInstanceId() + groupMetadata.groupInstanceId(), + isTransactionV2Enabled() ); + if (result == null) { + // In this case, transaction V2 is in use. + return new TxnOffsetCommitHandler(builder); + } return new TxnOffsetCommitHandler(result, builder); } @@ -1168,23 +1219,59 @@ private TransactionalRequestResult handleCachedTransactionRequestResult( return result; } + /** + * Determines if an epoch bump can be triggered manually based on the api versions. + * + * NOTE: + * This method should only be used for transactional producers. + * For non-transactional producers epoch bumping is always allowed. + * + *
<ol>
+ * <li>Client-Triggered Epoch Bump:
+ * If the coordinator supports epoch bumping (initProducerIdVersion.maxVersion() >= 3),
+ * client-triggered epoch bumping is allowed, returns true.
+ * clientSideEpochBumpRequired must be set to true in this case.</li>
+ *
+ * <li>No Epoch Bump Allowed:
+ * If the coordinator does not support epoch bumping, returns false.</li>
+ *
+ * <li>Server-Triggered Only:
+ * When TransactionV2 is enabled, epoch bumping is handled automatically
+ * by the server in EndTxn, so manual epoch bumping is not required, returns false.</li>
+ * </ol>
          + * + * @return true if a client-triggered epoch bump is allowed, otherwise false. + */ // package-private for testing - boolean canBumpEpoch() { - if (!isTransactional()) { - return true; - } + boolean needToTriggerEpochBumpFromClient() { + return coordinatorSupportsBumpingEpoch && !isTransactionV2Enabled; + } - return coordinatorSupportsBumpingEpoch; + /** + * Determines if the coordinator can handle an abortable error. + * Recovering from an abortable error requires an epoch bump which can be triggered by the client + * or automatically taken care of at the end of every transaction (Transaction V2). + * Use needToTriggerEpochBumpFromClient to check whether the epoch bump needs to be triggered + * manually. + * + * NOTE: + * This method should only be used for transactional producers. + * There is no concept of abortable errors for idempotent producers. + * + * @return true if an abortable error can be handled, otherwise false. + */ + boolean canHandleAbortableError() { + return coordinatorSupportsBumpingEpoch || isTransactionV2Enabled; } private void completeTransaction() { - if (epochBumpRequired) { + if (clientSideEpochBumpRequired) { transitionTo(State.INITIALIZING); } else { transitionTo(State.READY); } lastError = null; - epochBumpRequired = false; + clientSideEpochBumpRequired = false; transactionStarted = false; newPartitionsInTransaction.clear(); pendingPartitionsInTransaction.clear(); @@ -1213,9 +1300,23 @@ void abortableError(RuntimeException e) { transitionToAbortableError(e); } + /** + * Determines if an error should be treated as abortable or fatal, based on transaction state and configuration. + *
<p>NOTE: Only use this method for transactional producers</p>
          + * + * - Abortable Error: + * An abortable error can be handled effectively, if epoch bumping is supported. + * 1) If transactionV2 is enabled, automatic epoch bumping happens at the end of every transaction. + * 2) If the client can trigger an epoch bump, the abortable error can be handled. + * + *- Fatal Error: + * If epoch bumping is not supported, the system cannot recover and the error must be treated as fatal. + * @param e the error to determine as either abortable or fatal. + */ void abortableErrorIfPossible(RuntimeException e) { - if (canBumpEpoch()) { - epochBumpRequired = true; + if (canHandleAbortableError()) { + if (needToTriggerEpochBumpFromClient()) + clientSideEpochBumpRequired = true; abortableError(e); } else { fatalError(e); @@ -1410,7 +1511,7 @@ public void handleResponse(AbstractResponse response) { fatalError(Errors.PRODUCER_FENCED.exception()); return; } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED || - error == Errors.INVALID_TXN_STATE) { + error == Errors.INVALID_TXN_STATE || error == Errors.INVALID_PRODUCER_ID_MAPPING) { fatalError(error.exception()); return; } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { @@ -1419,7 +1520,7 @@ public void handleResponse(AbstractResponse response) { log.debug("Did not attempt to add partition {} to transaction because other partitions in the " + "batch had errors.", topicPartition); hasPartitionErrors = true; - } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { + } else if (error == Errors.UNKNOWN_PRODUCER_ID) { abortableErrorIfPossible(error.exception()); return; } else if (error == Errors.TRANSACTION_ABORTABLE) { @@ -1575,6 +1676,8 @@ public void handleResponse(AbstractResponse response) { // When Transaction Version 2 is enabled, the end txn request 5+ is used, // it mandates bumping the epoch after every transaction. // If the epoch overflows, a new producerId is returned with epoch set to 0. + // Note, we still may see EndTxn TV1 (< 5) responses when the producer has upgraded to TV2 due to the upgrade + // occurring at the end of beginCompletingTransaction. The next transaction started should be TV2. if (endTxnResponse.data().producerId() != -1) { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch( endTxnResponse.data().producerId(), @@ -1595,9 +1698,9 @@ public void handleResponse(AbstractResponse response) { // just treat it the same as PRODUCE_FENCED. 
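The abortable-versus-fatal split that canHandleAbortableError and abortableErrorIfPossible formalize above maps directly onto the recommended application-side handling: fatal errors mean the producer must be closed, abortable errors mean the current transaction can be aborted and retried. A sketch of that pattern, with the producer and record as placeholders:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;

static void runInTransaction(KafkaProducer<String, String> producer, ProducerRecord<String, String> record) {
    try {
        producer.beginTransaction();
        producer.send(record);
        producer.commitTransaction();
    } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
        // Fatal: this producer instance cannot recover, so close it and create a new one if needed.
        producer.close();
    } catch (KafkaException e) {
        // Abortable: abort the transaction and retry the work on the same producer.
        producer.abortTransaction();
    }
}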
fatalError(Errors.PRODUCER_FENCED.exception()); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED || - error == Errors.INVALID_TXN_STATE) { + error == Errors.INVALID_TXN_STATE || error == Errors.INVALID_PRODUCER_ID_MAPPING) { fatalError(error.exception()); - } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { + } else if (error == Errors.UNKNOWN_PRODUCER_ID) { abortableErrorIfPossible(error.exception()); } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); @@ -1648,14 +1751,14 @@ public void handleResponse(AbstractResponse response) { reenqueue(); } else if (error.exception() instanceof RetriableException) { reenqueue(); - } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { + } else if (error == Errors.UNKNOWN_PRODUCER_ID) { abortableErrorIfPossible(error.exception()); } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. fatalError(Errors.PRODUCER_FENCED.exception()); } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED || - error == Errors.INVALID_TXN_STATE) { + error == Errors.INVALID_TXN_STATE || error == Errors.INVALID_PRODUCER_ID_MAPPING) { fatalError(error.exception()); } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId())); @@ -1676,6 +1779,11 @@ private TxnOffsetCommitHandler(TransactionalRequestResult result, this.builder = builder; } + private TxnOffsetCommitHandler(TxnOffsetCommitRequest.Builder builder) { + super("TxnOffsetCommitHandler"); + this.builder = builder; + } + @Override TxnOffsetCommitRequest.Builder requestBuilder() { return builder; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java index 05a0fa3fbbc30..5d2be89a9dd5a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TxnPartitionMap.java @@ -90,7 +90,7 @@ void updateLastAckedOffset(TopicPartition topicPartition, boolean isTransactiona // It might happen that the TransactionManager has been reset while a request was reenqueued and got a valid // response for this. This can happen only if the producer is only idempotent (not transactional) and in // this case there will be no tracked bookkeeper entry about it, so we have to insert one. - if (!lastAckedOffset.isPresent() && !isTransactional) + if (lastAckedOffset.isEmpty() && !isTransactional) getOrCreate(topicPartition); if (lastOffset > lastAckedOffset.orElse(ProduceResponse.INVALID_OFFSET)) get(topicPartition).setLastAckedOffset(lastOffset); diff --git a/clients/src/main/java/org/apache/kafka/common/ConsumerGroupState.java b/clients/src/main/java/org/apache/kafka/common/ConsumerGroupState.java index 28dea471599b6..4f52485aaa156 100644 --- a/clients/src/main/java/org/apache/kafka/common/ConsumerGroupState.java +++ b/clients/src/main/java/org/apache/kafka/common/ConsumerGroupState.java @@ -25,7 +25,9 @@ /** * The consumer group state. + * @deprecated Since 4.0. Use {@link GroupState} instead. 
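With ConsumerGroupState deprecated in favor of the GroupState enum added below, call sites move to the new type; its parse and groupStatesForType helpers (defined later in this hunk) cover the usual lookups. A brief usage fragment:

import java.util.Set;
import org.apache.kafka.common.GroupState;
import org.apache.kafka.common.GroupType;

GroupState state = GroupState.parse("stable");                   // case-insensitive; unknown names map to UNKNOWN
Set<GroupState> consumerStates = GroupState.groupStatesForType(GroupType.CONSUMER);
boolean validForConsumerGroups = consumerStates.contains(state); // true: STABLE applies to consumer groups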
*/ +@Deprecated public enum ConsumerGroupState { UNKNOWN("Unknown"), PREPARING_REBALANCE("PreparingRebalance"), diff --git a/clients/src/main/java/org/apache/kafka/common/GroupState.java b/clients/src/main/java/org/apache/kafka/common/GroupState.java new file mode 100644 index 0000000000000..c0bcfb999b0df --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/GroupState.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common; + +import org.apache.kafka.common.annotation.InterfaceStability; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The group state. + *

+ * The following table shows the correspondence between the group states and types.
+ * <table>
+ * <tr><th>State</th><th>Classic group</th><th>Consumer group</th><th>Share group</th></tr>
+ * <tr><td>UNKNOWN</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
+ * <tr><td>PREPARING_REBALANCE</td><td>Yes</td><td>Yes</td><td></td></tr>
+ * <tr><td>COMPLETING_REBALANCE</td><td>Yes</td><td>Yes</td><td></td></tr>
+ * <tr><td>STABLE</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
+ * <tr><td>DEAD</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
+ * <tr><td>EMPTY</td><td>Yes</td><td>Yes</td><td>Yes</td></tr>
+ * <tr><td>ASSIGNING</td><td></td><td>Yes</td><td></td></tr>
+ * <tr><td>RECONCILING</td><td></td><td>Yes</td><td></td></tr>
+ * </table>
          + */ +@InterfaceStability.Evolving +public enum GroupState { + UNKNOWN("Unknown"), + PREPARING_REBALANCE("PreparingRebalance"), + COMPLETING_REBALANCE("CompletingRebalance"), + STABLE("Stable"), + DEAD("Dead"), + EMPTY("Empty"), + ASSIGNING("Assigning"), + RECONCILING("Reconciling"); + + private static final Map NAME_TO_ENUM = Arrays.stream(values()) + .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); + + private final String name; + + GroupState(String name) { + this.name = name; + } + + /** + * Case-insensitive group state lookup by string name. + */ + public static GroupState parse(String name) { + GroupState state = NAME_TO_ENUM.get(name.toUpperCase(Locale.ROOT)); + return state == null ? UNKNOWN : state; + } + + public static Set groupStatesForType(GroupType type) { + if (type == GroupType.CLASSIC) { + return Set.of(PREPARING_REBALANCE, COMPLETING_REBALANCE, STABLE, DEAD, EMPTY); + } else if (type == GroupType.CONSUMER) { + return Set.of(PREPARING_REBALANCE, COMPLETING_REBALANCE, STABLE, DEAD, EMPTY, ASSIGNING, RECONCILING); + } else if (type == GroupType.SHARE) { + return Set.of(STABLE, DEAD, EMPTY); + } else { + throw new IllegalArgumentException("Group type not known"); + } + } + + @Override + public String toString() { + return name; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/Node.java b/clients/src/main/java/org/apache/kafka/common/Node.java index 020d2bcaf3355..e47d941e0f902 100644 --- a/clients/src/main/java/org/apache/kafka/common/Node.java +++ b/clients/src/main/java/org/apache/kafka/common/Node.java @@ -30,12 +30,13 @@ public class Node { private final String host; private final int port; private final String rack; + private final boolean isFenced; // Cache hashCode as it is called in performance sensitive parts of the code (e.g. RecordAccumulator.ready) private Integer hash; public Node(int id, String host, int port) { - this(id, host, port, null); + this(id, host, port, null, false); } public Node(int id, String host, int port, String rack) { @@ -44,6 +45,16 @@ public Node(int id, String host, int port, String rack) { this.host = host; this.port = port; this.rack = rack; + this.isFenced = false; + } + + public Node(int id, String host, int port, String rack, boolean isFenced) { + this.id = id; + this.idString = Integer.toString(id); + this.host = host; + this.port = port; + this.rack = rack; + this.isFenced = isFenced; } public static Node noNode() { @@ -102,6 +113,13 @@ public String rack() { return rack; } + /** + * Whether if this node is fenced + */ + public boolean isFenced() { + return isFenced; + } + @Override public int hashCode() { Integer h = this.hash; @@ -110,6 +128,7 @@ public int hashCode() { result = 31 * result + id; result = 31 * result + port; result = 31 * result + ((rack == null) ? 
0 : rack.hashCode()); + result = 31 * result + Objects.hashCode(isFenced); this.hash = result; return result; } else { @@ -127,12 +146,13 @@ public boolean equals(Object obj) { return id == other.id && port == other.port && Objects.equals(host, other.host) && - Objects.equals(rack, other.rack); + Objects.equals(rack, other.rack) && + Objects.equals(isFenced, other.isFenced); } @Override public String toString() { - return host + ":" + port + " (id: " + idString + " rack: " + rack + ")"; + return host + ":" + port + " (id: " + idString + " rack: " + rack + " isFenced: " + isFenced + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/Uuid.java b/clients/src/main/java/org/apache/kafka/common/Uuid.java index 45e2b9f1d8fb2..6f7f09537f178 100644 --- a/clients/src/main/java/org/apache/kafka/common/Uuid.java +++ b/clients/src/main/java/org/apache/kafka/common/Uuid.java @@ -20,8 +20,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -51,11 +49,7 @@ public class Uuid implements Comparable { /** * The set of reserved UUIDs that will never be returned by the randomUuid method. */ - public static final Set RESERVED = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( - METADATA_TOPIC_ID, - ZERO_UUID, - ONE_UUID - ))); + public static final Set RESERVED = Set.of(ZERO_UUID, ONE_UUID); private final long mostSignificantBits; private final long leastSignificantBits; diff --git a/clients/src/main/java/org/apache/kafka/common/cache/LRUCache.java b/clients/src/main/java/org/apache/kafka/common/cache/LRUCache.java index 672cb65d66ab6..11f0be4f6e54f 100644 --- a/clients/src/main/java/org/apache/kafka/common/cache/LRUCache.java +++ b/clients/src/main/java/org/apache/kafka/common/cache/LRUCache.java @@ -26,7 +26,7 @@ public class LRUCache implements Cache { private final LinkedHashMap cache; public LRUCache(final int maxSize) { - cache = new LinkedHashMap(16, .75f, true) { + cache = new LinkedHashMap<>(16, .75f, true) { @Override protected boolean removeEldestEntry(Map.Entry eldest) { return this.size() > maxSize; // require this. 
prefix to make lgtm.com happy diff --git a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java index 9cf5fbae51514..970d9cebf7231 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java +++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java @@ -1501,7 +1501,7 @@ private void getConfigKeyRst(ConfigKey key, StringBuilder b) { b.append("``").append(key.name).append("``").append("\n"); if (key.documentation != null) { for (String docLine : key.documentation.split("\n")) { - if (docLine.length() == 0) { + if (docLine.isEmpty()) { continue; } b.append(" ").append(docLine).append("\n\n"); @@ -1532,7 +1532,7 @@ private List sortedConfigs() { } List configs = new ArrayList<>(configKeys.values()); - Collections.sort(configs, (k1, k2) -> compare(k1, k2, groupOrd)); + configs.sort((k1, k2) -> compare(k1, k2, groupOrd)); return configs; } diff --git a/clients/src/main/java/org/apache/kafka/common/config/SslClientAuth.java b/clients/src/main/java/org/apache/kafka/common/config/SslClientAuth.java index 75f8e3640e9fa..c9552b7c44da8 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SslClientAuth.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SslClientAuth.java @@ -17,8 +17,6 @@ package org.apache.kafka.common.config; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Locale; @@ -30,8 +28,7 @@ public enum SslClientAuth { REQUESTED, NONE; - public static final List VALUES = - Collections.unmodifiableList(Arrays.asList(SslClientAuth.values())); + public static final List VALUES = List.of(SslClientAuth.values()); public static SslClientAuth forConfig(String key) { if (key == null) { diff --git a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java index 1437577ed00ad..fb51d254cdd41 100755 --- a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java @@ -198,46 +198,9 @@ public class TopicConfig { public static final String PREALLOCATE_DOC = "True if we should preallocate the file on disk when " + "creating a new log segment."; - /** - * @deprecated since 3.0, removal planned in 4.0. The default value for this config is appropriate - * for most situations. - */ - @Deprecated - public static final String MESSAGE_FORMAT_VERSION_CONFIG = "message.format.version"; - - /** - * @deprecated since 3.0, removal planned in 4.0. The default value for this config is appropriate - * for most situations. - */ - @Deprecated - public static final String MESSAGE_FORMAT_VERSION_DOC = "[DEPRECATED] Specify the message format version the broker " + - "will use to append messages to the logs. The value of this config is always assumed to be `3.0` if " + - "`inter.broker.protocol.version` is 3.0 or higher (the actual config value is ignored). Otherwise, the value should " + - "be a valid ApiVersion. Some examples are: 0.10.0, 1.1, 2.8, 3.0. By setting a particular message format version, the " + - "user is certifying that all the existing messages on disk are smaller or equal than the specified version. 
Setting " + - "this value incorrectly will cause consumers with older versions to break as they will receive messages with a format " + - "that they don't understand."; - public static final String MESSAGE_TIMESTAMP_TYPE_CONFIG = "message.timestamp.type"; public static final String MESSAGE_TIMESTAMP_TYPE_DOC = "Define whether the timestamp in the message is " + - "message create time or log append time. The value should be either `CreateTime` or `LogAppendTime`"; - - /** - * @deprecated since 3.6, removal planned in 4.0. - * Use message.timestamp.before.max.ms and message.timestamp.after.max.ms instead - */ - @Deprecated - public static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG = "message.timestamp.difference.max.ms"; - - /** - * @deprecated since 3.6, removal planned in 4.0. - * Use message.timestamp.before.max.ms and message.timestamp.after.max.ms instead - */ - @Deprecated - public static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC = "[DEPRECATED] The maximum difference allowed between " + - "the timestamp when a broker receives a message and the timestamp specified in the message. If " + - "message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp " + - "exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."; + "message create time or log append time."; public static final String MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG = "message.timestamp.before.max.ms"; public static final String MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC = "This configuration sets the allowable timestamp " + @@ -251,10 +214,18 @@ public class TopicConfig { "or equal to the broker's timestamp, with the maximum allowable difference determined by the value set in this " + "configuration. If message.timestamp.type=CreateTime, the message will be rejected if the difference in " + "timestamps exceeds this specified threshold. This configuration is ignored if message.timestamp.type=LogAppendTime."; + + /** + * @deprecated down-conversion is not possible in Apache Kafka 4.0 and newer, hence this configuration is a no-op, + * and it is deprecated for removal in Apache Kafka 5.0. + */ + @Deprecated public static final String MESSAGE_DOWNCONVERSION_ENABLE_CONFIG = "message.downconversion.enable"; - public static final String MESSAGE_DOWNCONVERSION_ENABLE_DOC = "This configuration controls whether " + - "down-conversion of message formats is enabled to satisfy consume requests. When set to false, " + - "broker will not perform down-conversion for consumers expecting an older message format. The broker responds " + - "with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration" + - "does not apply to any message format conversion that might be required for replication to followers."; + + /** + * @deprecated see {@link #MESSAGE_DOWNCONVERSION_ENABLE_CONFIG}. 
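The message.timestamp.before.max.ms / message.timestamp.after.max.ms pair described above replaces the removed message.timestamp.difference.max.ms with separate bounds for past and future timestamps. A sketch of setting them at topic creation time through the Admin client; the topic name, partition and replica counts, and the one-hour bounds are arbitrary placeholders:

import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

static void createTopicWithTimestampBounds(Admin admin) throws Exception {
    NewTopic topic = new NewTopic("events", 3, (short) 3).configs(Map.of(
            TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime",
            TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, "3600000",   // reject timestamps more than 1h in the past
            TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, "3600000"));  // reject timestamps more than 1h in the future
    admin.createTopics(List.of(topic)).all().get();
}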
+ */ + @Deprecated + public static final String MESSAGE_DOWNCONVERSION_ENABLE_DOC = "Down-conversion is not possible in Apache Kafka 4.0 and newer, " + + "hence this configuration is no-op and it is deprecated for removal in Apache Kafka 5.0."; } diff --git a/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java b/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java index 86f991bf7a625..3e0fa5ed77258 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java +++ b/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java @@ -24,7 +24,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Map; @@ -117,7 +116,7 @@ private ConfigData get(String path, Predicate fileFilter) { private static String read(Path path) { try { - return new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + return Files.readString(path); } catch (IOException e) { log.error("Could not read file {} for property {}", path, path.getFileName(), e); throw new ConfigException("Could not read file " + path + " for property " + path.getFileName()); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderOrFollowerException.java b/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderOrFollowerException.java index 2db960b738e07..c45f158e69988 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderOrFollowerException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderOrFollowerException.java @@ -24,8 +24,7 @@ * satisfied by a leader or follower, this exception indicates that the broker is not a replica * of the topic partition. */ -@SuppressWarnings("deprecation") -public class NotLeaderOrFollowerException extends NotLeaderForPartitionException { +public class NotLeaderOrFollowerException extends InvalidMetadataException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/errors/RebootstrapRequiredException.java similarity index 69% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerValidatorCallbackHandler.java rename to clients/src/main/java/org/apache/kafka/common/errors/RebootstrapRequiredException.java index 817f53b9cb12e..78a66aabd3e4a 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/RebootstrapRequiredException.java @@ -15,13 +15,16 @@ * limitations under the License. 
*/ -package org.apache.kafka.common.security.oauthbearer.secured; +package org.apache.kafka.common.errors; -/** - * @deprecated See org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler - */ +public class RebootstrapRequiredException extends ApiException { + private static final long serialVersionUID = 1L; -@Deprecated -public class OAuthBearerValidatorCallbackHandler extends org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler { + public RebootstrapRequiredException(String message) { + super(message); + } + public RebootstrapRequiredException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java b/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java index aee57c47d28de..6ed441d444b9b 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java @@ -45,6 +45,9 @@ public enum DeserializationExceptionOrigin { private final ByteBuffer valueBuffer; private final Headers headers; + /** + * @deprecated Since 3.9. Use {@link #RecordDeserializationException(DeserializationExceptionOrigin, TopicPartition, long, long, TimestampType, ByteBuffer, ByteBuffer, Headers, String, Throwable)} instead. + */ @Deprecated public RecordDeserializationException(TopicPartition partition, long offset, diff --git a/core/src/main/scala/kafka/raft/SegmentPosition.scala b/clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyEpochException.java similarity index 74% rename from core/src/main/scala/kafka/raft/SegmentPosition.scala rename to clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyEpochException.java index eb6a59f35d3bc..2c14a2d37cb87 100644 --- a/core/src/main/scala/kafka/raft/SegmentPosition.scala +++ b/clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyEpochException.java @@ -14,10 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package kafka.raft +package org.apache.kafka.common.errors; -import org.apache.kafka.raft.OffsetMetadata - -case class SegmentPosition(baseOffset: Long, relativePosition: Int) extends OffsetMetadata { - override def toString: String = s"(segmentBaseOffset=$baseOffset,relativePositionInSegment=$relativePosition)" +public class StreamsInvalidTopologyEpochException extends ApiException { + public StreamsInvalidTopologyEpochException(String message) { + super(message); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyException.java b/clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyException.java new file mode 100644 index 0000000000000..28a5c8ab77de8 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/StreamsInvalidTopologyException.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +public class StreamsInvalidTopologyException extends ApiException { + public StreamsInvalidTopologyException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/StreamsTopologyFencedException.java b/clients/src/main/java/org/apache/kafka/common/errors/StreamsTopologyFencedException.java new file mode 100644 index 0000000000000..8e4120221a0f1 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/StreamsTopologyFencedException.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +public class StreamsTopologyFencedException extends ApiException { + public StreamsTopologyFencedException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java index 7137f723d8fc5..52863c6c0b564 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java +++ b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java @@ -123,7 +123,7 @@ private void canWrite() { } private Iterator

<Header> closeAware(final Iterator<Header> original) { - return new Iterator<Header>() { + return new Iterator<>
          () { + return new Iterator<>() { @Override public boolean hasNext() { return original.hasNext(); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java b/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java index bdaa22b32f5b9..0612015d1a2fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java @@ -70,18 +70,7 @@ public class JmxReporter implements MetricsReporter { private Predicate mbeanPredicate = s -> true; public JmxReporter() { - this(""); - } - - /** - * Create a JMX reporter that prefixes all metrics with the given string. - * @deprecated Since 2.6.0. Use {@link JmxReporter#JmxReporter()} - * Initialize JmxReporter with {@link JmxReporter#contextChange(MetricsContext)} - * Populate prefix by adding _namespace/prefix key value pair to {@link MetricsContext} - */ - @Deprecated - public JmxReporter(String prefix) { - this.prefix = prefix != null ? prefix : ""; + this.prefix = ""; } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java b/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java index b0761248894c5..a5da5294b4d4d 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java @@ -367,7 +367,7 @@ public boolean hasExpired() { } synchronized List metrics() { - return unmodifiableList(new ArrayList<>(this.metrics.values())); + return List.copyOf(this.metrics.values()); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java index 0bf64c77c6717..aea38c72cacdf 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java @@ -56,8 +56,6 @@ private ChannelBuilders() { } * @param listenerName the listenerName if contextType is SERVER or null otherwise * @param clientSaslMechanism SASL mechanism if mode is CLIENT, ignored otherwise * @param time the time instance - * @param saslHandshakeRequestEnable flag to enable Sasl handshake requests; disabled only for SASL - * inter-broker connections with inter-broker protocol version < 0.10 * @param logContext the log context instance * * @return the configured `ChannelBuilder` @@ -70,7 +68,6 @@ public static ChannelBuilder clientChannelBuilder( ListenerName listenerName, String clientSaslMechanism, Time time, - boolean saslHandshakeRequestEnable, LogContext logContext) { if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) { @@ -80,7 +77,7 @@ public static ChannelBuilder clientChannelBuilder( throw new IllegalArgumentException("`clientSaslMechanism` must be non-null in client mode if `securityProtocol` is `" + securityProtocol + "`"); } return create(securityProtocol, ConnectionMode.CLIENT, contextType, config, listenerName, false, clientSaslMechanism, - saslHandshakeRequestEnable, null, null, time, logContext, null); + null, null, time, logContext, null); } /** @@ -106,8 +103,8 @@ public static ChannelBuilder serverChannelBuilder(ListenerName listenerName, LogContext logContext, Function apiVersionSupplier) { return create(securityProtocol, ConnectionMode.SERVER, JaasContext.Type.SERVER, config, listenerName, - isInterBrokerListener, null, true, credentialCache, - tokenCache, 
time, logContext, apiVersionSupplier); + isInterBrokerListener, null, credentialCache, tokenCache, time, logContext, + apiVersionSupplier); } private static ChannelBuilder create(SecurityProtocol securityProtocol, @@ -117,7 +114,6 @@ private static ChannelBuilder create(SecurityProtocol securityProtocol, ListenerName listenerName, boolean isInterBrokerListener, String clientSaslMechanism, - boolean saslHandshakeRequestEnable, CredentialCache credentialCache, DelegationTokenCache tokenCache, Time time, @@ -175,7 +171,6 @@ private static ChannelBuilder create(SecurityProtocol securityProtocol, listenerName, isInterBrokerListener, clientSaslMechanism, - saslHandshakeRequestEnable, credentialCache, tokenCache, sslClientAuthOverride, diff --git a/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java index 4c73e4e3e4c94..9b3bff9b9faf2 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java @@ -85,7 +85,6 @@ public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurabl private final String clientSaslMechanism; private final ConnectionMode connectionMode; private final Map jaasContexts; - private final boolean handshakeRequestEnable; private final CredentialCache credentialCache; private final DelegationTokenCache tokenCache; private final Map loginManagers; @@ -108,7 +107,6 @@ public SaslChannelBuilder(ConnectionMode connectionMode, ListenerName listenerName, boolean isInterBrokerListener, String clientSaslMechanism, - boolean handshakeRequestEnable, CredentialCache credentialCache, DelegationTokenCache tokenCache, String sslClientAuthOverride, @@ -122,7 +120,6 @@ public SaslChannelBuilder(ConnectionMode connectionMode, this.securityProtocol = securityProtocol; this.listenerName = listenerName; this.isInterBrokerListener = isInterBrokerListener; - this.handshakeRequestEnable = handshakeRequestEnable; this.clientSaslMechanism = clientSaslMechanism; this.credentialCache = credentialCache; this.tokenCache = tokenCache; @@ -295,7 +292,7 @@ protected SaslClientAuthenticator buildClientAuthenticator(Map config String servicePrincipal, TransportLayer transportLayer, Subject subject) { return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, - serverHost, clientSaslMechanism, handshakeRequestEnable, transportLayer, time, logContext); + serverHost, clientSaslMechanism, transportLayer, time, logContext); } // Package private for testing diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index e95882be69927..9863aad40227c 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -130,7 +130,11 @@ public enum ApiKeys { READ_SHARE_GROUP_STATE(ApiMessageType.READ_SHARE_GROUP_STATE, true), WRITE_SHARE_GROUP_STATE(ApiMessageType.WRITE_SHARE_GROUP_STATE, true), DELETE_SHARE_GROUP_STATE(ApiMessageType.DELETE_SHARE_GROUP_STATE, true), - READ_SHARE_GROUP_STATE_SUMMARY(ApiMessageType.READ_SHARE_GROUP_STATE_SUMMARY, true); + READ_SHARE_GROUP_STATE_SUMMARY(ApiMessageType.READ_SHARE_GROUP_STATE_SUMMARY, true), + STREAMS_GROUP_HEARTBEAT(ApiMessageType.STREAMS_GROUP_HEARTBEAT), + STREAMS_GROUP_DESCRIBE(ApiMessageType.STREAMS_GROUP_DESCRIBE), + 
DESCRIBE_SHARE_GROUP_OFFSETS(ApiMessageType.DESCRIBE_SHARE_GROUP_OFFSETS); + private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); @@ -194,7 +198,7 @@ public enum ApiKeys { private static boolean shouldRetainsBufferReference(Schema[] requestSchemas) { boolean requestRetainsBufferReference = false; for (Schema requestVersionSchema : requestSchemas) { - if (retainsBufferReference(requestVersionSchema)) { + if (requestVersionSchema != null && retainsBufferReference(requestVersionSchema)) { requestRetainsBufferReference = true; break; } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index a80ec308ebb0a..2dbf3abde9791 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -107,6 +107,7 @@ import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.ReassignmentInProgressException; import org.apache.kafka.common.errors.RebalanceInProgressException; +import org.apache.kafka.common.errors.RebootstrapRequiredException; import org.apache.kafka.common.errors.RecordBatchTooLargeException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.ReplicaNotAvailableException; @@ -118,6 +119,9 @@ import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.StaleMemberEpochException; +import org.apache.kafka.common.errors.StreamsInvalidTopologyEpochException; +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.errors.StreamsTopologyFencedException; import org.apache.kafka.common.errors.TelemetryTooLargeException; import org.apache.kafka.common.errors.ThrottlingQuotaExceededException; import org.apache.kafka.common.errors.TimeoutException; @@ -411,7 +415,11 @@ public enum Errors { INVALID_VOTER_KEY(125, "The voter key doesn't match the receiving replica's key.", InvalidVoterKeyException::new), DUPLICATE_VOTER(126, "The voter is already part of the set of voters.", DuplicateVoterException::new), VOTER_NOT_FOUND(127, "The voter is not part of the set of voters.", VoterNotFoundException::new), - INVALID_REGULAR_EXPRESSION(128, "The regular expression is not valid.", InvalidRegularExpression::new); + INVALID_REGULAR_EXPRESSION(128, "The regular expression is not valid.", InvalidRegularExpression::new), + REBOOTSTRAP_REQUIRED(129, "Client metadata is stale, client should rebootstrap to obtain new metadata.", RebootstrapRequiredException::new), + STREAMS_INVALID_TOPOLOGY(130, "The supplied topology is invalid.", StreamsInvalidTopologyException::new), + STREAMS_INVALID_TOPOLOGY_EPOCH(131, "The supplied topology epoch is invalid.", StreamsInvalidTopologyEpochException::new), + STREAMS_TOPOLOGY_FENCED(132, "The supplied topology epoch is outdated.", StreamsTopologyFencedException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java index ab6600a7d059f..90db613841de9 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java @@ -225,11 +225,35 @@ public 
static ByteBuffer toVersionPrefixedByteBuffer(final short version, final public static byte[] toVersionPrefixedBytes(final short version, final Message message) { ByteBuffer buffer = toVersionPrefixedByteBuffer(version, message); - // take the inner array directly if it is full with data + // take the inner array directly if it is full of data. if (buffer.hasArray() && - buffer.arrayOffset() == 0 && - buffer.position() == 0 && - buffer.limit() == buffer.array().length) return buffer.array(); + buffer.arrayOffset() == 0 && + buffer.position() == 0 && + buffer.limit() == buffer.array().length) return buffer.array(); + else return Utils.toArray(buffer); + } + + public static ByteBuffer toCoordinatorTypePrefixedByteBuffer(final short type, final Message message) { + if (message.highestSupportedVersion() != 0 || message.lowestSupportedVersion() != 0) { + throw new IllegalArgumentException("Cannot serialize a message with a different version than 0."); + } + + ObjectSerializationCache cache = new ObjectSerializationCache(); + int messageSize = message.size(cache, (short) 0); + ByteBufferAccessor bytes = new ByteBufferAccessor(ByteBuffer.allocate(messageSize + 2)); + bytes.writeShort(type); + message.write(bytes, cache, (short) 0); + bytes.flip(); + return bytes.buffer(); + } + + public static byte[] toCoordinatorTypePrefixedBytes(final short type, final Message message) { + ByteBuffer buffer = toCoordinatorTypePrefixedByteBuffer(type, message); + // take the inner array directly if it is full of data. + if (buffer.hasArray() && + buffer.arrayOffset() == 0 && + buffer.position() == 0 && + buffer.limit() == buffer.array().length) return buffer.array(); else return Utils.toArray(buffer); } } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java index 090d7c92eb06d..614c8cd22e4db 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java @@ -31,10 +31,7 @@ public class Protocol { private static String indentString(int size) { - StringBuilder b = new StringBuilder(size); - for (int i = 0; i < size; i++) - b.append(" "); - return b.toString(); + return " ".repeat(Math.max(0, size)); } private static void schemaToBnfHtml(Schema schema, StringBuilder b, int indentSize) { diff --git a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java index 9ab8715236e74..e47d7c866ccc3 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java @@ -235,7 +235,7 @@ CloseableIterator iterator(BufferSupplier bufferSupplier) { if (isCompressed()) return new DeepRecordsIterator(this, false, Integer.MAX_VALUE, bufferSupplier); - return new CloseableIterator() { + return new CloseableIterator<>() { private boolean hasNext = true; @Override diff --git a/clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java b/clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java index 5fddaae40624d..16ee3596ea309 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java @@ -71,7 +71,7 @@ public DefaultRecordsSend toSend() { } private Iterator recordsIterator() { - 
return new AbstractIterator() { + return new AbstractIterator<>() { private final Iterator batches = batches().iterator(); private Iterator records; diff --git a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java deleted file mode 100644 index 50a8f27f42c5c..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.record; - -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.AbstractIterator; -import org.apache.kafka.common.utils.Time; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Encapsulation for holding records that require down-conversion in a lazy, chunked manner (KIP-283). See - * {@link LazyDownConversionRecordsSend} for the actual chunked send implementation. - */ -public class LazyDownConversionRecords implements BaseRecords { - private final TopicPartition topicPartition; - private final Records records; - private final byte toMagic; - private final long firstOffset; - private ConvertedRecords firstConvertedBatch; - private final int sizeInBytes; - private final Time time; - - /** - * @param topicPartition The topic-partition to which records belong - * @param records Records to lazily down-convert - * @param toMagic Magic version to down-convert to - * @param firstOffset The starting offset for down-converted records. This only impacts some cases. See - * {@link RecordsUtil#downConvert(Iterable, byte, long, Time)} for an explanation. - * @param time The time instance to use - * - * @throws org.apache.kafka.common.errors.UnsupportedCompressionTypeException If the first batch to down-convert - * has a compression type which we do not support down-conversion for. - */ - public LazyDownConversionRecords(TopicPartition topicPartition, Records records, byte toMagic, long firstOffset, Time time) { - this.topicPartition = Objects.requireNonNull(topicPartition); - this.records = Objects.requireNonNull(records); - this.toMagic = toMagic; - this.firstOffset = firstOffset; - this.time = Objects.requireNonNull(time); - - // To make progress, kafka consumers require at least one full record batch per partition, i.e. we need to - // ensure we can accommodate one full batch of down-converted messages. We achieve this by having `sizeInBytes` - // factor in the size of the first down-converted batch and we return at least that many bytes. 
- java.util.Iterator> it = iterator(0); - if (it.hasNext()) { - firstConvertedBatch = it.next(); - sizeInBytes = Math.max(records.sizeInBytes(), firstConvertedBatch.records().sizeInBytes()); - } else { - // If there are messages before down-conversion and no messages after down-conversion, - // make sure we are able to send at least an overflow message to the consumer so that it can throw - // a RecordTooLargeException. Typically, the consumer would need to increase the fetch size in such cases. - // If there are no messages before down-conversion, we return an empty record batch. - firstConvertedBatch = null; - sizeInBytes = records.batches().iterator().hasNext() ? LazyDownConversionRecordsSend.MIN_OVERFLOW_MESSAGE_LENGTH : 0; - } - } - - @Override - public int sizeInBytes() { - return sizeInBytes; - } - - @Override - public LazyDownConversionRecordsSend toSend() { - return new LazyDownConversionRecordsSend(this); - } - - public TopicPartition topicPartition() { - return topicPartition; - } - - @Override - public boolean equals(Object o) { - if (o instanceof LazyDownConversionRecords) { - LazyDownConversionRecords that = (LazyDownConversionRecords) o; - return toMagic == that.toMagic && - firstOffset == that.firstOffset && - topicPartition.equals(that.topicPartition) && - records.equals(that.records); - } - return false; - } - - @Override - public int hashCode() { - int result = toMagic; - result = 31 * result + Long.hashCode(firstOffset); - result = 31 * result + topicPartition.hashCode(); - result = 31 * result + records.hashCode(); - return result; - } - - @Override - public String toString() { - return "LazyDownConversionRecords(size=" + sizeInBytes + - ", underlying=" + records + - ", toMagic=" + toMagic + - ", firstOffset=" + firstOffset + - ")"; - } - - public final java.util.Iterator> iterator(long maximumReadSize) { - // We typically expect only one iterator instance to be created, so null out the first converted batch after - // first use to make it available for GC. - ConvertedRecords firstBatch = firstConvertedBatch; - firstConvertedBatch = null; - return new Iterator(records, maximumReadSize, firstBatch); - } - - /** - * Implementation for being able to iterate over down-converted records. Goal of this implementation is to keep - * it as memory-efficient as possible by not having to maintain all down-converted records in-memory. Maintains - * a view into batches of down-converted records. - */ - private class Iterator extends AbstractIterator> { - private final AbstractIterator batchIterator; - private final long maximumReadSize; - private ConvertedRecords firstConvertedBatch; - - /** - * @param recordsToDownConvert Records that require down-conversion - * @param maximumReadSize Maximum possible size of underlying records that will be down-converted in each call to - * {@link #makeNext()}. This is a soft limit as {@link #makeNext()} will always convert - * and return at least one full message batch. 
- */ - private Iterator(Records recordsToDownConvert, long maximumReadSize, ConvertedRecords firstConvertedBatch) { - this.batchIterator = recordsToDownConvert.batchIterator(); - this.maximumReadSize = maximumReadSize; - this.firstConvertedBatch = firstConvertedBatch; - // If we already have the first down-converted batch, advance the underlying records iterator to next batch - if (firstConvertedBatch != null) - this.batchIterator.next(); - } - - /** - * Make next set of down-converted records - * @return Down-converted records - */ - @Override - protected ConvertedRecords makeNext() { - // If we have cached the first down-converted batch, return that now - if (firstConvertedBatch != null) { - ConvertedRecords convertedBatch = firstConvertedBatch; - firstConvertedBatch = null; - return convertedBatch; - } - - while (batchIterator.hasNext()) { - final List batches = new ArrayList<>(); - boolean isFirstBatch = true; - long sizeSoFar = 0; - - // Figure out batches we should down-convert based on the size constraints - while (batchIterator.hasNext() && - (isFirstBatch || (batchIterator.peek().sizeInBytes() + sizeSoFar) <= maximumReadSize)) { - RecordBatch currentBatch = batchIterator.next(); - batches.add(currentBatch); - sizeSoFar += currentBatch.sizeInBytes(); - isFirstBatch = false; - } - - ConvertedRecords convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time); - // During conversion, it is possible that we drop certain batches because they do not have an equivalent - // representation in the message format we want to convert to. For example, V0 and V1 message formats - // have no notion of transaction markers which were introduced in V2 so they get dropped during conversion. - // We return converted records only when we have at least one valid batch of messages after conversion. - if (convertedRecords.records().sizeInBytes() > 0) - return convertedRecords; - } - return allDone(); - } - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java deleted file mode 100644 index 1bced605579b7..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.record; - -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; -import org.apache.kafka.common.network.TransferableChannel; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Iterator; - -/** - * Encapsulation for {@link RecordsSend} for {@link LazyDownConversionRecords}. Records are down-converted in batches and - * on-demand when {@link #writeTo} method is called. - */ -public final class LazyDownConversionRecordsSend extends RecordsSend { - private static final Logger log = LoggerFactory.getLogger(LazyDownConversionRecordsSend.class); - private static final int MAX_READ_SIZE = 128 * 1024; - static final int MIN_OVERFLOW_MESSAGE_LENGTH = Records.LOG_OVERHEAD; - - private final RecordValidationStats recordValidationStats; - private final Iterator> convertedRecordsIterator; - - private RecordsSend convertedRecordsWriter; - - public LazyDownConversionRecordsSend(LazyDownConversionRecords records) { - super(records, records.sizeInBytes()); - convertedRecordsWriter = null; - recordValidationStats = new RecordValidationStats(); - convertedRecordsIterator = records().iterator(MAX_READ_SIZE); - } - - private MemoryRecords buildOverflowBatch(int remaining) { - // We do not have any records left to down-convert. Construct an overflow message for the length remaining. - // This message will be ignored by the consumer because its length will be past the length of maximum - // possible response size. - // DefaultRecordBatch => - // BaseOffset => Int64 - // Length => Int32 - // ... - ByteBuffer overflowMessageBatch = ByteBuffer.allocate( - Math.max(MIN_OVERFLOW_MESSAGE_LENGTH, Math.min(remaining + 1, MAX_READ_SIZE))); - overflowMessageBatch.putLong(-1L); - - // Fill in the length of the overflow batch. A valid batch must be at least as long as the minimum batch - // overhead. - overflowMessageBatch.putInt(Math.max(remaining + 1, DefaultRecordBatch.RECORD_BATCH_OVERHEAD)); - log.debug("Constructed overflow message batch for partition {} with length={}", topicPartition(), remaining); - return MemoryRecords.readableRecords(overflowMessageBatch); - } - - @Override - public int writeTo(TransferableChannel channel, int previouslyWritten, int remaining) throws IOException { - if (convertedRecordsWriter == null || convertedRecordsWriter.completed()) { - MemoryRecords convertedRecords; - - try { - // Check if we have more chunks left to down-convert - if (convertedRecordsIterator.hasNext()) { - // Get next chunk of down-converted messages - ConvertedRecords recordsAndStats = convertedRecordsIterator.next(); - convertedRecords = (MemoryRecords) recordsAndStats.records(); - recordValidationStats.add(recordsAndStats.recordConversionStats()); - log.debug("Down-converted records for partition {} with length={}", topicPartition(), convertedRecords.sizeInBytes()); - } else { - convertedRecords = buildOverflowBatch(remaining); - } - } catch (UnsupportedCompressionTypeException e) { - // We have encountered a compression type which does not support down-conversion (e.g. zstd). - // Since we have already sent at least one batch and we have committed to the fetch size, we - // send an overflow batch. The consumer will read the first few records and then fetch from the - // offset of the batch which has the unsupported compression type. 
At that time, we will - // send back the UNSUPPORTED_COMPRESSION_TYPE error which will allow the consumer to fail gracefully. - convertedRecords = buildOverflowBatch(remaining); - } - - convertedRecordsWriter = new DefaultRecordsSend<>(convertedRecords, Math.min(convertedRecords.sizeInBytes(), remaining)); - } - // safe to cast to int since `remaining` is an int - return (int) convertedRecordsWriter.writeTo(channel); - } - - public RecordValidationStats recordConversionStats() { - return recordValidationStats; - } - - public TopicPartition topicPartition() { - return records().topicPartition(); - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java index c01bca2496ed1..3aee889aded6e 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common.record; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.message.KRaftVersionRecord; @@ -137,12 +136,8 @@ public Integer firstBatchSize() { /** * Filter the records into the provided ByteBuffer. * - * @param partition The partition that is filtered (used only for logging) * @param filter The filter function * @param destinationBuffer The byte buffer to write the filtered records to - * @param maxRecordBatchSize The maximum record batch size. Note this is not a hard limit: if a batch - * exceeds this after filtering, we log a warning, but the batch will still be - * created. * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. For small * record batches, allocating a potentially large buffer (64 KB for LZ4) will * dominate the cost of decompressing and iterating over the records in the @@ -150,18 +145,16 @@ public Integer firstBatchSize() { * performance impact. * @return A FilterResult with a summary of the output (for metrics) and potentially an overflow buffer */ - public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, - int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { - return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); + public FilterResult filterTo(RecordFilter filter, ByteBuffer destinationBuffer, BufferSupplier decompressionBufferSupplier) { + return filterTo(batches(), filter, destinationBuffer, decompressionBufferSupplier); } /** * Note: This method is also used to convert the first timestamp of the batch (which is usually the timestamp of the first record) * to the delete horizon of the tombstones or txn markers which are present in the batch. 
*/ - private static FilterResult filterTo(TopicPartition partition, Iterable batches, - RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, - BufferSupplier decompressionBufferSupplier) { + private static FilterResult filterTo(Iterable batches, RecordFilter filter, + ByteBuffer destinationBuffer, BufferSupplier decompressionBufferSupplier) { FilterResult filterResult = new FilterResult(destinationBuffer); ByteBufferOutputStream bufferOutputStream = new ByteBufferOutputStream(destinationBuffer); for (MutableRecordBatch batch : batches) { @@ -174,15 +167,9 @@ private static FilterResult filterTo(TopicPartition partition, Iterable retainedRecords = new ArrayList<>(); - - final BatchFilterResult iterationResult = filterBatch(batch, decompressionBufferSupplier, filterResult, filter, - batchMagic, true, retainedRecords); + final BatchFilterResult iterationResult = filterBatch(batch, decompressionBufferSupplier, filterResult, + filter); + List retainedRecords = iterationResult.retainedRecords; boolean containsTombstones = iterationResult.containsTombstones; boolean writeOriginalBatch = iterationResult.writeOriginalBatch; long maxOffset = iterationResult.maxOffset; @@ -191,8 +178,8 @@ private static FilterResult filterTo(TopicPartition partition, Iterable= RecordBatch.MAGIC_VALUE_V2 && (containsTombstones || containsMarkerForEmptyTxn) - && !batch.deleteHorizonMs().isPresent(); + boolean needToSetDeleteHorizon = (containsTombstones || containsMarkerForEmptyTxn) && + batch.deleteHorizonMs().isEmpty(); if (writeOriginalBatch && !needToSetDeleteHorizon) { batch.writeTo(bufferOutputStream); filterResult.updateRetainedBatchMetadata(batch, retainedRecords.size(), false); @@ -202,26 +189,21 @@ private static FilterResult filterTo(TopicPartition partition, Iterable batch.sizeInBytes() && filteredBatchSize > maxRecordBatchSize) - log.warn("Record batch from {} with last offset {} exceeded max record batch size {} after cleaning " + - "(new size is {}). 
Consumers with version earlier than 0.10.1.0 may need to " + - "increase their fetch sizes.", - partition, batch.lastOffset(), maxRecordBatchSize, filteredBatchSize); - MemoryRecordsBuilder.RecordsInfo info = builder.info(); filterResult.updateRetainedBatchMetadata(info.maxTimestamp, info.shallowOffsetOfMaxTimestamp, maxOffset, retainedRecords.size(), filteredBatchSize); } } } else if (batchRetention == BatchRetention.RETAIN_EMPTY) { - if (batchMagic < RecordBatch.MAGIC_VALUE_V2) + if (batch.magic() < RecordBatch.MAGIC_VALUE_V2) // should never happen throw new IllegalStateException("Empty batches are only supported for magic v2 and above"); bufferOutputStream.ensureRemaining(DefaultRecordBatch.RECORD_BATCH_OVERHEAD); - DefaultRecordBatch.writeEmptyHeader(bufferOutputStream.buffer(), batchMagic, batch.producerId(), + DefaultRecordBatch.writeEmptyHeader(bufferOutputStream.buffer(), RecordBatch.CURRENT_MAGIC_VALUE, batch.producerId(), batch.producerEpoch(), batch.baseSequence(), batch.baseOffset(), batch.lastOffset(), batch.partitionLeaderEpoch(), batch.timestampType(), batch.maxTimestamp(), batch.isTransactional(), batch.isControlBatch()); @@ -243,23 +225,18 @@ private static FilterResult filterTo(TopicPartition partition, Iterable retainedRecords) { - long maxOffset = -1; - boolean containsTombstones = false; + RecordFilter filter) { try (final CloseableIterator iterator = batch.streamingIterator(decompressionBufferSupplier)) { + long maxOffset = -1; + boolean containsTombstones = false; + // Convert records with old record versions + boolean writeOriginalBatch = batch.magic() >= RecordBatch.CURRENT_MAGIC_VALUE; + List retainedRecords = new ArrayList<>(); while (iterator.hasNext()) { Record record = iterator.next(); filterResult.messagesRead += 1; if (filter.shouldRetainRecord(batch, record)) { - // Check for log corruption due to KAFKA-4298. If we find it, make sure that we overwrite - // the corrupted batch with correct data. - if (!record.hasMagic(batchMagic)) - writeOriginalBatch = false; - if (record.offset() > maxOffset) maxOffset = record.offset(); @@ -272,17 +249,20 @@ private static BatchFilterResult filterBatch(RecordBatch batch, writeOriginalBatch = false; } } - return new BatchFilterResult(writeOriginalBatch, containsTombstones, maxOffset); + return new BatchFilterResult(retainedRecords, writeOriginalBatch, containsTombstones, maxOffset); } } private static class BatchFilterResult { + private final List retainedRecords; private final boolean writeOriginalBatch; private final boolean containsTombstones; private final long maxOffset; - private BatchFilterResult(final boolean writeOriginalBatch, - final boolean containsTombstones, - final long maxOffset) { + private BatchFilterResult(List retainedRecords, + final boolean writeOriginalBatch, + final boolean containsTombstones, + final long maxOffset) { + this.retainedRecords = retainedRecords; this.writeOriginalBatch = writeOriginalBatch; this.containsTombstones = containsTombstones; this.maxOffset = maxOffset; @@ -293,15 +273,20 @@ private static MemoryRecordsBuilder buildRetainedRecordsInto(RecordBatch origina List retainedRecords, ByteBufferOutputStream bufferOutputStream, final long deleteHorizonMs) { - byte magic = originalBatch.magic(); Compression compression = Compression.of(originalBatch.compressionType()).build(); - TimestampType timestampType = originalBatch.timestampType(); + // V0 has no timestamp type or timestamp, so we set the timestamp to CREATE_TIME and timestamp to NO_TIMESTAMP. 
+ // Note that this differs from produce up-conversion where the timestamp type topic config is used and the log append + // time is generated if the config is LOG_APPEND_TIME. The reason for the different behavior is that there is + // no appropriate log append time we can generate at compaction time. + TimestampType timestampType = originalBatch.timestampType() == TimestampType.NO_TIMESTAMP_TYPE ? + TimestampType.CREATE_TIME : originalBatch.timestampType(); long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? originalBatch.maxTimestamp() : RecordBatch.NO_TIMESTAMP; - long baseOffset = magic >= RecordBatch.MAGIC_VALUE_V2 ? + long baseOffset = originalBatch.magic() >= RecordBatch.MAGIC_VALUE_V2 ? originalBatch.baseOffset() : retainedRecords.get(0).offset(); - MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferOutputStream, magic, + // Convert records with older record versions to the current one + MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferOutputStream, RecordBatch.CURRENT_MAGIC_VALUE, compression, timestampType, baseOffset, logAppendTime, originalBatch.producerId(), originalBatch.producerEpoch(), originalBatch.baseSequence(), originalBatch.isTransactional(), originalBatch.isControlBatch(), originalBatch.partitionLeaderEpoch(), bufferOutputStream.limit(), deleteHorizonMs); @@ -309,7 +294,7 @@ private static MemoryRecordsBuilder buildRetainedRecordsInto(RecordBatch origina for (Record record : retainedRecords) builder.append(record); - if (magic >= RecordBatch.MAGIC_VALUE_V2) + if (originalBatch.magic() >= RecordBatch.MAGIC_VALUE_V2) // we must preserve the last offset from the initial batch in order to ensure that the // last sequence number from the batch remains even after compaction. Otherwise, the producer // could incorrectly see an out of sequence error. diff --git a/clients/src/main/java/org/apache/kafka/common/record/MultiRecordsSend.java b/clients/src/main/java/org/apache/kafka/common/record/MultiRecordsSend.java index 929b16467c12d..ab9c892bac380 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MultiRecordsSend.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MultiRecordsSend.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.network.Send; import org.apache.kafka.common.network.TransferableChannel; @@ -25,8 +24,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import java.util.Queue; /** @@ -37,7 +34,6 @@ public class MultiRecordsSend implements Send { private final Queue sendQueue; private final long size; - private Map recordConversionStats; private long totalWritten = 0; private Send current; @@ -94,7 +90,6 @@ public long writeTo(TransferableChannel channel) throws IOException { totalWrittenPerCall += written; sendComplete = current.completed(); if (sendComplete) { - updateRecordConversionStats(current); current = sendQueue.poll(); } } while (!completed() && sendComplete); @@ -110,14 +105,6 @@ public long writeTo(TransferableChannel channel) throws IOException { return totalWrittenPerCall; } - /** - * Get any statistics that were recorded as part of executing this {@link MultiRecordsSend}. 
- * @return Records processing statistics (could be null if no statistics were collected) - */ - public Map recordConversionStats() { - return recordConversionStats; - } - @Override public String toString() { return "MultiRecordsSend(" + @@ -125,17 +112,4 @@ public String toString() { ", totalWritten=" + totalWritten + ')'; } - - private void updateRecordConversionStats(Send completedSend) { - // The underlying send might have accumulated statistics that need to be recorded. For example, - // LazyDownConversionRecordsSend accumulates statistics related to the number of bytes down-converted, the amount - // of temporary memory used for down-conversion, etc. Pull out any such statistics from the underlying send - // and fold it up appropriately. - if (completedSend instanceof LazyDownConversionRecordsSend) { - if (recordConversionStats == null) - recordConversionStats = new HashMap<>(); - LazyDownConversionRecordsSend lazyRecordsSend = (LazyDownConversionRecordsSend) completedSend; - recordConversionStats.put(lazyRecordsSend.topicPartition(), lazyRecordsSend.recordConversionStats()); - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index 87a7a82686951..f2a7c5ee63c79 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -350,6 +350,12 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return DeleteShareGroupStateRequest.parse(buffer, apiVersion); case READ_SHARE_GROUP_STATE_SUMMARY: return ReadShareGroupStateSummaryRequest.parse(buffer, apiVersion); + case STREAMS_GROUP_HEARTBEAT: + return StreamsGroupHeartbeatRequest.parse(buffer, apiVersion); + case STREAMS_GROUP_DESCRIBE: + return StreamsGroupDescribeRequest.parse(buffer, apiVersion); + case DESCRIBE_SHARE_GROUP_OFFSETS: + return DescribeShareGroupOffsetsRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 83f29471ba3d4..8f344cb718e87 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -287,6 +287,12 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return DeleteShareGroupStateResponse.parse(responseBuffer, version); case READ_SHARE_GROUP_STATE_SUMMARY: return ReadShareGroupStateSummaryResponse.parse(responseBuffer, version); + case STREAMS_GROUP_HEARTBEAT: + return StreamsGroupHeartbeatResponse.parse(responseBuffer, version); + case STREAMS_GROUP_DESCRIBE: + return StreamsGroupDescribeResponse.parse(responseBuffer, version); + case DESCRIBE_SHARE_GROUP_OFFSETS: + return DescribeShareGroupOffsetsResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java index 142210f765d01..8b5031f2a68a2 
100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java @@ -68,14 +68,12 @@ public static class Builder extends AbstractRequest.Builder activeControllerApiVersions, boolean enableUnstableLastVersion, boolean clientTelemetryEnabled ) { ApiVersionCollection apiKeys = new ApiVersionCollection(); for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) { - if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) { - final Optional brokerApiVersion = apiKey.toApiVersion(enableUnstableLastVersion); - if (!brokerApiVersion.isPresent()) { - // Broker does not support this API key. - continue; - } + final Optional brokerApiVersion = apiKey.toApiVersion(enableUnstableLastVersion); + if (brokerApiVersion.isEmpty()) { + // Broker does not support this API key. + continue; + } - // Skip telemetry APIs if client telemetry is disabled. - if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled) - continue; + // Skip telemetry APIs if client telemetry is disabled. + if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled) + continue; - final ApiVersion finalApiVersion; - if (!apiKey.forwardable) { - finalApiVersion = brokerApiVersion.get(); + final ApiVersion finalApiVersion; + if (!apiKey.forwardable) { + finalApiVersion = brokerApiVersion.get(); + } else { + Optional intersectVersion = intersect( + brokerApiVersion.get(), + activeControllerApiVersions.getOrDefault(apiKey, null) + ); + if (intersectVersion.isPresent()) { + finalApiVersion = intersectVersion.get(); } else { - Optional intersectVersion = intersect( - brokerApiVersion.get(), - activeControllerApiVersions.getOrDefault(apiKey, null) - ); - if (intersectVersion.isPresent()) { - finalApiVersion = intersectVersion.get(); - } else { - // Controller doesn't support this API key, or there is no intersection. - continue; - } + // Controller doesn't support this API key, or there is no intersection. + continue; } - - apiKeys.add(finalApiVersion.duplicate()); } + + apiKeys.add(finalApiVersion.duplicate()); } return apiKeys; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java index 937110efcad4d..5b09131d49470 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common.requests; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; @@ -45,6 +46,10 @@ public class ConsumerGroupHeartbeatRequest extends AbstractRequest { */ public static final int CONSUMER_GENERATED_MEMBER_ID_REQUIRED_VERSION = 1; + public static final String REGEX_RESOLUTION_NOT_SUPPORTED_MSG = "The cluster does not support " + + "regular expressions resolution on ConsumerGroupHeartbeat API version 0. 
It must be upgraded to use " + + "ConsumerGroupHeartbeat API version >= 1 to allow to subscribe to a SubscriptionPattern."; + public static class Builder extends AbstractRequest.Builder { private final ConsumerGroupHeartbeatRequestData data; @@ -59,6 +64,9 @@ public Builder(ConsumerGroupHeartbeatRequestData data, boolean enableUnstableLas @Override public ConsumerGroupHeartbeatRequest build(short version) { + if (version == 0 && data.subscribedTopicRegex() != null) { + throw new UnsupportedVersionException(REGEX_RESOLUTION_NOT_SUPPORTED_MSG); + } return new ConsumerGroupHeartbeatRequest(data, version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java index 4964a8a8a9d17..7c892874214e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java @@ -39,7 +39,7 @@ public DescribeClusterResponse(DescribeClusterResponseData data) { public Map nodes() { return data.brokers().valuesList().stream() - .map(b -> new Node(b.brokerId(), b.host(), b.port(), b.rack())) + .map(b -> new Node(b.brokerId(), b.host(), b.port(), b.rack(), b.isFenced())) .collect(Collectors.toMap(Node::id, Function.identity())); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java index cb128b8b42cf1..fa4070622b37d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.clients.admin.ConfigEntry; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.DescribeConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; @@ -225,29 +224,6 @@ public DescribeConfigsResponse(DescribeConfigsResponseData data) { this.data = data; } - // This constructor should only be used after deserialization, it has special handling for version 0 - private DescribeConfigsResponse(DescribeConfigsResponseData data, short version) { - super(ApiKeys.DESCRIBE_CONFIGS); - this.data = data; - if (version == 0) { - for (DescribeConfigsResponseData.DescribeConfigsResult result : data.results()) { - for (DescribeConfigsResponseData.DescribeConfigsResourceResult config : result.configs()) { - if (config.isDefault()) { - config.setConfigSource(ConfigSource.DEFAULT_CONFIG.id); - } else { - if (result.resourceType() == ConfigResource.Type.BROKER.id()) { - config.setConfigSource(ConfigSource.STATIC_BROKER_CONFIG.id); - } else if (result.resourceType() == ConfigResource.Type.TOPIC.id()) { - config.setConfigSource(ConfigSource.TOPIC_CONFIG.id); - } else { - config.setConfigSource(ConfigSource.UNKNOWN.id); - } - } - } - } - } - } - @Override public DescribeConfigsResponseData data() { return data; @@ -273,7 +249,7 @@ public Map errorCounts() { } public static DescribeConfigsResponse parse(ByteBuffer buffer, short version) { - return new DescribeConfigsResponse(new DescribeConfigsResponseData(new ByteBufferAccessor(buffer), version), version); + return new DescribeConfigsResponse(new DescribeConfigsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git 
a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java index bd341e19f9a29..b02480553397b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java @@ -110,6 +110,14 @@ public static DescribedGroup groupError(String groupId, Errors error) { DescribeGroupsResponse.UNKNOWN_PROTOCOL, Collections.emptyList(), AUTHORIZED_OPERATIONS_OMITTED); } + public static DescribedGroup groupError(String groupId, Errors error, String errorMessage) { + return new DescribedGroup() + .setGroupId(groupId) + .setGroupState(DescribeGroupsResponse.UNKNOWN_STATE) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); + } + @Override public DescribeGroupsResponseData data() { return data; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java index 0177b2f0f5535..b2245d3edce95 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.DescribeLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ByteBufferAccessor; @@ -69,71 +68,6 @@ public static DescribeLogDirsResponse parse(ByteBuffer buffer, short version) { return new DescribeLogDirsResponse(new DescribeLogDirsResponseData(new ByteBufferAccessor(buffer), version)); } - // Note this class is part of the public API, reachable from Admin.describeLogDirs() - /** - * Possible error code: - * - * KAFKA_STORAGE_ERROR (56) - * UNKNOWN (-1) - * - * @deprecated Deprecated Since Kafka 2.7. - * Use {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#descriptions()} - * and {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#allDescriptions()} to access the replacement - * class {@link org.apache.kafka.clients.admin.LogDirDescription}. - */ - @Deprecated - public static class LogDirInfo { - public final Errors error; - public final Map replicaInfos; - - public LogDirInfo(Errors error, Map replicaInfos) { - this.error = error; - this.replicaInfos = replicaInfos; - } - - @Override - public String toString() { - return "(error=" + - error + - ", replicas=" + - replicaInfos + - ")"; - } - } - - // Note this class is part of the public API, reachable from Admin.describeLogDirs() - - /** - * @deprecated Deprecated Since Kafka 2.7. - * Use {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#descriptions()} - * and {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#allDescriptions()} to access the replacement - * class {@link org.apache.kafka.clients.admin.ReplicaInfo}. 
- */ - @Deprecated - public static class ReplicaInfo { - - public final long size; - public final long offsetLag; - public final boolean isFuture; - - public ReplicaInfo(long size, long offsetLag, boolean isFuture) { - this.size = size; - this.offsetLag = offsetLag; - this.isFuture = isFuture; - } - - @Override - public String toString() { - return "(size=" + - size + - ", offsetLag=" + - offsetLag + - ", isFuture=" + - isFuture + - ")"; - } - } - @Override public boolean shouldClientThrottle(short version) { return version >= 1; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java new file mode 100644 index 0000000000000..072b16e944362 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsRequest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData; +import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class DescribeShareGroupOffsetsRequest extends AbstractRequest { + public static class Builder extends AbstractRequest.Builder { + + private final DescribeShareGroupOffsetsRequestData data; + + public Builder(DescribeShareGroupOffsetsRequestData data) { + this(data, false); + } + + public Builder(DescribeShareGroupOffsetsRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS, enableUnstableLastVersion); + this.data = data; + } + + @Override + public DescribeShareGroupOffsetsRequest build(short version) { + return new DescribeShareGroupOffsetsRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final DescribeShareGroupOffsetsRequestData data; + + public DescribeShareGroupOffsetsRequest(DescribeShareGroupOffsetsRequestData data, short version) { + super(ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS, version); + this.data = data; + } + + @Override + public DescribeShareGroupOffsetsResponse getErrorResponse(int throttleTimeMs, Throwable e) { + List results = new ArrayList<>(); + data.topics().forEach( + topicResult -> results.add(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic() + .setTopicName(topicResult.topicName()) + .setPartitions(topicResult.partitions().stream() + 
.map(partitionData -> new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition() + .setPartitionIndex(partitionData) + .setErrorCode(Errors.forException(e).code())) + .collect(Collectors.toList())))); + return new DescribeShareGroupOffsetsResponse(new DescribeShareGroupOffsetsResponseData() + .setResponses(results)); + } + + @Override + public DescribeShareGroupOffsetsRequestData data() { + return data; + } + + public static DescribeShareGroupOffsetsRequest parse(ByteBuffer buffer, short version) { + return new DescribeShareGroupOffsetsRequest( + new DescribeShareGroupOffsetsRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsResponse.java new file mode 100644 index 0000000000000..183cdb14113de --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeShareGroupOffsetsResponse.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +public class DescribeShareGroupOffsetsResponse extends AbstractResponse { + private final DescribeShareGroupOffsetsResponseData data; + + public DescribeShareGroupOffsetsResponse(DescribeShareGroupOffsetsResponseData data) { + super(ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS); + this.data = data; + } + + @Override + public DescribeShareGroupOffsetsResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + Map counts = new HashMap<>(); + data.responses().forEach( + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) + ); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static DescribeShareGroupOffsetsResponse parse(ByteBuffer buffer, short version) { + return new DescribeShareGroupOffsetsResponse( + new DescribeShareGroupOffsetsResponseData(new ByteBufferAccessor(buffer), version) + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java index a9ab8a7f4e9d6..fb2a5a3c87b7a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java @@ -202,10 +202,6 @@ public static FetchResponseData.PartitionData partitionResponse(int partition, E /** * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. * - * If this response was deserialized after a fetch, this method should never fail. An example where this would - * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and - * sent on the wire). - * * @param partition partition data * @return Records or empty record if the records in PartitionData is null. 
*/ diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java index db4f8749e4483..7b61ca847ffe3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java @@ -30,7 +30,6 @@ import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -137,7 +136,6 @@ private ListOffsetsRequest(ListOffsetsRequestData data, short version) { @Override public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { - short versionId = version(); short errorCode = Errors.forException(e).code(); List responses = new ArrayList<>(); @@ -148,12 +146,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { ListOffsetsPartitionResponse partitionResponse = new ListOffsetsPartitionResponse() .setErrorCode(errorCode) .setPartitionIndex(partition.partitionIndex()); - if (versionId == 0) { - partitionResponse.setOldStyleOffsets(Collections.emptyList()); - } else { - partitionResponse.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) - .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP); - } + partitionResponse.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) + .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP); partitions.add(partitionResponse); } topicResponse.setPartitions(partitions); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java index 48609b1666c63..2e60e04b2aa06 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java @@ -156,6 +156,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { } responseData.setThrottleTimeMs(throttleTimeMs); + responseData.setErrorCode(error.code()); return new MetadataResponse(responseData, true); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java index d7d9a6c3ba4de..3a7e4f276d9dc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java @@ -104,6 +104,10 @@ public Map errors() { return errors; } + public Errors topLevelError() { + return Errors.forCode(data.errorCode()); + } + /** * Get a map of the topicIds which had metadata errors * @return the map diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java index f9f4f23c00aa1..aced8fc57ffa4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java @@ -65,11 +65,12 @@ public static Builder forConsumer(OffsetForLeaderTopicCollection epochsByPartiti return new Builder((short) 3, ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(), data); } - public static Builder forFollower(short version, OffsetForLeaderTopicCollection epochsByPartition, int replicaId) { + public static Builder forFollower(OffsetForLeaderTopicCollection 
epochsByPartition, int replicaId) { OffsetForLeaderEpochRequestData data = new OffsetForLeaderEpochRequestData(); data.setReplicaId(replicaId); data.setTopics(epochsByPartition); - return new Builder(version, version, data); + // If we introduce new versions, we should gate them behind the appropriate metadata version + return new Builder((short) 4, (short) 4, data); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java index 3f2679c95aa72..8fbd86cb9bb43 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java @@ -40,26 +40,18 @@ import static org.apache.kafka.common.requests.ProduceResponse.INVALID_OFFSET; public class ProduceRequest extends AbstractRequest { + public static final short LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 = 11; - public static Builder forMagic(byte magic, ProduceRequestData data) { - // Message format upgrades correspond with a bump in the produce request version. Older - // message format versions are generally not supported by the produce request versions - // following the bump. - - final short minVersion; - final short maxVersion; - if (magic < RecordBatch.MAGIC_VALUE_V2) { - minVersion = 2; - maxVersion = 2; - } else { - minVersion = 3; - maxVersion = ApiKeys.PRODUCE.latestVersion(); - } - return new Builder(minVersion, maxVersion, data); + public static Builder builder(ProduceRequestData data, boolean useTransactionV1Version) { + // When we use transaction V1 protocol in transaction we set the request version upper limit to + // LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 so that the broker knows that we're using transaction protocol V1. + short maxVersion = useTransactionV1Version ? 
+ LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 : ApiKeys.PRODUCE.latestVersion(); + return new Builder(ApiKeys.PRODUCE.oldestVersion(), maxVersion, data); } - public static Builder forCurrentMagic(ProduceRequestData data) { - return forMagic(RecordBatch.CURRENT_MAGIC_VALUE, data); + public static Builder builder(ProduceRequestData data) { + return builder(data, false); } public static class Builder extends AbstractRequest.Builder { @@ -223,53 +215,33 @@ public void clearPartitionRecords() { } public static void validateRecords(short version, BaseRecords baseRecords) { - if (version >= 3) { - if (baseRecords instanceof Records) { - Records records = (Records) baseRecords; - Iterator iterator = records.batches().iterator(); - if (!iterator.hasNext()) - throw new InvalidRecordException("Produce requests with version " + version + " must have at least " + - "one record batch per partition"); - - RecordBatch entry = iterator.next(); - if (entry.magic() != RecordBatch.MAGIC_VALUE_V2) - throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + - "contain record batches with magic version 2"); - if (version < 7 && entry.compressionType() == CompressionType.ZSTD) { - throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to " + - "use ZStandard compression"); - } - - if (iterator.hasNext()) - throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + - "contain exactly one record batch per partition"); + if (baseRecords instanceof Records) { + Records records = (Records) baseRecords; + Iterator iterator = records.batches().iterator(); + if (!iterator.hasNext()) + throw new InvalidRecordException("Produce requests with version " + version + " must have at least " + + "one record batch per partition"); + + RecordBatch entry = iterator.next(); + if (entry.magic() != RecordBatch.MAGIC_VALUE_V2) + throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + + "contain record batches with magic version 2"); + if (version < 7 && entry.compressionType() == CompressionType.ZSTD) { + throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to " + + "use ZStandard compression"); } - } - // Note that we do not do similar validation for older versions to ensure compatibility with - // clients which send the wrong magic version in the wrong version of the produce request. The broker - // did not do this validation before, so we maintain that behavior here. 
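A rough illustration of the ProduceRequest version gating introduced above (a sketch, not part of the patch): callers that still speak transaction protocol V1 cap the request at LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2, and the broker side can detect the protocol from the negotiated version via isTransactionV2Requested. The empty ProduceRequestData below is a hypothetical placeholder.

import org.apache.kafka.common.message.ProduceRequestData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.ProduceRequest;

public class ProduceVersionGatingSketch {
    public static void main(String[] args) {
        // Hypothetical, empty payload; real callers populate topics, partitions and records.
        ProduceRequestData data = new ProduceRequestData();

        // Transaction V1 path: max version is capped at LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 (11).
        ProduceRequest.Builder v1Capped = ProduceRequest.builder(data, true);

        // Default path: the latest PRODUCE version is allowed.
        ProduceRequest.Builder latestAllowed = ProduceRequest.builder(data);

        // Broker-side check: any version above 11 implies the producer is on transaction protocol V2.
        System.out.println(ProduceRequest.isTransactionV2Requested(ApiKeys.PRODUCE.latestVersion()));   // true, assuming latest > 11
        System.out.println(ProduceRequest.isTransactionV2Requested(
                ProduceRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2));                              // false
    }
}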
+ if (iterator.hasNext()) + throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " + + "contain exactly one record batch per partition"); + } } public static ProduceRequest parse(ByteBuffer buffer, short version) { return new ProduceRequest(new ProduceRequestData(new ByteBufferAccessor(buffer), version), version); } - public static byte requiredMagicForVersion(short produceRequestVersion) { - if (produceRequestVersion < ApiKeys.PRODUCE.oldestVersion() || produceRequestVersion > ApiKeys.PRODUCE.latestVersion()) - throw new IllegalArgumentException("Magic value to use for produce request version " + - produceRequestVersion + " is not known"); - - switch (produceRequestVersion) { - case 0: - case 1: - return RecordBatch.MAGIC_VALUE_V0; - - case 2: - return RecordBatch.MAGIC_VALUE_V1; - - default: - return RecordBatch.MAGIC_VALUE_V2; - } + public static boolean isTransactionV2Requested(short version) { + return version > LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java index 27daa78967d35..dcec662e6a071 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java @@ -64,16 +64,16 @@ public ReadShareGroupStateSummaryRequest(ReadShareGroupStateSummaryRequestData d public ReadShareGroupStateSummaryResponse getErrorResponse(int throttleTimeMs, Throwable e) { List results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new ReadShareGroupStateSummaryResponse(new ReadShareGroupStateSummaryResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -83,8 +83,8 @@ public ReadShareGroupStateSummaryRequestData data() { public static ReadShareGroupStateSummaryRequest parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateSummaryRequest( - new ReadShareGroupStateSummaryRequestData(new ByteBufferAccessor(buffer), version), - version + new ReadShareGroupStateSummaryRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java index 77c1dac65a1b8..0374e7759433c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java +++ 
b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.requests; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ByteBufferAccessor; @@ -24,6 +25,7 @@ import java.nio.ByteBuffer; import java.util.HashMap; +import java.util.List; import java.util.Map; public class ReadShareGroupStateSummaryResponse extends AbstractResponse { @@ -43,9 +45,9 @@ public ReadShareGroupStateSummaryResponseData data() { public Map errorCounts() { Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -59,9 +61,64 @@ public int throttleTimeMs() { public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } + public static ReadShareGroupStateSummaryResponse parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateSummaryResponse( - new ReadShareGroupStateSummaryResponseData(new ByteBufferAccessor(buffer), version) + new ReadShareGroupStateSummaryResponseData(new ByteBufferAccessor(buffer), version) ); } + + public static ReadShareGroupStateSummaryResponseData toErrorResponseData( + Uuid topicId, + int partitionId, + Errors error, + String errorMessage + ) { + return new ReadShareGroupStateSummaryResponseData().setResults( + List.of(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage))))); + } + + public static ReadShareGroupStateSummaryResponseData.PartitionResult toErrorResponsePartitionResult( + int partitionId, + Errors error, + String errorMessage + ) { + return new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); + } + + public static ReadShareGroupStateSummaryResponseData toResponseData( + Uuid topicId, + int partition, + long startOffset, + int stateEpoch + ) { + return new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setStartOffset(startOffset) + .setStateEpoch(stateEpoch) + )) + )); + } + + public static ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult toResponseReadStateSummaryResult( + Uuid topicId, + List partitionResults + ) { + return new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(partitionResults); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java b/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java index ece4a13fd88d0..c15fa960abf12 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java @@ -151,7 +151,7 @@ public ByteBuffer 
buildResponseEnvelopePayload(AbstractResponse body) { } private boolean isUnsupportedApiVersionsRequest() { - return header.apiKey() == API_VERSIONS && !API_VERSIONS.isVersionSupported(header.apiVersion()); + return header.apiKey() == API_VERSIONS && !header.isApiVersionSupported(); } public short apiVersion() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RequestHeader.java b/clients/src/main/java/org/apache/kafka/common/requests/RequestHeader.java index 5bcce29a888ae..45063b816bc4c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RequestHeader.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RequestHeader.java @@ -108,6 +108,14 @@ public int size() { return size; } + public boolean isApiVersionSupported() { + return apiKey().isVersionSupported(apiVersion()); + } + + public boolean isApiVersionDeprecated() { + return apiKey().isVersionDeprecated(apiVersion()); + } + public ResponseHeader toResponseHeader() { return new ResponseHeader(data.correlationId(), apiKey().responseHeaderVersion(apiVersion())); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java b/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java index cc6e5a2303879..d434e6e7b185e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java @@ -16,6 +16,13 @@ */ package org.apache.kafka.common.requests; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; +import org.apache.kafka.common.errors.MismatchedEndpointTypeException; +import org.apache.kafka.common.errors.SecurityDisabledException; +import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; +import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Message; @@ -77,4 +84,14 @@ public static ByteBuffer serialize( writable.flip(); return writable.buffer(); } + + public static boolean isFatalException(Throwable e) { + return e instanceof AuthenticationException || + e instanceof AuthorizationException || + e instanceof MismatchedEndpointTypeException || + e instanceof SecurityDisabledException || + e instanceof UnsupportedVersionException || + e instanceof UnsupportedEndpointTypeException || + e instanceof UnsupportedForMessageFormatException; + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java index 7ed14b4bdb102..f1a5753fef1d8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -49,7 +49,7 @@ public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { } public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, - int maxWait, int minBytes, int maxBytes, int fetchSize, + int maxWait, int minBytes, int maxBytes, int fetchSize, int batchSize, List send, List forget, Map> acknowledgementsMap) { ShareFetchRequestData data = new ShareFetchRequestData(); @@ -67,6 +67,7 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, 
data.setMaxWaitMs(maxWait); data.setMinBytes(minBytes); data.setMaxBytes(maxBytes); + data.setBatchSize(batchSize); // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index Map> fetchMap = new HashMap<>(); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java index 58ce62f6c14cc..619e740029dfa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java @@ -123,10 +123,6 @@ public static ShareFetchResponse parse(ByteBuffer buffer, short version) { /** * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`. * - *
          If this response was deserialized after a share fetch, this method should never fail. An example where this would - * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and - * sent on the wire). - * * @param partition partition data * @return Records or empty record if the records in PartitionData is null. */ diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeRequest.java new file mode 100644 index 0000000000000..1f0c46fafe1c6 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeRequest.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.StreamsGroupDescribeRequestData; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.stream.Collectors; + +public class StreamsGroupDescribeRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + + private final StreamsGroupDescribeRequestData data; + + public Builder(StreamsGroupDescribeRequestData data) { + this(data, false); + } + + public Builder(StreamsGroupDescribeRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.STREAMS_GROUP_DESCRIBE, enableUnstableLastVersion); + this.data = data; + } + + @Override + public StreamsGroupDescribeRequest build(short version) { + return new StreamsGroupDescribeRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final StreamsGroupDescribeRequestData data; + + public StreamsGroupDescribeRequest(StreamsGroupDescribeRequestData data, short version) { + super(ApiKeys.STREAMS_GROUP_DESCRIBE, version); + this.data = data; + } + + @Override + public StreamsGroupDescribeResponse getErrorResponse(int throttleTimeMs, Throwable e) { + StreamsGroupDescribeResponseData data = new StreamsGroupDescribeResponseData() + .setThrottleTimeMs(throttleTimeMs); + // Set error for each group + this.data.groupIds().forEach( + groupId -> data.groups().add( + new StreamsGroupDescribeResponseData.DescribedGroup() + .setGroupId(groupId) + .setErrorCode(Errors.forException(e).code()) + ) + ); + return new StreamsGroupDescribeResponse(data); + } + + @Override + public StreamsGroupDescribeRequestData data() { + return data; + } + + public static StreamsGroupDescribeRequest parse(ByteBuffer 
buffer, short version) { + return new StreamsGroupDescribeRequest( + new StreamsGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } + + public static List getErrorDescribedGroupList( + List groupIds, + Errors error + ) { + return groupIds.stream() + .map(groupId -> new StreamsGroupDescribeResponseData.DescribedGroup() + .setGroupId(groupId) + .setErrorCode(error.code()) + ).collect(Collectors.toList()); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java new file mode 100644 index 0000000000000..83db6700a4a2d --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupDescribeResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/** + * Possible error codes. 
+ * + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#NOT_COORDINATOR} + * - {@link Errors#COORDINATOR_NOT_AVAILABLE} + * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#INVALID_GROUP_ID} + * - {@link Errors#GROUP_ID_NOT_FOUND} + */ +public class StreamsGroupDescribeResponse extends AbstractResponse { + + private final StreamsGroupDescribeResponseData data; + + public StreamsGroupDescribeResponse(StreamsGroupDescribeResponseData data) { + super(ApiKeys.STREAMS_GROUP_DESCRIBE); + this.data = data; + } + + @Override + public StreamsGroupDescribeResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + HashMap counts = new HashMap<>(); + data.groups().forEach( + group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) + ); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static StreamsGroupDescribeResponse parse(ByteBuffer buffer, short version) { + return new StreamsGroupDescribeResponse( + new StreamsGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatRequest.java new file mode 100644 index 0000000000000..51ef4069089e6 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatRequest.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.StreamsGroupHeartbeatRequestData; +import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; + +public class StreamsGroupHeartbeatRequest extends AbstractRequest { + + /** + * A member epoch of -1 means that the member wants to leave the group. + */ + public static final int LEAVE_GROUP_MEMBER_EPOCH = -1; + public static final int LEAVE_GROUP_STATIC_MEMBER_EPOCH = -2; + + /** + * A member epoch of 0 means that the member wants to join the group. 
+ */ + public static final int JOIN_GROUP_MEMBER_EPOCH = 0; + + public static class Builder extends AbstractRequest.Builder { + private final StreamsGroupHeartbeatRequestData data; + + public Builder(StreamsGroupHeartbeatRequestData data) { + this(data, false); + } + + public Builder(StreamsGroupHeartbeatRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.STREAMS_GROUP_HEARTBEAT, enableUnstableLastVersion); + this.data = data; + } + + @Override + public StreamsGroupHeartbeatRequest build(short version) { + return new StreamsGroupHeartbeatRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final StreamsGroupHeartbeatRequestData data; + + public StreamsGroupHeartbeatRequest(StreamsGroupHeartbeatRequestData data, short version) { + super(ApiKeys.STREAMS_GROUP_HEARTBEAT, version); + this.data = data; + } + + @Override + public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { + return new StreamsGroupHeartbeatResponse( + new StreamsGroupHeartbeatResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(Errors.forException(e).code()) + ); + } + + @Override + public StreamsGroupHeartbeatRequestData data() { + return data; + } + + public static StreamsGroupHeartbeatRequest parse(ByteBuffer buffer, short version) { + return new StreamsGroupHeartbeatRequest(new StreamsGroupHeartbeatRequestData( + new ByteBufferAccessor(buffer), version), version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java new file mode 100644 index 0000000000000..760d1e33d224c --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/StreamsGroupHeartbeatResponse.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Map; + +/** + * Possible error codes. 
+ * + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#NOT_COORDINATOR} + * - {@link Errors#COORDINATOR_NOT_AVAILABLE} + * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#UNKNOWN_MEMBER_ID} + * - {@link Errors#FENCED_MEMBER_EPOCH} + * - {@link Errors#UNRELEASED_INSTANCE_ID} + * - {@link Errors#GROUP_MAX_SIZE_REACHED} + * - {@link Errors#GROUP_ID_NOT_FOUND} + * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} + * - {@link Errors#CLUSTER_AUTHORIZATION_FAILED} + * - {@link Errors#STREAMS_INVALID_TOPOLOGY} + * - {@link Errors#STREAMS_INVALID_TOPOLOGY_EPOCH} + * - {@link Errors#STREAMS_TOPOLOGY_FENCED} + */ +public class StreamsGroupHeartbeatResponse extends AbstractResponse { + private final StreamsGroupHeartbeatResponseData data; + + public StreamsGroupHeartbeatResponse(StreamsGroupHeartbeatResponseData data) { + super(ApiKeys.STREAMS_GROUP_HEARTBEAT); + this.data = data; + } + + @Override + public StreamsGroupHeartbeatResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static StreamsGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { + return new StreamsGroupHeartbeatResponse(new StreamsGroupHeartbeatResponseData( + new ByteBufferAccessor(buffer), version)); + } + + public enum Status { + STALE_TOPOLOGY((byte) 0, "The topology epoch supplied is inconsistent with the topology for this streams group."), + MISSING_SOURCE_TOPICS((byte) 1, "One or more source topics are missing or a source topic regex resolves to zero topics."), + INCORRECTLY_PARTITIONED_TOPICS((byte) 2, "One or more topics expected to be copartitioned are not copartitioned."), + MISSING_INTERNAL_TOPICS((byte) 3, "One or more internal topics are missing."), + SHUTDOWN_APPLICATION((byte) 4, "A client requested the shutdown of the whole application."); + + private final byte code; + private final String message; + + Status(final byte code, final String message) { + this.code = code; + this.message = message; + } + + public byte code() { + return code; + } + + public String message() { + return message; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java index e79b3bbc7b3be..14d1665a8dcd9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java @@ -39,19 +39,21 @@ import java.util.stream.Collectors; public class TxnOffsetCommitRequest extends AbstractRequest { + public static final short LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 = 4; private final TxnOffsetCommitRequestData data; public static class Builder extends AbstractRequest.Builder { public final TxnOffsetCommitRequestData data; - + public final boolean isTransactionV2Enabled; public Builder(final String transactionalId, final String consumerGroupId, final long producerId, final short producerEpoch, - final Map pendingTxnOffsetCommits) { + final Map pendingTxnOffsetCommits, + final boolean isTransactionV2Enabled) { this(transactionalId, consumerGroupId, producerId, @@ -59,7 +61,8 @@ public Builder(final String 
transactionalId, pendingTxnOffsetCommits, JoinGroupRequest.UNKNOWN_MEMBER_ID, JoinGroupRequest.UNKNOWN_GENERATION_ID, - Optional.empty()); + Optional.empty(), + isTransactionV2Enabled); } public Builder(final String transactionalId, @@ -69,22 +72,25 @@ public Builder(final String transactionalId, final Map pendingTxnOffsetCommits, final String memberId, final int generationId, - final Optional groupInstanceId) { + final Optional groupInstanceId, + final boolean isTransactionV2Enabled) { super(ApiKeys.TXN_OFFSET_COMMIT); + this.isTransactionV2Enabled = isTransactionV2Enabled; this.data = new TxnOffsetCommitRequestData() - .setTransactionalId(transactionalId) - .setGroupId(consumerGroupId) - .setProducerId(producerId) - .setProducerEpoch(producerEpoch) - .setTopics(getTopics(pendingTxnOffsetCommits)) - .setMemberId(memberId) - .setGenerationId(generationId) - .setGroupInstanceId(groupInstanceId.orElse(null)); + .setTransactionalId(transactionalId) + .setGroupId(consumerGroupId) + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setTopics(getTopics(pendingTxnOffsetCommits)) + .setMemberId(memberId) + .setGenerationId(generationId) + .setGroupInstanceId(groupInstanceId.orElse(null)); } public Builder(final TxnOffsetCommitRequestData data) { super(ApiKeys.TXN_OFFSET_COMMIT); this.data = data; + this.isTransactionV2Enabled = true; } @Override @@ -93,6 +99,9 @@ public TxnOffsetCommitRequest build(short version) { throw new UnsupportedVersionException("Broker doesn't support group metadata commit API on version " + version + ", minimum supported request version is 3 which requires brokers to be on version 2.5 or above."); } + if (!isTransactionV2Enabled) { + version = (short) Math.min(version, LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2); + } return new TxnOffsetCommitRequest(data, version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java index 531c33b5f83dd..619b5bd78e50c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java @@ -71,10 +71,11 @@ public static VoteRequest parse(ByteBuffer buffer, short version) { public static VoteRequestData singletonRequest(TopicPartition topicPartition, String clusterId, - int candidateEpoch, - int candidateId, + int replicaEpoch, + int replicaId, int lastEpoch, - long lastEpochEndOffset) { + long lastEpochEndOffset, + boolean preVote) { return new VoteRequestData() .setClusterId(clusterId) .setTopics(Collections.singletonList( @@ -83,10 +84,11 @@ public static VoteRequestData singletonRequest(TopicPartition topicPartition, .setPartitions(Collections.singletonList( new VoteRequestData.PartitionData() .setPartitionIndex(topicPartition.partition()) - .setCandidateEpoch(candidateEpoch) - .setCandidateId(candidateId) + .setReplicaEpoch(replicaEpoch) + .setReplicaId(replicaId) .setLastOffsetEpoch(lastEpoch) - .setLastOffset(lastEpochEndOffset)) + .setLastOffset(lastEpochEndOffset) + .setPreVote(preVote)) ))); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java index 73a6b6c41d71d..f68435fb4c550 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java @@ -110,8 +110,8 @@ public 
Builder(WriteTxnMarkersRequestData data) { this.data = data; } - public Builder(short version, final List markers) { - super(ApiKeys.WRITE_TXN_MARKERS, version); + public Builder(final List markers) { + super(ApiKeys.WRITE_TXN_MARKERS, (short) 1); // if we add new versions, gate them behind metadata version List dataMarkers = new ArrayList<>(); for (TxnMarkerEntry marker : markers) { final Map topicMap = new HashMap<>(); diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java index 6211c5355cc22..029b6881fdb58 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java @@ -23,9 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -177,7 +175,7 @@ public JaasContext(String name, Type type, Configuration configuration, Password AppConfigurationEntry[] entries = configuration.getAppConfigurationEntry(name); if (entries == null) throw new IllegalArgumentException("Could not find a '" + name + "' entry in this JAAS configuration."); - this.configurationEntries = Collections.unmodifiableList(new ArrayList<>(Arrays.asList(entries))); + this.configurationEntries = List.of(entries); this.dynamicJaasConfig = dynamicJaasConfig; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java index 742319c4f49a3..cfbca0c6d6185 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java @@ -16,67 +16,13 @@ */ package org.apache.kafka.common.security; -import org.apache.kafka.common.KafkaException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.security.auth.login.Configuration; - public final class JaasUtils { - private static final Logger LOG = LoggerFactory.getLogger(JaasUtils.class); public static final String JAVA_LOGIN_CONFIG_PARAM = "java.security.auth.login.config"; public static final String DISALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.disallowed.login.modules"; - public static final String DISALLOWED_LOGIN_MODULES_DEFAULT = "com.sun.security.auth.module.JndiLoginModule"; + public static final String DISALLOWED_LOGIN_MODULES_DEFAULT = + "com.sun.security.auth.module.JndiLoginModule,com.sun.security.auth.module.LdapLoginModule"; public static final String SERVICE_NAME = "serviceName"; - public static final String ZK_SASL_CLIENT = "zookeeper.sasl.client"; - public static final String ZK_LOGIN_CONTEXT_NAME_KEY = "zookeeper.sasl.clientconfig"; - - private static final String DEFAULT_ZK_LOGIN_CONTEXT_NAME = "Client"; - private static final String DEFAULT_ZK_SASL_CLIENT = "true"; - private JaasUtils() {} - public static String zkSecuritySysConfigString() { - String loginConfig = System.getProperty(JAVA_LOGIN_CONFIG_PARAM); - String clientEnabled = System.getProperty(ZK_SASL_CLIENT, "default:" + DEFAULT_ZK_SASL_CLIENT); - String contextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, "default:" + DEFAULT_ZK_LOGIN_CONTEXT_NAME); - return "[" + - JAVA_LOGIN_CONFIG_PARAM + "=" + loginConfig + - ", " + - ZK_SASL_CLIENT + "=" + clientEnabled + - ", " + - ZK_LOGIN_CONTEXT_NAME_KEY + "=" + contextName + - "]"; 
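The JaasUtils change above adds LdapLoginModule to the default list of login modules that Kafka refuses to load from JAAS configurations. A minimal sketch (not part of the patch) of how a deployment might inspect or override that list through the system property named by DISALLOWED_LOGIN_MODULES_CONFIG; the override value shown is hypothetical and would normally be supplied with -D at JVM startup.

import org.apache.kafka.common.security.JaasUtils;

public class DisallowedLoginModulesSketch {
    public static void main(String[] args) {
        // Shipped default: a comma-separated list of fully qualified login module class names.
        System.out.println(JaasUtils.DISALLOWED_LOGIN_MODULES_DEFAULT);

        // Hypothetical override for this JVM only; normally passed as a -D flag at startup.
        System.setProperty(JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG,
                "com.sun.security.auth.module.JndiLoginModule");
    }
}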
- } - - public static boolean isZkSaslEnabled() { - // Technically a client must also check if TLS mutual authentication has been configured, - // but we will leave that up to the client code to determine since direct connectivity to ZooKeeper - // has been deprecated in many clients and we don't wish to re-introduce a ZooKeeper jar dependency here. - boolean zkSaslEnabled = Boolean.parseBoolean(System.getProperty(ZK_SASL_CLIENT, DEFAULT_ZK_SASL_CLIENT)); - String zkLoginContextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, DEFAULT_ZK_LOGIN_CONTEXT_NAME); - - LOG.debug("Checking login config for Zookeeper JAAS context {}", zkSecuritySysConfigString()); - - boolean foundLoginConfigEntry; - try { - Configuration loginConf = Configuration.getConfiguration(); - foundLoginConfigEntry = loginConf.getAppConfigurationEntry(zkLoginContextName) != null; - } catch (Exception e) { - throw new KafkaException("Exception while loading Zookeeper JAAS login context " + - zkSecuritySysConfigString(), e); - } - - if (foundLoginConfigEntry && !zkSaslEnabled) { - LOG.error("JAAS configuration is present, but system property " + - ZK_SASL_CLIENT + " is set to false, which disables " + - "SASL in the ZooKeeper client"); - throw new KafkaException("Exception while determining if ZooKeeper is secure " + - zkSecuritySysConfigString()); - } - - return foundLoginConfigEntry; - } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java b/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java index 8156c6fe23199..3210c859baa27 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/SaslExtensions.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.security.auth; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.StringJoiner; @@ -51,7 +50,7 @@ public class SaslExtensions { private final Map extensionsMap; public SaslExtensions(Map extensionsMap) { - this.extensionsMap = Collections.unmodifiableMap(new HashMap<>(extensionsMap)); + this.extensionsMap = Map.copyOf(extensionsMap); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java index 45169feb6570d..addacd92722c8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java @@ -177,7 +177,6 @@ public SaslClientAuthenticator(Map configs, String servicePrincipal, String host, String mechanism, - boolean handshakeRequestEnable, TransportLayer transportLayer, Time time, LogContext logContext) { @@ -196,7 +195,7 @@ public SaslClientAuthenticator(Map configs, this.reauthInfo = new ReauthInfo(); try { - setSaslState(handshakeRequestEnable ? SaslState.SEND_APIVERSIONS_REQUEST : SaslState.INITIAL); + setSaslState(SaslState.SEND_APIVERSIONS_REQUEST); // determine client principal from subject for Kerberos to use as authorization id for the SaslClient. 
// For other mechanisms, the authenticated principal (username for PLAIN and SCRAM) is used as diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index b8fb7299a8a43..e2ebaa31cd260 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -106,7 +106,7 @@ public class SaslServerAuthenticator implements Authenticator { * state and likewise ends at either {@link #COMPLETE} or {@link #FAILED}. */ private enum SaslState { - INITIAL_REQUEST, // May be GSSAPI token, SaslHandshake or ApiVersions for authentication + INITIAL_REQUEST, // May be SaslHandshake or ApiVersions for authentication HANDSHAKE_OR_VERSIONS_REQUEST, // May be SaslHandshake or ApiVersions HANDSHAKE_REQUEST, // After an ApiVersions request, next request must be SaslHandshake AUTHENTICATE, // Authentication tokens (SaslHandshake v1 and above indicate SaslAuthenticate headers) @@ -277,15 +277,11 @@ public void authenticate() throws IOException { case REAUTH_PROCESS_HANDSHAKE: case HANDSHAKE_OR_VERSIONS_REQUEST: case HANDSHAKE_REQUEST: + case INITIAL_REQUEST: handleKafkaRequest(clientToken); break; case REAUTH_BAD_MECHANISM: throw new SaslAuthenticationException(reauthInfo.badMechanismErrorMessage); - case INITIAL_REQUEST: - if (handleKafkaRequest(clientToken)) - break; - // For default GSSAPI, fall through to authenticate using the client token as the first GSSAPI packet. - // This is required for interoperability with 0.9.0.x clients which do not send handshake request case AUTHENTICATE: handleSaslToken(clientToken); // When the authentication exchange is complete and no more tokens are expected from the client, @@ -438,7 +434,6 @@ private void handleSaslToken(byte[] clientToken) throws IOException { ByteBuffer requestBuffer = ByteBuffer.wrap(clientToken); RequestHeader header = RequestHeader.parse(requestBuffer); ApiKeys apiKey = header.apiKey(); - short version = header.apiVersion(); RequestContext requestContext = new RequestContext(header, connectionId, clientAddress(), Optional.of(clientPort()), KafkaPrincipal.ANONYMOUS, listenerName, securityProtocol, ClientInformation.EMPTY, false); RequestAndSize requestAndSize = requestContext.parseRequest(requestBuffer); @@ -447,7 +442,8 @@ private void handleSaslToken(byte[] clientToken) throws IOException { buildResponseOnAuthenticateFailure(requestContext, requestAndSize.request.getErrorResponse(e)); throw e; } - if (!apiKey.isVersionSupported(version)) { + short version = header.apiVersion(); + if (!header.isApiVersionSupported()) { // We cannot create an error response if the request version of SaslAuthenticate is not supported // This should not normally occur since clients typically check supported versions using ApiVersionsRequest throw new UnsupportedVersionException("Version " + version + " is not supported for apiKey " + apiKey); @@ -503,63 +499,51 @@ private void handleSaslToken(byte[] clientToken) throws IOException { } } - private boolean handleKafkaRequest(byte[] requestBytes) throws IOException, AuthenticationException { - boolean isKafkaRequest = false; - String clientMechanism = null; + /** + * @throws InvalidRequestException if the request is not in Kafka format or if the API key is invalid. Clients + * that support SASL without support for KIP-43 (e.g. 
Kafka Clients 0.9.x) are in the former bucket - the first + * packet such clients send is a GSSAPI token starting with 0x60. + */ + private void handleKafkaRequest(byte[] requestBytes) throws IOException, AuthenticationException { try { ByteBuffer requestBuffer = ByteBuffer.wrap(requestBytes); RequestHeader header = RequestHeader.parse(requestBuffer); ApiKeys apiKey = header.apiKey(); - // A valid Kafka request header was received. SASL authentication tokens are now expected only - // following a SaslHandshakeRequest since this is not a GSSAPI client token from a Kafka 0.9.0.x client. - if (saslState == SaslState.INITIAL_REQUEST) - setSaslState(SaslState.HANDSHAKE_OR_VERSIONS_REQUEST); - isKafkaRequest = true; - // Raise an error prior to parsing if the api cannot be handled at this layer. This avoids // unnecessary exposure to some of the more complex schema types. if (apiKey != ApiKeys.API_VERSIONS && apiKey != ApiKeys.SASL_HANDSHAKE) - throw new IllegalSaslStateException("Unexpected Kafka request of type " + apiKey + " during SASL handshake."); + throw new InvalidRequestException("Unexpected Kafka request of type " + apiKey + " during SASL handshake."); LOG.debug("Handling Kafka request {} during {}", apiKey, reauthInfo.authenticationOrReauthenticationText()); - RequestContext requestContext = new RequestContext(header, connectionId, clientAddress(), Optional.of(clientPort()), KafkaPrincipal.ANONYMOUS, listenerName, securityProtocol, ClientInformation.EMPTY, false); RequestAndSize requestAndSize = requestContext.parseRequest(requestBuffer); + + // A valid Kafka request was received, we can now update the sasl state + if (saslState == SaslState.INITIAL_REQUEST) + setSaslState(SaslState.HANDSHAKE_OR_VERSIONS_REQUEST); + if (apiKey == ApiKeys.API_VERSIONS) handleApiVersionsRequest(requestContext, (ApiVersionsRequest) requestAndSize.request); - else - clientMechanism = handleHandshakeRequest(requestContext, (SaslHandshakeRequest) requestAndSize.request); + else { + String clientMechanism = handleHandshakeRequest(requestContext, (SaslHandshakeRequest) requestAndSize.request); + if (!reauthInfo.reauthenticating() || reauthInfo.saslMechanismUnchanged(clientMechanism)) { + createSaslServer(clientMechanism); + setSaslState(SaslState.AUTHENTICATE); + } + } } catch (InvalidRequestException e) { if (saslState == SaslState.INITIAL_REQUEST) { - // InvalidRequestException is thrown if the request is not in Kafka format or if the API key - // is invalid. For compatibility with 0.9.0.x where the first packet is a GSSAPI token - // starting with 0x60, revert to GSSAPI for both these exceptions. 
- if (LOG.isDebugEnabled()) { - StringBuilder tokenBuilder = new StringBuilder(); - for (byte b : requestBytes) { - tokenBuilder.append(String.format("%02x", b)); - if (tokenBuilder.length() >= 20) - break; - } - LOG.debug("Received client packet of length {} starting with bytes 0x{}, process as GSSAPI packet", requestBytes.length, tokenBuilder); - } - if (enabledMechanisms.contains(SaslConfigs.GSSAPI_MECHANISM)) { - LOG.debug("First client packet is not a SASL mechanism request, using default mechanism GSSAPI"); - clientMechanism = SaslConfigs.GSSAPI_MECHANISM; - } else - throw new UnsupportedSaslMechanismException("Exception handling first SASL packet from client, GSSAPI is not supported by server", e); - } else - throw e; - } - if (clientMechanism != null && (!reauthInfo.reauthenticating() - || reauthInfo.saslMechanismUnchanged(clientMechanism))) { - createSaslServer(clientMechanism); - setSaslState(SaslState.AUTHENTICATE); + // InvalidRequestException is thrown if the request is not in Kafka format or if the API key is invalid. + // If it's the initial request, this could be an ancient client (see method documentation for more details), + // a client configured with the wrong security protocol or a non kafka-client altogether (eg http client). + throw new InvalidRequestException("Invalid request, potential reasons: kafka client configured with the " + + "wrong security protocol, it does not support KIP-43 or it is not a kafka client.", e); + } + throw e; } - return isKafkaRequest; } private String handleHandshakeRequest(RequestContext context, SaslHandshakeRequest handshakeRequest) throws IOException, UnsupportedSaslMechanismException { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponse.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponse.java index 3e29841baa5b4..75c62b696cf73 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponse.java @@ -136,7 +136,7 @@ public byte[] toBytes() { /** * Return the always non-null token value * - * @return the always non-null toklen value + * @return the always non-null token value */ public String tokenValue() { return tokenValue; diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java index 6561f12f503a9..447678163b4e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -131,14 +130,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(incoming, offset, offset + len); + throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new 
IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(outgoing, offset, offset + len); + throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java index bf5c4723ee1a3..a60f33d0ef156 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java @@ -31,7 +31,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -134,14 +133,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(incoming, offset, offset + len); + throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(outgoing, offset, offset + len); + throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java index 5fa4c620d902f..e4b39e5cc53c6 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java @@ -19,8 +19,6 @@ import org.jose4j.keys.resolvers.VerificationKeyResolver; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -57,7 +55,7 @@ public static AccessTokenValidator create(Map configs, List l = cu.get(SASL_OAUTHBEARER_EXPECTED_AUDIENCE); if (l != null) - expectedAudiences = Collections.unmodifiableSet(new HashSet<>(l)); + expectedAudiences = Set.copyOf(l); Integer clockSkew = cu.validateInteger(SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, false); String expectedIssuer = cu.validateString(SASL_OAUTHBEARER_EXPECTED_ISSUER, false); diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java index a878ae7a2618e..fdc5707278a60 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.UnsupportedEncodingException; import java.net.HttpURLConnection; import java.net.URL; import java.net.URLEncoder; @@ -261,14 +260,14 @@ static String handleOutput(final HttpURLConnection 
con) throws IOException { ByteArrayOutputStream os = new ByteArrayOutputStream(); log.debug("handleOutput - preparing to read response body from {}", con.getURL()); copy(is, os); - responseBody = os.toString(StandardCharsets.UTF_8.name()); + responseBody = os.toString(StandardCharsets.UTF_8); } catch (Exception e) { // there still can be useful error response from the servers, lets get it try (InputStream is = con.getErrorStream()) { ByteArrayOutputStream os = new ByteArrayOutputStream(); log.debug("handleOutput - preparing to read error response body from {}", con.getURL()); copy(is, os); - errorResponseBody = os.toString(StandardCharsets.UTF_8.name()); + errorResponseBody = os.toString(StandardCharsets.UTF_8); } catch (Exception e2) { log.warn("handleOutput - error retrieving error information", e2); } @@ -354,15 +353,14 @@ static String parseAccessToken(String responseBody) throws IOException { return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue()); } - static String formatAuthorizationHeader(String clientId, String clientSecret, boolean urlencode) throws - UnsupportedEncodingException { + static String formatAuthorizationHeader(String clientId, String clientSecret, boolean urlencode) { clientId = sanitizeString("the token endpoint request client ID parameter", clientId); clientSecret = sanitizeString("the token endpoint request client secret parameter", clientSecret); // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 if (urlencode) { - clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8.name()); - clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8.name()); + clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8); + clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8); } String s = String.format("%s:%s", clientId, clientSecret); @@ -371,22 +369,17 @@ static String formatAuthorizationHeader(String clientId, String clientSecret, bo return String.format("Basic %s", encoded); } - static String formatRequestBody(String scope) throws IOException { - try { - StringBuilder requestParameters = new StringBuilder(); - requestParameters.append("grant_type=client_credentials"); - - if (scope != null && !scope.trim().isEmpty()) { - scope = scope.trim(); - String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8.name()); - requestParameters.append("&scope=").append(encodedScope); - } + static String formatRequestBody(String scope) { + StringBuilder requestParameters = new StringBuilder(); + requestParameters.append("grant_type=client_credentials"); - return requestParameters.toString(); - } catch (UnsupportedEncodingException e) { - // The world has gone crazy! 
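The HttpAccessTokenRetriever hunks above replace the name()-based encode/toString calls with the Charset overloads available in newer JDKs, which removes the checked UnsupportedEncodingException path entirely. A self-contained sketch of those JDK overloads, assuming a Java 11+ runtime; the scope string and JSON payload are made up.

import java.io.ByteArrayOutputStream;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class CharsetOverloadsSketch {
    public static void main(String[] args) {
        // URLEncoder.encode(String, Charset): no UnsupportedEncodingException to handle.
        String encodedScope = URLEncoder.encode("profile email", StandardCharsets.UTF_8);
        System.out.println("grant_type=client_credentials&scope=" + encodedScope);

        // ByteArrayOutputStream.toString(Charset) is likewise unchecked.
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        os.writeBytes("{\"access_token\":\"abc\"}".getBytes(StandardCharsets.UTF_8));
        System.out.println(os.toString(StandardCharsets.UTF_8));
    }
}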
- throw new IOException(String.format("Encoding %s not supported", StandardCharsets.UTF_8.name())); + if (scope != null && !scope.trim().isEmpty()) { + scope = scope.trim(); + String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8); + requestParameters.append("&scope=").append(encodedScope); } + + return requestParameters.toString(); } private static String sanitizeString(String name, String value) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java index 3618005ebe1d7..62261fed58df8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java @@ -142,7 +142,7 @@ public final class RefreshingHttpsJwks implements Initable, Closeable { this.refreshRetryBackoffMs = refreshRetryBackoffMs; this.refreshRetryBackoffMaxMs = refreshRetryBackoffMaxMs; this.executorService = executorService; - this.missingKeyIds = new LinkedHashMap(MISSING_KEY_ID_CACHE_MAX_ENTRIES, .75f, true) { + this.missingKeyIds = new LinkedHashMap<>(MISSING_KEY_ID_CACHE_MAX_ENTRIES, .75f, true) { @Override protected boolean removeEldestEntry(Map.Entry eldest) { return this.size() > MISSING_KEY_ID_CACHE_MAX_ENTRIES; diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java index 82c63111d1f2c..f45865fa63848 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java @@ -39,18 +39,17 @@ public SerializedJwt(String token) { token = token.trim(); if (token.isEmpty()) - throw new ValidateException("Empty JWT provided; expected three sections (header, payload, and signature)"); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); String[] splits = token.split("\\."); if (splits.length != 3) - throw new ValidateException(String.format("Malformed JWT provided (%s); expected three sections (header, payload, and signature), but %d sections provided", - token, splits.length)); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); this.token = token.trim(); - this.header = validateSection(splits[0], "header"); - this.payload = validateSection(splits[1], "payload"); - this.signature = validateSection(splits[2], "signature"); + this.header = validateSection(splits[0]); + this.payload = validateSection(splits[1]); + this.signature = validateSection(splits[2]); } /** @@ -93,13 +92,11 @@ public String getSignature() { return signature; } - private String validateSection(String section, String sectionName) throws ValidateException { + private String validateSection(String section) throws ValidateException { section = section.trim(); if (section.isEmpty()) - throw new ValidateException(String.format( - "Malformed JWT provided; expected at least three sections (header, payload, and signature), but %s section missing", - sectionName)); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, 
and signature)"); return section; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java index 64f5ddf070b00..6b1148e291b4c 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java @@ -349,9 +349,7 @@ private Set calculateScope() { if (Utils.isBlank(scopeClaimValue)) return Collections.emptySet(); else { - Set retval = new HashSet<>(); - retval.add(scopeClaimValue.trim()); - return Collections.unmodifiableSet(retval); + return Set.of(scopeClaimValue.trim()); } } List scopeClaimValue = claim(scopeClaimName, List.class); diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java index 2241eb50fdc07..455fda983c345 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandler.java @@ -32,12 +32,10 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; import java.util.Base64; import java.util.Base64.Encoder; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -104,8 +102,7 @@ public class OAuthBearerUnsecuredLoginCallbackHandler implements AuthenticateCal private static final String PRINCIPAL_CLAIM_NAME_OPTION = OPTION_PREFIX + "PrincipalClaimName"; private static final String LIFETIME_SECONDS_OPTION = OPTION_PREFIX + "LifetimeSeconds"; private static final String SCOPE_CLAIM_NAME_OPTION = OPTION_PREFIX + "ScopeClaimName"; - private static final Set RESERVED_CLAIMS = Collections - .unmodifiableSet(new HashSet<>(Arrays.asList("iat", "exp"))); + private static final Set RESERVED_CLAIMS = Set.of("iat", "exp"); private static final String DEFAULT_PRINCIPAL_CLAIM_NAME = "sub"; private static final String DEFAULT_LIFETIME_SECONDS_ONE_HOUR = "3600"; private static final String DEFAULT_SCOPE_CLAIM_NAME = "scope"; diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java index a817d7b8f2773..53e099688d9f4 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandler.java @@ -193,7 +193,7 @@ private List requiredScope() { private int allowableClockSkewMs() { String allowableClockSkewMsValue = option(ALLOWABLE_CLOCK_SKEW_MILLIS_OPTION); - int allowableClockSkewMs = 0; + int allowableClockSkewMs; try { allowableClockSkewMs = Utils.isBlank(allowableClockSkewMsValue) ? 
0 : Integer.parseInt(allowableClockSkewMsValue.trim()); } catch (NumberFormatException e) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java index 6dcb6d62b1621..999862160f57f 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java @@ -21,7 +21,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -162,14 +161,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(incoming, offset, offset + len); + throw new IllegalStateException("PLAIN supports neither integrity nor privacy"); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(outgoing, offset, offset + len); + throw new IllegalStateException("PLAIN supports neither integrity nor privacy"); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java index 852875b9e5fe7..9afcd6c07e37b 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java +++ b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java @@ -162,14 +162,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(incoming, offset, offset + len); + throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(outgoing, offset, offset + len); + throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java index 8be920d9827a9..e8576e03798de 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java @@ -35,7 +35,6 @@ import java.security.InvalidKeyException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.Set; @@ -148,7 +147,7 @@ public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthen case RECEIVE_CLIENT_FINAL_MESSAGE: try { ClientFinalMessage clientFinalMessage = new ClientFinalMessage(response); - if (!clientFinalMessage.nonce().endsWith(serverFirstMessage.nonce())) { + if (!clientFinalMessage.nonce().equals(serverFirstMessage.nonce())) { throw new SaslException("Invalid client 
nonce in the final client message."); } verifyClientProof(clientFinalMessage); @@ -205,14 +204,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(incoming, offset, offset + len); + throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - return Arrays.copyOfRange(outgoing, offset, offset + len); + throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapper.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapper.java index cae9e19ad2d6b..a7b16906d5d16 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapper.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/CommonNameLoggingTrustManagerFactoryWrapper.java @@ -121,7 +121,7 @@ public CommonNameLoggingTrustManager(X509TrustManager originalTrustManager, int this.origTm = originalTrustManager; this.nrOfRememberedBadCerts = nrOfRememberedBadCerts; // Restrict maximal size of the LinkedHashMap to avoid security attacks causing OOM - this.previouslyRejectedClientCertChains = new LinkedHashMap() { + this.previouslyRejectedClientCertChains = new LinkedHashMap<>() { @Override protected boolean removeEldestEntry(final Map.Entry eldest) { return size() > nrOfRememberedBadCerts; @@ -238,7 +238,7 @@ public static X509Certificate[] sortChainAnWrapEndCertificate(X509Certificate[] principalToCertMap.put(principal, cert); } // Thus, expect certificate chain to be broken, e.g. containing multiple enbd certificates - HashSet endCertificates = new HashSet<>(); + Set endCertificates = new HashSet<>(); for (X509Certificate cert: origChain) { X500Principal subjectPrincipal = cert.getSubjectX500Principal(); if (!issuedbyPrincipalToCertificatesMap.containsKey(subjectPrincipal)) { diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java index 9fd8d50be744d..ad8d2bfe4d4f6 100644 --- a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java +++ b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java @@ -63,7 +63,15 @@ default T deserialize(String topic, Headers headers, byte[] data) { } /** - * Deserialize a record value from a ByteBuffer into a value or object. + * Deserialize a record value from a {@link ByteBuffer} into a value or object. + * + *
<p>If {@code ByteBufferDeserializer} is used by an application, the application code cannot make any assumptions + * about the returned {@link ByteBuffer} like the position, limit, capacity, etc., or if it is backed by + * {@link ByteBuffer#hasArray() an array or not}. + * + * <p>
          Similarly, if this method is overridden, the implementation cannot make any assumptions about the + * passed in {@link ByteBuffer} either. + * * @param topic topic associated with the data * @param headers headers associated with the record; may be empty. * @param data serialized ByteBuffer; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java index 91df6b8aac513..705aafaaa70db 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java @@ -342,6 +342,10 @@ public long timeToNextUpdate(long requestTimeoutMs) { timeMs = Long.MAX_VALUE; log.trace("For telemetry state {}, returning the value {} ms; the terminating push is in progress, disabling telemetry for further requests", localState, timeMs); break; + case TERMINATED: + timeMs = Long.MAX_VALUE; + log.trace("For telemetry state {}, returning the value {} ms; telemetry is terminated, no further requests will be made", localState, timeMs); + break; case TERMINATING_PUSH_NEEDED: timeMs = 0; log.trace("For telemetry state {}, returning the value {} ms; the client should try to submit the final {} network API request ASAP before closing", localState, timeMs, ApiKeys.PUSH_TELEMETRY.name); diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/TelemetryMetricNamingConvention.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/TelemetryMetricNamingConvention.java index 9e6dbc0c559ab..cb8c245a613a1 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/TelemetryMetricNamingConvention.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/TelemetryMetricNamingConvention.java @@ -41,7 +41,7 @@ public class TelemetryMetricNamingConvention { public static MetricNamingStrategy getClientTelemetryMetricNamingStrategy(String prefix) { Objects.requireNonNull(prefix, "prefix cannot be null"); - return new MetricNamingStrategy() { + return new MetricNamingStrategy<>() { @Override public MetricKey metricKey(MetricName metricName) { Objects.requireNonNull(metricName, "metric name cannot be null"); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java b/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java index f3ebfddb1a31d..546a2fdac3322 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java @@ -16,34 +16,15 @@ */ package org.apache.kafka.common.utils; -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; import java.nio.ByteBuffer; import java.util.zip.Checksum; /** * Utility methods for `Checksum` instances. * - * Implementation note: we can add methods to our implementations of CRC32 and CRC32C, but we cannot do the same for - * the Java implementations (we prefer the Java 9 implementation of CRC32C if available). A utility class is the - * simplest way to add methods that are useful for all Checksum implementations. - * * NOTE: This class is intended for INTERNAL usage only within Kafka. 
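The expanded javadoc for Deserializer#deserialize(String, Headers, ByteBuffer) above spells out a contract that is easy to get wrong, so a concrete illustration may help. The sketch below is hypothetical (the class name and UTF-8 payload are illustrative, not part of this patch); it honors the documented rules by reading only the buffer's remaining bytes and never assuming a backing array or a particular position/limit.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;

// Hypothetical example, not part of this change set.
public class DefensiveStringDeserializer implements Deserializer<String> {

    @Override
    public String deserialize(String topic, byte[] data) {
        return data == null ? null : new String(data, StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(String topic, Headers headers, ByteBuffer data) {
        if (data == null)
            return null;
        // Per the javadoc above: make no assumptions about position, limit, capacity,
        // or hasArray(); copy the remaining bytes through the ByteBuffer API instead.
        byte[] bytes = new byte[data.remaining()];
        data.get(bytes);
        return new String(bytes, StandardCharsets.UTF_8);
    }
}
```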
*/ public final class Checksums { - private static final MethodHandle BYTE_BUFFER_UPDATE; - - static { - MethodHandle byteBufferUpdate = null; - try { - byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update", - MethodType.methodType(void.class, ByteBuffer.class)); - } catch (Throwable t) { - handleUpdateThrowable(t); - } - BYTE_BUFFER_UPDATE = byteBufferUpdate; - } private Checksums() { } @@ -63,7 +44,7 @@ public static void update(Checksum checksum, ByteBuffer buffer, int length) { public static void update(Checksum checksum, ByteBuffer buffer, int offset, int length) { if (buffer.hasArray()) { checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, length); - } else if (BYTE_BUFFER_UPDATE != null && buffer.isDirect()) { + } else if (buffer.isDirect()) { final int oldPosition = buffer.position(); final int oldLimit = buffer.limit(); try { @@ -71,9 +52,7 @@ public static void update(Checksum checksum, ByteBuffer buffer, int offset, int final int start = oldPosition + offset; buffer.limit(start + length); buffer.position(start); - BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer); - } catch (Throwable t) { - handleUpdateThrowable(t); + checksum.update(buffer); } finally { // reset buffer's offsets buffer.limit(oldLimit); @@ -87,16 +66,6 @@ public static void update(Checksum checksum, ByteBuffer buffer, int offset, int } } } - - private static void handleUpdateThrowable(Throwable t) { - if (t instanceof RuntimeException) { - throw (RuntimeException) t; - } - if (t instanceof Error) { - throw (Error) t; - } - throw new IllegalStateException(t); - } public static void updateInt(Checksum checksum, int input) { checksum.update((byte) (input >> 24)); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java index ce1680b62a7fe..9a891e0846384 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java @@ -112,7 +112,7 @@ public Enumeration getResources(String name) throws IOException { Enumeration urls1 = findResources(name); Enumeration urls2 = getParent() != null ? 
getParent().getResources(name) : null; - return new Enumeration() { + return new Enumeration<>() { @Override public boolean hasMoreElements() { return (urls1 != null && urls1.hasMoreElements()) || (urls2 != null && urls2.hasMoreElements()); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/CloseableIterator.java b/clients/src/main/java/org/apache/kafka/common/utils/CloseableIterator.java index 50b06369b471e..1709b5d47fbd3 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/CloseableIterator.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/CloseableIterator.java @@ -29,7 +29,7 @@ public interface CloseableIterator extends Iterator, Closeable { void close(); static CloseableIterator wrap(Iterator inner) { - return new CloseableIterator() { + return new CloseableIterator<>() { @Override public void close() {} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java index 49ad34d19648a..09c0e518bbf70 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java @@ -17,35 +17,17 @@ package org.apache.kafka.common.utils; -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; import java.nio.ByteBuffer; +import java.util.zip.CRC32C; import java.util.zip.Checksum; /** * A class that can be used to compute the CRC32C (Castagnoli) of a ByteBuffer or array of bytes. * - * We use java.util.zip.CRC32C (introduced in Java 9). - * java.util.zip.CRC32C is significantly faster on reasonably modern CPUs as it uses the CRC32 instruction introduced - * in SSE4.2. - * * NOTE: This class is intended for INTERNAL usage only within Kafka. */ public final class Crc32C { - private static final MethodHandle CRC32C_CONSTRUCTOR; - - static { - try { - Class cls = Class.forName("java.util.zip.CRC32C"); - CRC32C_CONSTRUCTOR = MethodHandles.publicLookup().findConstructor(cls, MethodType.methodType(void.class)); - } catch (ReflectiveOperationException e) { - // Should never happen - throw new RuntimeException(e); - } - } - private Crc32C() {} /** @@ -57,7 +39,7 @@ private Crc32C() {} * @return The CRC32C */ public static long compute(byte[] bytes, int offset, int size) { - Checksum crc = create(); + Checksum crc = new CRC32C(); crc.update(bytes, offset, size); return crc.getValue(); } @@ -71,17 +53,8 @@ public static long compute(byte[] bytes, int offset, int size) { * @return The CRC32C */ public static long compute(ByteBuffer buffer, int offset, int size) { - Checksum crc = create(); + Checksum crc = new CRC32C(); Checksums.update(crc, buffer, offset, size); return crc.getValue(); } - - public static Checksum create() { - try { - return (Checksum) CRC32C_CONSTRUCTOR.invoke(); - } catch (Throwable throwable) { - // Should never happen - throw new RuntimeException(throwable); - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java deleted file mode 100644 index fb96901b309de..0000000000000 --- a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +++ /dev/null @@ -1,645 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
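For context on the Checksums and Crc32C hunks above: with a Java 11+ baseline, java.util.zip.CRC32C and the Checksum.update(ByteBuffer) overload (both introduced in Java 9) can be called directly, which is what allows the MethodHandle plumbing and the pure-Java fallback removed below to go away. A small standalone sketch of the direct JDK calls the simplified utilities now rely on (illustrative only, not Kafka code):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32C;
import java.util.zip.Checksum;

public class Crc32CSketch {
    public static void main(String[] args) {
        byte[] payload = "kafka".getBytes(StandardCharsets.UTF_8);

        // Heap data: the byte[] overload.
        Checksum crc = new CRC32C();
        crc.update(payload, 0, payload.length);
        long fromArray = crc.getValue();

        // Direct buffer: Checksum.update(ByteBuffer) has been part of the JDK since Java 9,
        // so no reflective lookup is needed any more.
        ByteBuffer direct = ByteBuffer.allocateDirect(payload.length);
        direct.put(payload).flip();
        Checksum crc2 = new CRC32C();
        crc2.update(direct);
        long fromBuffer = crc2.getValue();

        System.out.println(fromArray == fromBuffer); // prints "true"
    }
}
```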
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * Some portions of this file Copyright (c) 2004-2006 Intel Corporation and - * licensed under the BSD license. - */ -package org.apache.kafka.common.utils; - -import java.util.zip.Checksum; - -/** - * This class was taken from Hadoop: org.apache.hadoop.util.PureJavaCrc32C. - * - * A pure-java implementation of the CRC32 checksum that uses - * the CRC32-C polynomial, the same polynomial used by iSCSI - * and implemented on many Intel chipsets supporting SSE4.2. - * - * NOTE: This class is intended for INTERNAL usage only within Kafka. - */ -// The exact version that was retrieved from Hadoop: -// https://github.com/apache/hadoop/blob/224de4f92c222a7b915e9c5d6bdd1a4a3fcbcf31/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java -public class PureJavaCrc32C implements Checksum { - - /** the current CRC value, bit-flipped */ - private int crc; - - public PureJavaCrc32C() { - reset(); - } - - @Override - public long getValue() { - long ret = crc; - return (~ret) & 0xffffffffL; - } - - @Override - public final void reset() { - crc = 0xffffffff; - } - - @SuppressWarnings("fallthrough") - @Override - public void update(byte[] b, int off, int len) { - int localCrc = crc; - - while (len > 7) { - final int c0 = (b[off + 0] ^ localCrc) & 0xff; - final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; - final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; - final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; - localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) - ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); - - final int c4 = b[off + 4] & 0xff; - final int c5 = b[off + 5] & 0xff; - final int c6 = b[off + 6] & 0xff; - final int c7 = b[off + 7] & 0xff; - - localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) - ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); - - off += 8; - len -= 8; - } - - /* loop unroll - duff's device style */ - switch (len) { - case 7: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 6: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 5: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 4: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 3: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 2: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - case 1: - localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; - default: - /* nothing */ - } - - // Publish crc out to object - crc = localCrc; - } - - @Override - public final void update(int b) { - crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)]; - } - - // CRC polynomial tables generated by: - // java -cp build/test/classes/:build/classes/ \ - // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 - - private static final int T8_0_START = 0 * 256; - private 
static final int T8_1_START = 1 * 256; - private static final int T8_2_START = 2 * 256; - private static final int T8_3_START = 3 * 256; - private static final int T8_4_START = 4 * 256; - private static final int T8_5_START = 5 * 256; - private static final int T8_6_START = 6 * 256; - private static final int T8_7_START = 7 * 256; - - private static final int[] T = new int[]{ - /* T8_0 */ - 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, - 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, - 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, - 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, - 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, - 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, - 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, - 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, - 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, - 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, - 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, - 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, - 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, - 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, - 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, - 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, - 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, - 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, - 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, - 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, - 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, - 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, - 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, - 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, - 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, - 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, - 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, - 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, - 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, - 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, - 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, - 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, - 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, - 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, - 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, - 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, - 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, - 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, - 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, - 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, - 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, - 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, - 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, - 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, - 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, - 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, - 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, - 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, - 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, - 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, - 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, - 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, - 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, - 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, - 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, - 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, - 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, - 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, - 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, - 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, - 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, - 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, - 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, - 
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, - /* T8_1 */ - 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, - 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, - 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, - 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, - 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, - 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, - 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, - 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, - 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, - 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, - 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, - 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, - 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, - 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, - 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, - 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, - 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, - 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, - 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, - 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, - 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, - 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, - 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, - 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, - 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, - 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, - 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, - 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, - 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, - 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, - 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, - 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, - 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, - 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, - 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, - 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, - 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, - 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, - 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, - 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, - 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, - 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, - 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, - 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, - 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, - 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, - 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, - 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, - 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, - 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, - 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, - 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, - 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, - 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, - 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, - 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, - 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, - 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, - 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, - 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, - 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, - 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, - 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, - 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, - /* T8_2 */ - 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, - 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, - 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, - 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, - 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, - 0xEE7CD990, 0x4B3D4BEE, 
0xA1138B9D, 0x045219E3, - 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, - 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, - 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, - 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, - 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, - 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, - 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, - 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, - 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, - 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, - 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, - 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, - 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, - 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, - 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, - 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, - 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, - 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, - 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, - 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, - 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, - 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, - 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, - 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, - 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, - 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, - 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, - 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, - 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, - 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, - 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, - 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, - 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, - 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, - 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, - 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, - 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, - 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, - 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, - 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, - 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, - 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, - 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, - 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, - 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, - 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, - 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, - 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, - 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, - 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, - 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, - 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, - 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, - 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, - 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, - 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, - 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, - 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, - /* T8_3 */ - 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, - 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, - 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, - 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, - 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, - 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, - 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, - 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, - 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, - 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, - 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, - 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, - 0x3E41A5B6, 
0xE3040F0E, 0x81268637, 0x5C632C8F, - 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, - 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, - 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, - 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, - 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, - 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, - 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, - 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, - 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, - 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, - 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, - 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, - 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, - 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, - 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, - 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, - 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, - 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, - 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, - 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, - 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, - 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, - 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, - 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, - 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, - 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, - 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, - 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, - 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, - 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, - 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, - 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, - 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, - 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, - 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, - 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, - 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, - 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, - 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, - 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, - 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, - 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, - 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, - 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, - 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, - 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, - 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, - 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, - 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, - 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, - 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, - /* T8_4 */ - 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, - 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, - 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, - 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, - 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, - 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, - 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, - 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, - 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, - 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, - 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, - 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, - 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, - 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, - 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, - 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, - 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, - 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, - 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, - 
0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, - 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, - 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, - 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, - 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, - 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, - 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, - 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, - 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, - 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, - 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, - 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, - 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, - 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, - 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, - 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, - 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, - 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, - 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, - 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, - 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, - 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, - 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, - 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, - 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, - 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, - 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, - 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, - 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, - 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, - 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, - 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, - 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, - 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, - 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, - 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, - 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, - 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, - 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, - 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, - 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, - 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, - 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, - 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, - 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, - /* T8_5 */ - 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, - 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, - 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, - 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, - 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, - 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, - 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, - 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, - 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, - 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, - 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, - 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, - 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, - 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, - 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, - 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, - 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, - 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, - 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, - 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, - 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, - 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, - 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, - 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, - 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, - 0x39057A11, 0xD6351108, 0xE289DAD2, 
0x0DB9B1CB, - 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, - 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, - 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, - 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, - 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, - 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, - 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, - 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, - 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, - 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, - 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, - 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, - 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, - 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, - 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, - 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, - 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, - 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, - 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, - 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, - 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, - 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, - 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, - 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, - 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, - 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, - 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, - 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, - 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, - 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, - 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, - 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, - 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, - 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, - 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, - 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, - 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, - 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, - /* T8_6 */ - 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, - 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, - 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, - 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, - 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, - 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, - 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, - 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, - 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, - 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, - 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, - 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, - 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, - 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, - 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, - 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, - 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, - 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, - 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, - 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, - 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, - 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, - 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, - 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, - 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, - 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, - 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, - 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, - 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, - 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, - 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, - 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, - 0xF56E0EF4, 0x9D6D223C, 
0x25685764, 0x4D6B7BAC, - 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, - 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, - 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, - 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, - 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, - 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, - 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, - 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, - 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, - 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, - 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, - 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, - 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, - 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, - 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, - 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, - 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, - 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, - 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, - 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, - 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, - 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, - 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, - 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, - 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, - 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, - 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, - 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, - 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, - 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, - 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, - /* T8_7 */ - 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, - 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, - 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, - 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, - 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, - 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, - 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, - 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, - 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, - 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, - 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, - 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, - 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, - 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, - 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, - 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, - 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, - 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, - 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, - 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, - 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, - 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, - 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, - 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, - 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, - 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, - 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, - 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, - 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, - 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, - 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, - 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, - 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, - 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, - 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, - 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, - 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, - 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, - 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, - 0xD3511967, 
0x9A6D6440, 0x4129E329, 0x08159E0E, - 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, - 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, - 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, - 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, - 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, - 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, - 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, - 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, - 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, - 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, - 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, - 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, - 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, - 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, - 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, - 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, - 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, - 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, - 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, - 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, - 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, - 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, - 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, - 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 - }; -} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Sanitizer.java b/clients/src/main/java/org/apache/kafka/common/utils/Sanitizer.java index 61d29a1a35302..669b9c3776314 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Sanitizer.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Sanitizer.java @@ -16,9 +16,6 @@ */ package org.apache.kafka.common.utils; -import org.apache.kafka.common.KafkaException; - -import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; @@ -27,14 +24,11 @@ import javax.management.ObjectName; /** - * Utility class for sanitizing/desanitizing/quoting values used in JMX metric names - * or as ZooKeeper node name. + * Utility class for sanitizing/desanitizing/quoting values used in JMX metric names. *
<p>
          - * User principals and client-ids are URL-encoded using ({@link #sanitize(String)} - * for use as ZooKeeper node names. User principals are URL-encoded in all metric - * names as well. All other metric tags including client-id are quoted if they - * contain special characters using {@link #jmxSanitize(String)} when - * registering in JMX. + * User principals are URL-encoded using ({@link #sanitize(String)} in all metric names. + * All other metric tags including client-id are quoted if they contain special characters + * using {@link #jmxSanitize(String)} when registering in JMX. */ public class Sanitizer { @@ -46,39 +40,29 @@ public class Sanitizer { private static final Pattern MBEAN_PATTERN = Pattern.compile("[\\w-%\\. \t]*"); /** - * Sanitize `name` for safe use as JMX metric name as well as ZooKeeper node name - * using URL-encoding. + * Sanitize `name` for safe use as JMX metric name. */ public static String sanitize(String name) { - try { - String encoded = URLEncoder.encode(name, StandardCharsets.UTF_8.name()); - StringBuilder builder = new StringBuilder(); - for (int i = 0; i < encoded.length(); i++) { - char c = encoded.charAt(i); - if (c == '*') { // Metric ObjectName treats * as pattern - builder.append("%2A"); - } else if (c == '+') { // Space URL-encoded as +, replace with percent encoding - builder.append("%20"); - } else { - builder.append(c); - } + String encoded = URLEncoder.encode(name, StandardCharsets.UTF_8); + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < encoded.length(); i++) { + char c = encoded.charAt(i); + if (c == '*') { // Metric ObjectName treats * as pattern + builder.append("%2A"); + } else if (c == '+') { // Space URL-encoded as +, replace with percent encoding + builder.append("%20"); + } else { + builder.append(c); } - return builder.toString(); - } catch (UnsupportedEncodingException e) { - throw new KafkaException(e); } + return builder.toString(); } /** - * Desanitize name that was URL-encoded using {@link #sanitize(String)}. This - * is used to obtain the desanitized version of node names in ZooKeeper. + * Desanitize name that was URL-encoded using {@link #sanitize(String)}. */ public static String desanitize(String name) { - try { - return URLDecoder.decode(name, StandardCharsets.UTF_8.name()); - } catch (UnsupportedEncodingException e) { - throw new KafkaException(e); - } + return URLDecoder.decode(name, StandardCharsets.UTF_8); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ThreadUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/ThreadUtils.java index 51cfe74fcdee5..a47e9ddb36a12 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ThreadUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ThreadUtils.java @@ -25,12 +25,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static java.lang.Thread.UncaughtExceptionHandler; + /** * Utilities for working with threads. */ public class ThreadUtils { private static final Logger log = LoggerFactory.getLogger(ThreadUtils.class); + /** * Create a new ThreadFactory. * @@ -42,6 +45,22 @@ public class ThreadUtils { */ public static ThreadFactory createThreadFactory(final String pattern, final boolean daemon) { + return createThreadFactory(pattern, daemon, null); + } + + /** + * Create a new ThreadFactory. + * + * @param pattern The pattern to use. If this contains %d, it will be + * replaced with a thread number. It should not contain more + * than one %d. 
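A possible use of the createThreadFactory(pattern, daemon, ueh) overload being added in this hunk; the class name, pool, and log message below are illustrative only. The handler receives anything that escapes a task so it is logged rather than lost, and the pool is then closed with the two-phase shutdown helper documented in the same file.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.common.utils.ThreadUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative caller, not part of this change set.
public class UncaughtHandlerExample {
    private static final Logger LOG = LoggerFactory.getLogger(UncaughtHandlerExample.class);

    public static void main(String[] args) {
        // %d in the pattern is replaced with a per-factory thread number; the third
        // argument is the new UncaughtExceptionHandler parameter.
        ThreadFactory factory = ThreadUtils.createThreadFactory(
                "example-worker-%d",
                true,
                (thread, error) -> LOG.error("Uncaught exception in {}", thread.getName(), error));

        ExecutorService pool = Executors.newSingleThreadExecutor(factory);
        pool.execute(() -> {
            throw new IllegalStateException("boom"); // ends up in the handler above
        });

        // Two-phase shutdown helper from the same class.
        ThreadUtils.shutdownExecutorServiceQuietly(pool, 5, TimeUnit.SECONDS);
    }
}
```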
+ * @param daemon True if we want daemon threads. + * @param ueh thread's uncaught exception handler. + * @return The new ThreadFactory. + */ + public static ThreadFactory createThreadFactory(final String pattern, + final boolean daemon, + final UncaughtExceptionHandler ueh) { return new ThreadFactory() { private final AtomicLong threadEpoch = new AtomicLong(0); @@ -55,6 +74,9 @@ public Thread newThread(Runnable r) { } Thread thread = new Thread(r, threadName); thread.setDaemon(daemon); + if (ueh != null) { + thread.setUncaughtExceptionHandler(ueh); + } return thread; } }; @@ -64,12 +86,15 @@ public Thread newThread(Runnable r) { * Shuts down an executor service in two phases, first by calling shutdown to reject incoming tasks, * and then calling shutdownNow, if necessary, to cancel any lingering tasks. * After the timeout/on interrupt, the service is forcefully closed. + * This pattern of shutting down thread pool is adopted from here: + * ExecutorService * @param executorService The service to shut down. - * @param timeout The timeout of the shutdown. - * @param timeUnit The time unit of the shutdown timeout. + * @param timeout The timeout of the shutdown. + * @param timeUnit The time unit of the shutdown timeout. */ public static void shutdownExecutorServiceQuietly(ExecutorService executorService, - long timeout, TimeUnit timeUnit) { + long timeout, + TimeUnit timeUnit) { if (executorService == null) { return; } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index c826f90558483..02a62ee4524b8 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -100,7 +100,7 @@ private Utils() {} private static final Pattern VALID_HOST_CHARACTERS = Pattern.compile("([0-9a-zA-Z\\-%._:]*)"); - // Prints up to 2 decimal digits. Used for human readable printing + // Prints up to 2 decimal digits. Used for human-readable printing private static final DecimalFormat TWO_DIGIT_FORMAT = new DecimalFormat("0.##", DecimalFormatSymbols.getInstance(Locale.ENGLISH)); @@ -346,7 +346,7 @@ public static byte[] copyArray(byte[] src) { * Compares two character arrays for equality using a constant-time algorithm, which is needed * for comparing passwords. Two arrays are equal if they have the same length and all * characters at corresponding positions are equal. - * + *

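A usage sketch for the new createThreadFactory overload and the two-phase shutdownExecutorServiceQuietly from the ThreadUtils hunks above (illustrative only; the pool size and timeout are arbitrary):

    import org.apache.kafka.common.utils.ThreadUtils;

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.TimeUnit;

    public class ThreadUtilsExample {
        public static void main(String[] args) {
            // %d in the pattern is replaced with a thread number; the handler sees uncaught exceptions.
            ThreadFactory factory = ThreadUtils.createThreadFactory("worker-%d", true,
                    (thread, error) -> System.err.println(thread.getName() + " died: " + error));
            ExecutorService pool = Executors.newFixedThreadPool(2, factory);
            // Thrown from execute() rather than submit(), so it reaches the uncaught exception handler.
            pool.execute(() -> { throw new IllegalStateException("boom"); });
            // Two-phase shutdown: shutdown() first, then shutdownNow() if tasks linger past the timeout.
            ThreadUtils.shutdownExecutorServiceQuietly(pool, 5, TimeUnit.SECONDS);
        }
    }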
          * All characters in the first array are examined to determine equality. * The calculation time depends only on the length of this first character array; it does not * depend on the length of the second character array or the contents of either array. @@ -572,9 +572,12 @@ public static String formatAddress(String host, Integer port) { } /** - * Formats a byte number as a human-readable String ("3.2 MB") - * @param bytes some size in bytes - * @return + * Formats a byte value into a human-readable string with an appropriate unit + * (e.g., "3.2 KB", "1.5 MB", "2.1 GB"). The format includes two decimal places. + * + * @param bytes the size in bytes + * @return a string representing the size with the appropriate unit (e.g., "3.2 KB", "1.5 MB"). + * If the value is negative or too large, the input is returned as a string (e.g., "-500", "999999999999999"). @@ -615,7 +618,7 @@ public static <K, V> String mkString(Map<K, V> map, String begin, String end, /** * Converts an extensions string into a {@code Map<String, String>}. - * + *

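The formatBytes contract documented above, as a small sketch (the expected strings follow from the two-decimal format described in the javadoc; they are illustrations, not captured output):

    import org.apache.kafka.common.utils.Utils;

    public class FormatBytesExample {
        public static void main(String[] args) {
            System.out.println(Utils.formatBytes(1_610_612_736L)); // 1.5 * 1024^3 bytes -> "1.5 GB"
            System.out.println(Utils.formatBytes(2_048));          // -> "2 KB"
            System.out.println(Utils.formatBytes(-500));           // negative input comes back as-is: "-500"
        }
    }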
          * Example: * {@code parseMap("key=hey,keyTwo=hi,keyThree=hello", "=", ",") => { key: "hey", keyTwo: "hi", keyThree: "hello" }} * @@ -888,9 +891,12 @@ public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOE } /** - * Returns an empty collection if this list is null - * @param other - * @return + * Returns an empty list if the provided list is null, otherwise returns the list itself. + *

          + * This method is useful for avoiding {@code NullPointerException} when working with potentially null lists. + * + * @param other the list to check for null + * @return an empty list if the provided list is null, otherwise the original list */ public static <T> List<T> safe(List<T> other) { return other == null ? Collections.emptyList() : other; } @@ -906,7 +912,7 @@ public static ClassLoader getKafkaClassLoader() { /** * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that * loaded Kafka. - * + *

          * This should be used whenever passing a ClassLoader to Class.forName */ public static ClassLoader getContextOrKafkaClassLoader() { @@ -957,7 +963,7 @@ public static void atomicMoveWithFallback(Path source, Path target, boolean need /** * Flushes dirty directories to guarantee crash consistency. - * + *

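Following the getContextOrKafkaClassLoader guidance above, a reflective load would typically look like the sketch below (the plugin class name is a placeholder):

    import org.apache.kafka.common.utils.Utils;

    public class PluginLoadingExample {
        public static void main(String[] args) throws ClassNotFoundException {
            // Prefer the thread's context ClassLoader, falling back to the loader that loaded Kafka.
            Class<?> plugin = Class.forName("com.example.MyPlugin", true, Utils.getContextOrKafkaClassLoader());
            System.out.println("Loaded " + plugin.getName());
        }
    }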
          * Note: We don't fsync directories on Windows OS because otherwise it'll throw AccessDeniedException (KAFKA-13391) * * @throws IOException if flushing the directory fails. @@ -1060,7 +1066,7 @@ public static void swallow(final Logger log, final Level level, final String wha /** * An {@link AutoCloseable} interface without a throws clause in the signature - * + *

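The interface described above is handy for cleanup lambdas in try-with-resources; a sketch assuming the nested interface is Utils.UncheckedCloseable (the name is taken on trust from the surrounding Utils code, so treat it as an assumption):

    import org.apache.kafka.common.utils.Utils;

    public class UncheckedCloseableExample {
        public static void main(String[] args) {
            // Assumed nested interface name; close() declares no checked exception, so no casting is needed.
            try (Utils.UncheckedCloseable ignored = () -> System.out.println("cleaned up")) {
                System.out.println("doing work");
            }
        }
    }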
          * This is used with lambda expressions in try-with-resources clauses * to avoid casting un-checked exceptions to checked exceptions unnecessarily. */ @@ -1149,7 +1155,7 @@ public static void closeAllQuietly(AtomicReference firstException, St /** * Invokes every function in `all` even if one or more functions throws an exception. - * + *

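A sketch of the tryAll contract described above: every callable runs even if an earlier one fails, and the first failure is rethrown with later ones attached as suppressed exceptions (the resources being closed are made up):

    import org.apache.kafka.common.utils.Utils;

    import java.util.List;
    import java.util.concurrent.Callable;

    public class TryAllExample {
        public static void main(String[] args) throws Throwable {
            List<Callable<Void>> closers = List.of(
                    () -> { System.out.println("closing producer"); return null; },
                    () -> { throw new IllegalStateException("consumer close failed"); },
                    () -> { System.out.println("closing admin client"); return null; }
            );
            // All three run; the IllegalStateException surfaces afterwards.
            Utils.tryAll(closers);
        }
    }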
          * If any of the functions throws an exception, the first one will be rethrown at the end with subsequent exceptions * added as suppressed exceptions. */ @@ -1176,7 +1182,7 @@ public static void tryAll(List<Callable<Void>> all) throws Throwable { * positive, the original value is returned. When the input number is negative, the returned * positive value is the original value bit AND against 0x7fffffff which is not its absolute * value. - * + *

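The "not its absolute value" caveat above, made concrete (both results follow directly from masking with 0x7fffffff):

    import org.apache.kafka.common.utils.Utils;

    public class ToPositiveExample {
        public static void main(String[] args) {
            System.out.println(Utils.toPositive(-1));                // 2147483647, not 1
            System.out.println(Utils.toPositive(Integer.MIN_VALUE)); // 0 (Math.abs would overflow here)
            System.out.println(Utils.toPositive(42));                // positive input is returned unchanged
        }
    }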
          * Note: changing this method in the future will possibly cause partition selection not to be * compatible with the existing messages already placed on a partition since it is used * in producer's partition selection logic {@link org.apache.kafka.clients.producer.KafkaProducer} diff --git a/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java b/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java index c501dce65d33a..6dcc80f1e1dfa 100644 --- a/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java +++ b/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java @@ -112,8 +112,7 @@ public interface Authorizer extends Configurable, Closeable { * to process the update synchronously on the request thread. * * @param requestContext Request context if the ACL is being created by a broker to handle - * a client request to create ACLs. This may be null if ACLs are created directly in ZooKeeper - * using AclCommand. + * a client request to create ACLs. * @param aclBindings ACL bindings to create * * @return Create result for each ACL binding in the same order as in the input list. Each result @@ -131,8 +130,7 @@ public interface Authorizer extends Configurable, Closeable { * Refer to the authorizer implementation docs for details on concurrent update guarantees. * * @param requestContext Request context if the ACL is being deleted by a broker to handle - * a client request to delete ACLs. This may be null if ACLs are deleted directly in ZooKeeper - * using AclCommand. + * a client request to delete ACLs. * @param aclBindingFilters Filters to match ACL bindings that are to be deleted * * @return Delete result for each filter in the same order as in the input list. @@ -201,15 +199,15 @@ op, new ResourcePattern(resourceType, "hardcode", PatternType.LITERAL), resourceTypeFilter, AccessControlEntryFilter.ANY); EnumMap> denyPatterns = - new EnumMap>(PatternType.class) {{ - put(PatternType.LITERAL, new HashSet<>()); - put(PatternType.PREFIXED, new HashSet<>()); - }}; + new EnumMap<>(PatternType.class) {{ + put(PatternType.LITERAL, new HashSet<>()); + put(PatternType.PREFIXED, new HashSet<>()); + }}; EnumMap> allowPatterns = - new EnumMap>(PatternType.class) {{ - put(PatternType.LITERAL, new HashSet<>()); - put(PatternType.PREFIXED, new HashSet<>()); - }}; + new EnumMap<>(PatternType.class) {{ + put(PatternType.LITERAL, new HashSet<>()); + put(PatternType.PREFIXED, new HashSet<>()); + }}; boolean hasWildCardAllow = false; diff --git a/clients/src/main/resources/common/message/AddPartitionsToTxnRequest.json b/clients/src/main/resources/common/message/AddPartitionsToTxnRequest.json index 2270f7a8469f5..2ed84be2180ba 100644 --- a/clients/src/main/resources/common/message/AddPartitionsToTxnRequest.json +++ b/clients/src/main/resources/common/message/AddPartitionsToTxnRequest.json @@ -59,7 +59,7 @@ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "entityType": "topicName", "about": "The name of the topic." }, { "name": "Partitions", "type": "[]int32", "versions": "0+", - "about": "The partition indexes to add to the transaction" } + "about": "The partition indexes to add to the transaction." 
} ]} ] } diff --git a/clients/src/main/resources/common/message/AddPartitionsToTxnResponse.json b/clients/src/main/resources/common/message/AddPartitionsToTxnResponse.json index 6c4eefd2cc001..a621740decc14 100644 --- a/clients/src/main/resources/common/message/AddPartitionsToTxnResponse.json +++ b/clients/src/main/resources/common/message/AddPartitionsToTxnResponse.json @@ -48,7 +48,7 @@ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "entityType": "topicName", "about": "The topic name." }, { "name": "ResultsByPartition", "type": "[]AddPartitionsToTxnPartitionResult", "versions": "0+", - "about": "The results for each partition" } + "about": "The results for each partition." } ]}, { "name": "AddPartitionsToTxnPartitionResult", "versions": "0+", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "mapKey": true, diff --git a/clients/src/main/resources/common/message/AddRaftVoterRequest.json b/clients/src/main/resources/common/message/AddRaftVoterRequest.json index 6d282633773c7..74b7638ea2463 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterRequest.json +++ b/clients/src/main/resources/common/message/AddRaftVoterRequest.json @@ -21,20 +21,22 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+" }, - { "name": "TimeoutMs", "type": "int32", "versions": "0+" }, + { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "The cluster id." }, + { "name": "TimeoutMs", "type": "int32", "versions": "0+", + "about": "The maximum time to wait for the request to complete before returning."}, { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting added to the topic partition" }, + "about": "The replica id of the voter getting added to the topic partition." }, { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting added to the topic partition" }, + "about": "The directory id of the voter getting added to the topic partition." }, { "name": "Listeners", "type": "[]Listener", "versions": "0+", - "about": "The endpoints that can be used to communicate with the voter", "fields": [ + "about": "The endpoints that can be used to communicate with the voter.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, - "about": "The name of the endpoint" }, + "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "0+", - "about": "The hostname" }, + "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "0+", - "about": "The port" } + "about": "The port." } ]} ] } diff --git a/clients/src/main/resources/common/message/AddRaftVoterResponse.json b/clients/src/main/resources/common/message/AddRaftVoterResponse.json index 3173f0d4d3a57..c48f9cdda4e85 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterResponse.json +++ b/clients/src/main/resources/common/message/AddRaftVoterResponse.json @@ -23,7 +23,7 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" }, + "about": "The error code, or 0 if there was no error." 
}, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "ignorable": true, "about": "The error message, or null if there was no error." } ] diff --git a/clients/src/main/resources/common/message/AllocateProducerIdsRequest.json b/clients/src/main/resources/common/message/AllocateProducerIdsRequest.json index 7256c6b684a01..e8271c60321a3 100644 --- a/clients/src/main/resources/common/message/AllocateProducerIdsRequest.json +++ b/clients/src/main/resources/common/message/AllocateProducerIdsRequest.json @@ -22,8 +22,8 @@ "flexibleVersions": "0+", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The ID of the requesting broker" }, + "about": "The ID of the requesting broker." }, { "name": "BrokerEpoch", "type": "int64", "versions": "0+", "default": "-1", - "about": "The epoch of the requesting broker" } + "about": "The epoch of the requesting broker." } ] } diff --git a/clients/src/main/resources/common/message/AllocateProducerIdsResponse.json b/clients/src/main/resources/common/message/AllocateProducerIdsResponse.json index 0d849c098568b..7d003e992b661 100644 --- a/clients/src/main/resources/common/message/AllocateProducerIdsResponse.json +++ b/clients/src/main/resources/common/message/AllocateProducerIdsResponse.json @@ -23,10 +23,10 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top level response error code" }, + "about": "The top level response error code." }, { "name": "ProducerIdStart", "type": "int64", "versions": "0+", "entityType": "producerId", - "about": "The first producer ID in this range, inclusive"}, + "about": "The first producer ID in this range, inclusive."}, { "name": "ProducerIdLen", "type": "int32", "versions": "0+", - "about": "The number of producer IDs in this range"} + "about": "The number of producer IDs in this range."} ] } diff --git a/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json b/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json index 3fa08883d1bd5..0b8f60b0baba8 100644 --- a/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json +++ b/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json @@ -29,9 +29,9 @@ { "name": "Responses", "type": "[]ReassignableTopicResponse", "versions": "0+", "about": "The responses to topics to reassign.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, + "about": "The topic name." }, { "name": "Partitions", "type": "[]ReassignablePartitionResponse", "versions": "0+", - "about": "The responses to partitions to reassign", "fields": [ + "about": "The responses to partitions to reassign.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." 
}, { "name": "ErrorCode", "type": "int16", "versions": "0+", diff --git a/clients/src/main/resources/common/message/AlterPartitionRequest.json b/clients/src/main/resources/common/message/AlterPartitionRequest.json index 2e880cd64fa64..89b5dbfdd9516 100644 --- a/clients/src/main/resources/common/message/AlterPartitionRequest.json +++ b/clients/src/main/resources/common/message/AlterPartitionRequest.json @@ -27,22 +27,25 @@ "flexibleVersions": "0+", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The ID of the requesting broker" }, + "about": "The ID of the requesting broker." }, { "name": "BrokerEpoch", "type": "int64", "versions": "0+", "default": "-1", - "about": "The epoch of the requesting broker" }, - { "name": "Topics", "type": "[]TopicData", "versions": "0+", "fields": [ + "about": "The epoch of the requesting broker." }, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topics to alter ISRs for.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0-1", "ignorable": true, "entityType": "topicName", - "about": "The name of the topic to alter ISRs for" }, + "about": "The name of the topic to alter ISRs for." }, { "name": "TopicId", "type": "uuid", "versions": "2+", "ignorable": true, - "about": "The ID of the topic to alter ISRs for" }, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "fields": [ + "about": "The ID of the topic to alter ISRs for." }, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partitions to alter ISRs for.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index" }, + "about": "The partition index." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The leader epoch of this partition" }, + "about": "The leader epoch of this partition." }, { "name": "NewIsr", "type": "[]int32", "versions": "0-2", "entityType": "brokerId", "about": "The ISR for this partition. Deprecated since version 3." }, - { "name": "NewIsrWithEpochs", "type": "[]BrokerState", "versions": "3+", "fields": [ + { "name": "NewIsrWithEpochs", "type": "[]BrokerState", "versions": "3+", + "about": "The ISR for this partition.", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "3+", "entityType": "brokerId", "about": "The ID of the broker." }, { "name": "BrokerEpoch", "type": "int64", "versions": "3+", "default": "-1", diff --git a/clients/src/main/resources/common/message/AlterPartitionResponse.json b/clients/src/main/resources/common/message/AlterPartitionResponse.json index e5a5408624cba..13b26abeb3af1 100644 --- a/clients/src/main/resources/common/message/AlterPartitionResponse.json +++ b/clients/src/main/resources/common/message/AlterPartitionResponse.json @@ -29,17 +29,19 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top level response error code" }, - { "name": "Topics", "type": "[]TopicData", "versions": "0+", "fields": [ + "about": "The top level response error code." 
}, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The responses for each topic.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0-1", "ignorable": true, "entityType": "topicName", - "about": "The name of the topic" }, + "about": "The name of the topic." }, { "name": "TopicId", "type": "uuid", "versions": "2+", "ignorable": true, - "about": "The ID of the topic" }, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "fields": [ + "about": "The ID of the topic." }, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The responses for each partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index" }, + "about": "The partition index." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The partition level error code" }, + "about": "The partition level error code." }, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The broker ID of the leader." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/AlterReplicaLogDirsRequest.json b/clients/src/main/resources/common/message/AlterReplicaLogDirsRequest.json index c85c61d6e5439..b309243fb62d9 100644 --- a/clients/src/main/resources/common/message/AlterReplicaLogDirsRequest.json +++ b/clients/src/main/resources/common/message/AlterReplicaLogDirsRequest.json @@ -18,10 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "AlterReplicaLogDirsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 is the same as version 0. // Version 2 enables flexible versions. - "validVersions": "0-2", - "deprecatedVersions": "0", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "Dirs", "type": "[]AlterReplicaLogDir", "versions": "0+", diff --git a/clients/src/main/resources/common/message/AlterReplicaLogDirsResponse.json b/clients/src/main/resources/common/message/AlterReplicaLogDirsResponse.json index 386e24e9d8600..d26c9e873f22e 100644 --- a/clients/src/main/resources/common/message/AlterReplicaLogDirsResponse.json +++ b/clients/src/main/resources/common/message/AlterReplicaLogDirsResponse.json @@ -17,9 +17,10 @@ "apiKey": 34, "type": "response", "name": "AlterReplicaLogDirsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation brokers send out responses before throttling. // Version 2 enables flexible versions. - "validVersions": "0-2", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ApiVersionsResponse.json b/clients/src/main/resources/common/message/ApiVersionsResponse.json index c39d7edcf5773..1017f24436047 100644 --- a/clients/src/main/resources/common/message/ApiVersionsResponse.json +++ b/clients/src/main/resources/common/message/ApiVersionsResponse.json @@ -45,10 +45,10 @@ ]}, { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." 
}, - { "name": "SupportedFeatures", "type": "[]SupportedFeatureKey", "ignorable": true, - "versions": "3+", "tag": 0, "taggedVersions": "3+", + { "name": "SupportedFeatures", "type": "[]SupportedFeatureKey", "ignorable": true, + "versions": "3+", "tag": 0, "taggedVersions": "3+", "about": "Features supported by the broker. Note: in v0-v3, features with MinSupportedVersion = 0 are omitted.", - "fields": [ + "fields": [ { "name": "Name", "type": "string", "versions": "3+", "mapKey": true, "about": "The name of the feature." }, { "name": "MinVersion", "type": "int16", "versions": "3+", @@ -59,21 +59,21 @@ }, { "name": "FinalizedFeaturesEpoch", "type": "int64", "versions": "3+", "tag": 1, "taggedVersions": "3+", "default": "-1", "ignorable": true, - "about": "The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch."}, - { "name": "FinalizedFeatures", "type": "[]FinalizedFeatureKey", "ignorable": true, - "versions": "3+", "tag": 2, "taggedVersions": "3+", + "about": "The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch." }, + { "name": "FinalizedFeatures", "type": "[]FinalizedFeatureKey", "ignorable": true, + "versions": "3+", "tag": 2, "taggedVersions": "3+", "about": "List of cluster-wide finalized features. The information is valid only if FinalizedFeaturesEpoch >= 0.", - "fields": [ - {"name": "Name", "type": "string", "versions": "3+", "mapKey": true, - "about": "The name of the feature."}, - {"name": "MaxVersionLevel", "type": "int16", "versions": "3+", - "about": "The cluster-wide finalized max version level for the feature."}, - {"name": "MinVersionLevel", "type": "int16", "versions": "3+", - "about": "The cluster-wide finalized min version level for the feature."} + "fields": [ + { "name": "Name", "type": "string", "versions": "3+", "mapKey": true, + "about": "The name of the feature." }, + { "name": "MaxVersionLevel", "type": "int16", "versions": "3+", + "about": "The cluster-wide finalized max version level for the feature." }, + { "name": "MinVersionLevel", "type": "int16", "versions": "3+", + "about": "The cluster-wide finalized min version level for the feature." } ] }, - { "name": "ZkMigrationReady", "type": "bool", "versions": "3+", "taggedVersions": "3+", + { "name": "ZkMigrationReady", "type": "bool", "versions": "3+", "taggedVersions": "3+", "tag": 3, "ignorable": true, "default": "false", - "about": "Set by a KRaft controller if the required configurations for ZK migration are present" } + "about": "Set by a KRaft controller if the required configurations for ZK migration are present." } ] } diff --git a/clients/src/main/resources/common/message/AssignReplicasToDirsRequest.json b/clients/src/main/resources/common/message/AssignReplicasToDirsRequest.json index b349520745b34..b8eb9a3659653 100644 --- a/clients/src/main/resources/common/message/AssignReplicasToDirsRequest.json +++ b/clients/src/main/resources/common/message/AssignReplicasToDirsRequest.json @@ -22,17 +22,20 @@ "flexibleVersions": "0+", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The ID of the requesting broker" }, + "about": "The ID of the requesting broker." 
}, { "name": "BrokerEpoch", "type": "int64", "versions": "0+", "default": "-1", - "about": "The epoch of the requesting broker" }, - { "name": "Directories", "type": "[]DirectoryData", "versions": "0+", "fields": [ - { "name": "Id", "type": "uuid", "versions": "0+", "about": "The ID of the directory" }, - { "name": "Topics", "type": "[]TopicData", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The ID of the assigned topic" }, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "fields": [ + "about": "The epoch of the requesting broker." }, + { "name": "Directories", "type": "[]DirectoryData", "versions": "0+", + "about": "The directories to which replicas should be assigned.", "fields": [ + { "name": "Id", "type": "uuid", "versions": "0+", "about": "The ID of the directory." }, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topics assigned to the directory.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The ID of the assigned topic." }, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partitions assigned to the directory.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index" } + "about": "The partition index." } ]} ]} ]} diff --git a/clients/src/main/resources/common/message/AssignReplicasToDirsResponse.json b/clients/src/main/resources/common/message/AssignReplicasToDirsResponse.json index 185d379584cfe..04adb87cea31f 100644 --- a/clients/src/main/resources/common/message/AssignReplicasToDirsResponse.json +++ b/clients/src/main/resources/common/message/AssignReplicasToDirsResponse.json @@ -23,17 +23,20 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top level response error code" }, - { "name": "Directories", "type": "[]DirectoryData", "versions": "0+", "fields": [ - { "name": "Id", "type": "uuid", "versions": "0+", "about": "The ID of the directory" }, - { "name": "Topics", "type": "[]TopicData", "versions": "0+", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The ID of the assigned topic" }, - { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "fields": [ + "about": "The top level response error code." }, + { "name": "Directories", "type": "[]DirectoryData", "versions": "0+", + "about": "The list of directories and their assigned partitions.", "fields": [ + { "name": "Id", "type": "uuid", "versions": "0+", "about": "The ID of the directory." }, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The list of topics and their assigned partitions.", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The ID of the assigned topic." }, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The list of assigned partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index" }, + "about": "The partition index." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The partition level error code" } + "about": "The partition level error code." 
} ]} ]} ]} diff --git a/clients/src/main/resources/common/message/BeginQuorumEpochRequest.json b/clients/src/main/resources/common/message/BeginQuorumEpochRequest.json index 9302bd603d6e7..378e2d5756af5 100644 --- a/clients/src/main/resources/common/message/BeginQuorumEpochRequest.json +++ b/clients/src/main/resources/common/message/BeginQuorumEpochRequest.json @@ -23,32 +23,33 @@ "flexibleVersions": "1+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", - "nullableVersions": "0+", "default": "null"}, + "nullableVersions": "0+", "default": "null", + "about": "The cluster id." }, { "name": "VoterId", "type": "int32", "versions": "1+", "ignorable": true, "default": "-1", "entityType": "brokerId", - "about": "The replica id of the voter receiving the request" }, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + "about": "The replica id of the voter receiving the request." }, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topics.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + "about": "The topic name." }, + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", - "about": "The partition index" }, + "about": "The partition index." }, { "name": "VoterDirectoryId", "type": "uuid", "versions": "1+", "ignorable": true, - "about": "The directory id of the receiving replica" }, + "about": "The directory id of the receiving replica." }, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The ID of the newly elected leader"}, + "about": "The ID of the newly elected leader." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The epoch of the newly elected leader"} + "about": "The epoch of the newly elected leader." } ] } ] }, { "name": "LeaderEndpoints", "type": "[]LeaderEndpoint", "versions": "1+", "ignorable": true, - "about": "Endpoints for the leader", "fields": [ - { "name": "Name", "type": "string", "versions": "1+", "mapKey": true, "about": "The name of the endpoint" }, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "about": "Endpoints for the leader.", "fields": [ + { "name": "Name", "type": "string", "versions": "1+", "mapKey": true, "about": "The name of the endpoint." }, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." 
} ] } ] diff --git a/clients/src/main/resources/common/message/BeginQuorumEpochResponse.json b/clients/src/main/resources/common/message/BeginQuorumEpochResponse.json index b8aeba56a45fb..71a6a1f7bb12b 100644 --- a/clients/src/main/resources/common/message/BeginQuorumEpochResponse.json +++ b/clients/src/main/resources/common/message/BeginQuorumEpochResponse.json @@ -23,29 +23,30 @@ "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top level error code."}, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topic data.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partition data.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+"}, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code for this partition."}, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch"} + "about": "The latest known leader epoch."} ] } ] }, { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "1+", "taggedVersions": "1+", "tag": 0, - "about": "Endpoints for all leaders enumerated in PartitionData", "fields": [ + "about": "Endpoints for all leaders enumerated in PartitionData.", "fields": [ { "name": "NodeId", "type": "int32", "versions": "1+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node" }, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." } ] } ] diff --git a/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json b/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json index 9ebdc3707d140..8f574b41fc458 100644 --- a/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json @@ -31,7 +31,7 @@ "about": "True if the broker wants to be fenced, false otherwise." }, { "name": "WantShutDown", "type": "bool", "versions": "0+", "about": "True if the broker wants to be shut down, false otherwise." }, - { "name": "OfflineLogDirs", "type": "[]uuid", "versions": "1+", "taggedVersions": "1+", "tag": "0", + { "name": "OfflineLogDirs", "type": "[]uuid", "versions": "1+", "taggedVersions": "1+", "tag": 0, "about": "Log directories that failed and went offline." 
} ] } diff --git a/clients/src/main/resources/common/message/BrokerRegistrationRequest.json b/clients/src/main/resources/common/message/BrokerRegistrationRequest.json index 0c5153e64e79c..8a9348596edf5 100644 --- a/clients/src/main/resources/common/message/BrokerRegistrationRequest.json +++ b/clients/src/main/resources/common/message/BrokerRegistrationRequest.json @@ -36,7 +36,7 @@ { "name": "IncarnationId", "type": "uuid", "versions": "0+", "about": "The incarnation id of the broker process." }, { "name": "Listeners", "type": "[]Listener", - "about": "The listeners of this broker", "versions": "0+", "fields": [ + "about": "The listeners of this broker.", "versions": "0+", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "0+", @@ -60,7 +60,7 @@ { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "about": "The rack which this broker is in." }, { "name": "IsMigratingZkBroker", "type": "bool", "versions": "1+", "default": "false", - "about": "If the required configurations for ZK migration are present, this value is set to true" }, + "about": "If the required configurations for ZK migration are present, this value is set to true." }, { "name": "LogDirs", "type": "[]uuid", "versions": "2+", "about": "Log directories configured in this broker which are available.", "ignorable": true }, { "name": "PreviousBrokerEpoch", "type": "int64", "versions": "3+", "default": "-1", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json b/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json index a581d15dee326..1530167aeaef7 100644 --- a/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json +++ b/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json @@ -18,11 +18,13 @@ "type": "request", "listeners": ["broker"], "name": "ConsumerGroupDescribeRequest", - "validVersions": "0", + // Version 1 adds MemberType field to ConsumerGroupDescribeResponse (KIP-1099). + // For ConsumerGroupDescribeRequest, version 1 is same as version 0. + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", - "about": "The ids of the groups to describe" }, + "about": "The ids of the groups to describe." }, { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", "about": "Whether to include authorized operations." } ] diff --git a/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json b/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json index 3c6ed4e78de39..14d80e20ce2f5 100644 --- a/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json +++ b/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json @@ -17,7 +17,8 @@ "apiKey": 69, "type": "response", "name": "ConsumerGroupDescribeResponse", - "validVersions": "0", + // Version 1 adds MemberType field (KIP-1099). + "validVersions": "0-1", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -69,7 +70,9 @@ { "name": "Assignment", "type": "Assignment", "versions": "0+", "about": "The current assignment." }, { "name": "TargetAssignment", "type": "Assignment", "versions": "0+", - "about": "The target assignment." } + "about": "The target assignment." 
}, + { "name": "MemberType", "type": "int8", "versions": "1+", "default": "-1", "ignorable": true, + "about": "-1 for unknown. 0 for classic member. +1 for consumer member." } ]}, { "name": "AuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", "about": "32-bit bitfield to represent authorized operations for this group." } diff --git a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json index fbe680b208b58..c97bf4eb0b22d 100644 --- a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatRequest.json @@ -21,7 +21,6 @@ // Version 1 adds SubscribedTopicRegex (KIP-848), and requires the consumer to generate their own Member ID (KIP-1082) "validVersions": "0-1", "flexibleVersions": "0+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier." }, @@ -38,7 +37,7 @@ { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "topicName", "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." }, { "name": "SubscribedTopicRegex", "type": "string", "versions": "1+", "nullableVersions": "1+", "default": "null", - "about": "null if it didn't change since the last heartbeat; the subscribed topic regex otherwise" }, + "about": "null if it didn't change since the last heartbeat; the subscribed topic regex otherwise." }, { "name": "ServerAssignor", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "null if not used or if it didn't change since the last heartbeat; the server side assignor to use otherwise." }, { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", "nullableVersions": "0+", "default": "null", diff --git a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatResponse.json index 5ccf7675c52d8..956cfab8262d8 100644 --- a/clients/src/main/resources/common/message/ConsumerGroupHeartbeatResponse.json +++ b/clients/src/main/resources/common/message/ConsumerGroupHeartbeatResponse.json @@ -35,7 +35,7 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, + "about": "The top-level error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." 
}, { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", diff --git a/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json b/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json index fe07aaeadfff2..afa02a1bb0fe3 100644 --- a/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json +++ b/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json @@ -28,12 +28,15 @@ "flexibleVersions": "none", "fields": [ { "name": "AssignedPartitions", "type": "[]TopicPartition", "versions": "0+", - "fields": [ - { "name": "Topic", "type": "string", "mapKey": true, "versions": "0+", "entityType": "topicName" }, - { "name": "Partitions", "type": "[]int32", "versions": "0+" } + "about": "The list of topics and partitions assigned to this consumer.", "fields": [ + { "name": "Topic", "type": "string", "mapKey": true, "versions": "0+", "entityType": "topicName", + "about": "The topic name."}, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The list of partitions assigned to this consumer."} ] }, { "name": "UserData", "type": "bytes", "versions": "0+", "nullableVersions": "0+", - "default": "null", "zeroCopy": true } + "default": "null", "zeroCopy": true, + "about": "User data."} ] } diff --git a/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json b/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json index 49801c65f771b..ae7aa2b2b5aa7 100644 --- a/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json +++ b/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json @@ -28,16 +28,22 @@ "validVersions": "0-3", "flexibleVersions": "none", "fields": [ - { "name": "Topics", "type": "[]string", "versions": "0+" }, + { "name": "Topics", "type": "[]string", "versions": "0+", + "about": "The topics that the member wants to consume."}, { "name": "UserData", "type": "bytes", "versions": "0+", "nullableVersions": "0+", - "default": "null", "zeroCopy": true }, + "default": "null", "zeroCopy": true, + "about": "User data that will be passed back to the consumer."}, { "name": "OwnedPartitions", "type": "[]TopicPartition", "versions": "1+", "ignorable": true, - "fields": [ - { "name": "Topic", "type": "string", "mapKey": true, "versions": "1+", "entityType": "topicName" }, - { "name": "Partitions", "type": "[]int32", "versions": "1+"} + "about": "The partitions that the member owns.", "fields": [ + { "name": "Topic", "type": "string", "mapKey": true, "versions": "1+", "entityType": "topicName", + "about": "The topic name."}, + { "name": "Partitions", "type": "[]int32", "versions": "1+", + "about": "The partition ids."} ] }, - { "name": "GenerationId", "type": "int32", "versions": "2+", "default": "-1", "ignorable": true }, - { "name": "RackId", "type": "string", "versions": "3+", "nullableVersions": "3+", "default": "null", "ignorable": true } + { "name": "GenerationId", "type": "int32", "versions": "2+", "default": "-1", "ignorable": true, + "about": "The generation id of the member."}, + { "name": "RackId", "type": "string", "versions": "3+", "nullableVersions": "3+", "default": "null", "ignorable": true, + "about": "The rack id of the member."} ] } diff --git a/clients/src/main/resources/common/message/ControllerRegistrationRequest.json b/clients/src/main/resources/common/message/ControllerRegistrationRequest.json index 56647a0d4b720..8f3112dfcda77 100644 --- 
a/clients/src/main/resources/common/message/ControllerRegistrationRequest.json +++ b/clients/src/main/resources/common/message/ControllerRegistrationRequest.json @@ -28,7 +28,7 @@ { "name": "ZkMigrationReady", "type": "bool", "versions": "0+", "about": "Set if the required configurations for ZK migration are present." }, { "name": "Listeners", "type": "[]Listener", - "about": "The listeners of this controller", "versions": "0+", "fields": [ + "about": "The listeners of this controller.", "versions": "0+", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "0+", @@ -39,7 +39,7 @@ "about": "The security protocol." } ]}, { "name": "Features", "type": "[]Feature", - "about": "The features on this controller", "versions": "0+", "fields": [ + "about": "The features on this controller.", "versions": "0+", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "about": "The feature name." }, { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", diff --git a/clients/src/main/resources/common/message/CreateAclsRequest.json b/clients/src/main/resources/common/message/CreateAclsRequest.json index df1f49d29c480..0f4582030747f 100644 --- a/clients/src/main/resources/common/message/CreateAclsRequest.json +++ b/clients/src/main/resources/common/message/CreateAclsRequest.json @@ -18,11 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "CreateAclsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds resource pattern type. // Version 2 enables flexible versions. // Version 3 adds user resource type. - "validVersions": "0-3", - "deprecatedVersions": "0", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "Creations", "type": "[]AclCreation", "versions": "0+", diff --git a/clients/src/main/resources/common/message/CreateAclsResponse.json b/clients/src/main/resources/common/message/CreateAclsResponse.json index da1632c03b3b9..0e3168794687e 100644 --- a/clients/src/main/resources/common/message/CreateAclsResponse.json +++ b/clients/src/main/resources/common/message/CreateAclsResponse.json @@ -17,10 +17,11 @@ "apiKey": 30, "type": "response", "name": "CreateAclsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 enables flexible versions. // Version 3 adds user resource type. - "validVersions": "0-3", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/CreateDelegationTokenRequest.json b/clients/src/main/resources/common/message/CreateDelegationTokenRequest.json index 3978c55ecf20d..276cbe57901f5 100644 --- a/clients/src/main/resources/common/message/CreateDelegationTokenRequest.json +++ b/clients/src/main/resources/common/message/CreateDelegationTokenRequest.json @@ -18,13 +18,14 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "CreateDelegationTokenRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 is the same as version 0. // // Version 2 is the first flexible version. 
// // Version 3 adds owner principal - "validVersions": "0-3", - "deprecatedVersions": "0", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "OwnerPrincipalType", "type": "string", "versions": "3+", "nullableVersions": "3+", diff --git a/clients/src/main/resources/common/message/CreateDelegationTokenResponse.json b/clients/src/main/resources/common/message/CreateDelegationTokenResponse.json index bf8be5573bc0b..0067cac44ccdc 100644 --- a/clients/src/main/resources/common/message/CreateDelegationTokenResponse.json +++ b/clients/src/main/resources/common/message/CreateDelegationTokenResponse.json @@ -17,12 +17,14 @@ "apiKey": 38, "type": "response", "name": "CreateDelegationTokenResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Starting in version 1, on quota violation, brokers send out responses before throttling. // // Version 2 is the first flexible version. // // Version 3 adds token requester details - "validVersions": "0-3", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", diff --git a/clients/src/main/resources/common/message/CreateTopicsRequest.json b/clients/src/main/resources/common/message/CreateTopicsRequest.json index 65f43ef3c8958..9aed4d236dbf0 100644 --- a/clients/src/main/resources/common/message/CreateTopicsRequest.json +++ b/clients/src/main/resources/common/message/CreateTopicsRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "CreateTopicsRequest", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 adds validateOnly. // // Version 4 makes partitions/replicationFactor optional even when assignments are not present (KIP-464) @@ -29,8 +31,7 @@ // in the response if the topics creation is throttled (KIP-599). // // Version 7 is the same as version 6. - "validVersions": "0-7", - "deprecatedVersions": "0-1", + "validVersions": "2-7", "flexibleVersions": "5+", "fields": [ { "name": "Topics", "type": "[]CreatableTopic", "versions": "0+", diff --git a/clients/src/main/resources/common/message/CreateTopicsResponse.json b/clients/src/main/resources/common/message/CreateTopicsResponse.json index 00fa348a31a28..94e728d08bca5 100644 --- a/clients/src/main/resources/common/message/CreateTopicsResponse.json +++ b/clients/src/main/resources/common/message/CreateTopicsResponse.json @@ -17,6 +17,8 @@ "apiKey": 19, "type": "response", "name": "CreateTopicsResponse", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 adds a per-topic error message string. // // Version 2 adds the throttle time. @@ -32,7 +34,7 @@ // in the response if the topics creation is throttled (KIP-599). // // Version 7 returns the topic ID of the newly created topic if creation is successful. - "validVersions": "0-7", + "validVersions": "2-7", "flexibleVersions": "5+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, @@ -41,7 +43,7 @@ "about": "Results for each topic we tried to create.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "entityType": "topicName", "about": "The topic name." 
}, - { "name": "TopicId", "type": "uuid", "versions": "7+", "ignorable": true, "about": "The unique topic ID"}, + { "name": "TopicId", "type": "uuid", "versions": "7+", "ignorable": true, "about": "The unique topic ID."}, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "1+", "nullableVersions": "0+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/DefaultPrincipalData.json b/clients/src/main/resources/common/message/DefaultPrincipalData.json index e06295d1783f8..08cedbb9bd6d0 100644 --- a/clients/src/main/resources/common/message/DefaultPrincipalData.json +++ b/clients/src/main/resources/common/message/DefaultPrincipalData.json @@ -21,11 +21,11 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - {"name": "Type", "type": "string", "versions": "0+", - "about": "The principal type"}, - {"name": "Name", "type": "string", "versions": "0+", - "about": "The principal name"}, - {"name": "TokenAuthenticated", "type": "bool", "versions": "0+", + { "name": "Type", "type": "string", "versions": "0+", + "about": "The principal type."}, + { "name": "Name", "type": "string", "versions": "0+", + "about": "The principal name."}, + { "name": "TokenAuthenticated", "type": "bool", "versions": "0+", "about": "Whether the principal was authenticated by a delegation token on the forwarding broker."} ] } diff --git a/clients/src/main/resources/common/message/DeleteAclsRequest.json b/clients/src/main/resources/common/message/DeleteAclsRequest.json index 38ec5324824bd..b430364d8611b 100644 --- a/clients/src/main/resources/common/message/DeleteAclsRequest.json +++ b/clients/src/main/resources/common/message/DeleteAclsRequest.json @@ -18,11 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "DeleteAclsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds the pattern type. // Version 2 enables flexible versions. // Version 3 adds the user resource type. - "validVersions": "0-3", - "deprecatedVersions": "0", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "Filters", "type": "[]DeleteAclsFilter", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteAclsResponse.json b/clients/src/main/resources/common/message/DeleteAclsResponse.json index e00969df7a70b..516d589cb22d8 100644 --- a/clients/src/main/resources/common/message/DeleteAclsResponse.json +++ b/clients/src/main/resources/common/message/DeleteAclsResponse.json @@ -17,11 +17,12 @@ "apiKey": 31, "type": "response", "name": "DeleteAclsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds the resource pattern type. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 enables flexible versions. // Version 3 adds the user resource type. 
- "validVersions": "0-3", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteGroupsRequest.json b/clients/src/main/resources/common/message/DeleteGroupsRequest.json index 75dda36ee1a03..1ac6a053e63b3 100644 --- a/clients/src/main/resources/common/message/DeleteGroupsRequest.json +++ b/clients/src/main/resources/common/message/DeleteGroupsRequest.json @@ -22,7 +22,6 @@ // // Version 2 is the first flexible version. "validVersions": "0-2", - "deprecatedVersions": "0", "flexibleVersions": "2+", "fields": [ { "name": "GroupsNames", "type": "[]string", "versions": "0+", "entityType": "groupId", diff --git a/clients/src/main/resources/common/message/DeleteGroupsResponse.json b/clients/src/main/resources/common/message/DeleteGroupsResponse.json index 37e06a55b9913..168cde03ba341 100644 --- a/clients/src/main/resources/common/message/DeleteGroupsResponse.json +++ b/clients/src/main/resources/common/message/DeleteGroupsResponse.json @@ -26,9 +26,9 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "Results", "type": "[]DeletableGroupResult", "versions": "0+", - "about": "The deletion results", "fields": [ + "about": "The deletion results.", "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "mapKey": true, "entityType": "groupId", - "about": "The group id" }, + "about": "The group id." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The deletion error, or 0 if the deletion succeeded." } ]} diff --git a/clients/src/main/resources/common/message/DeleteShareGroupStateResponse.json b/clients/src/main/resources/common/message/DeleteShareGroupStateResponse.json index e0a0e6c935729..3ffc459b7cf2f 100644 --- a/clients/src/main/resources/common/message/DeleteShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/DeleteShareGroupStateResponse.json @@ -28,9 +28,9 @@ // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]DeleteStateResult", "versions": "0+", - "about": "The delete results", "fields": [ + "about": "The delete results.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic identifier" }, + "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionResult", "versions": "0+", "about" : "The results for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteTopicsRequest.json b/clients/src/main/resources/common/message/DeleteTopicsRequest.json index 917e7ee2c739f..465d9e0b31f4e 100644 --- a/clients/src/main/resources/common/message/DeleteTopicsRequest.json +++ b/clients/src/main/resources/common/message/DeleteTopicsRequest.json @@ -18,6 +18,7 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "DeleteTopicsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Versions 0, 1, 2, and 3 are the same. // // Version 4 is the first flexible version. @@ -26,17 +27,16 @@ // in the response if the topics deletion is throttled (KIP-599). // // Version 6 reorganizes topics, adds topic IDs and allows topic names to be null. 
- "validVersions": "0-6", - "deprecatedVersions": "0", + "validVersions": "1-6", "flexibleVersions": "4+", "fields": [ - { "name": "Topics", "type": "[]DeleteTopicState", "versions": "6+", "about": "The name or topic ID of the topic", + { "name": "Topics", "type": "[]DeleteTopicState", "versions": "6+", "about": "The name or topic ID of the topic.", "fields": [ - {"name": "Name", "type": "string", "versions": "6+", "nullableVersions": "6+", "default": "null", "entityType": "topicName", "about": "The topic name"}, - {"name": "TopicId", "type": "uuid", "versions": "6+", "about": "The unique topic ID"} + {"name": "Name", "type": "string", "versions": "6+", "nullableVersions": "6+", "default": "null", "entityType": "topicName", "about": "The topic name."}, + {"name": "TopicId", "type": "uuid", "versions": "6+", "about": "The unique topic ID."} ]}, { "name": "TopicNames", "type": "[]string", "versions": "0-5", "entityType": "topicName", "ignorable": true, - "about": "The names of the topics to delete" }, + "about": "The names of the topics to delete." }, { "name": "TimeoutMs", "type": "int32", "versions": "0+", "about": "The length of time in milliseconds to wait for the deletions to complete." } ] diff --git a/clients/src/main/resources/common/message/DeleteTopicsResponse.json b/clients/src/main/resources/common/message/DeleteTopicsResponse.json index 19a81630463d5..a31d41c2bf46c 100644 --- a/clients/src/main/resources/common/message/DeleteTopicsResponse.json +++ b/clients/src/main/resources/common/message/DeleteTopicsResponse.json @@ -17,6 +17,8 @@ "apiKey": 20, "type": "response", "name": "DeleteTopicsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 adds the throttle time. // // Starting in version 2, on quota violation, brokers send out responses before throttling. @@ -31,7 +33,7 @@ // Version 6 adds topic ID to responses. An UNSUPPORTED_VERSION error code will be returned when attempting to // delete using topic IDs when IBP < 2.8. UNKNOWN_TOPIC_ID error code will be returned when IBP is at least 2.8, but // the topic ID was not found. - "validVersions": "0-6", + "validVersions": "1-6", "flexibleVersions": "4+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, @@ -39,8 +41,8 @@ { "name": "Responses", "type": "[]DeletableTopicResult", "versions": "0+", "about": "The results for each topic we tried to delete.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "nullableVersions": "6+", "mapKey": true, "entityType": "topicName", - "about": "The topic name" }, - {"name": "TopicId", "type": "uuid", "versions": "6+", "ignorable": true, "about": "the unique topic ID"}, + "about": "The topic name." }, + {"name": "TopicId", "type": "uuid", "versions": "6+", "ignorable": true, "about": "The unique topic ID."}, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The deletion error, or 0 if the deletion succeeded." 
}, { "name": "ErrorMessage", "type": "string", "versions": "5+", "nullableVersions": "5+", "ignorable": true, "default": "null", diff --git a/clients/src/main/resources/common/message/DescribeAclsRequest.json b/clients/src/main/resources/common/message/DescribeAclsRequest.json index 89b50170b0c24..a9bdfba40e7fb 100644 --- a/clients/src/main/resources/common/message/DescribeAclsRequest.json +++ b/clients/src/main/resources/common/message/DescribeAclsRequest.json @@ -18,11 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "DescribeAclsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds resource pattern type. // Version 2 enables flexible versions. // Version 3 adds user resource type. - "validVersions": "0-3", - "deprecatedVersions": "0", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ResourceTypeFilter", "type": "int8", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeAclsResponse.json b/clients/src/main/resources/common/message/DescribeAclsResponse.json index 19de109445846..e11ce4658299e 100644 --- a/clients/src/main/resources/common/message/DescribeAclsResponse.json +++ b/clients/src/main/resources/common/message/DescribeAclsResponse.json @@ -17,11 +17,12 @@ "apiKey": 29, "type": "response", "name": "DescribeAclsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds PatternType. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 enables flexible versions. // Version 3 adds user resource type. - "validVersions": "0-3", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeClientQuotasResponse.json b/clients/src/main/resources/common/message/DescribeClientQuotasResponse.json index 0dd0c9c7bf831..87d7ed6e29411 100644 --- a/clients/src/main/resources/common/message/DescribeClientQuotasResponse.json +++ b/clients/src/main/resources/common/message/DescribeClientQuotasResponse.json @@ -36,7 +36,7 @@ "about": "The entity name, or null if the default." } ]}, { "name": "Values", "type": "[]ValueData", "versions": "0+", - "about": "The quota values for the entity.", "fields": [ + "about": "The quota values for the entity.", "fields": [ { "name": "Key", "type": "string", "versions": "0+", "about": "The quota configuration key." }, { "name": "Value", "type": "float64", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeClusterRequest.json b/clients/src/main/resources/common/message/DescribeClusterRequest.json index 34ebe013bb1a0..71e00df09b2f9 100644 --- a/clients/src/main/resources/common/message/DescribeClusterRequest.json +++ b/clients/src/main/resources/common/message/DescribeClusterRequest.json @@ -20,13 +20,16 @@ "name": "DescribeClusterRequest", // // Version 1 adds EndpointType for KIP-919 support. + // Version 2 adds IncludeFencedBrokers for KIP-1073 support. // - "validVersions": "0-1", + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "IncludeClusterAuthorizedOperations", "type": "bool", "versions": "0+", "about": "Whether to include cluster authorized operations." }, { "name": "EndpointType", "type": "int8", "versions": "1+", "default": "1", - "about": "The endpoint type to describe. 1=brokers, 2=controllers." 
} + "about": "The endpoint type to describe. 1=brokers, 2=controllers." }, + { "name": "IncludeFencedBrokers", "type": "bool", "versions": "2+", + "about": "Whether to include fenced brokers when listing brokers." } ] } diff --git a/clients/src/main/resources/common/message/DescribeClusterResponse.json b/clients/src/main/resources/common/message/DescribeClusterResponse.json index 6cccd1d26c471..a17e427c8c3e2 100644 --- a/clients/src/main/resources/common/message/DescribeClusterResponse.json +++ b/clients/src/main/resources/common/message/DescribeClusterResponse.json @@ -20,14 +20,15 @@ // // Version 1 adds the EndpointType field, and makes MISMATCHED_ENDPOINT_TYPE and // UNSUPPORTED_ENDPOINT_TYPE valid top-level response error codes. + // Version 2 adds IsFenced field to Brokers for KIP-1073 support. // - "validVersions": "0-1", + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, + "about": "The top-level error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, { "name": "EndpointType", "type": "int8", "versions": "1+", "default": "1", @@ -45,7 +46,9 @@ { "name": "Port", "type": "int32", "versions": "0+", "about": "The broker port." }, { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the broker, or null if it has not been assigned to a rack." } + "about": "The rack of the broker, or null if it has not been assigned to a rack." }, + { "name": "IsFenced", "type": "bool", "versions": "2+", + "about": "Whether the broker is fenced" } ]}, { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", "about": "32-bit bitfield to represent authorized operations for this cluster." } diff --git a/clients/src/main/resources/common/message/DescribeConfigsRequest.json b/clients/src/main/resources/common/message/DescribeConfigsRequest.json index ab2780b1a37d0..a382d9fecaf44 100644 --- a/clients/src/main/resources/common/message/DescribeConfigsRequest.json +++ b/clients/src/main/resources/common/message/DescribeConfigsRequest.json @@ -18,11 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "DescribeConfigsRequest", - // Version 1 adds IncludeSynonyms. + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // Version 1 adds IncludeSynonyms and removes IsDefault. // Version 2 is the same as version 1. // Version 4 enables flexible versions. 
- "validVersions": "0-4", - "deprecatedVersions": "0", + "validVersions": "1-4", "flexibleVersions": "4+", "fields": [ { "name": "Resources", "type": "[]DescribeConfigsResource", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeConfigsResponse.json b/clients/src/main/resources/common/message/DescribeConfigsResponse.json index f2f57ad1a7367..b7127b69f165c 100644 --- a/clients/src/main/resources/common/message/DescribeConfigsResponse.json +++ b/clients/src/main/resources/common/message/DescribeConfigsResponse.json @@ -17,10 +17,11 @@ "apiKey": 32, "type": "response", "name": "DescribeConfigsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 adds ConfigSource and the synonyms. // Starting in version 2, on quota violation, brokers send out responses before throttling. // Version 4 enables flexible versions. - "validVersions": "0-4", + "validVersions": "1-4", "flexibleVersions": "4+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", @@ -43,11 +44,6 @@ "about": "The configuration value." }, { "name": "ReadOnly", "type": "bool", "versions": "0+", "about": "True if the configuration is read-only." }, - { "name": "IsDefault", "type": "bool", "versions": "0", - "about": "True if the configuration is not set." }, - // Note: the v0 default for this field that should be exposed to callers is - // context-dependent. For example, if the resource is a broker, this should default to 4. - // -1 is just a placeholder value. { "name": "ConfigSource", "type": "int8", "versions": "1+", "default": "-1", "ignorable": true, "about": "The configuration source." }, { "name": "IsSensitive", "type": "bool", "versions": "0+", @@ -62,7 +58,7 @@ "about": "The synonym source." } ]}, { "name": "ConfigType", "type": "int8", "versions": "3+", "default": "0", "ignorable": true, - "about": "The configuration data type. Type can be one of the following values - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD" }, + "about": "The configuration data type. Type can be one of the following values - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD." }, { "name": "Documentation", "type": "string", "versions": "3+", "nullableVersions": "0+", "ignorable": true, "about": "The configuration documentation." } ]} diff --git a/clients/src/main/resources/common/message/DescribeDelegationTokenRequest.json b/clients/src/main/resources/common/message/DescribeDelegationTokenRequest.json index 80d58d750c905..d62eb28a29fb4 100644 --- a/clients/src/main/resources/common/message/DescribeDelegationTokenRequest.json +++ b/clients/src/main/resources/common/message/DescribeDelegationTokenRequest.json @@ -18,11 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "DescribeDelegationTokenRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 is the same as version 0. 
// Version 2 adds flexible version support // Version 3 adds token requester into the response - "validVersions": "0-3", - "deprecatedVersions": "0", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "Owners", "type": "[]DescribeDelegationTokenOwner", "versions": "0+", "nullableVersions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeDelegationTokenResponse.json b/clients/src/main/resources/common/message/DescribeDelegationTokenResponse.json index 3258164caf552..a5ec404584e70 100644 --- a/clients/src/main/resources/common/message/DescribeDelegationTokenResponse.json +++ b/clients/src/main/resources/common/message/DescribeDelegationTokenResponse.json @@ -17,10 +17,11 @@ "apiKey": 41, "type": "response", "name": "DescribeDelegationTokenResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 adds flexible version support // Version 3 adds token requester details - "validVersions": "0-3", + "validVersions": "1-3", "flexibleVersions": "2+", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", @@ -48,9 +49,9 @@ { "name": "Renewers", "type": "[]DescribedDelegationTokenRenewer", "versions": "0+", "about": "Those who are able to renew this token before it expires.", "fields": [ { "name": "PrincipalType", "type": "string", "versions": "0+", - "about": "The renewer principal type" }, + "about": "The renewer principal type." }, { "name": "PrincipalName", "type": "string", "versions": "0+", - "about": "The renewer principal name" } + "about": "The renewer principal name." } ]} ]}, { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeGroupsRequest.json b/clients/src/main/resources/common/message/DescribeGroupsRequest.json index 6b10b0637a205..8dabf71bd5255 100644 --- a/clients/src/main/resources/common/message/DescribeGroupsRequest.json +++ b/clients/src/main/resources/common/message/DescribeGroupsRequest.json @@ -25,11 +25,13 @@ // Starting in version 4, the response will include group.instance.id info for members. // // Version 5 is the first flexible version. - "validVersions": "0-5", + // + // Version 6 returns error code GROUP_ID_NOT_FOUND if the group ID is not found (KIP-1043). + "validVersions": "0-6", "flexibleVersions": "5+", "fields": [ { "name": "Groups", "type": "[]string", "versions": "0+", "entityType": "groupId", - "about": "The names of the groups to describe" }, + "about": "The names of the groups to describe." }, { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "3+", "about": "Whether to include authorized operations." } ] diff --git a/clients/src/main/resources/common/message/DescribeGroupsResponse.json b/clients/src/main/resources/common/message/DescribeGroupsResponse.json index 99ef4203e06f4..2101fd7ca382c 100644 --- a/clients/src/main/resources/common/message/DescribeGroupsResponse.json +++ b/clients/src/main/resources/common/message/DescribeGroupsResponse.json @@ -26,7 +26,9 @@ // Starting in version 4, the response will optionally include group.instance.id info for members. // // Version 5 is the first flexible version. - "validVersions": "0-5", + // + // Version 6 returns error code GROUP_ID_NOT_FOUND if the group ID is not found (KIP-1043). 
+ "validVersions": "0-6", "flexibleVersions": "5+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, @@ -35,6 +37,8 @@ "about": "Each described group.", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The describe error, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "6+", "nullableVersions": "6+", "default": "null", + "about": "The describe error message, or null if there was no error." }, { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group ID string." }, { "name": "GroupState", "type": "string", "versions": "0+", @@ -48,7 +52,7 @@ { "name": "Members", "type": "[]DescribedGroupMember", "versions": "0+", "about": "The group members.", "fields": [ { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member id" }, + "about": "The member id." }, { "name": "GroupInstanceId", "type": "string", "versions": "4+", "ignorable": true, "nullableVersions": "4+", "default": "null", "about": "The unique identifier of the consumer instance provided by end user." }, diff --git a/clients/src/main/resources/common/message/DescribeLogDirsRequest.json b/clients/src/main/resources/common/message/DescribeLogDirsRequest.json index 1d467eb24e1c0..115947ff394b2 100644 --- a/clients/src/main/resources/common/message/DescribeLogDirsRequest.json +++ b/clients/src/main/resources/common/message/DescribeLogDirsRequest.json @@ -18,18 +18,18 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "DescribeLogDirsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 is the same as version 0. // Version 2 is the first flexible version. // Version 3 is the same as version 2 (new field in response). // Version 4 is the same as version 2 (new fields in response). - "validVersions": "0-4", - "deprecatedVersions": "0", + "validVersions": "1-4", "flexibleVersions": "2+", "fields": [ { "name": "Topics", "type": "[]DescribableLogDirTopic", "versions": "0+", "nullableVersions": "0+", "about": "Each topic that we want to describe log directories for, or null for all topics.", "fields": [ { "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, - "about": "The topic name" }, + "about": "The topic name." }, { "name": "Partitions", "type": "[]int32", "versions": "0+", "about": "The partition indexes." } ]} diff --git a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json index fec69d17a030c..d05785fe8d862 100644 --- a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json +++ b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json @@ -17,11 +17,12 @@ "apiKey": 35, "type": "response", "name": "DescribeLogDirsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation, brokers send out responses before throttling. - "validVersions": "0-4", // Version 2 is the first flexible version. // Version 3 adds the top-level ErrorCode field // Version 4 adds the TotalBytes and UsableBytes fields + "validVersions": "1-4", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", @@ -35,19 +36,19 @@ { "name": "LogDir", "type": "string", "versions": "0+", "about": "The absolute log directory path." 
}, { "name": "Topics", "type": "[]DescribeLogDirsTopic", "versions": "0+", - "about": "Each topic.", "fields": [ + "about": "The topics.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]DescribeLogDirsPartition", "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]DescribeLogDirsPartition", "versions": "0+", + "about": "The partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "PartitionSize", "type": "int64", "versions": "0+", "about": "The size of the log segments in this partition in bytes." }, { "name": "OffsetLag", "type": "int64", "versions": "0+", - "about": "The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition)" }, + "about": "The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or current replica's LEO (if it is the future log for the partition)." }, { "name": "IsFutureKey", "type": "bool", "versions": "0+", - "about": "True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future." } - ]} + "about": "True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future." }]} ]}, { "name": "TotalBytes", "type": "int64", "versions": "4+", "ignorable": true, "default": "-1", "about": "The total size in bytes of the volume the log directory is in." diff --git a/clients/src/main/resources/common/message/DescribeProducersRequest.json b/clients/src/main/resources/common/message/DescribeProducersRequest.json index 0e3813bb02014..7a54c65622da6 100644 --- a/clients/src/main/resources/common/message/DescribeProducersRequest.json +++ b/clients/src/main/resources/common/message/DescribeProducersRequest.json @@ -21,7 +21,8 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": "Topics", "type": "[]TopicRequest", "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicRequest", "versions": "0+", + "about": "The topics to list producers for.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, { "name": "PartitionIndexes", "type": "[]int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DescribeProducersResponse.json b/clients/src/main/resources/common/message/DescribeProducersResponse.json index c456ee4fb985f..4de4292f38a8d 100644 --- a/clients/src/main/resources/common/message/DescribeProducersResponse.json +++ b/clients/src/main/resources/common/message/DescribeProducersResponse.json @@ -25,7 +25,7 @@ { "name": "Topics", "type": "[]TopicResponse", "versions": "0+", "about": "Each topic in the response.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, + "about": "The topic name." }, { "name": "Partitions", "type": "[]PartitionResponse", "versions": "0+", "about": "Each partition in the response.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -33,14 +33,21 @@ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The partition error code, or 0 if there was no error." 
}, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The partition error message, which may be null if no additional details are available" }, - { "name": "ActiveProducers", "type": "[]ProducerState", "versions": "0+", "fields": [ - { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId" }, - { "name": "ProducerEpoch", "type": "int32", "versions": "0+" }, - { "name": "LastSequence", "type": "int32", "versions": "0+", "default": "-1" }, - { "name": "LastTimestamp", "type": "int64", "versions": "0+", "default": "-1" }, - { "name": "CoordinatorEpoch", "type": "int32", "versions": "0+" }, - { "name": "CurrentTxnStartOffset", "type": "int64", "versions": "0+", "default": "-1" } + "about": "The partition error message, which may be null if no additional details are available." }, + { "name": "ActiveProducers", "type": "[]ProducerState", "versions": "0+", + "about": "The active producers for the partition.", "fields": [ + { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId", + "about": "The producer id."}, + { "name": "ProducerEpoch", "type": "int32", "versions": "0+", + "about": "The producer epoch."}, + { "name": "LastSequence", "type": "int32", "versions": "0+", "default": "-1", + "about": "The last sequence number sent by the producer."}, + { "name": "LastTimestamp", "type": "int64", "versions": "0+", "default": "-1", + "about": "The last timestamp sent by the producer."}, + { "name": "CoordinatorEpoch", "type": "int32", "versions": "0+", + "about": "The current epoch of the producer group."}, + { "name": "CurrentTxnStartOffset", "type": "int64", "versions": "0+", "default": "-1", + "about": "The current transaction start offset of the producer."} ]} ]} ]} diff --git a/clients/src/main/resources/common/message/DescribeQuorumRequest.json b/clients/src/main/resources/common/message/DescribeQuorumRequest.json index 86d9975f7448a..7b9ee5a2328ec 100644 --- a/clients/src/main/resources/common/message/DescribeQuorumRequest.json +++ b/clients/src/main/resources/common/message/DescribeQuorumRequest.json @@ -24,12 +24,12 @@ "flexibleVersions": "0+", "latestVersionUnstable": false, "fields": [ - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topics to describe.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partitions to describe.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." } ] diff --git a/clients/src/main/resources/common/message/DescribeQuorumResponse.json b/clients/src/main/resources/common/message/DescribeQuorumResponse.json index e0be61781f5fc..18b6bc32ce030 100644 --- a/clients/src/main/resources/common/message/DescribeQuorumResponse.json +++ b/clients/src/main/resources/common/message/DescribeQuorumResponse.json @@ -26,50 +26,57 @@ "about": "The top level error code."}, { "name": "ErrorMessage", "type": "string", "versions": "2+", "nullableVersions": "2+", "ignorable": true, "about": "The error message, or null if there was no error." 
}, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The response from the describe quorum API.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partition data.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+"}, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The partition error code."}, { "name": "ErrorMessage", "type": "string", "versions": "2+", "nullableVersions": "2+", "ignorable": true, "about": "The error message, or null if there was no error." }, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch"}, - { "name": "HighWatermark", "type": "int64", "versions": "0+"}, - { "name": "CurrentVoters", "type": "[]ReplicaState", "versions": "0+" }, - { "name": "Observers", "type": "[]ReplicaState", "versions": "0+" } + "about": "The latest known leader epoch."}, + { "name": "HighWatermark", "type": "int64", "versions": "0+", + "about": "The high water mark."}, + { "name": "CurrentVoters", "type": "[]ReplicaState", "versions": "0+", + "about": "The current voters of the partition."}, + { "name": "Observers", "type": "[]ReplicaState", "versions": "0+", + "about": "The observers of the partition."} ]} ]}, - { "name": "Nodes", "type": "[]Node", "versions": "2+", "fields": [ + { "name": "Nodes", "type": "[]Node", "versions": "2+", + "about": "The nodes in the quorum.", "fields": [ { "name": "NodeId", "type": "int32", "versions": "2+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node" }, + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, { "name": "Listeners", "type": "[]Listener", - "about": "The listeners of this controller", "versions": "2+", "fields": [ + "about": "The listeners of this controller.", "versions": "2+", "fields": [ { "name": "Name", "type": "string", "versions": "2+", "mapKey": true, - "about": "The name of the endpoint" }, + "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "2+", - "about": "The hostname" }, + "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "2+", - "about": "The port" } + "about": "The port." 
} ]} ]} ], "commonStructs": [ { "name": "ReplicaState", "versions": "0+", "fields": [ - { "name": "ReplicaId", "type": "int32", "versions": "0+", "entityType": "brokerId" }, - { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "2+" }, + { "name": "ReplicaId", "type": "int32", "versions": "0+", "entityType": "brokerId", + "about": "The ID of the replica."}, + { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "2+", + "about": "The replica directory ID of the replica."}, { "name": "LogEndOffset", "type": "int64", "versions": "0+", - "about": "The last known log end offset of the follower or -1 if it is unknown"}, + "about": "The last known log end offset of the follower or -1 if it is unknown."}, { "name": "LastFetchTimestamp", "type": "int64", "versions": "1+", "ignorable": true, "default": -1, - "about": "The last known leader wall clock time time when a follower fetched from the leader. This is reported as -1 both for the current leader or if it is unknown for a voter"}, + "about": "The last known leader wall clock time time when a follower fetched from the leader. This is reported as -1 both for the current leader or if it is unknown for a voter."}, { "name": "LastCaughtUpTimestamp", "type": "int64", "versions": "1+", "ignorable": true, "default": -1, - "about": "The leader wall clock append time of the offset for which the follower made the most recent fetch request. This is reported as the current time for the leader and -1 if unknown for a voter"} + "about": "The leader wall clock append time of the offset for which the follower made the most recent fetch request. This is reported as the current time for the leader and -1 if unknown for a voter."} ]} ] } diff --git a/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json new file mode 100644 index 0000000000000..04ed6a910dcf3 --- /dev/null +++ b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsRequest.json @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 90, + "type": "request", + "listeners": ["broker"], + "name": "DescribeShareGroupOffsetsRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "latestVersionUnstable": true, + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group identifier." }, + { "name": "Topics", "type": "[]DescribeShareGroupOffsetsRequestTopic", "versions": "0+", + "about": "The topics to describe offsets for.", "fields": [ + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." 
}, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions." } + ]} + ] +} diff --git a/clients/src/main/resources/common/message/DescribeShareGroupOffsetsResponse.json b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsResponse.json new file mode 100644 index 0000000000000..80a541f1a2fcf --- /dev/null +++ b/clients/src/main/resources/common/message/DescribeShareGroupOffsetsResponse.json @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +{ + "apiKey": 90, + "type": "response", + "name": "DescribeShareGroupOffsetsResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_SERVER_ERROR (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "Responses", "type": "[]DescribeShareGroupOffsetsResponseTopic", "versions": "0+", + "about": "The results for each topic.", "fields": [ + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." }, + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The unique topic ID." }, + { "name": "Partitions", "type": "[]DescribeShareGroupOffsetsResponsePartition", "versions": "0+", "fields": [ + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." }, + { "name": "StartOffset", "type": "int64", "versions": "0+", + "about": "The share-partition start offset." }, + { "name": "LeaderEpoch", "type": "int32", "versions": "0+", + "about": "The leader epoch of the partition." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The error message, or null if there was no error." 
} + ]} + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DescribeTopicPartitionsRequest.json b/clients/src/main/resources/common/message/DescribeTopicPartitionsRequest.json index 63c5b5c32ad20..fa79989ff1f45 100644 --- a/clients/src/main/resources/common/message/DescribeTopicPartitionsRequest.json +++ b/clients/src/main/resources/common/message/DescribeTopicPartitionsRequest.json @@ -24,17 +24,17 @@ { "name": "Topics", "type": "[]TopicRequest", "versions": "0+", "about": "The topics to fetch details for.", "fields": [ - { "name": "Name", "type": "string", "versions": "0+", - "about": "The topic name", "versions": "0+", "entityType": "topicName"} + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." } ] }, { "name": "ResponsePartitionLimit", "type": "int32", "versions": "0+", "default": "2000", "about": "The maximum number of partitions included in the response." }, { "name": "Cursor", "type": "Cursor", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The first topic and partition index to fetch details for.", "fields": [ - { "name": "TopicName", "type": "string", "versions": "0+", - "about": "The name for the first topic to process", "versions": "0+", "entityType": "topicName"}, - { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index to start with"} + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The name for the first topic to process." }, + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index to start with." } ]} ] } diff --git a/clients/src/main/resources/common/message/DescribeTopicPartitionsResponse.json b/clients/src/main/resources/common/message/DescribeTopicPartitionsResponse.json index e8eee7dcb64aa..668c85431c805 100644 --- a/clients/src/main/resources/common/message/DescribeTopicPartitionsResponse.json +++ b/clients/src/main/resources/common/message/DescribeTopicPartitionsResponse.json @@ -58,9 +58,9 @@ }, { "name": "NextCursor", "type": "Cursor", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The next topic and partition index to fetch details for.", "fields": [ - { "name": "TopicName", "type": "string", "versions": "0+", - "about": "The name for the first topic to process", "versions": "0+", "entityType": "topicName"}, - { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index to start with"} + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The name for the first topic to process." }, + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index to start with." } ]} ] } diff --git a/clients/src/main/resources/common/message/DescribeTransactionsResponse.json b/clients/src/main/resources/common/message/DescribeTransactionsResponse.json index 15f52a473d25e..66b109ae24d7a 100644 --- a/clients/src/main/resources/common/message/DescribeTransactionsResponse.json +++ b/clients/src/main/resources/common/message/DescribeTransactionsResponse.json @@ -22,19 +22,29 @@ "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." 
}, - { "name": "TransactionStates", "type": "[]TransactionState", "versions": "0+", "fields": [ - { "name": "ErrorCode", "type": "int16", "versions": "0+" }, - { "name": "TransactionalId", "type": "string", "versions": "0+", "entityType": "transactionalId" }, - { "name": "TransactionState", "type": "string", "versions": "0+" }, - { "name": "TransactionTimeoutMs", "type": "int32", "versions": "0+" }, - { "name": "TransactionStartTimeMs", "type": "int64", "versions": "0+" }, - { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId" }, - { "name": "ProducerEpoch", "type": "int16", "versions": "0+" }, + { "name": "TransactionStates", "type": "[]TransactionState", "versions": "0+", + "about": "The current state of the transaction.", "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code."}, + { "name": "TransactionalId", "type": "string", "versions": "0+", "entityType": "transactionalId", + "about": "The transactional id."}, + { "name": "TransactionState", "type": "string", "versions": "0+", + "about": "The current transaction state of the producer."}, + { "name": "TransactionTimeoutMs", "type": "int32", "versions": "0+", + "about": "The timeout in milliseconds for the transaction."}, + { "name": "TransactionStartTimeMs", "type": "int64", "versions": "0+", + "about": "The start time of the transaction in milliseconds."}, + { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId", + "about": "The current producer id associated with the transaction."}, + { "name": "ProducerEpoch", "type": "int16", "versions": "0+", + "about": "The current epoch associated with the producer id."}, { "name": "Topics", "type": "[]TopicData", "versions": "0+", "about": "The set of partitions included in the current transaction (if active). When a transaction is preparing to commit or abort, this will include only partitions which do not have markers.", "fields": [ - { "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true }, - { "name": "Partitions", "type": "[]int32", "versions": "0+" } + { "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, + "about": "The topic name."}, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partition ids included in the current transaction."} ] } ]} diff --git a/clients/src/main/resources/common/message/ElectLeadersResponse.json b/clients/src/main/resources/common/message/ElectLeadersResponse.json index 15468c78d1f70..2da4982da9817 100644 --- a/clients/src/main/resources/common/message/ElectLeadersResponse.json +++ b/clients/src/main/resources/common/message/ElectLeadersResponse.json @@ -30,11 +30,11 @@ { "name": "ReplicaElectionResults", "type": "[]ReplicaElectionResult", "versions": "0+", "about": "The election results, or an empty array if the requester did not have permission and the request asks for all partitions.", "fields": [ { "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, + "about": "The topic name." }, { "name": "PartitionResult", "type": "[]PartitionResult", "versions": "0+", - "about": "The results for each partition", "fields": [ + "about": "The results for each partition.", "fields": [ { "name": "PartitionId", "type": "int32", "versions": "0+", - "about": "The partition id" }, + "about": "The partition id." 
}, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The result error, or zero if there was no error."}, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", diff --git a/clients/src/main/resources/common/message/EndQuorumEpochRequest.json b/clients/src/main/resources/common/message/EndQuorumEpochRequest.json index 0c9c56f3f3220..28aef9d52ca0d 100644 --- a/clients/src/main/resources/common/message/EndQuorumEpochRequest.json +++ b/clients/src/main/resources/common/message/EndQuorumEpochRequest.json @@ -24,25 +24,28 @@ "flexibleVersions": "1+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", - "nullableVersions": "0+", "default": "null"}, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + "nullableVersions": "0+", "default": "null", + "about": "The cluster id."}, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topics.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The current leader ID that is resigning"}, + "about": "The current leader ID that is resigning."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The current epoch"}, + "about": "The current epoch."}, { "name": "PreferredSuccessors", "type": "[]int32", "versions": "0", "ignorable": true, - "about": "A sorted list of preferred successors to start the election" }, + "about": "A sorted list of preferred successors to start the election." }, { "name": "PreferredCandidates", "type": "[]ReplicaInfo", "versions": "1+", "ignorable": true, - "about": "A sorted list of preferred candidates to start the election", "fields": [ - { "name": "CandidateId", "type": "int32", "versions": "1+", "entityType": "brokerId" }, - { "name": "CandidateDirectoryId", "type": "uuid", "versions": "1+" } + "about": "A sorted list of preferred candidates to start the election.", "fields": [ + { "name": "CandidateId", "type": "int32", "versions": "1+", "entityType": "brokerId", + "about": "The ID of the candidate replica."}, + { "name": "CandidateDirectoryId", "type": "uuid", "versions": "1+", + "about": "The directory ID of the candidate replica."} ] } ] @@ -50,10 +53,10 @@ ] }, { "name": "LeaderEndpoints", "type": "[]LeaderEndpoint", "versions": "1+", "ignorable": true, - "about": "Endpoints for the leader", "fields": [ - { "name": "Name", "type": "string", "versions": "1+", "mapKey": true, "about": "The name of the endpoint" }, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "about": "Endpoints for the leader.", "fields": [ + { "name": "Name", "type": "string", "versions": "1+", "mapKey": true, "about": "The name of the endpoint." }, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." 
} ] } ] diff --git a/clients/src/main/resources/common/message/EndQuorumEpochResponse.json b/clients/src/main/resources/common/message/EndQuorumEpochResponse.json index 95aa442017f34..ec8a1a82a461a 100644 --- a/clients/src/main/resources/common/message/EndQuorumEpochResponse.json +++ b/clients/src/main/resources/common/message/EndQuorumEpochResponse.json @@ -23,29 +23,30 @@ "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top level error code."}, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topic data.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The partition data.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+"}, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The partition level error code."}, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch"} + "about": "The latest known leader epoch."} ] } ] }, { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "1+", "taggedVersions": "1+", "tag": 0, - "about": "Endpoints for all leaders enumerated in PartitionData", "fields": [ + "about": "Endpoints for all leaders enumerated in PartitionData.", "fields": [ { "name": "NodeId", "type": "int32", "versions": "1+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node" }, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." } ] } ] diff --git a/clients/src/main/resources/common/message/ExpireDelegationTokenRequest.json b/clients/src/main/resources/common/message/ExpireDelegationTokenRequest.json index 2d8f7511c5f46..2694243f1f3c9 100644 --- a/clients/src/main/resources/common/message/ExpireDelegationTokenRequest.json +++ b/clients/src/main/resources/common/message/ExpireDelegationTokenRequest.json @@ -18,10 +18,10 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "ExpireDelegationTokenRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 is the same as version 0. 
// Version 2 adds flexible version support - "validVersions": "0-2", - "deprecatedVersions": "0", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "Hmac", "type": "bytes", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ExpireDelegationTokenResponse.json b/clients/src/main/resources/common/message/ExpireDelegationTokenResponse.json index f2d4bf48b6aeb..d3dc29ff408ba 100644 --- a/clients/src/main/resources/common/message/ExpireDelegationTokenResponse.json +++ b/clients/src/main/resources/common/message/ExpireDelegationTokenResponse.json @@ -17,9 +17,10 @@ "apiKey": 40, "type": "response", "name": "ExpireDelegationTokenResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 adds flexible version support - "validVersions": "0-2", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", diff --git a/clients/src/main/resources/common/message/FetchRequest.json b/clients/src/main/resources/common/message/FetchRequest.json index f92ccefb8dcbe..c49dd1a9b0a4c 100644 --- a/clients/src/main/resources/common/message/FetchRequest.json +++ b/clients/src/main/resources/common/message/FetchRequest.json @@ -18,12 +18,11 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "FetchRequest", + // Versions 0-3 were removed in Apache Kafka 4.0, Version 4 is the new baseline. // // Version 1 is the same as version 0. - // // Starting in Version 2, the requester must be able to handle Kafka Log // Message format version 1. - // // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in // the request is now relevant. Partitions will be processed in the order // they appear in the request. @@ -57,8 +56,7 @@ // Version 16 is the same as version 15 (KIP-951). // // Version 17 adds directory id support from KIP-853 - "validVersions": "0-17", - "deprecatedVersions": "0-3", + "validVersions": "4-17", "flexibleVersions": "12+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "12+", "nullableVersions": "12+", "default": "null", @@ -66,7 +64,8 @@ "about": "The clusterId if known. This is used to validate metadata fetches prior to broker registration." }, { "name": "ReplicaId", "type": "int32", "versions": "0-14", "default": "-1", "entityType": "brokerId", "about": "The broker ID of the follower, of -1 if this request is from a consumer." }, - { "name": "ReplicaState", "type": "ReplicaState", "versions": "15+", "taggedVersions": "15+", "tag": 1, "fields": [ + { "name": "ReplicaState", "type": "ReplicaState", "versions": "15+", "taggedVersions": "15+", "tag": 1, + "about": "The state of the replica in the follower.", "fields": [ { "name": "ReplicaId", "type": "int32", "versions": "15+", "default": "-1", "entityType": "brokerId", "about": "The replica ID of the follower, or -1 if this request is from a consumer." }, { "name": "ReplicaEpoch", "type": "int64", "versions": "15+", "default": "-1", @@ -79,7 +78,7 @@ { "name": "MaxBytes", "type": "int32", "versions": "3+", "default": "0x7fffffff", "ignorable": true, "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, { "name": "IsolationLevel", "type": "int8", "versions": "4+", "default": "0", "ignorable": true, - "about": "This setting controls the visibility of transactional records. 
Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records" }, + "about": "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records." }, { "name": "SessionId", "type": "int32", "versions": "7+", "default": "0", "ignorable": true, "about": "The fetch session ID." }, { "name": "SessionEpoch", "type": "int32", "versions": "7+", "default": "-1", "ignorable": true, @@ -88,7 +87,7 @@ "about": "The topics to fetch.", "fields": [ { "name": "Topic", "type": "string", "versions": "0-12", "entityType": "topicName", "ignorable": true, "about": "The name of the topic to fetch." }, - { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID"}, + { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", "about": "The partitions to fetch.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", @@ -98,24 +97,24 @@ { "name": "FetchOffset", "type": "int64", "versions": "0+", "about": "The message offset." }, { "name": "LastFetchedEpoch", "type": "int32", "versions": "12+", "default": "-1", "ignorable": false, - "about": "The epoch of the last fetched record or -1 if there is none"}, + "about": "The epoch of the last fetched record or -1 if there is none."}, { "name": "LogStartOffset", "type": "int64", "versions": "5+", "default": "-1", "ignorable": true, "about": "The earliest available offset of the follower replica. The field is only used when the request is sent by the follower."}, { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", "about": "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored." }, { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "17+", "taggedVersions": "17+", "tag": 0, "ignorable": true, - "about": "The directory id of the follower fetching" } + "about": "The directory id of the follower fetching." } ]} ]}, { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "7+", "ignorable": false, "about": "In an incremental fetch request, the partitions to remove.", "fields": [ { "name": "Topic", "type": "string", "versions": "7-12", "entityType": "topicName", "ignorable": true, "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID"}, + { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]int32", "versions": "7+", "about": "The partitions indexes to forget." 
} ]}, { "name": "RackId", "type": "string", "versions": "11+", "default": "", "ignorable": true, - "about": "Rack ID of the consumer making this request"} + "about": "Rack ID of the consumer making this request."} ] } diff --git a/clients/src/main/resources/common/message/FetchResponse.json b/clients/src/main/resources/common/message/FetchResponse.json index 605c7c3ff6248..dc8d35175661f 100644 --- a/clients/src/main/resources/common/message/FetchResponse.json +++ b/clients/src/main/resources/common/message/FetchResponse.json @@ -17,10 +17,9 @@ "apiKey": 1, "type": "response", "name": "FetchResponse", + // Versions 0-3 were removed in Apache Kafka 4.0, Version 4 is the new baseline. // - // Version 1 adds throttle time. - // - // Version 2 and 3 are the same as version 1. + // Version 1 adds throttle time. Version 2 and 3 are the same as version 1. // // Version 4 adds features for transactional consumption. // @@ -49,7 +48,7 @@ // Version 16 adds the 'NodeEndpoints' field (KIP-951). // // Version 17 no changes to the response (KIP-853). - "validVersions": "0-17", + "validVersions": "4-17", "flexibleVersions": "12+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, @@ -62,7 +61,7 @@ "about": "The response topics.", "fields": [ { "name": "Topic", "type": "string", "versions": "0-12", "ignorable": true, "entityType": "topicName", "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID"}, + { "name": "TopicId", "type": "uuid", "versions": "13+", "ignorable": true, "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -72,28 +71,31 @@ { "name": "HighWatermark", "type": "int64", "versions": "0+", "about": "The current high water mark." }, { "name": "LastStableOffset", "type": "int64", "versions": "4+", "default": "-1", "ignorable": true, - "about": "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)" }, + "about": "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)." }, { "name": "LogStartOffset", "type": "int64", "versions": "5+", "default": "-1", "ignorable": true, "about": "The current log start offset." }, { "name": "DivergingEpoch", "type": "EpochEndOffset", "versions": "12+", "taggedVersions": "12+", "tag": 0, - "about": "In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge", - "fields": [ - { "name": "Epoch", "type": "int32", "versions": "12+", "default": "-1" }, - { "name": "EndOffset", "type": "int64", "versions": "12+", "default": "-1" } + "about": "In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge.", "fields": [ + { "name": "Epoch", "type": "int32", "versions": "12+", "default": "-1", + "about": "The largest epoch." }, + { "name": "EndOffset", "type": "int64", "versions": "12+", "default": "-1", + "about": "The end offset of the epoch." 
} ]}, { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", - "versions": "12+", "taggedVersions": "12+", "tag": 1, "fields": [ + "versions": "12+", "taggedVersions": "12+", "tag": 1, + "about": "The current leader of the partition.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "12+", "default": "-1", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "12+", "default": "-1", - "about": "The latest known leader epoch"} + "about": "The latest known leader epoch." } ]}, { "name": "SnapshotId", "type": "SnapshotId", "versions": "12+", "taggedVersions": "12+", "tag": 2, - "about": "In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request.", - "fields": [ - { "name": "EndOffset", "type": "int64", "versions": "0+", "default": "-1" }, - { "name": "Epoch", "type": "int32", "versions": "0+", "default": "-1" } + "about": "In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request.", "fields": [ + { "name": "EndOffset", "type": "int64", "versions": "0+", "default": "-1", + "about": "The end offset of the epoch." }, + { "name": "Epoch", "type": "int32", "versions": "0+", "default": "-1", + "about": "The largest epoch." } ]}, { "name": "AbortedTransactions", "type": "[]AbortedTransaction", "versions": "4+", "nullableVersions": "4+", "ignorable": true, "about": "The aborted transactions.", "fields": [ @@ -103,7 +105,7 @@ "about": "The first offset in the aborted transaction." } ]}, { "name": "PreferredReadReplica", "type": "int32", "versions": "11+", "default": "-1", "ignorable": false, "entityType": "brokerId", - "about": "The preferred read replica for the consumer to use on its next fetch request"}, + "about": "The preferred read replica for the consumer to use on its next fetch request."}, { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."} ]} ]}, diff --git a/clients/src/main/resources/common/message/FetchSnapshotRequest.json b/clients/src/main/resources/common/message/FetchSnapshotRequest.json index c43eeb4ce0976..7d2200aad9e97 100644 --- a/clients/src/main/resources/common/message/FetchSnapshotRequest.json +++ b/clients/src/main/resources/common/message/FetchSnapshotRequest.json @@ -23,31 +23,33 @@ "flexibleVersions": "0+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "taggedVersions": "0+", "tag": 0, - "about": "The clusterId if known, this is used to validate metadata fetches prior to broker registration" }, + "about": "The clusterId if known, this is used to validate metadata fetches prior to broker registration." }, { "name": "ReplicaId", "type": "int32", "versions": "0+", "default": "-1", "entityType": "brokerId", - "about": "The broker ID of the follower" }, + "about": "The broker ID of the follower." }, { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", - "about": "The maximum bytes to fetch from all of the snapshots" }, + "about": "The maximum bytes to fetch from all of the snapshots." 
}, { "name": "Topics", "type": "[]TopicSnapshot", "versions": "0+", - "about": "The topics to fetch", "fields": [ + "about": "The topics to fetch.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The name of the topic to fetch" }, + "about": "The name of the topic to fetch." }, { "name": "Partitions", "type": "[]PartitionSnapshot", "versions": "0+", - "about": "The partitions to fetch", "fields": [ + "about": "The partitions to fetch.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", - "about": "The partition index" }, + "about": "The partition index." }, { "name": "CurrentLeaderEpoch", "type": "int32", "versions": "0+", - "about": "The current leader epoch of the partition, -1 for unknown leader epoch" }, + "about": "The current leader epoch of the partition, -1 for unknown leader epoch." }, { "name": "SnapshotId", "type": "SnapshotId", "versions": "0+", - "about": "The snapshot endOffset and epoch to fetch", "fields": [ - { "name": "EndOffset", "type": "int64", "versions": "0+" }, - { "name": "Epoch", "type": "int32", "versions": "0+" } + "about": "The snapshot endOffset and epoch to fetch.", "fields": [ + { "name": "EndOffset", "type": "int64", "versions": "0+", + "about": "The end offset of the snapshot."}, + { "name": "Epoch", "type": "int32", "versions": "0+", + "about": "The epoch of the snapshot."} ] }, { "name": "Position", "type": "int64", "versions": "0+", - "about": "The byte position within the snapshot to start fetching from" }, + "about": "The byte position within the snapshot to start fetching from." }, { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "1+", "taggedVersions": "1+", "tag": 0, "ignorable": true, - "about": "The directory id of the follower fetching" } + "about": "The directory id of the follower fetching." } ] } ] diff --git a/clients/src/main/resources/common/message/FetchSnapshotResponse.json b/clients/src/main/resources/common/message/FetchSnapshotResponse.json index e5d391ae02e58..a1447b918989d 100644 --- a/clients/src/main/resources/common/message/FetchSnapshotResponse.json +++ b/clients/src/main/resources/common/message/FetchSnapshotResponse.json @@ -36,17 +36,20 @@ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The error code, or 0 if there was no fetch error." 
}, { "name": "SnapshotId", "type": "SnapshotId", "versions": "0+", - "about": "The snapshot endOffset and epoch fetched", "fields": [ - { "name": "EndOffset", "type": "int64", "versions": "0+" }, - { "name": "Epoch", "type": "int32", "versions": "0+" } + "about": "The snapshot endOffset and epoch fetched.", "fields": [ + { "name": "EndOffset", "type": "int64", "versions": "0+", + "about": "The snapshot end offset."}, + { "name": "Epoch", "type": "int32", "versions": "0+", + "about": "The snapshot epoch."} ] }, { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", - "versions": "0+", "taggedVersions": "0+", "tag": 0, "fields": [ + "versions": "0+", "taggedVersions": "0+", "tag": 0, + "about": "The leader of the partition at the time of the snapshot.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch"} + "about": "The latest known leader epoch."} ] }, { "name": "Size", "type": "int64", "versions": "0+", @@ -54,17 +57,17 @@ { "name": "Position", "type": "int64", "versions": "0+", "about": "The starting byte position within the snapshot included in the Bytes field." }, { "name": "UnalignedRecords", "type": "records", "versions": "0+", - "about": "Snapshot data in records format which may not be aligned on an offset boundary" } + "about": "Snapshot data in records format which may not be aligned on an offset boundary." } ] } ] }, { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "1+", "taggedVersions": "1+", "tag": 0, - "about": "Endpoints for all current-leaders enumerated in PartitionSnapshot", "fields": [ + "about": "Endpoints for all current-leaders enumerated in PartitionSnapshot.", "fields": [ { "name": "NodeId", "type": "int32", "versions": "1+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node" }, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node." }, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." } ] } ] diff --git a/clients/src/main/resources/common/message/FindCoordinatorRequest.json b/clients/src/main/resources/common/message/FindCoordinatorRequest.json index 8dfe3a4d79f2b..7a926501f7bb3 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorRequest.json +++ b/clients/src/main/resources/common/message/FindCoordinatorRequest.json @@ -31,13 +31,12 @@ // Version 6 adds support for share groups (KIP-932). // For key type SHARE (2), the coordinator key format is "groupId:topicId:partition". "validVersions": "0-6", - "deprecatedVersions": "0", "flexibleVersions": "3+", "fields": [ { "name": "Key", "type": "string", "versions": "0-3", "about": "The coordinator key." }, { "name": "KeyType", "type": "int8", "versions": "1+", "default": "0", "ignorable": false, - "about": "The coordinator key type. (Group, transaction, etc.)" }, + "about": "The coordinator key type. (group, transaction, share)." }, { "name": "CoordinatorKeys", "type": "[]string", "versions": "4+", "about": "The coordinator keys." 
} ] diff --git a/clients/src/main/resources/common/message/FindCoordinatorResponse.json b/clients/src/main/resources/common/message/FindCoordinatorResponse.json index be0479f908c96..40c43b65f9ccf 100644 --- a/clients/src/main/resources/common/message/FindCoordinatorResponse.json +++ b/clients/src/main/resources/common/message/FindCoordinatorResponse.json @@ -43,7 +43,7 @@ "about": "The host name." }, { "name": "Port", "type": "int32", "versions": "0-3", "about": "The port." }, - { "name": "Coordinators", "type": "[]Coordinator", "versions": "4+", "about": "Each coordinator result in the response", "fields": [ + { "name": "Coordinators", "type": "[]Coordinator", "versions": "4+", "about": "Each coordinator result in the response.", "fields": [ { "name": "Key", "type": "string", "versions": "4+", "about": "The coordinator key." }, { "name": "NodeId", "type": "int32", "versions": "4+", "entityType": "brokerId", "about": "The node id." }, diff --git a/clients/src/main/resources/common/message/GetTelemetrySubscriptionsResponse.json b/clients/src/main/resources/common/message/GetTelemetrySubscriptionsResponse.json index 54687f4cf4f2c..7fc6af81aa896 100644 --- a/clients/src/main/resources/common/message/GetTelemetrySubscriptionsResponse.json +++ b/clients/src/main/resources/common/message/GetTelemetrySubscriptionsResponse.json @@ -50,7 +50,7 @@ }, { "name": "DeltaTemporality", "type": "bool", "versions": "0+", - "about": "Flag to indicate monotonic/counter metrics are to be emitted as deltas or cumulative values" + "about": "Flag to indicate monotonic/counter metrics are to be emitted as deltas or cumulative values." }, { "name": "RequestedMetrics", "type": "[]string", "versions": "0+", diff --git a/clients/src/main/resources/common/message/InitializeShareGroupStateResponse.json b/clients/src/main/resources/common/message/InitializeShareGroupStateResponse.json index f36a98e0ff97e..cf00b47b233fb 100644 --- a/clients/src/main/resources/common/message/InitializeShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/InitializeShareGroupStateResponse.json @@ -26,9 +26,9 @@ // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]InitializeStateResult", "versions": "0+", - "about": "The initialization results", "fields": [ + "about": "The initialization results.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic identifier" }, + "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionResult", "versions": "0+", "about" : "The results for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/JoinGroupRequest.json b/clients/src/main/resources/common/message/JoinGroupRequest.json index 54abf53e250d2..2c4c9fdfd62db 100644 --- a/clients/src/main/resources/common/message/JoinGroupRequest.json +++ b/clients/src/main/resources/common/message/JoinGroupRequest.json @@ -18,9 +18,9 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "JoinGroupRequest", - // Version 1 adds RebalanceTimeoutMs. + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. // - // Version 2 and 3 are the same as version 1. + // Version 1 adds RebalanceTimeoutMs. Version 2 and 3 are the same as version 1. // // Starting from version 4, the client needs to issue a second request to join group // @@ -34,8 +34,7 @@ // Version 8 adds the Reason field (KIP-800). // // Version 9 is the same as version 8. 
- "validVersions": "0-9", - "deprecatedVersions": "0-1", + "validVersions": "2-9", "flexibleVersions": "6+", "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", diff --git a/clients/src/main/resources/common/message/JoinGroupResponse.json b/clients/src/main/resources/common/message/JoinGroupResponse.json index d01c2c1c02879..364309596eb95 100644 --- a/clients/src/main/resources/common/message/JoinGroupResponse.json +++ b/clients/src/main/resources/common/message/JoinGroupResponse.json @@ -17,6 +17,8 @@ "apiKey": 11, "type": "response", "name": "JoinGroupResponse", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 is the same as version 0. // // Version 2 adds throttle time. @@ -35,7 +37,7 @@ // Version 8 is the same as version 7. // // Version 9 adds the SkipAssignment field. - "validVersions": "0-9", + "validVersions": "2-9", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, @@ -55,14 +57,15 @@ "about": "True if the leader must skip running the assignment." }, { "name": "MemberId", "type": "string", "versions": "0+", "about": "The member ID assigned by the group coordinator." }, - { "name": "Members", "type": "[]JoinGroupResponseMember", "versions": "0+", "fields": [ + { "name": "Members", "type": "[]JoinGroupResponseMember", "versions": "0+", + "about": "The group members.", "fields": [ { "name": "MemberId", "type": "string", "versions": "0+", "about": "The group member ID." }, { "name": "GroupInstanceId", "type": "string", "versions": "5+", "ignorable": true, "nullableVersions": "5+", "default": "null", "about": "The unique identifier of the consumer instance provided by end user." }, { "name": "Metadata", "type": "bytes", "versions": "0+", - "about": "The group member metadata." } - ]} + "about": "The group member metadata." }] + } ] } diff --git a/clients/src/main/resources/common/message/KRaftVersionRecord.json b/clients/src/main/resources/common/message/KRaftVersionRecord.json index 8610f75fe5d73..7c8a32481b42e 100644 --- a/clients/src/main/resources/common/message/KRaftVersionRecord.json +++ b/clients/src/main/resources/common/message/KRaftVersionRecord.json @@ -20,8 +20,8 @@ "flexibleVersions": "0+", "fields": [ { "name": "Version", "type": "int16", "versions": "0+", - "about": "The version of the kraft version record" }, + "about": "The version of the kraft version record." }, { "name": "KRaftVersion", "type": "int16", "versions": "0+", - "about": "The kraft protocol version" } + "about": "The kraft protocol version." } ] } diff --git a/clients/src/main/resources/common/message/LeaderAndIsrRequest.json b/clients/src/main/resources/common/message/LeaderAndIsrRequest.json index e049d088c534c..ec0219a233a89 100644 --- a/clients/src/main/resources/common/message/LeaderAndIsrRequest.json +++ b/clients/src/main/resources/common/message/LeaderAndIsrRequest.json @@ -37,13 +37,13 @@ { "name": "ControllerId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The current controller ID." }, { "name": "isKRaftController", "type": "bool", "versions": "7+", "default": "false", - "about": "If KRaft controller id is used during migration. See KIP-866" }, + "about": "If KRaft controller id is used during migration. See KIP-866." }, { "name": "ControllerEpoch", "type": "int32", "versions": "0+", "about": "The current controller epoch." 
}, { "name": "BrokerEpoch", "type": "int64", "versions": "2+", "ignorable": true, "default": "-1", "about": "The current broker epoch." }, { "name": "Type", "type": "int8", "versions": "5+", - "about": "The type that indicates whether all topics are included in the request"}, + "about": "The type that indicates whether all topics are included in the request."}, { "name": "UngroupedPartitionStates", "type": "[]LeaderAndIsrPartitionState", "versions": "0-1", "about": "The state of each partition, in a v0 or v1 message." }, // In v0 or v1 requests, each partition is listed alongside its topic name. @@ -56,7 +56,7 @@ { "name": "TopicId", "type": "uuid", "versions": "5+", "ignorable": true, "about": "The unique topic ID." }, { "name": "PartitionStates", "type": "[]LeaderAndIsrPartitionState", "versions": "2+", - "about": "The state of each partition" } + "about": "The state of each partition." } ]}, { "name": "LiveLeaders", "type": "[]LeaderAndIsrLiveLeader", "versions": "0+", "about": "The current live leaders.", "fields": [ @@ -83,7 +83,7 @@ { "name": "Isr", "type": "[]int32", "versions": "0+", "entityType": "brokerId", "about": "The in-sync replica IDs." }, { "name": "PartitionEpoch", "type": "int32", "versions": "0+", - "about": "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. (Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)" }, + "about": "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. (Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)." }, { "name": "Replicas", "type": "[]int32", "versions": "0+", "entityType": "brokerId", "about": "The replica IDs." }, { "name": "AddingReplicas", "type": "[]int32", "versions": "3+", "ignorable": true, "entityType": "brokerId", diff --git a/clients/src/main/resources/common/message/LeaderAndIsrResponse.json b/clients/src/main/resources/common/message/LeaderAndIsrResponse.json index f4f2a4308fdcc..d97f55bf4b9b0 100644 --- a/clients/src/main/resources/common/message/LeaderAndIsrResponse.json +++ b/clients/src/main/resources/common/message/LeaderAndIsrResponse.json @@ -35,9 +35,9 @@ { "name": "PartitionErrors", "type": "[]LeaderAndIsrPartitionError", "versions": "0-4", "about": "Each partition in v0 to v4 message."}, { "name": "Topics", "type": "[]LeaderAndIsrTopicError", "versions": "5+", - "about": "Each topic", "fields": [ + "about": "Each topic.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "5+", "mapKey": true, - "about": "The unique topic ID" }, + "about": "The unique topic ID." 
}, { "name": "PartitionErrors", "type": "[]LeaderAndIsrPartitionError", "versions": "5+", "about": "Each partition."} ]} diff --git a/clients/src/main/resources/common/message/LeaderChangeMessage.json b/clients/src/main/resources/common/message/LeaderChangeMessage.json index 4d5b8932ef5ce..7d56c4601fb15 100644 --- a/clients/src/main/resources/common/message/LeaderChangeMessage.json +++ b/clients/src/main/resources/common/message/LeaderChangeMessage.json @@ -20,20 +20,21 @@ "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ - {"name": "Version", "type": "int16", "versions": "0+", - "about": "The version of the leader change message"}, - {"name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The ID of the newly elected leader"}, - {"name": "Voters", "type": "[]Voter", "versions": "0+", - "about": "The set of voters in the quorum for this epoch"}, - {"name": "GrantingVoters", "type": "[]Voter", "versions": "0+", - "about": "The voters who voted for the leader at the time of election"} + { "name": "Version", "type": "int16", "versions": "0+", + "about": "The version of the leader change message."}, + { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", + "about": "The ID of the newly elected leader."}, + { "name": "Voters", "type": "[]Voter", "versions": "0+", + "about": "The set of voters in the quorum for this epoch."}, + { "name": "GrantingVoters", "type": "[]Voter", "versions": "0+", + "about": "The voters who voted for the leader at the time of election."} ], "commonStructs": [ { "name": "Voter", "versions": "0+", "fields": [ - {"name": "VoterId", "type": "int32", "versions": "0+"}, - {"name": "VoterDirectoryId", "type": "uuid", "versions": "1+", - "about": "The directory id of the voter"} + { "name": "VoterId", "type": "int32", "versions": "0+", + "about": "The ID of the voter."}, + { "name": "VoterDirectoryId", "type": "uuid", "versions": "1+", + "about": "The directory id of the voter."} ]} ] } diff --git a/clients/src/main/resources/common/message/ListOffsetsRequest.json b/clients/src/main/resources/common/message/ListOffsetsRequest.json index f341468739551..5a864d8ddc1f8 100644 --- a/clients/src/main/resources/common/message/ListOffsetsRequest.json +++ b/clients/src/main/resources/common/message/ListOffsetsRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "ListOffsetsRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 removes MaxNumOffsets. From this version forward, only a single // offset can be returned. // @@ -38,15 +40,14 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - "validVersions": "0-10", - "deprecatedVersions": "0", + "validVersions": "1-10", "flexibleVersions": "6+", "latestVersionUnstable": false, "fields": [ { "name": "ReplicaId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The broker ID of the requester, or -1 if this request is being made by a normal consumer." }, { "name": "IsolationLevel", "type": "int8", "versions": "2+", - "about": "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records" }, + "about": "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records." }, { "name": "Topics", "type": "[]ListOffsetsTopic", "versions": "0+", "about": "Each topic in the request.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", @@ -58,9 +59,7 @@ { "name": "CurrentLeaderEpoch", "type": "int32", "versions": "4+", "default": "-1", "ignorable": true, "about": "The current leader epoch." }, { "name": "Timestamp", "type": "int64", "versions": "0+", - "about": "The current timestamp." }, - { "name": "MaxNumOffsets", "type": "int32", "versions": "0", "default": "1", - "about": "The maximum number of offsets to report." } + "about": "The current timestamp." } ]} ]}, { "name": "TimeoutMs", "type": "int32", "versions": "10+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListOffsetsResponse.json b/clients/src/main/resources/common/message/ListOffsetsResponse.json index a271d01190645..7f9588847b9a0 100644 --- a/clients/src/main/resources/common/message/ListOffsetsResponse.json +++ b/clients/src/main/resources/common/message/ListOffsetsResponse.json @@ -17,6 +17,8 @@ "apiKey": 2, "type": "response", "name": "ListOffsetsResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 removes the offsets array in favor of returning a single offset. // Version 1 also adds the timestamp associated with the returned offset. // @@ -38,7 +40,7 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - "validVersions": "0-10", + "validVersions": "1-10", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, @@ -46,20 +48,19 @@ { "name": "Topics", "type": "[]ListOffsetsTopicResponse", "versions": "0+", "about": "Each topic in the response.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, + "about": "The topic name." }, { "name": "Partitions", "type": "[]ListOffsetsPartitionResponse", "versions": "0+", "about": "Each partition in the response.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The partition error code, or 0 if there was no error." }, - { "name": "OldStyleOffsets", "type": "[]int64", "versions": "0", "ignorable": false, - "about": "The result offsets." }, { "name": "Timestamp", "type": "int64", "versions": "1+", "default": "-1", "ignorable": false, "about": "The timestamp associated with the returned offset." }, { "name": "Offset", "type": "int64", "versions": "1+", "default": "-1", "ignorable": false, "about": "The returned offset." 
}, - { "name": "LeaderEpoch", "type": "int32", "versions": "4+", "default": "-1" } + { "name": "LeaderEpoch", "type": "int32", "versions": "4+", "default": "-1", + "about": "The leader epoch associated with the returned offset."} ]} ]} ] diff --git a/clients/src/main/resources/common/message/ListPartitionReassignmentsRequest.json b/clients/src/main/resources/common/message/ListPartitionReassignmentsRequest.json index 61022091ea369..952a3db0d23ef 100644 --- a/clients/src/main/resources/common/message/ListPartitionReassignmentsRequest.json +++ b/clients/src/main/resources/common/message/ListPartitionReassignmentsRequest.json @@ -26,7 +26,7 @@ { "name": "Topics", "type": "[]ListPartitionReassignmentsTopics", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The topics to list partition reassignments for, or null to list everything.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", - "about": "The topic name" }, + "about": "The topic name." }, { "name": "PartitionIndexes", "type": "[]int32", "versions": "0+", "about": "The partitions to list partition reassignments for." } ]} diff --git a/clients/src/main/resources/common/message/ListPartitionReassignmentsResponse.json b/clients/src/main/resources/common/message/ListPartitionReassignmentsResponse.json index 753d9bfd76844..a8aeb60a892a9 100644 --- a/clients/src/main/resources/common/message/ListPartitionReassignmentsResponse.json +++ b/clients/src/main/resources/common/message/ListPartitionReassignmentsResponse.json @@ -23,7 +23,7 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, + "about": "The top-level error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "about": "The top-level error message, or null if there was no error." }, { "name": "Topics", "type": "[]OngoingTopicReassignment", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ListTransactionsRequest.json b/clients/src/main/resources/common/message/ListTransactionsRequest.json index 2aeeaa62e28c8..4879c4d5f957e 100644 --- a/clients/src/main/resources/common/message/ListTransactionsRequest.json +++ b/clients/src/main/resources/common/message/ListTransactionsRequest.json @@ -23,13 +23,13 @@ "flexibleVersions": "0+", "fields": [ { "name": "StateFilters", "type": "[]string", "versions": "0+", - "about": "The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned" + "about": "The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned." }, { "name": "ProducerIdFilters", "type": "[]int64", "versions": "0+", "entityType": "producerId", - "about": "The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned" + "about": "The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned." 
}, { "name": "DurationFilter", "type": "int64", "versions": "1+", "default": -1, - "about": "Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned" + "about": "Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned." } ] } diff --git a/clients/src/main/resources/common/message/ListTransactionsResponse.json b/clients/src/main/resources/common/message/ListTransactionsResponse.json index e9924801cc059..3872cf24a3075 100644 --- a/clients/src/main/resources/common/message/ListTransactionsResponse.json +++ b/clients/src/main/resources/common/message/ListTransactionsResponse.json @@ -23,14 +23,18 @@ "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+" }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code, or 0 if there was no error." }, { "name": "UnknownStateFilters", "type": "[]string", "versions": "0+", - "about": "Set of state filters provided in the request which were unknown to the transaction coordinator" }, - { "name": "TransactionStates", "type": "[]TransactionState", "versions": "0+", "fields": [ - { "name": "TransactionalId", "type": "string", "versions": "0+", "entityType": "transactionalId" }, - { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId" }, + "about": "Set of state filters provided in the request which were unknown to the transaction coordinator." }, + { "name": "TransactionStates", "type": "[]TransactionState", "versions": "0+", + "about": "The current state of the transaction for the transactional id.", "fields": [ + { "name": "TransactionalId", "type": "string", "versions": "0+", "entityType": "transactionalId", + "about": "The transactional id." }, + { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId", + "about": "The producer id." }, { "name": "TransactionState", "type": "string", "versions": "0+", - "about": "The current transaction state of the producer" } + "about": "The current transaction state of the producer." } ]} ] } diff --git a/clients/src/main/resources/common/message/MetadataRequest.json b/clients/src/main/resources/common/message/MetadataRequest.json index 552dea0a6f8a7..eaee4a3453d41 100644 --- a/clients/src/main/resources/common/message/MetadataRequest.json +++ b/clients/src/main/resources/common/message/MetadataRequest.json @@ -18,14 +18,14 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "MetadataRequest", - "validVersions": "0-12", - "deprecatedVersions": "0-3", + "validVersions": "4-13", "flexibleVersions": "9+", "fields": [ + // Versions 0-3 were removed in Apache Kafka 4.0, Version 4 is the new baseline. + // // In version 0, an empty array indicates "request metadata for all topics." In version 1 and // higher, an empty array indicates "request metadata for no topics," and a null array is used to // indicate "request metadata for all topics." - // // Version 2 and 3 are the same as version 1. // // Version 4 adds AllowAutoTopicCreation. @@ -40,6 +40,7 @@ // Version 11 deprecates IncludeClusterAuthorizedOperations field. This is now exposed // by the DescribeCluster API (KIP-700). 
// Version 12 supports topic Id. + // Version 13 supports top-level error code in the response. { "name": "Topics", "type": "[]MetadataRequestTopic", "versions": "0+", "nullableVersions": "1+", "about": "The topics to fetch metadata for.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, "about": "The topic id." }, diff --git a/clients/src/main/resources/common/message/MetadataResponse.json b/clients/src/main/resources/common/message/MetadataResponse.json index 408cdc7940a13..6b31fdcccfca9 100644 --- a/clients/src/main/resources/common/message/MetadataResponse.json +++ b/clients/src/main/resources/common/message/MetadataResponse.json @@ -17,11 +17,11 @@ "apiKey": 3, "type": "response", "name": "MetadataResponse", + // Versions 0-3 were removed in Apache Kafka 4.0, Version 4 is the new baseline. + // // Version 1 adds fields for the rack of each broker, the controller id, and // whether or not the topic is internal. - // // Version 2 adds the cluster ID field. - // // Version 3 adds the throttle time. // // Version 4 is the same as version 3. @@ -42,7 +42,8 @@ // Version 11 deprecates ClusterAuthorizedOperations. This is now exposed // by the DescribeCluster API (KIP-700). // Version 12 supports topicId. - "validVersions": "0-12", + // Version 13 supports top-level error code in the response. + "validVersions": "4-13", "flexibleVersions": "9+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true, @@ -93,6 +94,9 @@ "about": "32-bit bitfield to represent authorized operations for this topic." } ]}, { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8-10", "default": "-2147483648", - "about": "32-bit bitfield to represent authorized operations for this cluster." } + "about": "32-bit bitfield to represent authorized operations for this cluster." }, + { "name": "ErrorCode", "type": "int16", "versions": "13+", "ignorable": true, + "about": "The top-level error code, or 0 if there was no error." } + ] } diff --git a/clients/src/main/resources/common/message/OffsetCommitRequest.json b/clients/src/main/resources/common/message/OffsetCommitRequest.json index 5b3029a53014a..8f9e1d74d96d6 100644 --- a/clients/src/main/resources/common/message/OffsetCommitRequest.json +++ b/clients/src/main/resources/common/message/OffsetCommitRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "OffsetCommitRequest", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 adds timestamp and group membership information, as well as the commit timestamp. // // Version 2 adds retention time. It removes the commit timestamp added in version 1. @@ -34,8 +36,7 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The // request is the same as version 8. - "validVersions": "0-9", - "deprecatedVersions": "0-1", + "validVersions": "2-9", "flexibleVersions": "8+", "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", @@ -61,9 +62,6 @@ "about": "The message offset to be committed." }, { "name": "CommittedLeaderEpoch", "type": "int32", "versions": "6+", "default": "-1", "ignorable": true, "about": "The leader epoch of this partition." }, - // CommitTimestamp has been removed from v2 and later. - { "name": "CommitTimestamp", "type": "int64", "versions": "1", "default": "-1", - "about": "The timestamp of the commit." 
}, { "name": "CommittedMetadata", "type": "string", "versions": "0+", "nullableVersions": "0+", "about": "Any associated metadata the client wants to keep." } ]} diff --git a/clients/src/main/resources/common/message/OffsetCommitResponse.json b/clients/src/main/resources/common/message/OffsetCommitResponse.json index dbb23e3d4388c..0cccd64816c47 100644 --- a/clients/src/main/resources/common/message/OffsetCommitResponse.json +++ b/clients/src/main/resources/common/message/OffsetCommitResponse.json @@ -17,6 +17,8 @@ "apiKey": 8, "type": "response", "name": "OffsetCommitResponse", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Versions 1 and 2 are the same as version 0. // // Version 3 adds the throttle time to the response. @@ -32,7 +34,7 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH when the new consumer group protocol is used and // GROUP_ID_NOT_FOUND when the group does not exist for both protocols. - "validVersions": "0-9", + "validVersions": "2-9", "flexibleVersions": "8+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) diff --git a/clients/src/main/resources/common/message/OffsetDeleteRequest.json b/clients/src/main/resources/common/message/OffsetDeleteRequest.json index 4a9dea60805a2..4583030060add 100644 --- a/clients/src/main/resources/common/message/OffsetDeleteRequest.json +++ b/clients/src/main/resources/common/message/OffsetDeleteRequest.json @@ -24,7 +24,7 @@ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The unique group identifier." }, { "name": "Topics", "type": "[]OffsetDeleteRequestTopic", "versions": "0+", - "about": "The topics to delete offsets for", "fields": [ + "about": "The topics to delete offsets for.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "entityType": "topicName", "about": "The topic name." }, { "name": "Partitions", "type": "[]OffsetDeleteRequestPartition", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetFetchRequest.json b/clients/src/main/resources/common/message/OffsetFetchRequest.json index 82ac806541352..d9d97da384b62 100644 --- a/clients/src/main/resources/common/message/OffsetFetchRequest.json +++ b/clients/src/main/resources/common/message/OffsetFetchRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "OffsetFetchRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // In version 0, the request read offsets from ZK. // // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic. @@ -36,8 +38,7 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). It adds // the MemberId and MemberEpoch fields. Those are filled in and validated when the new consumer protocol is used. - "validVersions": "0-9", - "deprecatedVersions": "0", + "validVersions": "1-9", "flexibleVersions": "6+", "fields": [ { "name": "GroupId", "type": "string", "versions": "0-7", "entityType": "groupId", @@ -50,11 +51,11 @@ "about": "The partition indexes we would like to fetch offsets for." 
} ]}, { "name": "Groups", "type": "[]OffsetFetchRequestGroup", "versions": "8+", - "about": "Each group we would like to fetch offsets for", "fields": [ + "about": "Each group we would like to fetch offsets for.", "fields": [ { "name": "GroupId", "type": "string", "versions": "8+", "entityType": "groupId", "about": "The group ID."}, { "name": "MemberId", "type": "string", "versions": "9+", "nullableVersions": "9+", "default": "null", "ignorable": true, - "about": "The member id" }, + "about": "The member id." }, { "name": "MemberEpoch", "type": "int32", "versions": "9+", "default": "-1", "ignorable": true, "about": "The member epoch if using the new consumer protocol (KIP-848)." }, { "name": "Topics", "type": "[]OffsetFetchRequestTopics", "versions": "8+", "nullableVersions": "8+", diff --git a/clients/src/main/resources/common/message/OffsetFetchResponse.json b/clients/src/main/resources/common/message/OffsetFetchResponse.json index 0b4cc10c3b49b..9f0a5157cc424 100644 --- a/clients/src/main/resources/common/message/OffsetFetchResponse.json +++ b/clients/src/main/resources/common/message/OffsetFetchResponse.json @@ -17,6 +17,8 @@ "apiKey": 9, "type": "response", "name": "OffsetFetchResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. + // // Version 1 is the same as version 0. // // Version 2 adds a top-level error code. @@ -36,7 +38,7 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH and UNKNOWN_MEMBER_ID errors when the new consumer group // protocol is used. - "validVersions": "0-9", + "validVersions": "1-9", "flexibleVersions": "6+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -55,7 +57,7 @@ { "name": "Name", "type": "string", "versions": "0-7", "entityType": "topicName", "about": "The topic name." }, { "name": "Partitions", "type": "[]OffsetFetchResponsePartition", "versions": "0-7", - "about": "The responses per partition", "fields": [ + "about": "The responses per partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0-7", "about": "The partition index." }, { "name": "CommittedOffset", "type": "int64", "versions": "0-7", @@ -79,7 +81,7 @@ { "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName", "about": "The topic name." }, { "name": "Partitions", "type": "[]OffsetFetchResponsePartitions", "versions": "8+", - "about": "The responses per partition", "fields": [ + "about": "The responses per partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "8+", "about": "The partition index." }, { "name": "CommittedOffset", "type": "int64", "versions": "8+", diff --git a/clients/src/main/resources/common/message/OffsetForLeaderEpochRequest.json b/clients/src/main/resources/common/message/OffsetForLeaderEpochRequest.json index 04796be0c43a9..b2126a4001449 100644 --- a/clients/src/main/resources/common/message/OffsetForLeaderEpochRequest.json +++ b/clients/src/main/resources/common/message/OffsetForLeaderEpochRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "OffsetForLeaderEpochRequest", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 is the same as version 0. // // Version 2 adds the current leader epoch to support fencing. @@ -27,8 +29,7 @@ // Followers will use this replicaId when using an older version of the protocol. 
// // Version 4 enables flexible versions. - "validVersions": "0-4", - "deprecatedVersions": "0-1", + "validVersions": "2-4", "flexibleVersions": "4+", "fields": [ { "name": "ReplicaId", "type": "int32", "versions": "3+", "default": -2, "ignorable": true, "entityType": "brokerId", diff --git a/clients/src/main/resources/common/message/OffsetForLeaderEpochResponse.json b/clients/src/main/resources/common/message/OffsetForLeaderEpochResponse.json index 2b0810e1ec3ce..f82aa09b7ed84 100644 --- a/clients/src/main/resources/common/message/OffsetForLeaderEpochResponse.json +++ b/clients/src/main/resources/common/message/OffsetForLeaderEpochResponse.json @@ -17,6 +17,8 @@ "apiKey": 23, "type": "response", "name": "OffsetForLeaderEpochResponse", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 added the leader epoch to the response. // // Version 2 added the throttle time. @@ -24,7 +26,7 @@ // Version 3 is the same as version 2. // // Version 4 enables flexible versions. - "validVersions": "0-4", + "validVersions": "2-4", "flexibleVersions": "4+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ProduceRequest.json b/clients/src/main/resources/common/message/ProduceRequest.json index ae01fe5c8c08a..90e46a3041b22 100644 --- a/clients/src/main/resources/common/message/ProduceRequest.json +++ b/clients/src/main/resources/common/message/ProduceRequest.json @@ -18,13 +18,15 @@ "type": "request", "listeners": ["zkBroker", "broker"], "name": "ProduceRequest", + // Versions 0-2 were removed in Apache Kafka 4.0, Version 3 is the new baseline. + // // Version 1 and 2 are the same as version 0. // // Version 3 adds the transactional ID, which is used for authorization when attempting to write // transactional data. Version 3 also adds support for Kafka Message Format v2. // // Version 4 is the same as version 3, but the requester must be prepared to handle a - // KAFKA_STORAGE_ERROR. + // KAFKA_STORAGE_ERROR. // // Version 5 and 6 are the same as version 3. // @@ -37,8 +39,12 @@ // Version 10 is the same as version 9 (KIP-951). // // Version 11 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-11", - "deprecatedVersions": "0-6", + // + // Version 12 is the same as version 11 (KIP-890). Note when produce requests are used in transaction, if + // transaction V2 (KIP_890 part 2) is enabled, the produce request will also include the function for a + // AddPartitionsToTxn call. If V2 is disabled, the client can't use produce request version higher than 11 within + // a transaction. + "validVersions": "3-12", "flexibleVersions": "9+", "fields": [ { "name": "TransactionalId", "type": "string", "versions": "3+", "nullableVersions": "3+", "default": "null", "entityType": "transactionalId", diff --git a/clients/src/main/resources/common/message/ProduceResponse.json b/clients/src/main/resources/common/message/ProduceResponse.json index 92c7a2223da09..5c12539dfb118 100644 --- a/clients/src/main/resources/common/message/ProduceResponse.json +++ b/clients/src/main/resources/common/message/ProduceResponse.json @@ -17,16 +17,16 @@ "apiKey": 0, "type": "response", "name": "ProduceResponse", - // Version 1 added the throttle time. + // Versions 0-2 were removed in Apache Kafka 4.0, Version 3 is the new baseline. // + // Version 1 added the throttle time. // Version 2 added the log append time. // // Version 3 is the same as version 2. 
// // Version 4 added KAFKA_STORAGE_ERROR as a possible error code. // - // Version 5 added LogStartOffset to filter out spurious - // OutOfOrderSequenceExceptions on the client. + // Version 5 added LogStartOffset to filter out spurious OutOfOrderSequenceExceptions on the client. // // Version 8 added RecordErrors and ErrorMessage to include information about // records that cause the whole batch to be dropped. See KIP-467 for details. @@ -36,13 +36,15 @@ // Version 10 adds 'CurrentLeader' and 'NodeEndpoints' as tagged fields (KIP-951) // // Version 11 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-11", + // + // Version 12 is the same as version 10 (KIP-890). + "validVersions": "3-12", "flexibleVersions": "9+", "fields": [ { "name": "Responses", "type": "[]TopicProduceResponse", "versions": "0+", - "about": "Each produce response", "fields": [ + "about": "Each produce response.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, - "about": "The topic name" }, + "about": "The topic name." }, { "name": "PartitionResponses", "type": "[]PartitionProduceResponse", "versions": "0+", "about": "Each partition that we produced to within the topic.", "fields": [ { "name": "Index", "type": "int32", "versions": "0+", @@ -56,18 +58,20 @@ { "name": "LogStartOffset", "type": "int64", "versions": "5+", "default": "-1", "ignorable": true, "about": "The log start offset." }, { "name": "RecordErrors", "type": "[]BatchIndexAndErrorMessage", "versions": "8+", "ignorable": true, - "about": "The batch indices of records that caused the batch to be dropped", "fields": [ + "about": "The batch indices of records that caused the batch to be dropped.", "fields": [ { "name": "BatchIndex", "type": "int32", "versions": "8+", - "about": "The batch index of the record that cause the batch to be dropped" }, + "about": "The batch index of the record that caused the batch to be dropped." 
}, { "name": "BatchIndexErrorMessage", "type": "string", "default": "null", "versions": "8+", "nullableVersions": "8+", - "about": "The error message of the record that caused the batch to be dropped"} + "about": "The error message of the record that caused the batch to be dropped."} ]}, { "name": "ErrorMessage", "type": "string", "default": "null", "versions": "8+", "nullableVersions": "8+", "ignorable": true, - "about": "The global error message summarizing the common root cause of the records that caused the batch to be dropped"}, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "10+", "taggedVersions": "10+", "tag": 0, "fields": [ + "about": "The global error message summarizing the common root cause of the records that caused the batch to be dropped."}, + { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "10+", "taggedVersions": "10+", "tag": 0, + "about": "The leader broker that the producer should use for future requests.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "10+", "default": "-1", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, - { "name": "LeaderEpoch", "type": "int32", "versions": "10+", "default": "-1", "about": "The latest known leader epoch"} + { "name": "LeaderEpoch", "type": "int32", "versions": "10+", "default": "-1", + "about": "The latest known leader epoch."} ]} ]} ]}, diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json index dbce2e7b53296..7815f7b50c7d1 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json @@ -27,9 +27,9 @@ // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]ReadStateResult", "versions": "0+", - "about": "The read results", "fields": [ + "about": "The read results.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic identifier" }, + "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionResult", "versions": "0+", "about" : "The results for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", @@ -42,7 +42,8 @@ "about": "The state epoch for this share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset, which can be -1 if it is not yet initialized." }, - { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "fields":[ + { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", + "about": "The state batches for this share-partition.", "fields":[ { "name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The base offset of this state batch." 
}, { "name": "LastOffset", "type": "int64", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json index 9a81344ecca72..ddf9d7044a6a3 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json @@ -28,9 +28,9 @@ // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]ReadStateSummaryResult", "versions": "0+", - "about": "The read results", "fields": [ + "about": "The read results.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic identifier" }, + "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionResult", "versions": "0+", "about" : "The results for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json b/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json index bba4c294efef9..7d11086e5367a 100644 --- a/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json +++ b/clients/src/main/resources/common/message/RemoveRaftVoterRequest.json @@ -21,10 +21,11 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+" }, + { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "The cluster id of the request."}, { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting removed from the topic partition" }, + "about": "The replica id of the voter getting removed from the topic partition." }, { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting removed from the topic partition" } + "about": "The directory id of the voter getting removed from the topic partition." } ] } diff --git a/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json b/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json index 5f62059f35047..155c61f386bf8 100644 --- a/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json +++ b/clients/src/main/resources/common/message/RemoveRaftVoterResponse.json @@ -23,7 +23,7 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" }, + "about": "The error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "ignorable": true, "about": "The error message, or null if there was no error." 
} ] diff --git a/clients/src/main/resources/common/message/RenewDelegationTokenRequest.json b/clients/src/main/resources/common/message/RenewDelegationTokenRequest.json index 96aeee7737b44..302e5d3e2ba1c 100644 --- a/clients/src/main/resources/common/message/RenewDelegationTokenRequest.json +++ b/clients/src/main/resources/common/message/RenewDelegationTokenRequest.json @@ -18,10 +18,10 @@ "type": "request", "listeners": ["zkBroker", "broker", "controller"], "name": "RenewDelegationTokenRequest", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Version 1 is the same as version 0. // Version 2 adds flexible version support - "validVersions": "0-2", - "deprecatedVersions": "0", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "Hmac", "type": "bytes", "versions": "0+", diff --git a/clients/src/main/resources/common/message/RenewDelegationTokenResponse.json b/clients/src/main/resources/common/message/RenewDelegationTokenResponse.json index c429dadd0cf4f..48e285280b19c 100644 --- a/clients/src/main/resources/common/message/RenewDelegationTokenResponse.json +++ b/clients/src/main/resources/common/message/RenewDelegationTokenResponse.json @@ -17,9 +17,10 @@ "apiKey": 39, "type": "response", "name": "RenewDelegationTokenResponse", + // Version 0 was removed in Apache Kafka 4.0, Version 1 is the new baseline. // Starting in version 1, on quota violation, brokers send out responses before throttling. // Version 2 adds flexible version support - "validVersions": "0-2", + "validVersions": "1-2", "flexibleVersions": "2+", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", diff --git a/clients/src/main/resources/common/message/SaslHandshakeRequest.json b/clients/src/main/resources/common/message/SaslHandshakeRequest.json index 3339f6a7afd58..a370a80df3949 100644 --- a/clients/src/main/resources/common/message/SaslHandshakeRequest.json +++ b/clients/src/main/resources/common/message/SaslHandshakeRequest.json @@ -23,7 +23,6 @@ // client negotiation for clients <= 2.4. // See https://issues.apache.org/jira/browse/KAFKA-9577 "validVersions": "0-1", - "deprecatedVersions": "0", "flexibleVersions": "none", "fields": [ { "name": "Mechanism", "type": "string", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json index 638ca10c64b3b..1f726a0c7d6a4 100644 --- a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json +++ b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json @@ -40,7 +40,8 @@ "about": "The top-level error message, or null if there was no error." }, { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, + "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -49,7 +50,8 @@ "about": "The error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The error message, or null if there was no error." 
}, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ + { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", + "about": "The current leader of the partition.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "0+", "about": "The ID of the current leader or -1 if the leader is unknown." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json index 6af767979961f..b0b91b82228a3 100644 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ b/clients/src/main/resources/common/message/ShareFetchRequest.json @@ -37,6 +37,8 @@ "about": "The minimum bytes to accumulate in the response." }, { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, + { "name": "BatchSize", "type": "int32", "versions": "0+", + "about": "The optimal number of records for batches of acquired records and acknowledgements." }, { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", "about": "The topics to fetch.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, @@ -45,7 +47,7 @@ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", - "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." }, + "about": "TO BE REMOVED. The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." }, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json index c4a166bf22525..858b0bdd46fca 100644 --- a/clients/src/main/resources/common/message/ShareFetchResponse.json +++ b/clients/src/main/resources/common/message/ShareFetchResponse.json @@ -41,7 +41,8 @@ "about": "The top-level error message, or null if there was no error." }, { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -54,7 +55,8 @@ "about": "The acknowledge error code, or 0 if there was no acknowledge error." }, { "name": "AcknowledgeErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The acknowledge error message, or null if there was no acknowledge error." 
}, - { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", "fields": [ + { "name": "CurrentLeader", "type": "LeaderIdAndEpoch", "versions": "0+", + "about": "The current leader of the partition.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "0+", "about": "The ID of the current leader or -1 if the leader is unknown." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json index c95790c9b198f..5efd435939db1 100644 --- a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json @@ -26,7 +26,7 @@ "latestVersionUnstable": true, "fields": [ { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", - "about": "The ids of the groups to describe" }, + "about": "The ids of the groups to describe." }, { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", "about": "Whether to include authorized operations." } ] diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json index 554bb5033a868..523150a92476c 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json @@ -28,12 +28,12 @@ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier." }, { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member id" }, + "about": "The member id." }, { "name": "MemberEpoch", "type": "int32", "versions": "0+", "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, - { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", + { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "topicName", "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." } ] } \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json index 8f97cb0080750..e0ff5a93d54ee 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json @@ -31,7 +31,7 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The top-level error code, or 0 if there was no error" }, + "about": "The top-level error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." 
}, { "name": "MemberId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", diff --git a/clients/src/main/resources/common/message/SnapshotFooterRecord.json b/clients/src/main/resources/common/message/SnapshotFooterRecord.json index 0d776b301cff8..054833f9f4106 100644 --- a/clients/src/main/resources/common/message/SnapshotFooterRecord.json +++ b/clients/src/main/resources/common/message/SnapshotFooterRecord.json @@ -19,7 +19,7 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - {"name": "Version", "type": "int16", "versions": "0+", - "about": "The version of the snapshot footer record"} + { "name": "Version", "type": "int16", "versions": "0+", + "about": "The version of the snapshot footer record."} ] } diff --git a/clients/src/main/resources/common/message/SnapshotHeaderRecord.json b/clients/src/main/resources/common/message/SnapshotHeaderRecord.json index 0a03b9cec4b53..10d9d6502d860 100644 --- a/clients/src/main/resources/common/message/SnapshotHeaderRecord.json +++ b/clients/src/main/resources/common/message/SnapshotHeaderRecord.json @@ -19,9 +19,9 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - {"name": "Version", "type": "int16", "versions": "0+", - "about": "The version of the snapshot header record"}, - {"name": "LastContainedLogTimestamp", "type": "int64", "versions": "0+", - "about": "The append time of the last record from the log contained in this snapshot"} + { "name": "Version", "type": "int16", "versions": "0+", + "about": "The version of the snapshot header record."}, + { "name": "LastContainedLogTimestamp", "type": "int64", "versions": "0+", + "about": "The append time of the last record from the log contained in this snapshot."} ] } diff --git a/clients/src/main/resources/common/message/StopReplicaRequest.json b/clients/src/main/resources/common/message/StopReplicaRequest.json index 7c82c97aa715e..c5f0d4c812484 100644 --- a/clients/src/main/resources/common/message/StopReplicaRequest.json +++ b/clients/src/main/resources/common/message/StopReplicaRequest.json @@ -32,7 +32,7 @@ { "name": "ControllerId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The controller id." }, { "name": "isKRaftController", "type": "bool", "versions": "4+", "default": "false", - "about": "If KRaft controller id is used during migration. See KIP-866" }, + "about": "If KRaft controller id is used during migration. See KIP-866." }, { "name": "ControllerEpoch", "type": "int32", "versions": "0+", "about": "The controller epoch." }, { "name": "BrokerEpoch", "type": "int64", "versions": "1+", "default": "-1", "ignorable": true, @@ -58,7 +58,7 @@ { "name": "TopicName", "type": "string", "versions": "3+", "entityType": "topicName", "about": "The topic name." }, { "name": "PartitionStates", "type": "[]StopReplicaPartitionState", "versions": "3+", - "about": "The state of each partition", "fields": [ + "about": "The state of each partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "3+", "about": "The partition index." 
}, { "name": "LeaderEpoch", "type": "int32", "versions": "3+", "default": "-1", diff --git a/clients/src/main/resources/common/message/StreamsGroupDescribeRequest.json b/clients/src/main/resources/common/message/StreamsGroupDescribeRequest.json new file mode 100644 index 0000000000000..6e36479043aa0 --- /dev/null +++ b/clients/src/main/resources/common/message/StreamsGroupDescribeRequest.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 89, + "type": "request", + "listeners": ["broker"], + "name": "StreamsGroupDescribeRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", + "about": "The ids of the groups to describe" }, + { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", + "about": "Whether to include authorized operations." } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json b/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json new file mode 100644 index 0000000000000..9cf2954c17fa4 --- /dev/null +++ b/clients/src/main/resources/common/message/StreamsGroupDescribeResponse.json @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 89, + "type": "response", + "name": "StreamsGroupDescribeResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - INVALID_GROUP_ID (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." 
}, + { "name": "Groups", "type": "[]DescribedGroup", "versions": "0+", + "about": "Each described group.", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The describe error, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group ID string." }, + { "name": "GroupState", "type": "string", "versions": "0+", + "about": "The group state string, or the empty string." }, + { "name": "GroupEpoch", "type": "int32", "versions": "0+", + "about": "The group epoch." }, + { "name": "AssignmentEpoch", "type": "int32", "versions": "0+", + "about": "The assignment epoch." }, + + { "name": "Topology", "type": "Topology", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The topology metadata currently initialized for the streams application. Can be null in case of a describe error.", + "fields": [ + { "name": "Epoch", "type": "int32", "versions": "0+", + "about": "The epoch of the currently initialized topology for this group." }, + { "name": "Subtopologies", "type": "[]Subtopology", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The subtopologies of the streams application. This contains the configured subtopologies, where the number of partitions are set and any regular expressions are resolved to actual topics. Null if the group is uninitialized, source topics are missing or incorrectly partitioned.", + "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "String to uniquely identify the subtopology." }, + { "name": "SourceTopics", "type": "[]string", "versions": "0+", + "about": "The topics the subtopology reads from." }, + { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", + "about": "The repartition topics the subtopology writes to." }, + { "name": "StateChangelogTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of state changelog topics associated with this subtopology. Created automatically." }, + { "name": "RepartitionSourceTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of source topics that are internally created repartition topics. Created automatically." } + ]} + ]}, + { "name": "Members", "type": "[]Member", "versions": "0+", + "about": "The members.", + "fields": [ + { "name": "MemberId", "type": "string", "versions": "0+", + "about": "The member ID." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The member epoch." }, + { "name": "InstanceId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The member instance ID for static membership." }, + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The rack ID." }, + + { "name": "ClientId", "type": "string", "versions": "0+", + "about": "The client ID." }, + { "name": "ClientHost", "type": "string", "versions": "0+", + "about": "The client host." }, + + { "name": "TopologyEpoch", "type": "int32", "versions": "0+", + "about": "The epoch of the topology on the client." }, + + { "name": "ProcessId", "type": "string", "versions": "0+", + "about": "Identity of the streams instance that may have multiple clients. 
" }, + { "name": "UserEndpoint", "type": "Endpoint", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "User-defined endpoint for Interactive Queries. Null if not defined for this client." }, + { "name": "ClientTags", "type": "[]KeyValue", "versions": "0+", + "about": "Used for rack-aware assignment algorithm." }, + { "name": "TaskOffsets", "type": "[]TaskOffset", "versions": "0+", + "about": "Cumulative changelog offsets for tasks." }, + { "name": "TaskEndOffsets", "type": "[]TaskOffset", "versions": "0+", + "about": "Cumulative changelog end offsets for tasks." }, + + { "name": "Assignment", "type": "Assignment", "versions": "0+", + "about": "The current assignment." }, + { "name": "TargetAssignment", "type": "Assignment", "versions": "0+", + "about": "The target assignment." }, + { "name": "IsClassic", "type": "bool", "versions": "0+", + "about": "True for classic members that have not been upgraded yet." } + ]}, + { "name": "AuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", + "about": "32-bit bitfield to represent authorized operations for this group." } + ] + } + ], + "commonStructs": [ + { "name": "Endpoint", "versions": "0+", "fields": [ + { "name": "Host", "type": "string", "versions": "0+", + "about": "host of the endpoint" }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "port of the endpoint" } + ]}, + { "name": "TaskOffset", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology identifier." }, + { "name": "Partition", "type": "int32", "versions": "0+", + "about": "The partition." }, + { "name": "Offset", "type": "int64", "versions": "0+", + "about": "The offset." } + ]}, + { "name": "TopicPartitions", "versions": "0+", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The topic ID." }, + { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions." } + ]}, + { "name": "Assignment", "versions": "0+", "fields": [ + { "name": "ActiveTasks", "type": "[]TaskIds", "versions": "0+", + "about": "Active tasks for this client." }, + { "name": "StandbyTasks", "type": "[]TaskIds", "versions": "0+", + "about": "Standby tasks for this client." }, + { "name": "WarmupTasks", "type": "[]TaskIds", "versions": "0+", + "about": "Warm-up tasks for this client. " } + ]}, + { "name": "TaskIds", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology identifier." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions of the input topics processed by this member." } + ]}, + { "name": "KeyValue", "versions": "0+", "fields": [ + { "name": "Key", "type": "string", "versions": "0+", + "about": "key of the config" }, + { "name": "Value", "type": "string", "versions": "0+", + "about": "value of the config" } + ]}, + { "name": "TopicInfo", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", + "about": "The name of the topic." }, + { "name": "Partitions", "type": "int32", "versions": "0+", + "about": "The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics." }, + { "name": "ReplicationFactor", "type": "int16", "versions": "0+", + "about": "The replication factor of the topic. 
Can be 0 if the default replication factor should be used." }, + { "name": "TopicConfigs", "type": "[]KeyValue", "versions": "0+", + "about": "Topic-level configurations as key-value pairs." + } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json new file mode 100644 index 0000000000000..3395688983b84 --- /dev/null +++ b/clients/src/main/resources/common/message/StreamsGroupHeartbeatRequest.json @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 88, + "type": "request", + "listeners": ["broker"], + "name": "StreamsGroupHeartbeatRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group identifier." }, + { "name": "MemberId", "type": "string", "versions": "0+", + "about": "The member ID generated by the streams consumer. The member ID must be kept during the entire lifetime of the streams consumer process." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The current member epoch; 0 to join the group; -1 to leave the group; -2 to indicate that the static member will rejoin." }, + { "name": "InstanceId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "null if not provided or if it didn't change since the last heartbeat; the instance ID for static membership otherwise." }, + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of the member otherwise." }, + { "name": "RebalanceTimeoutMs", "type": "int32", "versions": "0+", "default": -1, + "about": "-1 if it didn't change since the last heartbeat; the maximum time in milliseconds that the coordinator will wait on the member to revoke its tasks otherwise." }, + + { "name": "Topology", "type": "Topology", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The topology metadata of the streams application. Used to initialize the topology of the group and to check if the topology corresponds to the topology initialized for the group. Only sent when memberEpoch = 0, must be non-empty. Null otherwise.", + "fields": [ + { "name": "Epoch", "type": "int32", "versions": "0+", + "about": "The epoch of the topology. Used to check if the topology corresponds to the topology initialized on the brokers." 
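With both the StreamsGroupDescribeRequest and StreamsGroupDescribeResponse schemas now fully shown above, a hedged sketch of walking the generated response data. It assumes the message generator emits a StreamsGroupDescribeResponseData class with accessors matching the field names (groups(), members(), assignment(), activeTasks(), and so on), which is the usual codegen convention but is not itself part of this diff.

import org.apache.kafka.common.message.StreamsGroupDescribeResponseData;

public class StreamsDescribeSketch {
    static void printGroups(StreamsGroupDescribeResponseData response) {
        for (StreamsGroupDescribeResponseData.DescribedGroup group : response.groups()) {
            System.out.println("group=" + group.groupId()
                + " state=" + group.groupState()
                + " epoch=" + group.groupEpoch());
            for (StreamsGroupDescribeResponseData.Member member : group.members()) {
                // Assignment carries active/standby/warm-up TaskIds per the schema above.
                int activeTaskGroups = member.assignment().activeTasks().size();
                System.out.println("  member=" + member.memberId()
                    + " process=" + member.processId()
                    + " activeTaskGroups=" + activeTaskGroups);
            }
        }
    }
}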
}, + { "name": "Subtopologies", "type": "[]Subtopology", "versions": "0+", + "about": "The sub-topologies of the streams application.", + "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "String to uniquely identify the subtopology. Deterministically generated from the topology" }, + { "name": "SourceTopics", "type": "[]string", "versions": "0+", + "about": "The topics the topology reads from." }, + { "name": "SourceTopicRegex", "type": "[]string", "versions": "0+", + "about": "The regular expressions identifying topics the subtopology reads from." }, + { "name": "StateChangelogTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of state changelog topics associated with this subtopology. Created automatically." }, + { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", + "about": "The repartition topics the subtopology writes to." }, + { "name": "RepartitionSourceTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of source topics that are internally created repartition topics. Created automatically." }, + { "name": "CopartitionGroups", "type": "[]CopartitionGroup", "versions": "0+", + "about": "A subset of source topics that must be copartitioned.", + "fields": [ + { "name": "SourceTopics", "type": "[]int16", "versions": "0+", + "about": "The topics the topology reads from. Index into the array on the subtopology level." }, + { "name": "SourceTopicRegex", "type": "[]int16", "versions": "0+", + "about": "Regular expressions identifying topics the subtopology reads from. Index into the array on the subtopology level." }, + { "name": "RepartitionSourceTopics", "type": "[]int16", "versions": "0+", + "about": "The set of source topics that are internally created repartition topics. Index into the array on the subtopology level." } + ]} + ]} + ] + }, + + { "name": "ActiveTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Currently owned active tasks for this client. Null if unchanged since last heartbeat." }, + { "name": "StandbyTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Currently owned standby tasks for this client. Null if unchanged since last heartbeat." }, + { "name": "WarmupTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Currently owned warm-up tasks for this client. Null if unchanged since last heartbeat." }, + + { "name": "ProcessId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Identity of the streams instance that may have multiple consumers. Null if unchanged since last heartbeat." }, + { "name": "UserEndpoint", "type": "Endpoint", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "User-defined endpoint for Interactive Queries. Null if unchanged since last heartbeat, or if not defined on the client." }, + { "name": "ClientTags", "type": "[]KeyValue", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Used for rack-aware assignment algorithm. Null if unchanged since last heartbeat." }, + + { "name": "TaskOffsets", "type": "[]TaskOffset", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Cumulative changelog offsets for tasks. Only updated when a warm-up task has caught up, and according to the task offset interval. Null if unchanged since last heartbeat." 
}, + { "name": "TaskEndOffsets", "type": "[]TaskOffset", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Cumulative changelog end-offsets for tasks. Only updated when a warm-up task has caught up, and according to the task offset interval. Null if unchanged since last heartbeat." }, + { "name": "ShutdownApplication", "type": "bool", "versions": "0+", "default": false, + "about": "Whether all Streams clients in the group should shut down." } + ], + + "commonStructs": [ + { "name": "KeyValue", "versions": "0+", "fields": [ + { "name": "Key", "type": "string", "versions": "0+", + "about": "key of the config" }, + { "name": "Value", "type": "string", "versions": "0+", + "about": "value of the config" } + ]}, + { "name": "TopicInfo", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", + "about": "The name of the topic." }, + { "name": "Partitions", "type": "int32", "versions": "0+", + "about": "The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics." }, + { "name": "ReplicationFactor", "type": "int16", "versions": "0+", + "about": "The replication factor of the topic. Can be 0 if the default replication factor should be used." }, + { "name": "TopicConfigs", "type": "[]KeyValue", "versions": "0+", + "about": "Topic-level configurations as key-value pairs." + } + ]}, + { "name": "Endpoint", "versions": "0+", "fields": [ + { "name": "Host", "type": "string", "versions": "0+", + "about": "host of the endpoint" }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "port of the endpoint" } + ]}, + { "name": "TaskOffset", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology identifier." }, + { "name": "Partition", "type": "int32", "versions": "0+", + "about": "The partition." }, + { "name": "Offset", "type": "int64", "versions": "0+", + "about": "The offset." } + ]}, + { "name": "TaskIds", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology identifier." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions of the input topics processed by this member." } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json new file mode 100644 index 0000000000000..43b5268e20562 --- /dev/null +++ b/clients/src/main/resources/common/message/StreamsGroupHeartbeatResponse.json @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 88, + "type": "response", + "name": "StreamsGroupHeartbeatResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_MEMBER_ID (version 0+) + // - FENCED_MEMBER_EPOCH (version 0+) + // - UNRELEASED_INSTANCE_ID (version 0+) + // - GROUP_MAX_SIZE_REACHED (version 0+) + // - TOPIC_AUTHORIZATION_FAILED (version 0+) + // - CLUSTER_AUTHORIZATION_FAILED (version 0+) + // - STREAMS_INVALID_TOPOLOGY (version 0+) + // - STREAMS_INVALID_TOPOLOGY_EPOCH (version 0+) + // - STREAMS_TOPOLOGY_FENCED (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top-level error code, or 0 if there was no error" }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "MemberId", "type": "string", "versions": "0+", + "about": "The member id is always generated by the streams consumer."}, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The member epoch." }, + { "name": "HeartbeatIntervalMs", "type": "int32", "versions": "0+", + "about": "The heartbeat interval in milliseconds." }, + { "name": "AcceptableRecoveryLag", "type": "int32", "versions": "0+", + "about": "The maximal lag a warm-up task can have to be considered caught-up." }, + { "name": "TaskOffsetIntervalMs", "type": "int32", "versions": "0+", + "about": "The interval in which the task changelog offsets on a client are updated on the broker. The offsets are sent with the next heartbeat after this time has passed." }, + + { "name": "Status", "type": "[]Status", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Indicate zero or more status for the group. Null if unchanged since last heartbeat." }, + + // The streams app knows which partitions to fetch from given this information + { "name": "ActiveTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Assigned active tasks for this client. Null if unchanged since last heartbeat." }, + { "name": "StandbyTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Assigned standby tasks for this client. Null if unchanged since last heartbeat." }, + { "name": "WarmupTasks", "type": "[]TaskIds", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Assigned warm-up tasks for this client. Null if unchanged since last heartbeat." }, + + // IQ-related information + { "name": "PartitionsByUserEndpoint", "type": "[]EndpointToPartitions", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "Global assignment information used for IQ. Null if unchanged since last heartbeat." 
, + "fields": [ + { "name": "UserEndpoint", "type": "Endpoint", "versions": "0+", + "about": "User-defined endpoint to connect to the node" }, + { "name": "Partitions", "type": "[]TopicPartition", "versions": "0+", + "about": "All partitions available on the node" } + ] + } + ], + "commonStructs": [ + { "name": "Status", "versions": "0+", "fields": [ + // Possible status codes + // 0 - STALE_TOPOLOGY - The topology epoch supplied is lower than the topology epoch for this streams group. + // 1 - MISSING_SOURCE_TOPICS - One or more source topics are missing or a source topic regex resolves to zero topics. + // Missing topics are indicated in the StatusDetail. + // 2 - INCORRECTLY_PARTITIONED_TOPICS - One or more topics are incorrectly partitioned, that is, they are not copartitioned despite being + // part of a copartition group, or the number of partitions in a changelog topic does not correspond + // to the maximal number of source topic partition for that subtopology. + // Incorrectly partitioned topics are indicated in the StatusDetail. + // 3 - MISSING_INTERNAL_TOPICS - One or more internal topics are missing. + // Missing topics are indicated in the StatusDetail. + // The group coordinator will attempt to create all missing internal topics, if any errors occur during + // topic creation, this will be indicated in StatusDetail. + // 4 - SHUTDOWN_APPLICATION - A client requested the shutdown of the whole application. + { "name": "StatusCode", "type": "int8", "versions": "0+", + "about": "A code to indicate that a particular status is active for the group membership" }, + { "name": "StatusDetail", "type": "string", "versions": "0+", + "about": "A string representation of the status." } + ]}, + { "name": "TopicPartition", "versions": "0+", "fields": [ + { "name": "Topic", "type": "string", "versions": "0+", + "about": "topic name" }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "partitions" } + ]}, + { "name": "TaskIds", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology identifier." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions of the input topics processed by this member." } + ]}, + { "name": "Endpoint", "versions": "0+", "fields": [ + { "name": "Host", "type": "string", "versions": "0+", + "about": "host of the endpoint" }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "port of the endpoint" } + ]} + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json b/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json index 3cb63aa8fb8e3..fd2c34b74906c 100644 --- a/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json +++ b/clients/src/main/resources/common/message/TxnOffsetCommitRequest.json @@ -25,7 +25,12 @@ // Version 3 adds the member.id, group.instance.id and generation.id. // // Version 4 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-4", + // + // Version 5 is the same as version 4 (KIP-890). Note when TxnOffsetCommit requests are used in transaction, if + // transaction V2 (KIP_890 part 2) is enabled, the TxnOffsetCommit request will also include the function for a + // AddOffsetsToTxn call. If V2 is disabled, the client can't use TxnOffsetCommit request version higher than 4 within + // a transaction. 
+ "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "TransactionalId", "type": "string", "versions": "0+", "entityType": "transactionalId", diff --git a/clients/src/main/resources/common/message/TxnOffsetCommitResponse.json b/clients/src/main/resources/common/message/TxnOffsetCommitResponse.json index 1a04cef9d5e4a..9769ed2aa97bb 100644 --- a/clients/src/main/resources/common/message/TxnOffsetCommitResponse.json +++ b/clients/src/main/resources/common/message/TxnOffsetCommitResponse.json @@ -24,7 +24,9 @@ // Version 3 adds illegal generation, fenced instance id, and unknown member id errors. // // Version 4 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - "validVersions": "0-4", + // + // Version 5 is the same with version 3 (KIP-890). + "validVersions": "0-5", "flexibleVersions": "3+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/UpdateFeaturesRequest.json b/clients/src/main/resources/common/message/UpdateFeaturesRequest.json index 97b0986c101aa..8de1eeedd908d 100644 --- a/clients/src/main/resources/common/message/UpdateFeaturesRequest.json +++ b/clients/src/main/resources/common/message/UpdateFeaturesRequest.json @@ -28,16 +28,16 @@ "about": "How long to wait in milliseconds before timing out the request." }, { "name": "FeatureUpdates", "type": "[]FeatureUpdateKey", "versions": "0+", "about": "The list of updates to finalized features.", "fields": [ - {"name": "Feature", "type": "string", "versions": "0+", "mapKey": true, + { "name": "Feature", "type": "string", "versions": "0+", "mapKey": true, "about": "The name of the finalized feature to be updated."}, - {"name": "MaxVersionLevel", "type": "int16", "versions": "0+", + { "name": "MaxVersionLevel", "type": "int16", "versions": "0+", "about": "The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1, is special, and can be used to request the deletion of the finalized feature."}, - {"name": "AllowDowngrade", "type": "bool", "versions": "0", + { "name": "AllowDowngrade", "type": "bool", "versions": "0", "about": "DEPRECATED in version 1 (see DowngradeType). When set to true, the finalized feature version level is allowed to be downgraded/deleted. The downgrade request will fail if the new maximum version level is a value that's not lower than the existing maximum finalized version level."}, - {"name": "UpgradeType", "type": "int8", "versions": "1+", "default": 1, + { "name": "UpgradeType", "type": "int8", "versions": "1+", "default": 1, "about": "Determine which type of upgrade will be performed: 1 will perform an upgrade only (default), 2 is safe downgrades only (lossless), 3 is unsafe downgrades (lossy)."} ]}, - {"name": "ValidateOnly", "type": "bool", "versions": "1+", "default": false, + { "name": "ValidateOnly", "type": "bool", "versions": "1+", "default": false, "about": "True if we should validate the request, but not perform the upgrade or downgrade."} ] } diff --git a/clients/src/main/resources/common/message/UpdateMetadataRequest.json b/clients/src/main/resources/common/message/UpdateMetadataRequest.json index 1b90dee6a7ad8..c4ab4c442328c 100644 --- a/clients/src/main/resources/common/message/UpdateMetadataRequest.json +++ b/clients/src/main/resources/common/message/UpdateMetadataRequest.json @@ -37,10 +37,10 @@ { "name": "ControllerId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The controller id." 
}, { "name": "isKRaftController", "type": "bool", "versions": "8+", "default": "false", - "about": "If KRaft controller id is used during migration. See KIP-866" }, + "about": "If KRaft controller id is used during migration. See KIP-866." }, { "name": "Type", "type": "int8", "versions": "8+", "default": 0, "tag": 0, "taggedVersions": "8+", - "about": "Indicates if this request is a Full metadata snapshot (2), Incremental (1), or Unknown (0). Using during ZK migration, see KIP-866"}, + "about": "Indicates if this request is a Full metadata snapshot (2), Incremental (1), or Unknown (0). Using during ZK migration, see KIP-866."}, { "name": "ControllerEpoch", "type": "int32", "versions": "0+", "about": "The controller epoch." }, { "name": "BrokerEpoch", "type": "int64", "versions": "5+", "ignorable": true, "default": "-1", @@ -55,7 +55,8 @@ { "name": "PartitionStates", "type": "[]UpdateMetadataPartitionState", "versions": "5+", "about": "The partition that we would like to update." } ]}, - { "name": "LiveBrokers", "type": "[]UpdateMetadataBroker", "versions": "0+", "fields": [ + { "name": "LiveBrokers", "type": "[]UpdateMetadataBroker", "versions": "0+", + "about": "The brokers that we know about.", "fields": [ { "name": "Id", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The broker id." }, // Version 0 of the protocol only allowed specifying a single host and @@ -67,9 +68,9 @@ { "name": "Endpoints", "type": "[]UpdateMetadataEndpoint", "versions": "1+", "ignorable": true, "about": "The broker endpoints.", "fields": [ { "name": "Port", "type": "int32", "versions": "1+", - "about": "The port of this endpoint" }, + "about": "The port of this endpoint." }, { "name": "Host", "type": "string", "versions": "1+", - "about": "The hostname of this endpoint" }, + "about": "The hostname of this endpoint." }, { "name": "Listener", "type": "string", "versions": "3+", "ignorable": true, "about": "The listener name." }, { "name": "SecurityProtocol", "type": "int16", "versions": "1+", diff --git a/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json b/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json index dadca902f1529..dc16b428429f8 100644 --- a/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json +++ b/clients/src/main/resources/common/message/UpdateRaftVoterRequest.json @@ -21,28 +21,29 @@ "validVersions": "0", "flexibleVersions": "0+", "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+" }, + { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", + "about": "The cluster id."}, { "name": "CurrentLeaderEpoch", "type": "int32", "versions": "0+", - "about": "The current leader epoch of the partition, -1 for unknown leader epoch" }, + "about": "The current leader epoch of the partition, -1 for unknown leader epoch." }, { "name": "VoterId", "type": "int32", "versions": "0+", - "about": "The replica id of the voter getting updated in the topic partition" }, + "about": "The replica id of the voter getting updated in the topic partition." }, { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter getting updated in the topic partition" }, + "about": "The directory id of the voter getting updated in the topic partition." 
}, { "name": "Listeners", "type": "[]Listener", "versions": "0+", - "about": "The endpoint that can be used to communicate with the leader", "fields": [ + "about": "The endpoint that can be used to communicate with the leader.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, - "about": "The name of the endpoint" }, + "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "0+", - "about": "The hostname" }, + "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "0+", - "about": "The port" } + "about": "The port." } ]}, { "name": "KRaftVersionFeature", "type": "KRaftVersionFeature", "versions": "0+", - "about": "The range of versions of the protocol that the replica supports", "fields": [ + "about": "The range of versions of the protocol that the replica supports.", "fields": [ { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", - "about": "The minimum supported KRaft protocol version" }, + "about": "The minimum supported KRaft protocol version." }, { "name": "MaxSupportedVersion", "type": "int16", "versions": "0+", - "about": "The maximum supported KRaft protocol version" } + "about": "The maximum supported KRaft protocol version." } ]} ] } diff --git a/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json b/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json index 33b49c37198bb..12c696faacd16 100644 --- a/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json +++ b/clients/src/main/resources/common/message/UpdateRaftVoterResponse.json @@ -23,15 +23,15 @@ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", - "about": "The error code, or 0 if there was no error" }, + "about": "The error code, or 0 if there was no error." }, { "name": "CurrentLeader", "type": "CurrentLeader", "versions": "0+", - "taggedVersions": "0+", "tag": 0, "fields": [ + "taggedVersions": "0+", "tag": 0, "about": "Details of the current Raft cluster leader.", "fields": [ { "name": "LeaderId", "type": "int32", "versions": "0+", "default": "-1", "entityType" : "brokerId", - "about": "The replica id of the current leader or -1 if the leader is unknown" }, + "about": "The replica id of the current leader or -1 if the leader is unknown." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "default": "-1", - "about": "The latest known leader epoch" }, - { "name": "Host", "type": "string", "versions": "0+", "about": "The node's hostname" }, - { "name": "Port", "type": "int32", "versions": "0+", "about": "The node's port" } + "about": "The latest known leader epoch." }, + { "name": "Host", "type": "string", "versions": "0+", "about": "The node's hostname." }, + { "name": "Port", "type": "int32", "versions": "0+", "about": "The node's port." 
} ] } ] diff --git a/clients/src/main/resources/common/message/VoteRequest.json b/clients/src/main/resources/common/message/VoteRequest.json index b010765cd90f4..80cb580d1995c 100644 --- a/clients/src/main/resources/common/message/VoteRequest.json +++ b/clients/src/main/resources/common/message/VoteRequest.json @@ -18,34 +18,38 @@ "type": "request", "listeners": ["controller"], "name": "VoteRequest", - // Version 1 adds voter key and candidate directory id (KIP-853) - "validVersions": "0-1", + // Version 1 adds voter key and directory id (KIP-853) + // Version 2 adds PreVote field and renames candidate to replica + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", - "nullableVersions": "0+", "default": "null"}, + "nullableVersions": "0+", "default": "null", + "about": "The cluster id."}, { "name": "VoterId", "type": "int32", "versions": "1+", "ignorable": true, "default": "-1", "entityType": "brokerId", - "about": "The replica id of the voter receiving the request" }, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + "about": "The replica id of the voter receiving the request." }, + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The topic data.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + "versions": "0+", "about": "The partition data.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "CandidateEpoch", "type": "int32", "versions": "0+", - "about": "The bumped epoch of the candidate sending the request"}, - { "name": "CandidateId", "type": "int32", "versions": "0+", "entityType": "brokerId", + { "name": "ReplicaEpoch", "type": "int32", "versions": "0+", + "about": "The epoch of the voter sending the request"}, + { "name": "ReplicaId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The replica id of the voter sending the request"}, - { "name": "CandidateDirectoryId", "type": "uuid", "versions": "1+", "ignorable": true, + { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "1+", "ignorable": true, "about": "The directory id of the voter sending the request" }, { "name": "VoterDirectoryId", "type": "uuid", "versions": "1+", "ignorable": true, - "about": "The ID of the voter sending the request"}, + "about": "The directory id of the voter receiving the request"}, { "name": "LastOffsetEpoch", "type": "int32", "versions": "0+", - "about": "The epoch of the last record written to the metadata log"}, + "about": "The epoch of the last record written to the metadata log."}, { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "The offset of the last record written to the metadata log"} + "about": "The log end offset of the metadata log of the voter sending the request."}, + { "name": "PreVote", "type": "bool", "versions": "2+", + "about": "Whether the request is a PreVote request (not persisted) or not."} ] } ] diff --git a/clients/src/main/resources/common/message/VoteResponse.json b/clients/src/main/resources/common/message/VoteResponse.json index cb59ed8957879..d8ffa4bb4f897 100644 --- a/clients/src/main/resources/common/message/VoteResponse.json +++ b/clients/src/main/resources/common/message/VoteResponse.json @@ -18,36 +18,38 @@ "type": "response", "name": "VoteResponse", // Version 1 
adds leader endpoint (KIP-853) - "validVersions": "0-1", + // Version 2 handles PreVote requests + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top level error code."}, - { "name": "Topics", "type": "[]TopicData", - "versions": "0+", "fields": [ + { "name": "Topics", "type": "[]TopicData", "versions": "0+", + "about": "The results for each topic.", "fields": [ { "name": "TopicName", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "Partitions", "type": "[]PartitionData", - "versions": "0+", "fields": [ + { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", + "about": "The results for each partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+"}, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The partition level error code."}, { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", "about": "The ID of the current leader or -1 if the leader is unknown."}, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The latest known leader epoch"}, + "about": "The latest known leader epoch."}, { "name": "VoteGranted", "type": "bool", "versions": "0+", - "about": "True if the vote was granted and false otherwise"} + "about": "True if the vote was granted and false otherwise."} ] } ] }, { "name": "NodeEndpoints", "type": "[]NodeEndpoint", "versions": "1+", "taggedVersions": "1+", "tag": 0, - "about": "Endpoints for all current-leaders enumerated in PartitionData", "fields": [ + "about": "Endpoints for all current-leaders enumerated in PartitionData.", "fields": [ { "name": "NodeId", "type": "int32", "versions": "1+", - "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node"}, - { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname" }, - { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port" } + "mapKey": true, "entityType": "brokerId", "about": "The ID of the associated node."}, + { "name": "Host", "type": "string", "versions": "1+", "about": "The node's hostname." }, + { "name": "Port", "type": "uint16", "versions": "1+", "about": "The node's port." } ] } ] diff --git a/clients/src/main/resources/common/message/VotersRecord.json b/clients/src/main/resources/common/message/VotersRecord.json index e9df56ad4f142..cf6d4ef24eafc 100644 --- a/clients/src/main/resources/common/message/VotersRecord.json +++ b/clients/src/main/resources/common/message/VotersRecord.json @@ -20,27 +20,28 @@ "flexibleVersions": "0+", "fields": [ { "name": "Version", "type": "int16", "versions": "0+", - "about": "The version of the voters record" }, - { "name": "Voters", "type": "[]Voter", "versions": "0+", "fields": [ + "about": "The version of the voters record." }, + { "name": "Voters", "type": "[]Voter", "versions": "0+", + "about": "The set of voters in the quorum for this epoch.", "fields": [ { "name": "VoterId", "type": "int32", "versions": "0+", "entityType": "brokerId", - "about": "The replica id of the voter in the topic partition" }, + "about": "The replica id of the voter in the topic partition." 
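Version 2 of the Vote RPC above renames the Candidate* fields to Replica* and adds the PreVote flag, so the same message now covers both a pre-vote (not persisted by the receiver) and a binding vote. A hedged sketch of filling in the generated request data for a pre-vote, assuming the VoteRequestData class generated from the schema above exposes setters matching the renamed fields; the ids and offsets are placeholders.

import java.util.Collections;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.VoteRequestData;

public class PreVoteRequestSketch {
    public static void main(String[] args) {
        VoteRequestData.PartitionData partition = new VoteRequestData.PartitionData()
            .setPartitionIndex(0)
            .setReplicaEpoch(5)                     // epoch of the replica sending the request
            .setReplicaId(1)
            .setReplicaDirectoryId(Uuid.randomUuid())
            .setVoterDirectoryId(Uuid.randomUuid()) // directory id of the voter receiving the request
            .setLastOffsetEpoch(5)
            .setLastOffset(1234L)
            .setPreVote(true);                      // new in version 2: probe support without a binding vote

        VoteRequestData request = new VoteRequestData()
            .setClusterId("cluster-id")
            .setVoterId(2)                          // the voter receiving the request
            .setTopics(Collections.singletonList(new VoteRequestData.TopicData()
                .setTopicName("__cluster_metadata")
                .setPartitions(Collections.singletonList(partition))));
        System.out.println(request);
    }
}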
}, { "name": "VoterDirectoryId", "type": "uuid", "versions": "0+", - "about": "The directory id of the voter in the topic partition" }, + "about": "The directory id of the voter in the topic partition." }, { "name": "Endpoints", "type": "[]Endpoint", "versions": "0+", - "about": "The endpoint that can be used to communicate with the voter", "fields": [ + "about": "The endpoint that can be used to communicate with the voter.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, - "about": "The name of the endpoint" }, + "about": "The name of the endpoint." }, { "name": "Host", "type": "string", "versions": "0+", - "about": "The hostname" }, + "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "0+", - "about": "The port" } + "about": "The port." } ]}, { "name": "KRaftVersionFeature", "type": "KRaftVersionFeature", "versions": "0+", - "about": "The range of versions of the protocol that the replica supports", "fields": [ + "about": "The range of versions of the protocol that the replica supports.", "fields": [ { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", - "about": "The minimum supported KRaft protocol version" }, + "about": "The minimum supported KRaft protocol version." }, { "name": "MaxSupportedVersion", "type": "int16", "versions": "0+", - "about": "The maximum supported KRaft protocol version" } + "about": "The maximum supported KRaft protocol version." } ]} ]} ] diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json index 93d55de8b02d5..c0584542739ea 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json @@ -38,13 +38,14 @@ "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset, or -1 if the start offset is not being written." }, - { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "fields": [ + { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", + "about": "The state batches for the share-partition.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The base offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0+", - "about": "The state - 0:Available,2:Acked,4:Archived" }, + "about": "The state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count." } ]} diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json index 7fae63f394ea0..e529126c44b77 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json @@ -28,9 +28,9 @@ // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]WriteStateResult", "versions": "0+", - "about": "The write results", "fields": [ + "about": "The write results.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", - "about": "The topic identifier" }, + "about": "The topic identifier." 
}, { "name": "Partitions", "type": "[]PartitionResult", "versions": "0+", "about" : "The results for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/WriteTxnMarkersRequest.json b/clients/src/main/resources/common/message/WriteTxnMarkersRequest.json index 9e29fb39f4525..dc4b5be42a7a4 100644 --- a/clients/src/main/resources/common/message/WriteTxnMarkersRequest.json +++ b/clients/src/main/resources/common/message/WriteTxnMarkersRequest.json @@ -38,7 +38,7 @@ "about": "The indexes of the partitions to write transaction markers for." } ]}, { "name": "CoordinatorEpoch", "type": "int32", "versions": "0+", - "about": "Epoch associated with the transaction state partition hosted by this transaction coordinator" } + "about": "Epoch associated with the transaction state partition hosted by this transaction coordinator." } ]} ] } diff --git a/clients/src/test/java/org/apache/kafka/clients/ApiVersionsTest.java b/clients/src/test/java/org/apache/kafka/clients/ApiVersionsTest.java index 7c8a629ba05fc..65be3c2b16620 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ApiVersionsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ApiVersionsTest.java @@ -17,47 +17,15 @@ package org.apache.kafka.clients; import org.apache.kafka.common.message.ApiVersionsResponseData; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.record.RecordBatch; import org.junit.jupiter.api.Test; import java.util.Arrays; -import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; public class ApiVersionsTest { - @Test - public void testMaxUsableProduceMagic() { - ApiVersions apiVersions = new ApiVersions(); - assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); - - apiVersions.update("0", NodeApiVersions.create()); - assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); - - apiVersions.update("1", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2)); - assertEquals(RecordBatch.MAGIC_VALUE_V1, apiVersions.maxUsableProduceMagic()); - - apiVersions.remove("1"); - assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); - } - - @Test - public void testMaxUsableProduceMagicWithRaftController() { - ApiVersions apiVersions = new ApiVersions(); - assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); - - // something that doesn't support PRODUCE, which is the case with Raft-based controllers - apiVersions.update("2", NodeApiVersions.create(Collections.singleton( - new ApiVersionsResponseData.ApiVersion() - .setApiKey(ApiKeys.FETCH.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 2)))); - assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); - } - @Test public void testFinalizedFeaturesUpdate() { ApiVersions apiVersions = new ApiVersions(); @@ -69,7 +37,6 @@ public void testFinalizedFeaturesUpdate() { .setName("transaction.version") .setMaxVersion((short) 2) .setMinVersion((short) 0)), - false, Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() .setName("transaction.version") .setMaxVersionLevel((short) 2) @@ -85,7 +52,6 @@ public void testFinalizedFeaturesUpdate() { .setName("transaction.version") .setMaxVersion((short) 2) .setMinVersion((short) 0)), - false, Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() .setName("transaction.version") .setMaxVersionLevel((short) 1) @@ 
-96,4 +62,5 @@ public void testFinalizedFeaturesUpdate() { assertEquals(1, info.finalizedFeaturesEpoch); assertEquals((short) 2, info.finalizedFeatures.get("transaction.version")); } + } diff --git a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java index 071bf7635fd8a..2368a91137f16 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java @@ -19,24 +19,27 @@ import org.apache.kafka.common.config.ConfigException; import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; public class ClientUtilsTest { - private final HostResolver hostResolver = new DefaultHostResolver(); - @Test public void testParseAndValidateAddresses() { checkWithoutLookup("127.0.0.1:8000"); @@ -57,15 +60,39 @@ public void testParseAndValidateAddressesWithReverseLookup() { checkWithoutLookup("[::1]:8000"); checkWithoutLookup("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "localhost:10000"); - // With lookup of example.com, either one or two addresses are expected depending on - // whether ipv4 and ipv6 are enabled - List validatedAddresses = checkWithLookup(Collections.singletonList("example.com:10000")); - assertFalse(validatedAddresses.isEmpty(), "Unexpected addresses " + validatedAddresses); - List validatedHostNames = validatedAddresses.stream().map(InetSocketAddress::getHostName) - .collect(Collectors.toList()); - List expectedHostNames = asList("93.184.215.14", "2606:2800:21f:cb07:6820:80da:af6b:8b2c"); - assertTrue(expectedHostNames.containsAll(validatedHostNames), "Unexpected addresses " + validatedHostNames); - validatedAddresses.forEach(address -> assertEquals(10000, address.getPort())); + String hostname = "example.com"; + Integer port = 10000; + String canonicalHostname1 = "canonical_hostname1"; + String canonicalHostname2 = "canonical_hostname2"; + try (final MockedStatic inetAddress = mockStatic(InetAddress.class)) { + InetAddress inetAddress1 = mock(InetAddress.class); + when(inetAddress1.getCanonicalHostName()).thenReturn(canonicalHostname1); + InetAddress inetAddress2 = mock(InetAddress.class); + when(inetAddress2.getCanonicalHostName()).thenReturn(canonicalHostname2); + inetAddress.when(() -> InetAddress.getAllByName(hostname)) + .thenReturn(new InetAddress[]{inetAddress1, inetAddress2}); + try (MockedConstruction inetSocketAddress = + mockConstruction( + InetSocketAddress.class, + (mock, context) -> { + when(mock.isUnresolved()).thenReturn(false); + when(mock.getHostName()).thenReturn((String) context.arguments().get(0)); + when(mock.getPort()).thenReturn((Integer) context.arguments().get(1)); + }) + ) { + List validatedAddresses = checkWithLookup(Collections.singletonList(hostname + ":" + port)); + assertEquals(2, 
inetSocketAddress.constructed().size()); + assertEquals(2, validatedAddresses.size()); + assertTrue(validatedAddresses.containsAll(List.of( + inetSocketAddress.constructed().get(0), + inetSocketAddress.constructed().get(1))) + ); + validatedAddresses.forEach(address -> assertEquals(port, address.getPort())); + validatedAddresses.stream().map(InetSocketAddress::getHostName).forEach( + hostName -> assertTrue(List.of(canonicalHostname1, canonicalHostname2).contains(hostName)) + ); + } + } } @Test @@ -86,7 +113,21 @@ public void testInvalidPort() { @Test public void testOnlyBadHostname() { - assertThrows(ConfigException.class, () -> checkWithoutLookup("some.invalid.hostname.foo.bar.local:9999")); + try (MockedConstruction inetSocketAddress = + mockConstruction( + InetSocketAddress.class, + (mock, context) -> when(mock.isUnresolved()).thenReturn(true) + ) + ) { + Exception exception = assertThrows( + ConfigException.class, + () -> checkWithoutLookup("some.invalid.hostname.foo.bar.local:9999") + ); + assertEquals( + "No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, + exception.getMessage() + ); + } } @Test @@ -109,8 +150,13 @@ public void testFilterPreferredAddresses() throws UnknownHostException { @Test public void testResolveUnknownHostException() { - assertThrows(UnknownHostException.class, - () -> ClientUtils.resolve("some.invalid.hostname.foo.bar.local", hostResolver)); + HostResolver throwingHostResolver = host -> { + throw new UnknownHostException(); + }; + assertThrows( + UnknownHostException.class, + () -> ClientUtils.resolve("some.invalid.hostname.foo.bar.local", throwingHostResolver) + ); } @Test @@ -129,5 +175,4 @@ private List checkWithoutLookup(String... url) { private List checkWithLookup(List url) { return ClientUtils.parseAndValidateAddresses(url, ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY); } - } diff --git a/clients/src/test/java/org/apache/kafka/clients/MockClient.java b/clients/src/test/java/org/apache/kafka/clients/MockClient.java index 8a195184e937c..fca0a9ca2121b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/MockClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/MockClient.java @@ -71,6 +71,7 @@ public FutureResponse(Node node, private int correlation; private Runnable wakeupHook; + private boolean advanceTimeDuringPoll; private final Time time; private final MockMetadataUpdater metadataUpdater; private final Map connections = new HashMap<>(); @@ -138,7 +139,11 @@ public long connectionDelay(Node node, long now) { @Override public long pollDelayMs(Node node, long now) { - return connectionDelay(node, now); + return connectionState(node.idString()).pollDelayMs(now); + } + + public void advanceTimeDuringPoll(boolean advanceTimeDuringPoll) { + this.advanceTimeDuringPoll = advanceTimeDuringPoll; } public void backoff(Node node, long durationMs) { @@ -336,6 +341,12 @@ public List poll(long timeoutMs, long now) { copy.add(response); } + // In real life, if poll() is called and we get to the end with no responses, + // time equal to timeoutMs would have passed. 
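+        // The behaviour is opt-in, e.g. a test that wants an otherwise-empty poll to consume its
+        // timeout can call advanceTimeDuringPoll(true) on the MockClient before polling; the
+        // default (false) preserves the previous behaviour of leaving the mock clock untouched.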
+ if (advanceTimeDuringPoll) { + time.sleep(timeoutMs); + } + return copy; } @@ -795,6 +806,13 @@ long connectionDelay(long now) { return 0; } + long pollDelayMs(long now) { + if (notThrottled(now)) + return connectionDelay(now); + + return throttledUntilMs - now; + } + boolean ready(long now) { switch (state) { case CONNECTED: diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index 89f567157c3ad..ce7d4d83506d1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.RebootstrapRequiredException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.ApiMessageType; @@ -59,6 +60,7 @@ import org.junit.jupiter.api.Test; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -213,7 +215,7 @@ public void testClose() { client.poll(1, time.milliseconds()); assertTrue(client.isReady(node, time.milliseconds()), "The client should be ready"); - ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData() + ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks((short) 1) .setTimeoutMs(1000)); @@ -241,6 +243,109 @@ public void testUnsupportedVersionDuringInternalMetadataRequest() { assertEquals(UnsupportedVersionException.class, metadataUpdater.getAndClearFailure().getClass()); } + @Test + public void testRebootstrap() { + long rebootstrapTriggerMs = 1000; + AtomicInteger rebootstrapCount = new AtomicInteger(); + Metadata metadata = new Metadata(50, 50, 5000, new LogContext(), new ClusterResourceListeners()) { + @Override + public synchronized void rebootstrap() { + super.rebootstrap(); + rebootstrapCount.incrementAndGet(); + } + }; + + NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, + reconnectBackoffMsTest, 0, 64 * 1024, 64 * 1024, + defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(), + rebootstrapTriggerMs, + MetadataRecoveryStrategy.REBOOTSTRAP); + MetadataUpdater metadataUpdater = TestUtils.fieldValue(client, NetworkClient.class, "metadataUpdater"); + metadata.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9999))); + + metadata.requestUpdate(true); + client.poll(0, time.milliseconds()); + time.sleep(rebootstrapTriggerMs + 1); + client.poll(0, time.milliseconds()); + assertEquals(1, rebootstrapCount.get()); + time.sleep(1); + client.poll(0, time.milliseconds()); + assertEquals(1, rebootstrapCount.get()); + + metadata.requestUpdate(true); + client.poll(0, time.milliseconds()); + assertEquals(1, rebootstrapCount.get()); + metadataUpdater.handleFailedRequest(time.milliseconds(), Optional.of(new KafkaException())); + client.poll(0, time.milliseconds()); + assertEquals(1, rebootstrapCount.get()); + time.sleep(rebootstrapTriggerMs); + client.poll(0, time.milliseconds()); + assertEquals(2, 
rebootstrapCount.get()); + + metadata.requestUpdate(true); + client.poll(0, time.milliseconds()); + assertEquals(2, rebootstrapCount.get()); + + MetadataRequest.Builder builder = new MetadataRequest.Builder(Collections.emptyList(), true); + ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true); + MetadataResponse rebootstrapResponse = (MetadataResponse) builder.build().getErrorResponse(0, new RebootstrapRequiredException("rebootstrap")); + metadataUpdater.handleSuccessfulResponse(request.makeHeader(builder.latestAllowedVersion()), time.milliseconds(), rebootstrapResponse); + assertEquals(2, rebootstrapCount.get()); + time.sleep(50); + client.poll(0, time.milliseconds()); + assertEquals(3, rebootstrapCount.get()); + } + + @Test + public void testInflightRequestsDuringRebootstrap() { + long refreshBackoffMs = 50; + long rebootstrapTriggerMs = 1000; + int defaultRequestTimeoutMs = 5000; + AtomicInteger rebootstrapCount = new AtomicInteger(); + Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMs, 5000, new LogContext(), new ClusterResourceListeners()) { + @Override + public synchronized void rebootstrap() { + super.rebootstrap(); + rebootstrapCount.incrementAndGet(); + } + }; + metadata.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9999))); + NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, + reconnectBackoffMsTest, 0, 64 * 1024, 64 * 1024, + defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), new LogContext(), + rebootstrapTriggerMs, MetadataRecoveryStrategy.REBOOTSTRAP); + + MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap()); + metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds()); + List nodes = metadata.fetch().nodes(); + nodes.forEach(node -> { + client.ready(node, time.milliseconds()); + awaitReady(client, node); + }); + + // Queue a request + sendEmptyProduceRequest(client, nodes.get(0).idString()); + List responses = client.poll(0, time.milliseconds()); + assertEquals(0, responses.size()); + assertEquals(1, client.inFlightRequestCount()); + + // Trigger rebootstrap + metadata.requestUpdate(true); + time.sleep(refreshBackoffMs); + responses = client.poll(0, time.milliseconds()); + assertEquals(0, responses.size()); + assertEquals(2, client.inFlightRequestCount()); + time.sleep(rebootstrapTriggerMs + 1); + responses = client.poll(0, time.milliseconds()); + + // Verify that inflight produce request was aborted with disconnection + assertEquals(1, responses.size()); + assertEquals(PRODUCE, responses.get(0).requestHeader().apiKey()); + assertTrue(responses.get(0).wasDisconnected()); + assertEquals(0, client.inFlightRequestCount()); + assertEquals(Collections.emptySet(), nodes.stream().filter(node -> !client.connectionFailed(node)).collect(Collectors.toSet())); + } + private void checkSimpleRequestResponse(NetworkClient networkClient) { awaitReady(networkClient, node); // has to be before creating any request, as it may send ApiVersionsRequest and its response is mocked with correlation id 0 short requestVersion = PRODUCE.latestVersion(); @@ -491,14 +596,14 @@ public void testDefaultRequestTimeout() { /** * This is a helper method that will execute two produce calls. The first call is expected to work and the * second produce call is intentionally made to emulate a request timeout. 
In the case that a timeout occurs - * during a request, we want to ensure that we {@link Metadata#requestUpdate() request a metadata update} so that + * during a request, we want to ensure that we {@link Metadata#requestUpdate(boolean) request a metadata update} so that * on a subsequent invocation of {@link NetworkClient#poll(long, long) poll}, the metadata request will be sent. * *

          * * The {@link MetadataUpdater} has a specific method to handle * {@link NetworkClient.DefaultMetadataUpdater#handleServerDisconnect(long, String, Optional) server disconnects} - * which is where we {@link Metadata#requestUpdate() request a metadata update}. This test helper method ensures + * which is where we {@link Metadata#requestUpdate(boolean) request a metadata update}. This test helper method ensures * that is invoked by checking {@link Metadata#updateRequested()} after the simulated timeout. * * @param requestTimeoutMs Timeout in ms @@ -527,7 +632,7 @@ private void testRequestTimeout(int requestTimeoutMs) { private ClientResponse produce(NetworkClient client, int requestTimeoutMs, boolean shouldEmulateTimeout) { awaitReady(client, node); // has to be before creating any request, as it may send ApiVersionsRequest and its response is mocked with correlation id 0 - ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData() + ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks((short) 1) .setTimeoutMs(1000)); @@ -661,33 +766,12 @@ private ApiVersionsResponse createExpectedApiVersionsResponse(ApiKeys key, short .setApiKeys(versionList)); } - @Test - public void testThrottlingNotEnabledForConnectionToOlderBroker() { - // Instrument the test so that the max protocol version for PRODUCE returned from the node is 5 and thus - // client-side throttling is not enabled. Also, return a response with a 100ms throttle delay. - setExpectedApiVersionsResponse(createExpectedApiVersionsResponse(PRODUCE, (short) 5)); - while (!client.ready(node, time.milliseconds())) - client.poll(1, time.milliseconds()); - selector.clear(); - - int correlationId = sendEmptyProduceRequest(); - client.poll(1, time.milliseconds()); - - sendThrottledProduceResponse(correlationId, 100, (short) 5); - client.poll(1, time.milliseconds()); - - // Since client-side throttling is disabled, the connection is ready even though the response indicated a - // throttle delay. 
- assertTrue(client.ready(node, time.milliseconds())); - assertEquals(0, client.throttleDelayMs(node, time.milliseconds())); - } - private int sendEmptyProduceRequest() { - return sendEmptyProduceRequest(node.idString()); + return sendEmptyProduceRequest(client, node.idString()); } - private int sendEmptyProduceRequest(String nodeId) { - ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData() + private int sendEmptyProduceRequest(NetworkClient client, String nodeId) { + ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks((short) 1) .setTimeoutMs(1000)); @@ -1086,7 +1170,7 @@ public void testReconnectAfterAddressChange() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1147,7 +1231,7 @@ public void testFailedConnectionToFirstAddress() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // First connection attempt should fail client.ready(node, time.milliseconds()); @@ -1200,7 +1284,7 @@ public void testFailedConnectionToFirstAddressAfterReconnect() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, false, new ApiVersions(), null, new LogContext(), mockHostResolver, mockClientTelemetrySender, - MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Connect to one the initial addresses, then change the addresses and disconnect client.ready(node, time.milliseconds()); @@ -1309,7 +1393,7 @@ public void testTelemetryRequest() { reconnectBackoffMsTest, reconnectBackoffMaxMsTest, 64 * 1024, 64 * 1024, defaultRequestTimeoutMs, connectionSetupTimeoutMsTest, connectionSetupTimeoutMaxMsTest, time, true, new ApiVersions(), null, new LogContext(), new DefaultHostResolver(), mockClientTelemetrySender, - MetadataRecoveryStrategy.NONE); + Long.MAX_VALUE, MetadataRecoveryStrategy.NONE); // Send the ApiVersionsRequest client.ready(node, time.milliseconds()); diff --git a/clients/src/test/java/org/apache/kafka/clients/NodeApiVersionsTest.java b/clients/src/test/java/org/apache/kafka/clients/NodeApiVersionsTest.java index ec45475e4c126..ad1f614509fc9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NodeApiVersionsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NodeApiVersionsTest.java @@ -44,7 +44,7 @@ public class NodeApiVersionsTest { @Test public void testUnsupportedVersionsToString() { - NodeApiVersions versions = new NodeApiVersions(new ApiVersionCollection(), Collections.emptyList(), false); + NodeApiVersions versions = new NodeApiVersions(new ApiVersionCollection(), Collections.emptyList()); StringBuilder bld = new StringBuilder(); String prefix = "("; for (ApiKeys apiKey : 
ApiKeys.clientApis()) { @@ -73,7 +73,7 @@ public void testVersionsToString() { .setMaxVersion((short) 10001)); } else versionList.add(ApiVersionsResponse.toApiVersion(apiKey)); } - NodeApiVersions versions = new NodeApiVersions(versionList, Collections.emptyList(), false); + NodeApiVersions versions = new NodeApiVersions(versionList, Collections.emptyList()); StringBuilder bld = new StringBuilder(); String prefix = "("; for (ApiKeys apiKey : ApiKeys.values()) { @@ -102,16 +102,16 @@ public void testVersionsToString() { @Test public void testLatestUsableVersion() { - NodeApiVersions apiVersions = NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 1, (short) 3); - assertEquals(3, apiVersions.latestUsableVersion(ApiKeys.PRODUCE)); - assertEquals(1, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 0, (short) 1)); - assertEquals(1, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 1, (short) 1)); - assertEquals(2, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 1, (short) 2)); - assertEquals(3, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 1, (short) 3)); - assertEquals(2, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 2, (short) 2)); - assertEquals(3, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 2, (short) 3)); - assertEquals(3, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 3, (short) 3)); - assertEquals(3, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 3, (short) 4)); + NodeApiVersions apiVersions = NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 8, (short) 10); + assertEquals(10, apiVersions.latestUsableVersion(ApiKeys.PRODUCE)); + assertEquals(8, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 7, (short) 8)); + assertEquals(8, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 8, (short) 8)); + assertEquals(9, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 8, (short) 9)); + assertEquals(10, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 8, (short) 10)); + assertEquals(9, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 9, (short) 9)); + assertEquals(10, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 9, (short) 10)); + assertEquals(10, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 10, (short) 10)); + assertEquals(10, apiVersions.latestUsableVersion(ApiKeys.PRODUCE, (short) 10, (short) 11)); } @Test @@ -130,7 +130,7 @@ public void testLatestUsableVersionOutOfRangeHigh() { @Test public void testUsableVersionCalculationNoKnownVersions() { - NodeApiVersions versions = new NodeApiVersions(new ApiVersionCollection(), Collections.emptyList(), false); + NodeApiVersions versions = new NodeApiVersions(new ApiVersionCollection(), Collections.emptyList()); assertThrows(UnsupportedVersionException.class, () -> versions.latestUsableVersion(ApiKeys.FETCH)); } @@ -152,7 +152,7 @@ public void testUsableVersionLatestVersions(ApiMessageType.ListenerType scope) { .setApiKey((short) 100) .setMinVersion((short) 0) .setMaxVersion((short) 1)); - NodeApiVersions versions = new NodeApiVersions(versionList, Collections.emptyList(), false); + NodeApiVersions versions = new NodeApiVersions(versionList, Collections.emptyList()); for (ApiKeys apiKey: ApiKeys.apisForListener(scope)) { assertEquals(apiKey.latestVersion(), versions.latestUsableVersion(apiKey)); } @@ -162,7 +162,7 @@ public void testUsableVersionLatestVersions(ApiMessageType.ListenerType scope) { @EnumSource(ApiMessageType.ListenerType.class) public void 
testConstructionFromApiVersionsResponse(ApiMessageType.ListenerType scope) { ApiVersionsResponse apiVersionsResponse = TestUtils.defaultApiVersionsResponse(scope); - NodeApiVersions versions = new NodeApiVersions(apiVersionsResponse.data().apiKeys(), Collections.emptyList(), false); + NodeApiVersions versions = new NodeApiVersions(apiVersionsResponse.data().apiKeys(), Collections.emptyList()); for (ApiVersion apiVersionKey : apiVersionsResponse.data().apiKeys()) { ApiVersion apiVersion = versions.apiVersion(ApiKeys.forId(apiVersionKey.apiKey())); @@ -180,7 +180,6 @@ public void testFeatures() { .setName("transaction.version") .setMaxVersion((short) 2) .setMinVersion((short) 0)), - false, Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() .setName("transaction.version") .setMaxVersionLevel((short) 2) diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientConfigTest.java index 252aa63109a89..6d0ec9e8a8e84 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientConfigTest.java @@ -35,7 +35,8 @@ public class AdminClientConfigTest { public void testDefaultMetadataRecoveryStrategy() { Map configs = new HashMap<>(); final AdminClientConfig adminClientConfig = new AdminClientConfig(configs); - assertEquals(MetadataRecoveryStrategy.NONE.name, adminClientConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); + assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, adminClientConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); + } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupDescriptionTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupDescriptionTest.java new file mode 100644 index 0000000000000..4327f4db7f678 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupDescriptionTest.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.ConsumerGroupState; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConsumerGroupDescriptionTest { + @Test + public void testState() { + for (ConsumerGroupState consumerGroupState : ConsumerGroupState.values()) { + ConsumerGroupDescription description = new ConsumerGroupDescription( + "groupId", + false, + null, + "assignor", + consumerGroupState, + null + ); + assertEquals(consumerGroupState, description.state()); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupListingTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupListingTest.java new file mode 100644 index 0000000000000..15553b641bb48 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConsumerGroupListingTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.ConsumerGroupState; + +import org.junit.jupiter.api.Test; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConsumerGroupListingTest { + @Test + public void testState() { + for (ConsumerGroupState consumerGroupState : ConsumerGroupState.values()) { + ConsumerGroupListing listing = new ConsumerGroupListing( + "groupId", + false, + Optional.of(consumerGroupState) + ); + assertEquals(consumerGroupState, listing.state().get()); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java index b7dcab344bc82..736733febe42c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java @@ -63,7 +63,7 @@ public void testTopLevelErrorConstructor() throws InterruptedException { partitionFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); DeleteConsumerGroupOffsetsResult topLevelErrorResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(topLevelErrorResult.all(), GroupAuthorizationException.class); } @Test @@ -79,9 +79,9 @@ public void testPartitionMissingInResponseErrorConstructor() throws InterruptedE DeleteConsumerGroupOffsetsResult missingPartitionResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureError(missingPartitionResult.all(), 
IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingPartitionResult.all(), IllegalArgumentException.class); assertNull(missingPartitionResult.partitionResult(tpZero).get()); - TestUtils.assertFutureError(missingPartitionResult.partitionResult(tpOne), IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingPartitionResult.partitionResult(tpOne), IllegalArgumentException.class); } @Test @@ -110,9 +110,9 @@ private DeleteConsumerGroupOffsetsResult createAndVerifyPartitionLevelError() th DeleteConsumerGroupOffsetsResult partitionLevelErrorResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureError(partitionLevelErrorResult.all(), UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(partitionLevelErrorResult.all(), UnknownTopicOrPartitionException.class); assertNull(partitionLevelErrorResult.partitionResult(tpZero).get()); - TestUtils.assertFutureError(partitionLevelErrorResult.partitionResult(tpOne), UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(partitionLevelErrorResult.partitionResult(tpOne), UnknownTopicOrPartitionException.class); return partitionLevelErrorResult; } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/GroupListingTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/GroupListingTest.java index 7a8279be34a5c..f07752d36cd13 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/GroupListingTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/GroupListingTest.java @@ -18,6 +18,7 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.junit.jupiter.api.Test; @@ -33,16 +34,16 @@ public class GroupListingTest { @Test public void testSimpleConsumerGroup() { - GroupListing gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CLASSIC), ""); + GroupListing gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CLASSIC), "", Optional.of(GroupState.EMPTY)); assertTrue(gl.isSimpleConsumerGroup()); - gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE); + gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)); assertFalse(gl.isSimpleConsumerGroup()); - gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CONSUMER), ""); + gl = new GroupListing(GROUP_ID, Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY)); assertFalse(gl.isSimpleConsumerGroup()); - gl = new GroupListing(GROUP_ID, Optional.empty(), ""); + gl = new GroupListing(GROUP_ID, Optional.empty(), "", Optional.empty()); assertFalse(gl.isSimpleConsumerGroup()); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index 1b44b93c70406..6e521c65898da 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.ClientRequest; import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NodeApiVersions; import 
org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults; @@ -30,14 +31,13 @@ import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.ClassicGroupState; import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.ConsumerGroupState; import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicCollection; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicPartitionReplica; @@ -79,6 +79,8 @@ import org.apache.kafka.common.message.AddRaftVoterRequestData; import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData; +import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse; +import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult; @@ -134,6 +136,8 @@ import org.apache.kafka.common.message.ListOffsetsResponseData; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData; +import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment; +import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment; import org.apache.kafka.common.message.ListTransactionsResponseData; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition; @@ -157,7 +161,6 @@ import org.apache.kafka.common.quota.ClientQuotaEntity; import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.quota.ClientQuotaFilterComponent; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.requests.AddRaftVoterRequest; import org.apache.kafka.common.requests.AddRaftVoterResponse; import org.apache.kafka.common.requests.AlterClientQuotasResponse; @@ -773,7 +776,7 @@ private static ApiVersionsResponse prepareApiVersionsResponseForDescribeFeatures if (error == Errors.NONE) { return new ApiVersionsResponse.Builder(). setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.current(), ApiMessageType.ListenerType.ZK_BROKER, false, false)). + ApiMessageType.ListenerType.ZK_BROKER, false, false)). setSupportedFeatures( convertSupportedFeaturesMap(defaultFeatureMetadata().supportedFeatures())). 
setFinalizedFeatures( @@ -853,7 +856,7 @@ public void testTimeoutWithoutMetadata() throws Exception { KafkaFuture future = env.adminClient().createTopics( singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(1000)).all(); - TestUtils.assertFutureError(future, TimeoutException.class); + TestUtils.assertFutureThrows(future, TimeoutException.class); } } @@ -883,18 +886,32 @@ public void testConnectionFailureOnMetadataUpdate() throws Exception { @Test public void testUnreachableBootstrapServer() throws Exception { + verifyUnreachableBootstrapServer(MetadataRecoveryStrategy.REBOOTSTRAP); + } + + @Test + public void testUnreachableBootstrapServerNoRebootstrap() throws Exception { + verifyUnreachableBootstrapServer(MetadataRecoveryStrategy.NONE); + } + + private void verifyUnreachableBootstrapServer(MetadataRecoveryStrategy metadataRecoveryStrategy) throws Exception { // This tests the scenario in which the bootstrap server is unreachable for a short while, // which prevents AdminClient from being able to send the initial metadata request Cluster cluster = Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 8121))); Map unreachableNodes = Collections.singletonMap(cluster.nodes().get(0), 200L); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster, - AdminClientUnitTestEnv.clientConfigs(), unreachableNodes)) { + AdminClientUnitTestEnv.clientConfigs(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG, metadataRecoveryStrategy.name), unreachableNodes)) { Cluster discoveredCluster = mockCluster(3, 0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest, RequestTestUtils.metadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); + if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP) { + env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest, + RequestTestUtils.metadataResponse(discoveredCluster.nodes(), + discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); + } env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest, prepareCreateTopicsResponse("myTopic", Errors.NONE)); @@ -922,7 +939,7 @@ public void testPropagatedMetadataFetchException() throws Exception { KafkaFuture future = env.adminClient().createTopics( singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(1000)).all(); - TestUtils.assertFutureError(future, SaslAuthenticationException.class); + TestUtils.assertFutureThrows(future, SaslAuthenticationException.class); } } @@ -1166,14 +1183,14 @@ public void testDeleteTopics() throws Exception { prepareDeleteTopicsResponse("myTopic", Errors.TOPIC_DELETION_DISABLED)); future = env.adminClient().deleteTopics(singletonList("myTopic"), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureError(future, TopicDeletionDisabledException.class); + TestUtils.assertFutureThrows(future, TopicDeletionDisabledException.class); env.kafkaClient().prepareResponse( expectDeleteTopicsRequestWithTopics("myTopic"), prepareDeleteTopicsResponse("myTopic", Errors.UNKNOWN_TOPIC_OR_PARTITION)); future = env.adminClient().deleteTopics(singletonList("myTopic"), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureError(future, UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(future, 
UnknownTopicOrPartitionException.class); // With topic IDs Uuid topicId = Uuid.randomUuid(); @@ -1190,14 +1207,14 @@ public void testDeleteTopics() throws Exception { prepareDeleteTopicsResponseWithTopicId(topicId, Errors.TOPIC_DELETION_DISABLED)); future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureError(future, TopicDeletionDisabledException.class); + TestUtils.assertFutureThrows(future, TopicDeletionDisabledException.class); env.kafkaClient().prepareResponse( expectDeleteTopicsRequestWithTopicIds(topicId), prepareDeleteTopicsResponseWithTopicId(topicId, Errors.UNKNOWN_TOPIC_ID)); future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureError(future, UnknownTopicIdException.class); + TestUtils.assertFutureThrows(future, UnknownTopicIdException.class); } } @@ -1398,7 +1415,7 @@ public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Ex ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.topicNameValues().get("topic2"), ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureError(result.topicNameValues().get("topic3"), TopicExistsException.class); + TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class); // With topic IDs Uuid topicId1 = Uuid.randomUuid(); @@ -1419,7 +1436,7 @@ public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Ex e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureError(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class); + TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class); } } @@ -1451,14 +1468,14 @@ public void testInvalidTopicNames() throws Exception { List sillyTopicNames = asList("", null); Map> deleteFutures = env.adminClient().deleteTopics(sillyTopicNames).topicNameValues(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureError(deleteFutures.get(sillyTopicName), InvalidTopicException.class); + TestUtils.assertFutureThrows(deleteFutures.get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); Map> describeFutures = env.adminClient().describeTopics(sillyTopicNames).topicNameValues(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureError(describeFutures.get(sillyTopicName), InvalidTopicException.class); + TestUtils.assertFutureThrows(describeFutures.get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); @@ -1469,7 +1486,7 @@ public void testInvalidTopicNames() throws Exception { Map> createFutures = env.adminClient().createTopics(newTopics).values(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureError(createFutures .get(sillyTopicName), InvalidTopicException.class); + TestUtils.assertFutureThrows(createFutures .get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); } @@ -1760,11 +1777,12 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() thro asList(topicName1, topicName0), new DescribeTopicsOptions() ); - TestUtils.assertFutureError(result.allTopicNames(), 
TopicAuthorizationException.class); + TestUtils.assertFutureThrows(result.allTopicNames(), TopicAuthorizationException.class); } } + // @Flaky("KAFKA-18441") @Test public void testAdminClientApisAuthenticationFailure() { Cluster cluster = mockBootstrapCluster(); @@ -1856,10 +1874,10 @@ public void testDescribeAcls() throws Exception { env.kafkaClient().prepareResponse(new DescribeAclsResponse(new DescribeAclsResponseData() .setErrorCode(Errors.SECURITY_DISABLED.code()) .setErrorMessage("Security is disabled"), ApiKeys.DESCRIBE_ACLS.latestVersion())); - TestUtils.assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); + TestUtils.assertFutureThrows(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); // Test a call where we supply an invalid filter. - TestUtils.assertFutureError(env.adminClient().describeAcls(UNKNOWN_FILTER).values(), + TestUtils.assertFutureThrows(env.adminClient().describeAcls(UNKNOWN_FILTER).values(), InvalidRequestException.class); } } @@ -1887,9 +1905,9 @@ public void testCreateAcls() throws Exception { new CreateAclsResponseData.AclCreationResult())))); results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); - TestUtils.assertFutureError(results.values().get(ACL1), SecurityDisabledException.class); + TestUtils.assertFutureThrows(results.values().get(ACL1), SecurityDisabledException.class); results.values().get(ACL2).get(); - TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); + TestUtils.assertFutureThrows(results.all(), SecurityDisabledException.class); } } @@ -1917,8 +1935,8 @@ public void testDeleteAcls() throws Exception { assertEquals(ACL1, filter1Results.values().get(0).binding()); assertNull(filter1Results.values().get(1).exception()); assertEquals(ACL2, filter1Results.values().get(1).binding()); - TestUtils.assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class); - TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); + TestUtils.assertFutureThrows(filterResults.get(FILTER2), SecurityDisabledException.class); + TestUtils.assertFutureThrows(results.all(), SecurityDisabledException.class); // Test a call where one deletion result has an error. env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData() @@ -1938,7 +1956,7 @@ public void testDeleteAcls() throws Exception { ApiKeys.DELETE_ACLS.latestVersion())); results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2)); assertTrue(results.values().get(FILTER2).get().values().isEmpty()); - TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); + TestUtils.assertFutureThrows(results.all(), SecurityDisabledException.class); // Test a call where there are no errors. 
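        // (DeleteAclsResult.all() only completes successfully when every filter's future does,
        // as the error cases above demonstrate.)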
env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData() @@ -2009,7 +2027,7 @@ public void testElectLeaders() throws Exception { electionType, new HashSet<>(asList(topic1, topic2)), new ElectLeadersOptions().timeoutMs(100)); - TestUtils.assertFutureError(results.partitions(), TimeoutException.class); + TestUtils.assertFutureThrows(results.partitions(), TimeoutException.class); } } } @@ -2047,9 +2065,9 @@ public void testDescribeBrokerAndLogConfigs() throws Exception { new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(brokerResource.name()).setResourceType(brokerResource.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()), - new DescribeConfigsResponseData.DescribeConfigsResult() - .setResourceName(brokerLoggerResource.name()).setResourceType(brokerLoggerResource.type().id()).setErrorCode(Errors.NONE.code()) - .setConfigs(emptyList())))), env.cluster().nodeById(0)); + new DescribeConfigsResponseData.DescribeConfigsResult() + .setResourceName(brokerLoggerResource.name()).setResourceType(brokerLoggerResource.type().id()).setErrorCode(Errors.NONE.code()) + .setConfigs(emptyList())))), env.cluster().nodeById(0)); Map> result = env.adminClient().describeConfigs(asList( brokerResource, brokerLoggerResource)).values(); @@ -2088,9 +2106,9 @@ public void testDescribeConfigsUnrequested() throws Exception { new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult() .setResourceName(topic.name()).setResourceType(topic.type().id()).setErrorCode(Errors.NONE.code()) .setConfigs(emptyList()), - new DescribeConfigsResponseData.DescribeConfigsResult() - .setResourceName(unrequested.name()).setResourceType(unrequested.type().id()).setErrorCode(Errors.NONE.code()) - .setConfigs(emptyList()))))); + new DescribeConfigsResponseData.DescribeConfigsResult() + .setResourceName(unrequested.name()).setResourceType(unrequested.type().id()).setErrorCode(Errors.NONE.code()) + .setConfigs(emptyList()))))); Map> result = env.adminClient().describeConfigs(singletonList( topic)).values(); assertEquals(new HashSet<>(singletonList(topic)), result.keySet()); @@ -2306,50 +2324,6 @@ public void testDescribeLogDirsWithVolumeBytes() throws ExecutionException, Inte } } - @SuppressWarnings("deprecation") - @Test - public void testDescribeLogDirsDeprecated() throws ExecutionException, InterruptedException { - Set brokers = singleton(0); - TopicPartition tp = new TopicPartition("topic", 12); - String logDir = "/var/data/kafka"; - Errors error = Errors.NONE; - int offsetLag = 24; - long partitionSize = 1234567890; - - try (AdminClientUnitTestEnv env = mockClientEnv()) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponseFrom( - prepareDescribeLogDirsResponse(error, logDir, tp, partitionSize, offsetLag), - env.cluster().nodeById(0)); - - DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers); - - Map>> deprecatedValues = result.values(); - assertEquals(brokers, deprecatedValues.keySet()); - assertNotNull(deprecatedValues.get(0)); - assertDescriptionContains(deprecatedValues.get(0).get(), logDir, tp, error, offsetLag, partitionSize); - - Map> deprecatedAll = result.all().get(); - assertEquals(brokers, deprecatedAll.keySet()); - assertDescriptionContains(deprecatedAll.get(0), logDir, tp, error, offsetLag, partitionSize); - } - } - - @SuppressWarnings("deprecation") - private static void 
assertDescriptionContains(Map descriptionsMap, - String logDir, TopicPartition tp, Errors error, - int offsetLag, long partitionSize) { - assertNotNull(descriptionsMap); - assertEquals(singleton(logDir), descriptionsMap.keySet()); - assertEquals(error, descriptionsMap.get(logDir).error); - Map allReplicaInfos = - descriptionsMap.get(logDir).replicaInfos; - assertEquals(singleton(tp), allReplicaInfos.keySet()); - assertEquals(partitionSize, allReplicaInfos.get(tp).size); - assertEquals(offsetLag, allReplicaInfos.get(tp).offsetLag); - assertFalse(allReplicaInfos.get(tp).isFuture); - } - @Test public void testDescribeLogDirsOfflineDir() throws ExecutionException, InterruptedException { Set brokers = singleton(0); @@ -2382,39 +2356,6 @@ public void testDescribeLogDirsOfflineDir() throws ExecutionException, Interrupt } } - @SuppressWarnings("deprecation") - @Test - public void testDescribeLogDirsOfflineDirDeprecated() throws ExecutionException, InterruptedException { - Set brokers = singleton(0); - String logDir = "/var/data/kafka"; - Errors error = Errors.KAFKA_STORAGE_ERROR; - - try (AdminClientUnitTestEnv env = mockClientEnv()) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponseFrom( - prepareDescribeLogDirsResponse(error, logDir, emptyList()), - env.cluster().nodeById(0)); - - DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers); - - Map>> deprecatedValues = result.values(); - assertEquals(brokers, deprecatedValues.keySet()); - assertNotNull(deprecatedValues.get(0)); - Map valuesMap = deprecatedValues.get(0).get(); - assertEquals(singleton(logDir), valuesMap.keySet()); - assertEquals(error, valuesMap.get(logDir).error); - assertEquals(emptySet(), valuesMap.get(logDir).replicaInfos.keySet()); - - Map> deprecatedAll = result.all().get(); - assertEquals(brokers, deprecatedAll.keySet()); - Map allMap = deprecatedAll.get(0); - assertNotNull(allMap); - assertEquals(singleton(logDir), allMap.keySet()); - assertEquals(error, allMap.get(logDir).error); - assertEquals(emptySet(), allMap.get(logDir).replicaInfos.keySet()); - } - } - @Test public void testDescribeReplicaLogDirs() throws ExecutionException, InterruptedException { TopicPartitionReplica tpr1 = new TopicPartitionReplica("topic", 12, 1); @@ -2717,7 +2658,7 @@ public void testDeleteRecordsMultipleSends() throws Exception { DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete); assertEquals(3L, results.lowWatermarks().get(tp0).get().lowWatermark()); - TestUtils.assertFutureThrows(results.lowWatermarks().get(tp1), AuthenticationException.class); + TestUtils.assertFutureThrows(results.lowWatermarks().get(tp1), SaslAuthenticationException.class); } } @@ -2839,13 +2780,13 @@ public void testDescribeTopicsByIds() throws ExecutionException, InterruptedExce DescribeTopicsResult result1 = env.adminClient().describeTopics( TopicCollection.ofTopicIds(singletonList(nonExistID))); - TestUtils.assertFutureError(result1.allTopicIds(), UnknownTopicIdException.class); + TestUtils.assertFutureThrows(result1.allTopicIds(), UnknownTopicIdException.class); Exception e = assertThrows(Exception.class, () -> result1.allTopicIds().get(), "describe with non-exist topic ID should throw exception"); assertEquals(String.format("org.apache.kafka.common.errors.UnknownTopicIdException: TopicId %s not found.", nonExistID), e.getMessage()); DescribeTopicsResult result2 = env.adminClient().describeTopics( TopicCollection.ofTopicIds(singletonList(Uuid.ZERO_UUID))); - 
TestUtils.assertFutureError(result2.allTopicIds(), InvalidTopicException.class); + TestUtils.assertFutureThrows(result2.allTopicIds(), InvalidTopicException.class); e = assertThrows(Exception.class, () -> result2.allTopicIds().get(), "describe with non-exist topic ID should throw exception"); assertEquals("The given topic id 'AAAAAAAAAAAAAAAAAAAAAA' cannot be represented in a request.", e.getCause().getMessage()); @@ -3068,7 +3009,7 @@ public void testListGroups() throws Exception { env.cluster().nodeById(3)); final ListGroupsResult result = env.adminClient().listGroups(); - TestUtils.assertFutureError(result.all(), UnknownServerException.class); + TestUtils.assertFutureThrows(result.all(), UnknownServerException.class); Collection listings = result.valid().get(); assertEquals(6, listings.size()); @@ -3102,7 +3043,7 @@ public void testListGroupsMetadataFailure() throws Exception { Collections.emptyList())); final ListGroupsResult result = env.adminClient().listGroups(); - TestUtils.assertFutureError(result.all(), KafkaException.class); + TestUtils.assertFutureThrows(result.all(), KafkaException.class); } } @@ -3134,8 +3075,8 @@ public void testListGroupsEmptyProtocol() throws Exception { assertEquals(2, listings.size()); List expected = new ArrayList<>(); - expected.add(new GroupListing("group-2", Optional.of(GroupType.CLASSIC), "")); - expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE)); + expected.add(new GroupListing("group-2", Optional.of(GroupType.CLASSIC), "", Optional.of(GroupState.EMPTY))); + expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); } @@ -3163,7 +3104,7 @@ public void testListGroupsEmptyGroupType() throws Exception { assertEquals(1, listings.size()); List expected = new ArrayList<>(); - expected.add(new GroupListing("group-1", Optional.empty(), "any")); + expected.add(new GroupListing("group-1", Optional.empty(), "any", Optional.empty())); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); } @@ -3199,15 +3140,15 @@ public void testListGroupsWithTypes() throws Exception { assertEquals(2, listing.size()); List expected = new ArrayList<>(); - expected.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "")); - expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE)); + expected.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY))); + expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); assertEquals(expected, listing); assertEquals(0, result.errors().get().size()); } } @Test - public void testListGroupsWithTypesOlderBrokerVersion() throws Exception { + public void testListGroupsWithTypesOlderBrokerVersion() { ApiVersion listGroupV4 = new ApiVersion() .setApiKey(ApiKeys.LIST_GROUPS.id) .setMinVersion((short) 0) @@ -3229,6 +3170,23 @@ public void testListGroupsWithTypesOlderBrokerVersion() throws Exception { } } + @Test + public void testDescribeClusterHandleUnsupportedVersionForIncludingFencedBrokers() { + ApiVersion describeClusterV1 = new ApiVersion() + .setApiKey(ApiKeys.DESCRIBE_CLUSTER.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 1); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { 
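+ // Annotation (not in the original patch): only DescribeCluster v0-v1 is advertised below, so the
+ // describeCluster call with includeFencedBrokers(true) is expected to fail with UnsupportedVersionException.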
+ env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(describeClusterV1))); + + env.kafkaClient().prepareUnsupportedVersionResponse( + request -> request instanceof DescribeClusterRequest); + + final DescribeClusterResult result = env.adminClient().describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)); + TestUtils.assertFutureThrows(result.nodes(), UnsupportedVersionException.class); + } + } + @Test public void testListConsumerGroups() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), @@ -3322,7 +3280,7 @@ public void testListConsumerGroups() throws Exception { env.cluster().nodeById(3)); final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); - TestUtils.assertFutureError(result.all(), UnknownServerException.class); + TestUtils.assertFutureThrows(result.all(), UnknownServerException.class); Collection listings = result.valid().get(); assertEquals(3, listings.size()); @@ -3357,7 +3315,7 @@ public void testListConsumerGroupsMetadataFailure() throws Exception { Collections.emptyList())); final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); - TestUtils.assertFutureError(result.all(), KafkaException.class); + TestUtils.assertFutureThrows(result.all(), KafkaException.class); } } @@ -3387,8 +3345,8 @@ public void testListConsumerGroupsWithStates() throws Exception { assertEquals(2, listings.size()); List expected = new ArrayList<>(); - expected.add(new ConsumerGroupListing("group-2", true, Optional.of(ConsumerGroupState.EMPTY))); - expected.add(new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE))); + expected.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), true)); + expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false)); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); } @@ -3403,7 +3361,7 @@ public void testListConsumerGroupsWithTypes() throws Exception { env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()), + expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) .setGroups(singletonList( @@ -3414,13 +3372,13 @@ public void testListConsumerGroupsWithTypes() throws Exception { .setGroupType(GroupType.CLASSIC.toString())))), env.cluster().nodeById(0)); - final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE)); + final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); Collection listings = result.valid().get(); assertEquals(1, listings.size()); List expected = new ArrayList<>(); - expected.add(new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE), Optional.of(GroupType.CLASSIC))); + expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CLASSIC), false)); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); @@ -3449,8 +3407,8 @@ public void testListConsumerGroupsWithTypes() throws Exception { 
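+ // Annotation (not in the original patch): ConsumerGroupListing now takes the Optional group state
+ // (and, when given, the group type) ahead of the boolean simple-group flag, as the expectations below show.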
assertEquals(2, listings2.size()); List expected2 = new ArrayList<>(); - expected2.add(new ConsumerGroupListing("group-2", true, Optional.of(ConsumerGroupState.EMPTY), Optional.of(GroupType.CONSUMER))); - expected2.add(new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE), Optional.of(GroupType.CONSUMER))); + expected2.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), Optional.of(GroupType.CONSUMER), true)); + expected2.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CONSUMER), false)); assertEquals(expected2, listings2); assertEquals(0, result.errors().get().size()); } @@ -3488,7 +3446,7 @@ public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exceptio env.kafkaClient().prepareUnsupportedVersionResponse( body -> body instanceof ListGroupsRequest); - options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE)); + options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); result = env.adminClient().listConsumerGroups(options); TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } @@ -3507,23 +3465,23 @@ public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception // Check if we can list groups with older broker if we specify states and don't specify types. env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(singleton(ConsumerGroupState.STABLE.toString()), Collections.emptySet()), + expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) .setGroups(Collections.singletonList( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState(ConsumerGroupState.STABLE.toString())))), + .setGroupState(GroupState.STABLE.toString())))), env.cluster().nodeById(0)); - ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(singleton(ConsumerGroupState.STABLE)); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); Collection listing = result.all().get(); assertEquals(1, listing.size()); List expected = Collections.singletonList( - new ConsumerGroupListing("group-1", false, Optional.of(ConsumerGroupState.STABLE)) + new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false) ); assertEquals(expected, listing); @@ -3572,7 +3530,7 @@ public void testOffsetCommitNumRetries() throws Exception { offsets.put(tp1, new OffsetAndMetadata(123L)); final AlterConsumerGroupOffsetsResult result = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets); - TestUtils.assertFutureError(result.all(), TimeoutException.class); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); } } @@ -3603,9 +3561,9 @@ public void testOffsetCommitWithMultipleErrors() throws Exception { .alterConsumerGroupOffsets(GROUP_ID, offsets); assertNull(result.partitionResult(foo0).get()); - TestUtils.assertFutureError(result.partitionResult(foo1), UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(result.partitionResult(foo1), UnknownTopicOrPartitionException.class); - TestUtils.assertFutureError(result.all(), UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(result.all(), 
UnknownTopicOrPartitionException.class); } } @@ -3684,7 +3642,7 @@ public void testDescribeConsumerGroupNumRetries() throws Exception { final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureError(result.all(), TimeoutException.class); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); } } @@ -3862,7 +3820,13 @@ public void testDescribeMultipleConsumerGroups() { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(asList( + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, GROUP_ID, env.cluster().controller()), + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, "group-connect-0", env.cluster().controller()) + )) + )); // The first request sent will be a ConsumerGroupDescribe request. Let's // fail it in order to fail back to using the classic version. @@ -3882,8 +3846,8 @@ public void testDescribeMultipleConsumerGroups() { byte[] memberAssignmentBytes = new byte[memberAssignment.remaining()]; memberAssignment.get(memberAssignmentBytes); - DescribeGroupsResponseData group0Data = new DescribeGroupsResponseData(); - group0Data.groups().add(DescribeGroupsResponse.groupMetadata( + DescribeGroupsResponseData groupData = new DescribeGroupsResponseData(); + groupData.groups().add(DescribeGroupsResponse.groupMetadata( GROUP_ID, Errors.NONE, "", @@ -3894,9 +3858,7 @@ public void testDescribeMultipleConsumerGroups() { DescribeGroupsResponse.groupMember("1", null, "clientId1", "clientHost", memberAssignmentBytes, null) ), Collections.emptySet())); - - DescribeGroupsResponseData groupConnectData = new DescribeGroupsResponseData(); - group0Data.groups().add(DescribeGroupsResponse.groupMetadata( + groupData.groups().add(DescribeGroupsResponse.groupMetadata( "group-connect-0", Errors.NONE, "", @@ -3908,8 +3870,7 @@ public void testDescribeMultipleConsumerGroups() { ), Collections.emptySet())); - env.kafkaClient().prepareResponse(new DescribeGroupsResponse(group0Data)); - env.kafkaClient().prepareResponse(new DescribeGroupsResponse(groupConnectData)); + env.kafkaClient().prepareResponse(new DescribeGroupsResponse(groupData)); Collection groups = new HashSet<>(); groups.add(GROUP_ID); @@ -3917,6 +3878,72 @@ public void testDescribeMultipleConsumerGroups() { final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(groups); assertEquals(2, result.describedGroups().size()); assertEquals(groups, result.describedGroups().keySet()); + KafkaFuture> allFuture = result.all(); + // This throws because the second group is a classic connect group, not a consumer group. 
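+ // Annotation (not in the original patch): all() aggregates every per-group future, so the failing
+ // connect group fails the combined future even though describedGroups() still contains both entries.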
+ assertThrows(ExecutionException.class, allFuture::get); + assertTrue(allFuture.isCompletedExceptionally()); + } + } + + @Test + public void testDescribeConsumerGroupsGroupIdNotFound() { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + env.kafkaClient().prepareResponse(new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(asList( + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, GROUP_ID, env.cluster().controller()), + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, "group-connect-0", env.cluster().controller()) + )) + )); + + // The first request sent will be a ConsumerGroupDescribe request. Let's + // fail it in order to fail back to using the classic version. + env.kafkaClient().prepareUnsupportedVersionResponse( + request -> request instanceof ConsumerGroupDescribeRequest); + + TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); + TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); + TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); + + final List topicPartitions = new ArrayList<>(); + topicPartitions.add(0, myTopicPartition0); + topicPartitions.add(1, myTopicPartition1); + topicPartitions.add(2, myTopicPartition2); + + final ByteBuffer memberAssignment = ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(topicPartitions)); + byte[] memberAssignmentBytes = new byte[memberAssignment.remaining()]; + memberAssignment.get(memberAssignmentBytes); + + DescribeGroupsResponseData groupData = new DescribeGroupsResponseData(); + groupData.groups().add(DescribeGroupsResponse.groupMetadata( + GROUP_ID, + Errors.NONE, + "", + ConsumerProtocol.PROTOCOL_TYPE, + "", + asList( + DescribeGroupsResponse.groupMember("0", null, "clientId0", "clientHost", memberAssignmentBytes, null), + DescribeGroupsResponse.groupMember("1", null, "clientId1", "clientHost", memberAssignmentBytes, null) + ), + Collections.emptySet())); + groupData.groups().add(DescribeGroupsResponse.groupError( + "group-connect-0", + Errors.GROUP_ID_NOT_FOUND, + "Group group-connect-0 is not a classic group.")); + + env.kafkaClient().prepareResponse(new DescribeGroupsResponse(groupData)); + + Collection groups = new HashSet<>(); + groups.add(GROUP_ID); + groups.add("group-connect-0"); + final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(groups); + assertEquals(2, result.describedGroups().size()); + assertEquals(groups, result.describedGroups().keySet()); + KafkaFuture> allFuture = result.all(); + assertThrows(ExecutionException.class, allFuture::get); + assertTrue(result.all().isCompletedExceptionally()); } } @@ -3979,7 +4006,7 @@ public void testDescribeNonConsumerGroups() throws Exception { final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureError(result.describedGroups().get(GROUP_ID), IllegalArgumentException.class); + TestUtils.assertFutureThrows(result.describedGroups().get(GROUP_ID), IllegalArgumentException.class); } } @@ -4000,7 +4027,7 @@ public void testDescribeGroupsWithBothUnsupportedApis() throws InterruptedExcept request -> request instanceof DescribeGroupsRequest); DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureError(result.describedGroups().get(GROUP_ID), 
UnsupportedVersionException.class); + TestUtils.assertFutureThrows(result.describedGroups().get(GROUP_ID), UnsupportedVersionException.class); } } @@ -4051,6 +4078,7 @@ public void testDescribeOldAndNewConsumerGroups() throws Exception { .setTopicName("foo") .setPartitions(singletonList(1)) ))) + .setMemberType((byte) 1) )), new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId("grp2") @@ -4104,14 +4132,18 @@ public void testDescribeOldAndNewConsumerGroups() throws Exception { ), Optional.of(new MemberAssignment( Collections.singleton(new TopicPartition("foo", 1)) - )) + )), + Optional.of(10), + Optional.of(true) ) ), "range", GroupType.CONSUMER, - ConsumerGroupState.STABLE, + GroupState.STABLE, env.cluster().controller(), - Collections.emptySet() + Collections.emptySet(), + Optional.of(10), + Optional.of(10) )); expectedResult.put("grp2", new ConsumerGroupDescription( "grp2", @@ -4124,32 +4156,31 @@ public void testDescribeOldAndNewConsumerGroups() throws Exception { "clientHost", new MemberAssignment( Collections.singleton(new TopicPartition("bar", 0)) - ) + ), + Optional.empty(), + Optional.empty(), + Optional.empty() ) ), "range", GroupType.CLASSIC, - ConsumerGroupState.STABLE, + GroupState.STABLE, env.cluster().controller(), - Collections.emptySet() + Collections.emptySet(), + Optional.empty(), + Optional.empty() )); assertEquals(expectedResult, result.all().get()); } } - @Test - public void testListConsumerGroupOffsetsOptionsWithUnbatchedApi() throws Exception { - verifyListConsumerGroupOffsetsOptions(false); - } - @Test public void testListConsumerGroupOffsetsOptionsWithBatchedApi() throws Exception { - verifyListConsumerGroupOffsetsOptions(true); + verifyListConsumerGroupOffsetsOptions(); } - @SuppressWarnings("deprecation") - private void verifyListConsumerGroupOffsetsOptions(boolean batchedApi) throws Exception { + private void verifyListConsumerGroupOffsetsOptions() throws Exception { final Cluster cluster = mockCluster(3, 0); final Time time = new MockTime(); @@ -4163,13 +4194,10 @@ private void verifyListConsumerGroupOffsetsOptions(boolean batchedApi) throws Ex final ListConsumerGroupOffsetsOptions options = new ListConsumerGroupOffsetsOptions() .requireStable(true) .timeoutMs(300); - if (batchedApi) { - final ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec() - .topicPartitions(partitions); - env.adminClient().listConsumerGroupOffsets(Collections.singletonMap(GROUP_ID, groupSpec), options); - } else { - env.adminClient().listConsumerGroupOffsets(GROUP_ID, options.topicPartitions(partitions)); - } + + final ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec() + .topicPartitions(partitions); + env.adminClient().listConsumerGroupOffsets(Collections.singletonMap(GROUP_ID, groupSpec), options); final MockClient mockClient = env.kafkaClient(); waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); @@ -4203,7 +4231,7 @@ public void testListConsumerGroupOffsetsNumRetries() throws Exception { final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - TestUtils.assertFutureError(result.partitionsToOffsetAndMetadata(), TimeoutException.class); + TestUtils.assertFutureThrows(result.partitionsToOffsetAndMetadata(), TimeoutException.class); } } @@ -4310,7 +4338,7 @@ public void testListConsumerGroupOffsetsNonRetriableErrors() throws Exception { ListConsumerGroupOffsetsResult errorResult = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - 
TestUtils.assertFutureError(errorResult.partitionsToOffsetAndMetadata(), error.exception().getClass()); + TestUtils.assertFutureThrows(errorResult.partitionsToOffsetAndMetadata(), error.exception().getClass()); } } } @@ -4530,7 +4558,7 @@ public void testDeleteConsumerGroupsNumRetries() throws Exception { final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); - TestUtils.assertFutureError(result.all(), TimeoutException.class); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); } } @@ -4631,7 +4659,7 @@ public void testDeleteConsumerGroupsWithOlderBroker() throws Exception { prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds); - TestUtils.assertFutureError(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class); // Retriable errors should be retried env.kafkaClient().prepareResponse( @@ -4753,7 +4781,7 @@ public void testDeleteConsumerGroupOffsetsNumRetries() throws Exception { final DeleteConsumerGroupOffsetsResult result = env.adminClient() .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - TestUtils.assertFutureError(result.all(), TimeoutException.class); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); } } @@ -4844,8 +4872,8 @@ public void testDeleteConsumerGroupOffsets() throws Exception { GROUP_ID, Stream.of(tp1, tp2).collect(Collectors.toSet())); assertNull(errorResult.partitionResult(tp1).get()); - TestUtils.assertFutureError(errorResult.all(), GroupSubscribedToTopicException.class); - TestUtils.assertFutureError(errorResult.partitionResult(tp2), GroupSubscribedToTopicException.class); + TestUtils.assertFutureThrows(errorResult.all(), GroupSubscribedToTopicException.class); + TestUtils.assertFutureThrows(errorResult.partitionResult(tp2), GroupSubscribedToTopicException.class); assertThrows(IllegalArgumentException.class, () -> errorResult.partitionResult(tp3)); } } @@ -4916,8 +4944,8 @@ public void testDeleteConsumerGroupOffsetsNonRetriableErrors() throws Exception DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - TestUtils.assertFutureError(errorResult.all(), error.exception().getClass()); - TestUtils.assertFutureError(errorResult.partitionResult(tp1), error.exception().getClass()); + TestUtils.assertFutureThrows(errorResult.all(), error.exception().getClass()); + TestUtils.assertFutureThrows(errorResult.partitionResult(tp1), error.exception().getClass()); } } } @@ -4965,8 +4993,8 @@ public void testDeleteConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() th final DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - TestUtils.assertFutureError(errorResult.all(), GroupAuthorizationException.class); - TestUtils.assertFutureError(errorResult.partitionResult(tp1), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(errorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(errorResult.partitionResult(tp1), GroupAuthorizationException.class); } } @@ -5029,7 +5057,7 @@ public void testDescribeShareGroups() throws Exception { ShareGroupDescribeResponseData group0Data = new 
ShareGroupDescribeResponseData(); group0Data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(GROUP_ID) - .setGroupState(ShareGroupState.STABLE.toString()) + .setGroupState(GroupState.STABLE.toString()) .setMembers(asList(memberOne, memberTwo))); final List expectedTopicPartitions = new ArrayList<>(); @@ -5037,14 +5065,14 @@ public void testDescribeShareGroups() throws Exception { expectedTopicPartitions.add(1, new TopicPartition("my_topic", 1)); expectedTopicPartitions.add(2, new TopicPartition("my_topic", 2)); - List expectedMemberDescriptions = new ArrayList<>(); - expectedMemberDescriptions.add(convertToMemberDescriptions(memberOne, - new MemberAssignment(new HashSet<>(expectedTopicPartitions)))); - expectedMemberDescriptions.add(convertToMemberDescriptions(memberTwo, - new MemberAssignment(new HashSet<>(expectedTopicPartitions)))); + List expectedMemberDescriptions = new ArrayList<>(); + expectedMemberDescriptions.add(convertToShareMemberDescriptions(memberOne, + new ShareMemberAssignment(new HashSet<>(expectedTopicPartitions)))); + expectedMemberDescriptions.add(convertToShareMemberDescriptions(memberTwo, + new ShareMemberAssignment(new HashSet<>(expectedTopicPartitions)))); data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(GROUP_ID) - .setGroupState(ShareGroupState.STABLE.toString()) + .setGroupState(GroupState.STABLE.toString()) .setMembers(asList(memberOne, memberTwo))); env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data)); @@ -5059,6 +5087,59 @@ public void testDescribeShareGroups() throws Exception { } } + @Test + public void testDescribeShareGroupsGroupIdNotFound() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + env.kafkaClient().prepareResponse(new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(asList( + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, GROUP_ID, env.cluster().controller()), + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, "group-1", env.cluster().controller()) + )) + )); + + ShareGroupDescribeResponseData.TopicPartitions topicPartitions = new ShareGroupDescribeResponseData.TopicPartitions() + .setTopicName("my_topic") + .setPartitions(asList(0, 1, 2)); + final ShareGroupDescribeResponseData.Assignment memberAssignment = new ShareGroupDescribeResponseData.Assignment() + .setTopicPartitions(asList(topicPartitions)); + ShareGroupDescribeResponseData groupData = new ShareGroupDescribeResponseData(); + groupData.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() + .setGroupId(GROUP_ID) + .setGroupState(GroupState.STABLE.toString()) + .setMembers(asList( + new ShareGroupDescribeResponseData.Member() + .setMemberId("0") + .setClientId("clientId0") + .setClientHost("clientHost") + .setAssignment(memberAssignment), + new ShareGroupDescribeResponseData.Member() + .setMemberId("1") + .setClientId("clientId1") + .setClientHost("clientHost") + .setAssignment(memberAssignment)))); + groupData.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() + .setGroupId("group-1") + .setGroupState(GroupState.DEAD.toString()) + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group group-1 not found.")); + + env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(groupData)); + + Collection groups = new HashSet<>(); + groups.add(GROUP_ID); + groups.add("group-1"); + 
final DescribeShareGroupsResult result = env.adminClient().describeShareGroups(groups); + assertEquals(2, result.describedGroups().size()); + assertEquals(groups, result.describedGroups().keySet()); + KafkaFuture> allFuture = result.all(); + assertThrows(ExecutionException.class, allFuture::get); + assertTrue(result.all().isCompletedExceptionally()); + } + } + @Test public void testDescribeShareGroupsWithAuthorizedOperationsOmitted() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { @@ -5087,17 +5168,23 @@ public void testDescribeMultipleShareGroups() { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(asList( + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, GROUP_ID, env.cluster().controller()), + FindCoordinatorResponse.prepareCoordinatorResponse(Errors.NONE, "group-1", env.cluster().controller()) + )) + )); ShareGroupDescribeResponseData.TopicPartitions topicPartitions = new ShareGroupDescribeResponseData.TopicPartitions() .setTopicName("my_topic") .setPartitions(asList(0, 1, 2)); final ShareGroupDescribeResponseData.Assignment memberAssignment = new ShareGroupDescribeResponseData.Assignment() .setTopicPartitions(asList(topicPartitions)); - ShareGroupDescribeResponseData group0Data = new ShareGroupDescribeResponseData(); - group0Data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() + ShareGroupDescribeResponseData groupData = new ShareGroupDescribeResponseData(); + groupData.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(GROUP_ID) - .setGroupState(ShareGroupState.STABLE.toString()) + .setGroupState(GroupState.STABLE.toString()) .setMembers(asList( new ShareGroupDescribeResponseData.Member() .setMemberId("0") @@ -5109,11 +5196,9 @@ public void testDescribeMultipleShareGroups() { .setClientId("clientId1") .setClientHost("clientHost") .setAssignment(memberAssignment)))); - - ShareGroupDescribeResponseData group1Data = new ShareGroupDescribeResponseData(); - group1Data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() + groupData.groups().add(new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId("group-1") - .setGroupState(ShareGroupState.STABLE.toString()) + .setGroupState(GroupState.STABLE.toString()) .setMembers(asList( new ShareGroupDescribeResponseData.Member() .setMemberId("0") @@ -5126,8 +5211,7 @@ public void testDescribeMultipleShareGroups() { .setClientHost("clientHost") .setAssignment(memberAssignment)))); - env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(group0Data)); - env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(group1Data)); + env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(groupData)); Collection groups = new HashSet<>(); groups.add(GROUP_ID); @@ -5135,6 +5219,9 @@ public void testDescribeMultipleShareGroups() { final DescribeShareGroupsResult result = env.adminClient().describeShareGroups(groups); assertEquals(2, result.describedGroups().size()); assertEquals(groups, result.describedGroups().keySet()); + KafkaFuture> allFuture = result.all(); + assertDoesNotThrow(() -> allFuture.get()); + assertFalse(allFuture.isCompletedExceptionally()); } } @@ -5222,16 +5309,16 
@@ public void testListShareGroups() throws Exception { .setGroups(Collections.emptyList())), env.cluster().nodeById(3)); - final ListShareGroupsResult result = env.adminClient().listShareGroups(); - TestUtils.assertFutureError(result.all(), UnknownServerException.class); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + TestUtils.assertFutureThrows(result.all(), UnknownServerException.class); - Collection listings = result.valid().get(); + Collection listings = result.valid().get(); assertEquals(4, listings.size()); Set groupIds = new HashSet<>(); - for (ShareGroupListing listing : listings) { + for (GroupListing listing : listings) { groupIds.add(listing.groupId()); - assertTrue(listing.state().isPresent()); + assertTrue(listing.groupState().isPresent()); } assertEquals(Set.of("share-group-1", "share-group-2", "share-group-3", "share-group-4"), groupIds); @@ -5257,8 +5344,8 @@ public void testListShareGroupsMetadataFailure() throws Exception { -1, Collections.emptyList())); - final ListShareGroupsResult result = env.adminClient().listShareGroups(); - TestUtils.assertFutureError(result.all(), KafkaException.class); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + TestUtils.assertFutureThrows(result.all(), KafkaException.class); } } @@ -5276,28 +5363,29 @@ public void testListShareGroupsWithStates() throws Exception { new ListGroupsResponseData.ListedGroup() .setGroupId("share-group-1") .setGroupType(GroupType.SHARE.toString()) + .setProtocolType("share") .setGroupState("Stable"), new ListGroupsResponseData.ListedGroup() .setGroupId("share-group-2") .setGroupType(GroupType.SHARE.toString()) + .setProtocolType("share") .setGroupState("Empty")))), env.cluster().nodeById(0)); - final ListShareGroupsOptions options = new ListShareGroupsOptions(); - final ListShareGroupsResult result = env.adminClient().listShareGroups(options); - Collection listings = result.valid().get(); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + Collection listings = result.valid().get(); assertEquals(2, listings.size()); - List expected = new ArrayList<>(); - expected.add(new ShareGroupListing("share-group-1", Optional.of(ShareGroupState.STABLE))); - expected.add(new ShareGroupListing("share-group-2", Optional.of(ShareGroupState.EMPTY))); + List expected = new ArrayList<>(); + expected.add(new GroupListing("share-group-1", Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE))); + expected.add(new GroupListing("share-group-2", Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.EMPTY))); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); } } @Test - public void testListShareGroupsWithStatesOlderBrokerVersion() throws Exception { + public void testListShareGroupsWithStatesOlderBrokerVersion() { ApiVersion listGroupV4 = new ApiVersion() .setApiKey(ApiKeys.LIST_GROUPS.id) .setMinVersion((short) 0) @@ -5315,8 +5403,7 @@ public void testListShareGroupsWithStatesOlderBrokerVersion() throws Exception { new ListGroupsResponseData.ListedGroup() .setGroupId("share-group-1")))), env.cluster().nodeById(0)); - ListShareGroupsOptions options = new ListShareGroupsOptions(); - ListShareGroupsResult result = env.adminClient().listShareGroups(options); + ListGroupsResult result = env.adminClient().listGroups(new 
ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } @@ -5553,10 +5640,10 @@ public void testIncrementalAlterConfigs() throws Exception { configs.put(groupResource, singletonList(alterConfigOp4)); AlterConfigsResult result = env.adminClient().incrementalAlterConfigs(configs); - TestUtils.assertFutureError(result.values().get(brokerResource), ClusterAuthorizationException.class); - TestUtils.assertFutureError(result.values().get(topicResource), InvalidRequestException.class); - TestUtils.assertFutureError(result.values().get(metricResource), InvalidRequestException.class); - TestUtils.assertFutureError(result.values().get(groupResource), InvalidConfigurationException.class); + TestUtils.assertFutureThrows(result.values().get(brokerResource), ClusterAuthorizationException.class); + TestUtils.assertFutureThrows(result.values().get(topicResource), InvalidRequestException.class); + TestUtils.assertFutureThrows(result.values().get(metricResource), InvalidRequestException.class); + TestUtils.assertFutureThrows(result.values().get(groupResource), InvalidConfigurationException.class); // Test a call where there are no errors. responseData = new IncrementalAlterConfigsResponseData(); @@ -5605,7 +5692,7 @@ public void testRemoveMembersFromGroupNumRetries() throws Exception { final RemoveMembersFromConsumerGroupResult result = env.adminClient().removeMembersFromConsumerGroup( GROUP_ID, new RemoveMembersFromConsumerGroupOptions(membersToRemove)); - TestUtils.assertFutureError(result.all(), TimeoutException.class); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); } } @@ -5741,8 +5828,8 @@ public void testRemoveMembersFromGroupNonRetriableErrors() throws Exception { final RemoveMembersFromConsumerGroupResult result = env.adminClient().removeMembersFromConsumerGroup( GROUP_ID, new RemoveMembersFromConsumerGroupOptions(membersToRemove)); - TestUtils.assertFutureError(result.all(), error.exception().getClass()); - TestUtils.assertFutureError(result.memberResult(memberToRemove), error.exception().getClass()); + TestUtils.assertFutureThrows(result.all(), error.exception().getClass()); + TestUtils.assertFutureThrows(result.memberResult(memberToRemove), error.exception().getClass()); } } } @@ -5777,8 +5864,8 @@ public void testRemoveMembersFromGroup() throws Exception { MemberToRemove memberOne = new MemberToRemove(instanceOne); MemberToRemove memberTwo = new MemberToRemove(instanceTwo); - TestUtils.assertFutureError(unknownErrorResult.memberResult(memberOne), UnknownServerException.class); - TestUtils.assertFutureError(unknownErrorResult.memberResult(memberTwo), UnknownServerException.class); + TestUtils.assertFutureThrows(unknownErrorResult.memberResult(memberOne), UnknownServerException.class); + TestUtils.assertFutureThrows(unknownErrorResult.memberResult(memberTwo), UnknownServerException.class); MemberResponse responseOne = new MemberResponse() .setGroupInstanceId(instanceOne) @@ -5799,8 +5886,8 @@ public void testRemoveMembersFromGroup() throws Exception { new RemoveMembersFromConsumerGroupOptions(membersToRemove) ); - TestUtils.assertFutureError(memberLevelErrorResult.all(), UnknownMemberIdException.class); - TestUtils.assertFutureError(memberLevelErrorResult.memberResult(memberOne), UnknownMemberIdException.class); + TestUtils.assertFutureThrows(memberLevelErrorResult.all(), UnknownMemberIdException.class); + TestUtils.assertFutureThrows(memberLevelErrorResult.memberResult(memberOne), 
UnknownMemberIdException.class); assertNull(memberLevelErrorResult.memberResult(memberTwo).get()); // Return with missing member. @@ -5814,9 +5901,9 @@ public void testRemoveMembersFromGroup() throws Exception { new RemoveMembersFromConsumerGroupOptions(membersToRemove) ); - TestUtils.assertFutureError(missingMemberResult.all(), IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingMemberResult.all(), IllegalArgumentException.class); // The memberOne was not included in the response. - TestUtils.assertFutureError(missingMemberResult.memberResult(memberOne), IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingMemberResult.memberResult(memberOne), IllegalArgumentException.class); assertNull(missingMemberResult.memberResult(memberTwo).get()); @@ -5968,8 +6055,8 @@ public void testAlterPartitionReassignments() throws Exception { AlterPartitionReassignmentsResult result1 = env.adminClient().alterPartitionReassignments(reassignments); Future future1 = result1.all(); Future future2 = result1.values().get(tp1); - TestUtils.assertFutureError(future1, UnknownServerException.class); - TestUtils.assertFutureError(future2, UnknownServerException.class); + TestUtils.assertFutureThrows(future1, UnknownServerException.class); + TestUtils.assertFutureThrows(future2, UnknownServerException.class); // 2. NOT_CONTROLLER error handling AlterPartitionReassignmentsResponseData controllerErrResponseData = @@ -6020,7 +6107,7 @@ public void testAlterPartitionReassignments() throws Exception { ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(partitionLevelErrData)); AlterPartitionReassignmentsResult partitionLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments); - TestUtils.assertFutureError(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass()); + TestUtils.assertFutureThrows(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass()); partitionLevelErrResult.values().get(tp2).get(); // 4. top-level error @@ -6060,8 +6147,8 @@ public void testAlterPartitionReassignments() throws Exception { ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(singlePartResponseData)); AlterPartitionReassignmentsResult unrepresentableTopicResult = env.adminClient().alterPartitionReassignments(invalidTopicReassignments); - TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class); - TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class); + TestUtils.assertFutureThrows(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class); + TestUtils.assertFutureThrows(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class); unrepresentableTopicResult.values().get(tp1).get(); // Test success scenario @@ -6130,7 +6217,7 @@ public void testListPartitionReassignments() throws Exception { env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData)); ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(new HashSet<>(asList(tp1, tp2))); - TestUtils.assertFutureError(unknownTpResult.reassignments(), UnknownTopicOrPartitionException.class); + TestUtils.assertFutureThrows(unknownTpResult.reassignments(), UnknownTopicOrPartitionException.class); // 3. 
Success ListPartitionReassignmentsResponseData responseData = new ListPartitionReassignmentsResponseData() @@ -6181,7 +6268,7 @@ public void testAlterConsumerGroupOffsets() throws Exception { assertNull(result.all().get()); assertNull(result.partitionResult(tp1).get()); assertNull(result.partitionResult(tp2).get()); - TestUtils.assertFutureError(result.partitionResult(tp3), IllegalArgumentException.class); + TestUtils.assertFutureThrows(result.partitionResult(tp3), IllegalArgumentException.class); } } @@ -6250,8 +6337,8 @@ public void testAlterConsumerGroupOffsetsNonRetriableErrors() throws Exception { AlterConsumerGroupOffsetsResult errorResult = env.adminClient() .alterConsumerGroupOffsets(GROUP_ID, offsets); - TestUtils.assertFutureError(errorResult.all(), error.exception().getClass()); - TestUtils.assertFutureError(errorResult.partitionResult(tp1), error.exception().getClass()); + TestUtils.assertFutureThrows(errorResult.all(), error.exception().getClass()); + TestUtils.assertFutureThrows(errorResult.partitionResult(tp1), error.exception().getClass()); } } } @@ -6303,8 +6390,8 @@ public void testAlterConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() thr final AlterConsumerGroupOffsetsResult errorResult = env.adminClient() .alterConsumerGroupOffsets(GROUP_ID, offsets); - TestUtils.assertFutureError(errorResult.all(), GroupAuthorizationException.class); - TestUtils.assertFutureError(errorResult.partitionResult(tp1), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(errorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(errorResult.partitionResult(tp1), GroupAuthorizationException.class); } } @@ -6478,7 +6565,7 @@ public void testListOffsetsNonRetriableErrors() throws Exception { partitions.put(tp0, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); - TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class); + TestUtils.assertFutureThrows(result.all(), TopicAuthorizationException.class); } } @@ -6549,7 +6636,7 @@ public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Ex .noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP), new ListOffsetsResponse(responseData), node); - ListOffsetsResult result = env.adminClient().listOffsets(new HashMap() {{ + ListOffsetsResult result = env.adminClient().listOffsets(new HashMap<>() {{ put(tp0, OffsetSpec.maxTimestamp()); put(tp1, OffsetSpec.latest()); }}); @@ -6610,7 +6697,7 @@ public void testListOffsetsHandlesFulfillmentTimeouts() throws Exception { new ListOffsetsResponse(responseDataWithError), node); } ListOffsetsResult result = env.adminClient().listOffsets( - new HashMap() { + new HashMap<>() { { put(tp0, OffsetSpec.latest()); put(tp1, OffsetSpec.latest()); @@ -6633,7 +6720,7 @@ public void testListOffsetsHandlesFulfillmentTimeouts() throws Exception { env.kafkaClient().prepareResponseFrom( request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData), node); result = env.adminClient().listOffsets( - new HashMap() { + new HashMap<>() { { put(tp0, OffsetSpec.latest()); put(tp1, OffsetSpec.latest()); @@ -7242,7 +7329,7 @@ public void testListOffsetsMetadataNonRetriableErrors( partitions.put(tp1, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); - TestUtils.assertFutureError(result.all(), expectedFailure); + TestUtils.assertFutureThrows(result.all(), expectedFailure); } } @@ -7560,8 +7647,8 @@ public void testAlterClientQuotas() 
throws Exception { AlterClientQuotasResult result = env.adminClient().alterClientQuotas(entries); result.values().get(goodEntity); - TestUtils.assertFutureError(result.values().get(unauthorizedEntity), ClusterAuthorizationException.class); - TestUtils.assertFutureError(result.values().get(invalidEntity), InvalidRequestException.class); + TestUtils.assertFutureThrows(result.values().get(unauthorizedEntity), ClusterAuthorizationException.class); + TestUtils.assertFutureThrows(result.values().get(invalidEntity), InvalidRequestException.class); // ensure immutable assertThrows(UnsupportedOperationException.class, () -> result.values().put(newClientQuotaEntity(ClientQuotaEntity.USER, "user-3"), null)); @@ -7600,7 +7687,7 @@ public void testAlterReplicaLogDirsLogDirNotFound() throws Exception { logDirs.put(tpr1, "/data1"); AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs); assertNull(result.values().get(tpr0).get()); - TestUtils.assertFutureError(result.values().get(tpr1), LogDirNotFoundException.class); + TestUtils.assertFutureThrows(result.values().get(tpr1), LogDirNotFoundException.class); } } @@ -7667,7 +7754,7 @@ public void testAlterReplicaLogDirsPartialFailure() throws Exception { // Advance time past the default api timeout to time out the inflight request time.sleep(defaultApiTimeout + 1); - TestUtils.assertFutureThrows(result.values().get(tpr1), ApiException.class); + TestUtils.assertFutureThrows(result.values().get(tpr1), TimeoutException.class); assertNull(result.values().get(tpr2).get()); } } @@ -7855,7 +7942,7 @@ public void testDescribeLogDirsPartialFailure() throws Exception { // Advance time past the default api timeout to time out the inflight request time.sleep(defaultApiTimeout + 1); - TestUtils.assertFutureThrows(result.descriptions().get(0), ApiException.class); + TestUtils.assertFutureThrows(result.descriptions().get(0), TimeoutException.class); assertNotNull(result.descriptions().get(1).get()); } } @@ -8609,16 +8696,19 @@ private static MemberDescription convertToMemberDescriptions(DescribedGroupMembe Optional.ofNullable(member.groupInstanceId()), member.clientId(), member.clientHost(), - assignment); + assignment, + Optional.empty(), + Optional.empty(), + Optional.empty()); } - private static MemberDescription convertToMemberDescriptions(ShareGroupDescribeResponseData.Member member, - MemberAssignment assignment) { - return new MemberDescription(member.memberId(), - Optional.empty(), - member.clientId(), - member.clientHost(), - assignment); + private static ShareMemberDescription convertToShareMemberDescriptions(ShareGroupDescribeResponseData.Member member, + ShareMemberAssignment assignment) { + return new ShareMemberDescription(member.memberId(), + member.clientId(), + member.clientHost(), + assignment, + member.memberEpoch()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java new file mode 100644 index 0000000000000..75d6c1c88c537 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin; + +import org.apache.kafka.common.ConsumerGroupState; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ListConsumerGroupsOptionsTest { + @Test + public void testState() { + Set consumerGroupStates = new HashSet<>(Arrays.asList(ConsumerGroupState.values())); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(consumerGroupStates); + assertEquals(consumerGroupStates, options.states()); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java index 0bddc618cfc03..7c3e928b3a636 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java @@ -41,20 +41,31 @@ public class MemberDescriptionTest { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); } @Test public void testEqualsWithoutGroupInstanceId() { MemberDescription dynamicMemberDescription = new MemberDescription(MEMBER_ID, + Optional.empty(), CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); MemberDescription identityDescription = new MemberDescription(MEMBER_ID, + Optional.empty(), CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); assertNotEquals(STATIC_MEMBER_DESCRIPTION, dynamicMemberDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), dynamicMemberDescription.hashCode()); @@ -74,7 +85,10 @@ public void testEqualsWithGroupInstanceId() { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); assertEquals(STATIC_MEMBER_DESCRIPTION, identityDescription); assertEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), identityDescription.hashCode()); @@ -86,7 +100,10 @@ public void testNonEqual() { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); assertNotEquals(STATIC_MEMBER_DESCRIPTION, newMemberDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newMemberDescription.hashCode()); @@ -95,9 +112,45 @@ public void testNonEqual() { Optional.of("new_instance"), CLIENT_ID, HOST, - ASSIGNMENT); + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.empty()); assertNotEquals(STATIC_MEMBER_DESCRIPTION, newInstanceDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newInstanceDescription.hashCode()); + + MemberDescription newTargetAssignmentDescription = new MemberDescription(MEMBER_ID, + INSTANCE_ID, + CLIENT_ID, + HOST, + ASSIGNMENT, + Optional.of(ASSIGNMENT), + 
Optional.empty(), + Optional.empty()); + assertNotEquals(STATIC_MEMBER_DESCRIPTION, newTargetAssignmentDescription); + assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newTargetAssignmentDescription.hashCode()); + + MemberDescription newMemberEpochDescription = new MemberDescription(MEMBER_ID, + INSTANCE_ID, + CLIENT_ID, + HOST, + ASSIGNMENT, + Optional.empty(), + Optional.of(1), + Optional.empty()); + assertNotEquals(STATIC_MEMBER_DESCRIPTION, newMemberEpochDescription); + assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newMemberEpochDescription.hashCode()); + + MemberDescription newIsClassicDescription = new MemberDescription(MEMBER_ID, + INSTANCE_ID, + CLIENT_ID, + HOST, + ASSIGNMENT, + Optional.empty(), + Optional.empty(), + Optional.of(false)); + assertNotEquals(STATIC_MEMBER_DESCRIPTION, newIsClassicDescription); + assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newIsClassicDescription.hashCode()); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java index 482240e57779d..ed2d7c61f08f9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java @@ -21,6 +21,7 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Metric; @@ -646,7 +647,7 @@ public synchronized CreateDelegationTokenResult createDelegationToken(CreateDele } String tokenId = Uuid.randomUuid().toString(); - TokenInformation tokenInfo = new TokenInformation(tokenId, options.renewers().get(0), options.renewers(), System.currentTimeMillis(), options.maxlifeTimeMs(), -1); + TokenInformation tokenInfo = new TokenInformation(tokenId, options.renewers().get(0), options.renewers(), System.currentTimeMillis(), options.maxLifetimeMs(), -1); DelegationToken token = new DelegationToken(tokenInfo, tokenId.getBytes()); allTokens.add(token); future.complete(token); @@ -724,7 +725,7 @@ public synchronized DescribeDelegationTokenResult describeDelegationToken(Descri @Override public synchronized ListGroupsResult listGroups(ListGroupsOptions options) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); - future.complete(groupConfigs.keySet().stream().map(g -> new GroupListing(g, Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE)).collect(Collectors.toList())); + future.complete(groupConfigs.keySet().stream().map(g -> new GroupListing(g, Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))).collect(Collectors.toList())); return new ListGroupsResult(future); } @@ -870,12 +871,6 @@ private static Config toConfigObject(Map map) { return new Config(configEntries); } - @Override - @Deprecated - public synchronized AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override public synchronized AlterConfigsResult incrementalAlterConfigs( Map> configs, @@ -1395,7 +1390,7 @@ public synchronized DescribeShareGroupsResult describeShareGroups(Collection groupSpecs, ListShareGroupOffsetsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); } diff --git 
a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java index 40f7f5ff49969..1b0ac0ba0c462 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java @@ -62,7 +62,7 @@ public void testTopLevelErrorConstructor() throws InterruptedException { memberFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); RemoveMembersFromConsumerGroupResult topLevelErrorResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureThrows(topLevelErrorResult.all(), GroupAuthorizationException.class); } @Test @@ -78,9 +78,9 @@ public void testMemberMissingErrorInRequestConstructor() throws InterruptedExcep RemoveMembersFromConsumerGroupResult missingMemberResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureError(missingMemberResult.all(), IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingMemberResult.all(), IllegalArgumentException.class); assertNull(missingMemberResult.memberResult(instanceOne).get()); - TestUtils.assertFutureError(missingMemberResult.memberResult(instanceTwo), IllegalArgumentException.class); + TestUtils.assertFutureThrows(missingMemberResult.memberResult(instanceTwo), IllegalArgumentException.class); } @Test @@ -111,9 +111,9 @@ private RemoveMembersFromConsumerGroupResult createAndVerifyMemberLevelError() t RemoveMembersFromConsumerGroupResult memberLevelErrorResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureError(memberLevelErrorResult.all(), FencedInstanceIdException.class); + TestUtils.assertFutureThrows(memberLevelErrorResult.all(), FencedInstanceIdException.class); assertNull(memberLevelErrorResult.memberResult(instanceOne).get()); - TestUtils.assertFutureError(memberLevelErrorResult.memberResult(instanceTwo), FencedInstanceIdException.class); + TestUtils.assertFutureThrows(memberLevelErrorResult.memberResult(instanceTwo), FencedInstanceIdException.class); return memberLevelErrorResult; } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java index 54ac41755823e..1e6823ea8ee64 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -98,6 +99,57 @@ public void testAuthenticationFailure() { assertTrue(mgr.isReady()); } + @Test + public void testAuthorizationFailure() { + mgr.transitionToUpdatePending(time.milliseconds()); + mgr.updateFailed(new AuthorizationException("Authorization failed")); + assertEquals(refreshBackoffMs, mgr.metadataFetchDelayMs(time.milliseconds())); + 
assertThrows(AuthorizationException.class, mgr::isReady); + mgr.update(mockCluster(), time.milliseconds()); + assertTrue(mgr.isReady()); + } + + @Test + public void testNeedsRebootstrap() { + long rebootstrapTriggerMs = 1000; + mgr.update(Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9999))), time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertFalse(mgr.needsRebootstrap(time.milliseconds() + 2000, rebootstrapTriggerMs)); + + mgr.transitionToUpdatePending(time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertTrue(mgr.needsRebootstrap(time.milliseconds() + 1001, rebootstrapTriggerMs)); + + time.sleep(100); + mgr.updateFailed(new RuntimeException()); + assertFalse(mgr.needsRebootstrap(time.milliseconds() + 900, rebootstrapTriggerMs)); + assertTrue(mgr.needsRebootstrap(time.milliseconds() + 901, rebootstrapTriggerMs)); + + time.sleep(1000); + mgr.update(mockCluster(), time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertFalse(mgr.needsRebootstrap(time.milliseconds() + 2000, rebootstrapTriggerMs)); + + time.sleep(1000); + mgr.transitionToUpdatePending(time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertTrue(mgr.needsRebootstrap(time.milliseconds() + 1001, rebootstrapTriggerMs)); + + time.sleep(1001); + assertTrue(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + mgr.rebootstrap(time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertFalse(mgr.needsRebootstrap(time.milliseconds() + 1000, rebootstrapTriggerMs)); + assertTrue(mgr.needsRebootstrap(time.milliseconds() + 1001, rebootstrapTriggerMs)); + + mgr.initiateRebootstrap(); + assertTrue(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + mgr.rebootstrap(time.milliseconds()); + assertFalse(mgr.needsRebootstrap(time.milliseconds(), rebootstrapTriggerMs)); + assertFalse(mgr.needsRebootstrap(time.milliseconds() + 1000, rebootstrapTriggerMs)); + assertTrue(mgr.needsRebootstrap(time.milliseconds() + 1001, rebootstrapTriggerMs)); + } + private static Cluster mockCluster() { HashMap nodes = new HashMap<>(); nodes.put(0, new Node(0, "localhost", 8121)); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java index 9651964e1c68c..4cd9d613095c8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java @@ -61,7 +61,7 @@ public class DeleteRecordsHandlerTest { private final TopicPartition t0p3 = new TopicPartition("t0", 3); private final Node node1 = new Node(1, "host", 1234); private final Node node2 = new Node(2, "host", 1235); - private final Map recordsToDelete = new HashMap() { + private final Map recordsToDelete = new HashMap<>() { { put(t0p0, RecordsToDelete.beforeOffset(10L)); put(t0p1, RecordsToDelete.beforeOffset(10L)); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java index cfbf67e2090d8..20cf0b761e641 100644 --- 
a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -54,6 +55,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; +import java.util.List; import java.util.Optional; import java.util.Set; @@ -152,29 +154,46 @@ public void testInvalidBuildRequest() { @Test public void testSuccessfulHandleConsumerGroupResponse() { DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(false, logContext); - Collection members = singletonList(new MemberDescription( - "memberId", - Optional.of("instanceId"), - "clientId", - "host", - new MemberAssignment(Set.of( - new TopicPartition("foo", 0), - new TopicPartition("bar", 1)) + Collection members = List.of( + new MemberDescription( + "memberId", + Optional.of("instanceId"), + "clientId", + "host", + new MemberAssignment(Set.of( + new TopicPartition("foo", 0) + )), + Optional.of(new MemberAssignment(Set.of( + new TopicPartition("foo", 1) + ))), + Optional.of(10), + Optional.of(true) ), - Optional.of(new MemberAssignment(Set.of( - new TopicPartition("foo", 1), - new TopicPartition("bar", 2) - ))) - )); + new MemberDescription( + "memberId-classic", + Optional.of("instanceId-classic"), + "clientId-classic", + "host", + new MemberAssignment(Set.of( + new TopicPartition("bar", 0) + )), + Optional.of(new MemberAssignment(Set.of( + new TopicPartition("bar", 1) + ))), + Optional.of(9), + Optional.of(false) + )); ConsumerGroupDescription expected = new ConsumerGroupDescription( groupId1, false, members, "range", GroupType.CONSUMER, - ConsumerGroupState.STABLE, + GroupState.STABLE, coordinator, - Collections.emptySet() + Collections.emptySet(), + Optional.of(10), + Optional.of(10) ); AdminApiHandler.ApiResult result = handler.handleResponse( coordinator, @@ -189,7 +208,7 @@ public void testSuccessfulHandleConsumerGroupResponse() { .setAssignmentEpoch(10) .setAssignorName("range") .setAuthorizedOperations(Utils.to32BitField(emptySet())) - .setMembers(singletonList( + .setMembers(List.of( new ConsumerGroupDescribeResponseData.Member() .setMemberId("memberId") .setInstanceId("instanceId") @@ -200,27 +219,44 @@ public void testSuccessfulHandleConsumerGroupResponse() { .setSubscribedTopicNames(singletonList("foo")) .setSubscribedTopicRegex("regex") .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupDescribeResponseData.TopicPartitions() .setTopicId(Uuid.randomUuid()) .setTopicName("foo") - .setPartitions(Collections.singletonList(0)), + .setPartitions(Collections.singletonList(0)) + ))) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List.of( new ConsumerGroupDescribeResponseData.TopicPartitions() .setTopicId(Uuid.randomUuid()) - .setTopicName("bar") + .setTopicName("foo") .setPartitions(Collections.singletonList(1)) ))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + 
.setMemberType((byte) 1), + new ConsumerGroupDescribeResponseData.Member() + .setMemberId("memberId-classic") + .setInstanceId("instanceId-classic") + .setClientHost("host") + .setClientId("clientId-classic") + .setMemberEpoch(9) + .setRackId("rackid") + .setSubscribedTopicNames(singletonList("bar")) + .setSubscribedTopicRegex("regex") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List.of( new ConsumerGroupDescribeResponseData.TopicPartitions() .setTopicId(Uuid.randomUuid()) - .setTopicName("foo") - .setPartitions(Collections.singletonList(1)), + .setTopicName("bar") + .setPartitions(Collections.singletonList(0)) + ))) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List.of( new ConsumerGroupDescribeResponseData.TopicPartitions() .setTopicId(Uuid.randomUuid()) .setTopicName("bar") - .setPartitions(Collections.singletonList(2)) + .setPartitions(Collections.singletonList(1)) ))) + .setMemberType((byte) 0) )) )) ) @@ -232,9 +268,13 @@ public void testSuccessfulHandleConsumerGroupResponse() { public void testSuccessfulHandleClassicGroupResponse() { Collection members = singletonList(new MemberDescription( "memberId", + Optional.empty(), "clientId", "host", - new MemberAssignment(tps))); + new MemberAssignment(tps), + Optional.empty(), + Optional.empty(), + Optional.empty())); ConsumerGroupDescription expected = new ConsumerGroupDescription( groupId1, true, diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java index d9935fbdca6e8..e3bb56347a8ae 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java @@ -177,11 +177,11 @@ public void testSuccessfulHandleResponseWithOnePartitionErrorWithMultipleGroups( Map offsetAndMetadataMapTwo = Collections.singletonMap(t2p2, new OffsetAndMetadata(10L)); Map> expectedResult = - new HashMap>() {{ - put(groupZero, offsetAndMetadataMapZero); - put(groupOne, offsetAndMetadataMapOne); - put(groupTwo, offsetAndMetadataMapTwo); - }}; + new HashMap<>() {{ + put(groupZero, offsetAndMetadataMapZero); + put(groupOne, offsetAndMetadataMapOne); + put(groupTwo, offsetAndMetadataMapTwo); + }}; assertCompletedForMultipleGroups( handleWithPartitionErrorMultipleGroups(Errors.UNKNOWN_TOPIC_OR_PARTITION), expectedResult); @@ -304,11 +304,11 @@ private OffsetFetchResponse buildResponseWithPartitionErrorWithMultipleGroups(Er responseDataTwo.put(t2p2, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); Map> responseData = - new HashMap>() {{ - put(groupZero, responseDataZero); - put(groupOne, responseDataOne); - put(groupTwo, responseDataTwo); - }}; + new HashMap<>() {{ + put(groupZero, responseDataZero); + put(groupOne, responseDataOne); + put(groupTwo, responseDataTwo); + }}; Map errorMap = errorMap(groups, Errors.NONE); return new OffsetFetchResponse(0, errorMap, responseData); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java index 5ad92ce1c9561..a7156554001aa 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java +++ 
b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java @@ -65,7 +65,7 @@ public final class ListOffsetsHandlerTest { private final Node node = new Node(1, "host", 1234); - private final Map offsetTimestampsByPartition = new HashMap() { + private final Map offsetTimestampsByPartition = new HashMap<>() { { put(t0p0, ListOffsetsRequest.LATEST_TIMESTAMP); put(t0p1, ListOffsetsRequest.EARLIEST_TIMESTAMP); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java new file mode 100644 index 0000000000000..4e03ae7d952f6 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java @@ -0,0 +1,491 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.admin.internals; + +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.errors.LeaderNotAvailableException; +import org.apache.kafka.common.errors.NotLeaderOrFollowerException; +import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.errors.UnknownServerException; +import org.apache.kafka.common.message.ListOffsetsResponseData; +import org.apache.kafka.common.message.MetadataRequestData; +import org.apache.kafka.common.message.MetadataResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.AbstractRequest; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.requests.ListOffsetsResponse; +import org.apache.kafka.common.requests.MetadataRequest; +import org.apache.kafka.common.requests.MetadataResponse; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.OptionalInt; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class PartitionLeaderStrategyIntegrationTest { + private static final long TIMEOUT_MS = 5000; + private static final long RETRY_BACKOFF_MS = 100; + + private static final Node NODE_1 = new Node(1, "host1", 9092); + private static final Node NODE_2 = new Node(2, 
"host2", 9092); + + private final LogContext logContext = new LogContext(); + private final MockTime time = new MockTime(); + + private AdminApiDriver buildDriver( + PartitionLeaderStrategy.PartitionLeaderFuture result + ) { + return new AdminApiDriver<>( + new MockApiHandler(), + result, + time.milliseconds() + TIMEOUT_MS, + RETRY_BACKOFF_MS, + RETRY_BACKOFF_MS, + logContext + ); + } + + @Test + public void testCachingRepeatedRequest() { + Map partitionLeaderCache = new HashMap<>(); + + TopicPartition tp0 = new TopicPartition("T", 0); + TopicPartition tp1 = new TopicPartition("T", 1); + Set requestKeys = Set.of(tp0, tp1); + + // First, the lookup stage needs to obtain leadership data because the cache is empty + PartitionLeaderStrategy.PartitionLeaderFuture result = + new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + AdminApiDriver driver = buildDriver(result); + + List> requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(requestKeys, requestSpecs.get(0).keys); + + // The cache will be populated using the leader information from this metadata response + Map leaders = Map.of(tp0, 1, tp1, 2); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + assertFalse(result.all().get(tp0).isDone()); + assertFalse(result.all().get(tp1).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(2, partitionLeaderCache.get(tp1)); + + // Second, the fulfillment stage makes the actual requests + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), requestSpecs.get(1).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + + // On the second request, the partition leader cache already contains all the leadership + // data so the request goes straight to the fulfillment stage + result = new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + driver = buildDriver(result); + + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + // We can tell this is the fulfillment stage by the destination broker id being set + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), requestSpecs.get(1).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + } + + @Test + public void testCachingOverlappingRequests() { + // This test uses several requests to exercise the caching in various ways: + // 1) for T-0 and T-1 (initially the cache is empty) + // 2) for T-1 and T-2 (leadership data for T-1 should be cached from previous request) + // 3) for T-0, T-1 and T-2 (all leadership data should be cached 
already) + // 4) for T-0, T-1, T-2 and T-3 (just T-3 needs to be looked up) + Map partitionLeaderCache = new HashMap<>(); + + TopicPartition tp0 = new TopicPartition("T", 0); + TopicPartition tp1 = new TopicPartition("T", 1); + TopicPartition tp2 = new TopicPartition("T", 2); + TopicPartition tp3 = new TopicPartition("T", 3); + + // + // Request 1 - T-0 and T-1 + // + Set requestKeys = Set.of(tp0, tp1); + + // First, the lookup stage needs to obtain leadership data because the cache is empty + PartitionLeaderStrategy.PartitionLeaderFuture result = + new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + AdminApiDriver driver = buildDriver(result); + + List> requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(requestKeys, requestSpecs.get(0).keys); + + // The cache will be populated using the leader information from this metadata response + Map leaders = Map.of(tp0, 1, tp1, 2); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + assertFalse(result.all().get(tp0).isDone()); + assertFalse(result.all().get(tp1).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(2, partitionLeaderCache.get(tp1)); + + // Second, the fulfillment stage makes the actual requests + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), requestSpecs.get(1).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + + // + // Request 2 - T-1 and T-2 + // + // On the second request, the partition leader cache already contains some of the leadership data. + // Now the lookup and fulfillment stages overlap. 
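+        // Illustrative check (editorial sketch, assuming the driver consults the shared
+        // partitionLeaderCache as the comments above describe; not part of the original test):
+        // tp1's leader is already cached from request 1 and can be fulfilled immediately,
+        // while tp2 still needs a metadata lookup - that is the overlap exercised below.
+        assertTrue(partitionLeaderCache.containsKey(tp1));
+        assertFalse(partitionLeaderCache.containsKey(tp2));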
+ requestKeys = Set.of(tp1, tp2); + result = new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + driver = buildDriver(result); + + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(Collections.singleton(tp2), requestSpecs.get(0).keys); + assertEquals(OptionalInt.of(2), requestSpecs.get(1).scope.destinationBrokerId()); + + // The cache will be populated using the leader information from this metadata response + leaders = Map.of(tp2, 1); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_2); + assertTrue(result.all().get(tp1).isDone()); // Already fulfilled + assertFalse(result.all().get(tp2).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(2, partitionLeaderCache.get(tp1)); + assertEquals(1, partitionLeaderCache.get(tp2)); + + // Finally, the fulfillment stage makes the actual request for the uncached topic-partition + requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + assertTrue(result.all().get(tp1).isDone()); + assertTrue(result.all().get(tp2).isDone()); + + // + // Request 3 - T-0, T-1 and T-2 + // + // On the third request, the partition leader cache contains all the leadership data + requestKeys = Set.of(tp0, tp1, tp2); + result = new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + driver = buildDriver(result); + + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + // We can tell this is the fulfillment stage by the destination broker id being set + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), requestSpecs.get(1).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + assertTrue(result.all().get(tp2).isDone()); + + // + // Request 4 - T-0, T-1, T-2 and T-3 + // + // On the fourth request, the partition leader cache already contains some of the leadership data. + // Now the lookup and fulfillment stages overlap. 
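+        // Illustrative sketch (assumption, not part of the original test): the subset of keys that
+        // still needs the lookup stage can be derived by filtering out everything already cached,
+        // which at this point leaves only T-3 - hence the single metadata request expected below.
+        Set<TopicPartition> stillNeedingLookup = Set.of(tp0, tp1, tp2, tp3).stream()
+                .filter(tp -> !partitionLeaderCache.containsKey(tp))
+                .collect(java.util.stream.Collectors.toSet());
+        assertEquals(Set.of(tp3), stillNeedingLookup);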
+ requestKeys = Set.of(tp0, tp1, tp2, tp3); + result = new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + driver = buildDriver(result); + + requestSpecs = driver.poll(); + assertEquals(3, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(Collections.singleton(tp3), requestSpecs.get(0).keys); + assertEquals(OptionalInt.of(1), requestSpecs.get(1).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), requestSpecs.get(2).scope.destinationBrokerId()); + + // The cache will be populated using the leader information from this metadata response + leaders = Map.of(tp3, 2); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseSuccess(requestSpecs.get(1).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(2), listOffsetsResponseSuccess(requestSpecs.get(2).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); // Already fulfilled + assertTrue(result.all().get(tp1).isDone()); // Already fulfilled + assertTrue(result.all().get(tp2).isDone()); // Already fulfilled + assertFalse(result.all().get(tp3).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(2, partitionLeaderCache.get(tp1)); + assertEquals(1, partitionLeaderCache.get(tp2)); + assertEquals(2, partitionLeaderCache.get(tp3)); + + // Finally, the fulfillment stage makes the actual request for the uncached topic-partition + requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.of(2), requestSpecs.get(0).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + assertTrue(result.all().get(tp2).isDone()); + assertTrue(result.all().get(tp3).isDone()); + } + + @Test + public void testNotLeaderFulfillmentError() { + Map partitionLeaderCache = new HashMap<>(); + + TopicPartition tp0 = new TopicPartition("T", 0); + TopicPartition tp1 = new TopicPartition("T", 1); + Set requestKeys = Set.of(tp0, tp1); + + // First, the lookup stage needs to obtain leadership data because the cache is empty + PartitionLeaderStrategy.PartitionLeaderFuture result = + new PartitionLeaderStrategy.PartitionLeaderFuture<>(requestKeys, partitionLeaderCache); + AdminApiDriver driver = buildDriver(result); + + List> requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(requestKeys, requestSpecs.get(0).keys); + + // The cache will be populated using the leader information from this metadata response + Map leaders = Map.of(tp0, 1, tp1, 2); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + assertFalse(result.all().get(tp0).isDone()); + assertFalse(result.all().get(tp1).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(2, partitionLeaderCache.get(tp1)); + + // Second, the fulfillment stage makes the actual requests + requestSpecs = driver.poll(); + assertEquals(2, requestSpecs.size()); + + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + assertEquals(OptionalInt.of(2), 
requestSpecs.get(1).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + driver.onResponse(time.milliseconds(), requestSpecs.get(1), listOffsetsResponseFailure(requestSpecs.get(1).keys, Errors.NOT_LEADER_OR_FOLLOWER), NODE_2); + assertTrue(result.all().get(tp0).isDone()); + assertFalse(result.all().get(tp1).isDone()); + + // Now the lookup occurs again - change leadership to node 1 + requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.empty(), requestSpecs.get(0).scope.destinationBrokerId()); + + leaders = Map.of(tp1, 1); + driver.onResponse(time.milliseconds(), requestSpecs.get(0), metadataResponseWithPartitionLeaders(leaders), Node.noNode()); + assertTrue(result.all().get(tp0).isDone()); + assertFalse(result.all().get(tp1).isDone()); + + assertEquals(1, partitionLeaderCache.get(tp0)); + assertEquals(1, partitionLeaderCache.get(tp1)); + + // And the fulfillment stage makes the actual request + requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + assertEquals(OptionalInt.of(1), requestSpecs.get(0).scope.destinationBrokerId()); + + driver.onResponse(time.milliseconds(), requestSpecs.get(0), listOffsetsResponseSuccess(requestSpecs.get(0).keys), NODE_1); + assertTrue(result.all().get(tp0).isDone()); + assertTrue(result.all().get(tp1).isDone()); + } + + @Test + public void testFatalLookupError() { + TopicPartition tp0 = new TopicPartition("T", 0); + Map partitionLeaderCache = new HashMap<>(); + PartitionLeaderStrategy.PartitionLeaderFuture result = + new PartitionLeaderStrategy.PartitionLeaderFuture<>(Collections.singleton(tp0), partitionLeaderCache); + AdminApiDriver driver = buildDriver(result); + + List> requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + AdminApiDriver.RequestSpec spec = requestSpecs.get(0); + assertEquals(Collections.singleton(tp0), spec.keys); + + driver.onFailure(time.milliseconds(), spec, new UnknownServerException()); + assertTrue(result.all().get(tp0).isDone()); + TestUtils.assertFutureThrows(result.all().get(tp0), UnknownServerException.class); + assertEquals(Collections.emptyList(), driver.poll()); + } + + @Test + public void testRetryLookupAfterDisconnect() { + TopicPartition tp0 = new TopicPartition("T", 0); + Map partitionLeaderCache = new HashMap<>(); + PartitionLeaderStrategy.PartitionLeaderFuture result = + new PartitionLeaderStrategy.PartitionLeaderFuture<>(Collections.singleton(tp0), partitionLeaderCache); + AdminApiDriver driver = buildDriver(result); + + List> requestSpecs = driver.poll(); + assertEquals(1, requestSpecs.size()); + + AdminApiDriver.RequestSpec spec = requestSpecs.get(0); + assertEquals(Collections.singleton(tp0), spec.keys); + + driver.onFailure(time.milliseconds(), spec, new DisconnectException()); + List> retrySpecs = driver.poll(); + assertEquals(1, retrySpecs.size()); + + AdminApiDriver.RequestSpec retrySpec = retrySpecs.get(0); + assertEquals(Collections.singleton(tp0), retrySpec.keys); + assertEquals(time.milliseconds(), retrySpec.nextAllowedTryMs); + assertEquals(Collections.emptyList(), driver.poll()); + } + + private MetadataResponse metadataResponseWithPartitionLeaders(Map mapping) { + MetadataResponseData response = new MetadataResponseData(); + mapping.forEach((tp, brokerId) -> response.topics().add(new MetadataResponseData.MetadataResponseTopic() + .setName(tp.topic()) + .setPartitions(Collections.singletonList(new 
MetadataResponseData.MetadataResponsePartition() + .setPartitionIndex(tp.partition()) + .setLeaderId(brokerId))))); + return new MetadataResponse(response, ApiKeys.METADATA.latestVersion()); + } + + private ListOffsetsResponse listOffsetsResponseSuccess(Set keys) { + // This structure is not quite how Kafka does it, but it works for the MockApiHandler + ListOffsetsResponseData response = new ListOffsetsResponseData(); + keys.forEach(tp -> { + ListOffsetsResponseData.ListOffsetsPartitionResponse partResponse = + new ListOffsetsResponseData.ListOffsetsPartitionResponse() + .setPartitionIndex(tp.partition()); + ListOffsetsResponseData.ListOffsetsTopicResponse topicResponse = + new ListOffsetsResponseData.ListOffsetsTopicResponse() + .setName(tp.topic()) + .setPartitions(Collections.singletonList(partResponse)); + response.topics().add(topicResponse); + }); + return new ListOffsetsResponse(response); + } + + private ListOffsetsResponse listOffsetsResponseFailure(Set keys, Errors error) { + // This structure is not quite how Kafka does it, but it works for the MockApiHandler + ListOffsetsResponseData response = new ListOffsetsResponseData(); + keys.forEach(tp -> { + ListOffsetsResponseData.ListOffsetsPartitionResponse partResponse = + new ListOffsetsResponseData.ListOffsetsPartitionResponse() + .setPartitionIndex(tp.partition()) + .setErrorCode(error.code()); + ListOffsetsResponseData.ListOffsetsTopicResponse topicResponse = + new ListOffsetsResponseData.ListOffsetsTopicResponse() + .setName(tp.topic()) + .setPartitions(Collections.singletonList(partResponse)); + response.topics().add(topicResponse); + }); + return new ListOffsetsResponse(response); + } + + private class MockApiHandler extends AdminApiHandler.Batched { + private final PartitionLeaderStrategy partitionLeaderStrategy = new PartitionLeaderStrategy(logContext); + + @Override + public String apiName() { + return "mock-api"; + } + + @Override + public AbstractRequest.Builder buildBatchedRequest( + int brokerId, + Set keys + ) { + return new MetadataRequest.Builder(new MetadataRequestData()); + } + + @Override + public ApiResult handleResponse( + Node broker, + Set keys, + AbstractResponse abstractResponse + ) { + ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse; + + Map completed = new HashMap<>(); + Map failed = new HashMap<>(); + List unmapped = new ArrayList<>(); + + response.topics().forEach(topic -> topic.partitions().forEach(partition -> { + TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex()); + if (partition.errorCode() != Errors.NONE.code()) { + Exception exception = Errors.forCode(partition.errorCode()).exception(); + if (exception instanceof NotLeaderOrFollowerException || exception instanceof LeaderNotAvailableException) { + unmapped.add(tp); + } else if (!(exception instanceof RetriableException)) { + failed.put(tp, Errors.forCode(partition.errorCode()).exception()); + } + } else { + completed.put(tp, null); + } + })); + + return new ApiResult<>(completed, failed, unmapped); + } + + @Override + public PartitionLeaderStrategy lookupStrategy() { + return partitionLeaderStrategy; + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java index 99c45f05c1559..2fa5515fb4073 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java @@ 
-210,7 +210,7 @@ public void testDefaultMetadataRecoveryStrategy() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); - assertEquals(MetadataRecoveryStrategy.NONE.name, consumerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); + assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, consumerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @Test @@ -237,4 +237,23 @@ public void testProtocolConfigValidation(String protocol, boolean isValid) { assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); } } + + @Test + public void testUnsupportedConfigsWithConsumerGroupProtocol() { + testUnsupportedConfigsWithConsumerGroupProtocol(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "RoundRobinAssignor"); + testUnsupportedConfigsWithConsumerGroupProtocol(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000); + testUnsupportedConfigsWithConsumerGroupProtocol(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000); + } + + private void testUnsupportedConfigsWithConsumerGroupProtocol(String configName, Object value) { + final Map configs = Map.of( + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass, + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name(), + configName, value + ); + ConfigException exception = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); + assertEquals(configName + " cannot be set when " + + ConsumerConfig.GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CONSUMER.name(), exception.getMessage()); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java index ab33c8f45d7fd..b4f649de579ae 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java @@ -150,6 +150,7 @@ private ConsumerConfig initConsumerConfigWithClassTypes(List classTypes) props.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, classTypes); + props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name()); return new ConsumerConfig(props); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java index c260fa48c019b..33ca2844305c7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java @@ -20,7 +20,11 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.MockClient; +import org.apache.kafka.clients.NetworkClient; import org.apache.kafka.clients.NodeApiVersions; +import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer; import 
org.apache.kafka.clients.consumer.internals.ConsumerMetadata; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.clients.consumer.internals.MockRebalanceListener; @@ -58,6 +62,9 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; @@ -88,6 +95,7 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetrySender; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -96,11 +104,13 @@ import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.TestUtils; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.CallsRealMethods; import java.lang.management.ManagementFactory; @@ -145,6 +155,7 @@ import static org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.DEFAULT_REASON; import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID; import static org.apache.kafka.common.utils.Utils.propsToMap; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -157,8 +168,10 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -212,7 +225,7 @@ public class KafkaConsumerTest { private final Collection singleTopicPartition = Collections.singleton(new TopicPartition(topic, 0)); private final Time time = new MockTime(); - private final SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final ConsumerPartitionAssignor assignor = new RoundRobinAssignor(); private KafkaConsumer consumer; @@ -224,6 +237,107 @@ public void cleanup() { } } + @ParameterizedTest + @EnumSource(GroupProtocol.class) + public void testSubscribingCustomMetricsDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) { + Properties props = new Properties(); + props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); + 
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer()); + + Map customMetrics = customMetrics(); + customMetrics.forEach((name, metric) -> consumer.registerMetricForSubscription(metric)); + + Map consumerMetrics = consumer.metrics(); + customMetrics.forEach((name, metric) -> assertFalse(consumerMetrics.containsKey(name))); + } + + @ParameterizedTest + @EnumSource(GroupProtocol.class) + public void testSubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) { + Properties props = new Properties(); + props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); + props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + Class consumerClass = groupProtocol == GroupProtocol.CLASSIC ? ClassicKafkaConsumer.class : AsyncKafkaConsumer.class; + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(consumerClass, Level.DEBUG); + consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer()); + KafkaMetric existingMetricToAdd = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.registerMetricForSubscription(existingMetricToAdd); + final String expectedMessage = String.format("Skipping registration for metric %s. Existing consumer metrics cannot be overwritten.", existingMetricToAdd.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); + } + } + + @ParameterizedTest + @EnumSource(GroupProtocol.class) + public void testUnsubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) { + Properties props = new Properties(); + props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); + props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + Class consumerClass = groupProtocol == GroupProtocol.CLASSIC ? ClassicKafkaConsumer.class : AsyncKafkaConsumer.class; + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(consumerClass, Level.DEBUG); + consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer()); + KafkaMetric existingMetricToRemove = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.unregisterMetricFromSubscription(existingMetricToRemove); + final String expectedMessage = String.format("Skipping unregistration for metric %s. 
Existing consumer metrics cannot be removed.", existingMetricToRemove.metricName());
+            assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage)));
+        }
+    }
+
+    @ParameterizedTest
+    @EnumSource(GroupProtocol.class)
+    public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingConsumerMetric(GroupProtocol groupProtocol) {
+        try (MockedStatic<CommonClientConfigs> mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) {
+            ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class);
+            clientTelemetryReporter.configure(any());
+            mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter));
+
+            Properties props = new Properties();
+            props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
+            props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
+            consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
+
+            KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
+            consumer.registerMetricForSubscription(existingMetric);
+            // This test would fail without the check as the existing metric is registered in the consumer on startup
+            Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric);
+        }
+    }
+
+    @ParameterizedTest
+    @EnumSource(GroupProtocol.class)
+    public void testShouldNotCallMetricReporterMetricRemovalWithExistingConsumerMetric(GroupProtocol groupProtocol) {
+        try (MockedStatic<CommonClientConfigs> mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) {
+            ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class);
+            clientTelemetryReporter.configure(any());
+            mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter));
+
+            Properties props = new Properties();
+            props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
+            props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
+            consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
+
+            KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
+            consumer.unregisterMetricFromSubscription(existingMetric);
+            Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric);
+        }
+    }
+
+    @ParameterizedTest
+    @EnumSource(GroupProtocol.class)
+    public void testUnsubscribingNonExistingMetricsDoesntCauseError(GroupProtocol groupProtocol) {
+        Properties props = new Properties();
+        props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name());
+        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
+        consumer = newConsumer(props, new StringDeserializer(), new StringDeserializer());
+
+        Map<MetricName, KafkaMetric> customMetrics = customMetrics();
+        // Removing metrics that were never registered should not cause an error
+        customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> consumer.unregisterMetricFromSubscription(metric)));
+    }
+
     @ParameterizedTest
     @EnumSource(GroupProtocol.class)
     public void testMetricsReporterAutoGeneratedClientId(GroupProtocol groupProtocol) {
@@ -916,7 +1030,7 @@ private void initMetadata(MockClient mockClient, Map partitionC
     @ParameterizedTest
     @EnumSource(value = GroupProtocol.class)
     public void testMissingOffsetNoResetPolicy(GroupProtocol groupProtocol) throws
InterruptedException { - SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); @@ -945,7 +1059,7 @@ public void testMissingOffsetNoResetPolicy(GroupProtocol groupProtocol) throws I @ParameterizedTest @EnumSource(GroupProtocol.class) public void testResetToCommittedOffset(GroupProtocol groupProtocol) { - SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); @@ -968,7 +1082,20 @@ public void testResetToCommittedOffset(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(GroupProtocol.class) public void testResetUsingAutoResetPolicy(GroupProtocol groupProtocol) { - SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.LATEST); + setUpConsumerWithAutoResetPolicy(groupProtocol, AutoOffsetResetStrategy.LATEST); + assertEquals(50L, consumer.position(tp0)); + } + + @ParameterizedTest + @EnumSource(GroupProtocol.class) + public void testResetUsingDurationBasedAutoResetPolicy(GroupProtocol groupProtocol) { + AutoOffsetResetStrategy durationStrategy = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); + setUpConsumerWithAutoResetPolicy(groupProtocol, durationStrategy); + assertEquals(50L, consumer.position(tp0)); + } + + private void setUpConsumerWithAutoResetPolicy(GroupProtocol groupProtocol, AutoOffsetResetStrategy strategy) { + SubscriptionState subscription = new SubscriptionState(new LogContext(), strategy); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); @@ -986,14 +1113,12 @@ public void testResetUsingAutoResetPolicy(GroupProtocol groupProtocol) { client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); consumer.poll(Duration.ZERO); - - assertEquals(50L, consumer.position(tp0)); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testOffsetIsValidAfterSeek(GroupProtocol groupProtocol) { - SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.LATEST); + SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.LATEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); @@ -1856,11 +1981,11 @@ public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.commitAsync()); + assertThrows(InvalidGroupIdException.class, consumer::commitAsync); } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.commitSync()); + assertThrows(InvalidGroupIdException.class, consumer::commitSync); } } @@ -1871,8 +1996,8 @@ public void testOperationsByAssigningConsumerWithDefaultGroupId(GroupProtocol gr consumer.assign(singleton(tp0)); assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); - assertThrows(InvalidGroupIdException.class, () -> 
consumer.commitAsync()); - assertThrows(InvalidGroupIdException.class, () -> consumer.commitSync()); + assertThrows(InvalidGroupIdException.class, consumer::commitAsync); + assertThrows(InvalidGroupIdException.class, consumer::commitSync); } } @@ -2170,7 +2295,7 @@ public void testMeasureCommitSyncDurationOnFailure(GroupProtocol groupProtocol) public void testMeasureCommitSyncDuration(GroupProtocol groupProtocol) { Time time = new MockTime(Duration.ofSeconds(1).toMillis()); SubscriptionState subscription = new SubscriptionState(new LogContext(), - OffsetResetStrategy.EARLIEST); + AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 2)); @@ -2216,7 +2341,7 @@ public void testMeasureCommittedDuration(GroupProtocol groupProtocol) { long offset1 = 10000; Time time = new MockTime(Duration.ofSeconds(1).toMillis()); SubscriptionState subscription = new SubscriptionState(new LogContext(), - OffsetResetStrategy.EARLIEST); + AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 2)); @@ -2555,7 +2680,7 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept @ParameterizedTest @EnumSource(GroupProtocol.class) - public void testListOffsetShouldUpdateSubscriptions(GroupProtocol groupProtocol) throws InterruptedException { + public void testListOffsetShouldUpdateSubscriptions(GroupProtocol groupProtocol) { final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); @@ -2951,7 +3076,10 @@ private ConsumerConfig newConsumerConfig(GroupProtocol groupProtocol, configs.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, minBytes); configs.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); - configs.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatIntervalMs); + if (groupProtocol == GroupProtocol.CLASSIC) { + configs.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatIntervalMs); + configs.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs); + } configs.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString()); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); configs.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, fetchSize); @@ -2960,7 +3088,6 @@ private ConsumerConfig newConsumerConfig(GroupProtocol groupProtocol, configs.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs); configs.put(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, retryBackoffMaxMs); configs.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoffMs); - configs.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs); configs.put(ConsumerConfig.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, throwOnStableOffsetNotSupported); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); groupInstanceId.ifPresent(gi -> configs.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, gi)); @@ -3497,10 +3624,32 @@ public void testPollSendsRequestToJoin(GroupProtocol groupProtocol) throws Inter "Expected " + (groupProtocol == GroupProtocol.CLASSIC ? 
"JoinGroup" : "Heartbeat") + " request"); } + @ParameterizedTest + @EnumSource(value = GroupProtocol.class, names = "CLASSIC") + public void testSubscribeToRe2jPatternNotSupportedForClassicConsumer(GroupProtocol groupProtocol) { + KafkaConsumer consumer = newConsumerNoAutoCommit(groupProtocol, time, mock(NetworkClient.class), subscription, + mock(ConsumerMetadata.class)); + assertThrows(UnsupportedOperationException.class, () -> + consumer.subscribe(new SubscriptionPattern("t*"))); + assertThrows(UnsupportedOperationException.class, () -> + consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class))); + } + private boolean requestGenerated(MockClient client, ApiKeys apiKey) { return client.requests().stream().anyMatch(request -> request.requestBuilder().apiKey().equals(apiKey)); } + private Map customMetrics() { + MetricConfig metricConfig = new MetricConfig(); + Object lock = new Object(); + MetricName metricNameOne = new MetricName("metricOne", "stream-metrics", "description for metric one", new HashMap<>()); + MetricName metricNameTwo = new MetricName("metricTwo", "stream-metrics", "description for metric two", new HashMap<>()); + + KafkaMetric streamClientMetricOne = new KafkaMetric(lock, metricNameOne, (Measurable) (m, now) -> 1.0, metricConfig, Time.SYSTEM); + KafkaMetric streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM); + return Map.of(metricNameOne, streamClientMetricOne, metricNameTwo, streamClientMetricTwo); + } + private static final List CLIENT_IDS = new ArrayList<>(); public static class DeserializerForClientId implements Deserializer { @Override diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java index 2742b4d8410a3..ae7f1774bfc31 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java @@ -19,21 +19,33 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.MockClient; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; +import org.apache.kafka.clients.consumer.internals.ShareConsumerImpl; import org.apache.kafka.clients.consumer.internals.SubscriptionState; +import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; 
+import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.internal.stubbing.answers.CallsRealMethods; import java.time.Duration; import java.util.AbstractMap; @@ -45,9 +57,16 @@ import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atMostOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; public class KafkaShareConsumerMetricsTest { private final String topic = "test"; @@ -56,7 +75,7 @@ public class KafkaShareConsumerMetricsTest { new AbstractMap.SimpleEntry<>(topic, topicId)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); private final Time time = new MockTime(); - private final SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final String groupId = "mock-group"; @Test @@ -156,6 +175,7 @@ public void testClosingConsumerUnregistersConsumerMetrics() { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); consumer.subscribe(Collections.singletonList(topic)); assertTrue(consumerMetricPresent(consumer, "last-poll-seconds-ago")); @@ -167,6 +187,110 @@ public void testClosingConsumerUnregistersConsumerMetrics() { assertFalse(consumerMetricPresent(consumer, "time-between-poll-max")); } + @Test + public void testRegisteringCustomMetricsDoesntAffectConsumerMetrics() { + Time time = new MockTime(1L); + ConsumerMetadata metadata = createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + Map customMetrics = customMetrics(); + customMetrics.forEach((name, metric) -> consumer.registerMetricForSubscription(metric)); + + Map consumerMetrics = consumer.metrics(); + customMetrics.forEach((name, metric) -> assertFalse(consumerMetrics.containsKey(name))); + } + + @Test + public void testRegisteringCustomMetricsWithSameNameDoesntAffectConsumerMetrics() { + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(ShareConsumerImpl.class, Level.DEBUG); + Time time = new MockTime(1L); + ConsumerMetadata metadata = createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + KafkaMetric existingMetricToAdd = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.registerMetricForSubscription(existingMetricToAdd); + final String expectedMessage = String.format("Skipping registration for metric %s. 
Existing consumer metrics cannot be overwritten.", existingMetricToAdd.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); + } + } + + @Test + public void testUnregisteringCustomMetricsWithSameNameDoesntAffectConsumerMetrics() { + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(ShareConsumerImpl.class, Level.DEBUG); + Time time = new MockTime(1L); + ConsumerMetadata metadata = createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + KafkaMetric existingMetricToRemove = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.unregisterMetricFromSubscription(existingMetricToRemove); + final String expectedMessage = String.format("Skipping unregistration for metric %s. Existing consumer metrics cannot be removed.", existingMetricToRemove.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); + } + } + + @Test + public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingConsumerMetric() { + try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { + ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); + clientTelemetryReporter.configure(any()); + mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); + + Time time = new MockTime(1L); + ConsumerMetadata metadata = createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + + KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.registerMetricForSubscription(existingMetric); + // This test would fail without the check as the existing metric is registered in the consumer on startup + Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); + } + } + + @Test + public void testShouldNotCallMetricReporterMetricRemovalWithExistingConsumerMetric() { + try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { + ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); + clientTelemetryReporter.configure(any()); + mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); + + Time time = new MockTime(1L); + ConsumerMetadata metadata = createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + + KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); + consumer.unregisterMetricFromSubscription(existingMetric); + Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric); + } + } + + @Test + public void testUnregisteringNonexistingMetricsDoesntCauseError() { + Time time = new MockTime(1L); + ConsumerMetadata metadata = 
createMetadata(subscription); + MockClient client = new MockClient(time, metadata); + initMetadata(client, Collections.singletonMap(topic, 1)); + + KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); + + Map customMetrics = customMetrics(); + // Metrics never registered but removed should not cause an error + customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> consumer.unregisterMetricFromSubscription(metric))); + } + private ConsumerMetadata createMetadata(SubscriptionState subscription) { return new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false, subscription, new LogContext(), new ClusterResourceListeners()); @@ -255,4 +379,15 @@ private void initMetadata(MockClient mockClient, Map partitionC mockClient.updateMetadata(initialMetadata); } + + private Map customMetrics() { + MetricConfig metricConfig = new MetricConfig(); + Object lock = new Object(); + MetricName metricNameOne = new MetricName("metricOne", "stream-metrics", "description for metric one", new HashMap<>()); + MetricName metricNameTwo = new MetricName("metricTwo", "stream-metrics", "description for metric two", new HashMap<>()); + + KafkaMetric streamClientMetricOne = new KafkaMetric(lock, metricNameOne, (Measurable) (m, now) -> 1.0, metricConfig, Time.SYSTEM); + KafkaMetric streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM); + return Map.of(metricNameOne, streamClientMetricOne, metricNameTwo, streamClientMetricTwo); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java index c8b644e718931..21cee3183bc69 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.consumer; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.record.TimestampType; @@ -34,11 +35,12 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class MockConsumerTest { - private final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + private final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); @Test public void testSimpleMock() { @@ -115,6 +117,28 @@ public void endOffsetsShouldBeIdempotent() { assertEquals(11L, (long) consumer.endOffsets(Collections.singleton(partition)).get(partition)); } + @Test + public void testDurationBasedOffsetReset() { + MockConsumer consumer = new MockConsumer<>("by_duration:PT1H"); + consumer.subscribe(Collections.singleton("test")); + consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1))); + HashMap durationBasedOffsets = new HashMap<>(); + durationBasedOffsets.put(new TopicPartition("test", 0), 10L); + durationBasedOffsets.put(new TopicPartition("test", 1), 11L); + consumer.updateDurationOffsets(durationBasedOffsets); + ConsumerRecord rec1 = new ConsumerRecord<>("test", 0, 10L, 0L, TimestampType.CREATE_TIME, + 0, 0, "key1", "value1", new RecordHeaders(), 
Optional.empty()); + ConsumerRecord rec2 = new ConsumerRecord<>("test", 0, 11L, 0L, TimestampType.CREATE_TIME, + 0, 0, "key2", "value2", new RecordHeaders(), Optional.empty()); + consumer.addRecord(rec1); + consumer.addRecord(rec2); + ConsumerRecords records = consumer.poll(Duration.ofMillis(1)); + Iterator> iter = records.iterator(); + assertEquals(rec1, iter.next()); + assertEquals(rec2, iter.next()); + assertFalse(iter.hasNext()); + } + @Test public void testRebalanceListener() { final List revoked = new ArrayList<>(); @@ -162,5 +186,20 @@ public void onPartitionsAssigned(Collection partitions) { assertEquals(1, revoked.size()); assertTrue(revoked.contains(topicPartitionList.get(0))); } + + @Test + public void testRe2JPatternSubscription() { + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((SubscriptionPattern) null)); + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern(""))); + + SubscriptionPattern pattern = new SubscriptionPattern("t.*"); + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(pattern, null)); + + consumer.subscribe(pattern); + assertTrue(consumer.subscription().isEmpty()); + // Check that the subscription to pattern was successfully applied in the mock consumer (using a different + // subscription type should fail) + assertThrows(IllegalStateException.class, () -> consumer.subscribe(List.of("topic1"))); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/RangeAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/RangeAssignorTest.java index 5fb2c1696105a..eb45d0279770d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/RangeAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/RangeAssignorTest.java @@ -103,7 +103,7 @@ public void testOneConsumerNonexistentTopic(boolean hasConsumerRack) { assertTrue(assignment.get(consumer1).isEmpty()); } - @ParameterizedTest(name = "rackConfig = {0}") + @ParameterizedTest(name = "{displayName}.rackConfig = {0}") @EnumSource(RackConfig.class) public void testOneConsumerOneTopic(RackConfig rackConfig) { initializeRacks(rackConfig); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java index f82bb011b8412..969de83c328ff 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.MockClient; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; @@ -60,6 +59,7 @@ import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; @@ -132,7 +132,7 @@ private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int reb LogContext logContext = new LogContext(); this.mockTime = new MockTime(); ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, retryBackoffMaxMs, 60 * 60 * 1000L, - false, false, new 
SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), + false, false, new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST), logContext, new ClusterResourceListeners()); this.mockClient = new MockClient(mockTime, metadata); @@ -156,7 +156,7 @@ false, false, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), groupInstanceId, retryBackoffMs, retryBackoffMaxMs, - !groupInstanceId.isPresent()); + groupInstanceId.isEmpty()); this.coordinator = new DummyCoordinator(rebalanceConfig, consumerClient, metrics, @@ -1435,6 +1435,7 @@ public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exceptio awaitFirstHeartbeat(heartbeatReceived); } + @Tag("flaky") // "KAFKA-18310" @Test public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception { setupCoordinator(); @@ -1471,6 +1472,7 @@ public boolean matches(AbstractRequest body) { awaitFirstHeartbeat(heartbeatReceived); } + @Tag("flaky") // "KAFKA-18310" @Test public void testWakeupAfterSyncGroupReceived() throws Exception { setupCoordinator(); @@ -1504,6 +1506,7 @@ public void testWakeupAfterSyncGroupReceived() throws Exception { awaitFirstHeartbeat(heartbeatReceived); } + @Tag("flaky") // KAFKA-15474 and KAFKA-18310 @Test public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception { setupCoordinator(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java index 31334b7b19c52..ee6df4e65a2e9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java @@ -1426,14 +1426,8 @@ private String getCanonicalName(String str, int i, int maxNum) { } private String pad(int num, int digits) { - StringBuilder sb = new StringBuilder(); int iDigits = Integer.toString(num).length(); - - for (int i = 1; i <= digits - iDigits; ++i) - sb.append("0"); - - sb.append(num); - return sb.toString(); + return "0".repeat(Math.max(0, digits - iDigits)) + num; } protected static List topics(String... topics) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java new file mode 100644 index 0000000000000..3430719b16ee6 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; +import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; +import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; +import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; +import org.apache.kafka.clients.consumer.internals.events.PollEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class ApplicationEventHandlerTest { + private final Time time = new MockTime(); + private final BlockingQueue applicationEventsQueue = new LinkedBlockingQueue<>(); + private final ApplicationEventProcessor applicationEventProcessor = mock(ApplicationEventProcessor.class); + private final NetworkClientDelegate networkClientDelegate = mock(NetworkClientDelegate.class); + private final RequestManagers requestManagers = mock(RequestManagers.class); + private final CompletableEventReaper applicationEventReaper = mock(CompletableEventReaper.class); + + @Test + public void testRecordApplicationEventQueueSize() { + try (Metrics metrics = new Metrics(); + AsyncConsumerMetrics asyncConsumerMetrics = spy(new AsyncConsumerMetrics(metrics)); + ApplicationEventHandler applicationEventHandler = new ApplicationEventHandler( + new LogContext(), + time, + applicationEventsQueue, + applicationEventReaper, + () -> applicationEventProcessor, + () -> networkClientDelegate, + () -> requestManagers, + asyncConsumerMetrics + )) { + // add event + applicationEventHandler.add(new PollEvent(time.milliseconds())); + verify(asyncConsumerMetrics).recordApplicationEventQueueSize(1); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java index 54a41587b06a9..819365e9712f5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java @@ -17,6 +17,8 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata.LeaderAndEpoch; +import org.apache.kafka.clients.MockClient; +import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; @@ -27,8 +29,8 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import 
org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; import org.apache.kafka.clients.consumer.internals.events.AssignmentChangeEvent; @@ -44,35 +46,49 @@ import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.clients.consumer.internals.events.EventProcessor; import org.apache.kafka.clients.consumer.internals.events.FetchCommittedOffsetsEvent; +import org.apache.kafka.clients.consumer.internals.events.LeaveGroupOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.ListOffsetsEvent; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.events.ResetOffsetEvent; import org.apache.kafka.clients.consumer.internals.events.SeekUnvalidatedEvent; import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.TopicPatternSubscriptionChangeEvent; +import org.apache.kafka.clients.consumer.internals.events.TopicRe2JPatternSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.TopicSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent; +import org.apache.kafka.clients.consumer.internals.events.UpdatePatternSubscriptionEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; +import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.ConsumerGroupHeartbeatResponse; +import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.ListOffsetsRequest; +import org.apache.kafka.common.requests.MetadataResponse; +import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.test.MockConsumerInterceptor; +import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -80,6 +96,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; import 
org.mockito.MockedStatic; @@ -117,6 +134,7 @@ import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_ASSIGNED; import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_LOST; import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_REVOKED; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; @@ -134,6 +152,7 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -194,7 +213,7 @@ private AsyncKafkaConsumer newConsumer(Properties props) { new StringDeserializer(), new StringDeserializer(), time, - (a, b, c, d, e, f, g) -> applicationEventHandler, + (a, b, c, d, e, f, g, h) -> applicationEventHandler, a -> backgroundEventReaper, (a, b, c, d, e, f, g) -> fetchCollector, (a, b, c, d) -> metadata, @@ -208,7 +227,7 @@ private AsyncKafkaConsumer newConsumer(ConsumerConfig config) { new StringDeserializer(), new StringDeserializer(), time, - (a, b, c, d, e, f, g) -> applicationEventHandler, + (a, b, c, d, e, f, g, h) -> applicationEventHandler, a -> backgroundEventReaper, (a, b, c, d, e, f, g) -> fetchCollector, (a, b, c, d) -> metadata, @@ -263,23 +282,6 @@ public void testFailOnClosedConsumer() { assertEquals("This consumer has already been closed.", res.getMessage()); } - @Test - public void testUnsubscribeWithInvalidTopicException() { - consumer = newConsumer(); - backgroundEventQueue.add(new ErrorEvent(new InvalidTopicException("Invalid topic name"))); - completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.unsubscribe()); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testCloseWithInvalidTopicException() { - consumer = newConsumer(); - backgroundEventQueue.add(new ErrorEvent(new InvalidTopicException("Invalid topic name"))); - completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.close()); - } - @Test public void testCommitAsyncWithNullCallback() { consumer = newConsumer(); @@ -358,7 +360,7 @@ public void testCommitted() { assertEquals(topicPartitionOffsets, consumer.committed(topicPartitionOffsets.keySet(), Duration.ofMillis(1000))); verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(FetchCommittedOffsetsEvent.class)); final Metric metric = consumer.metrics() - .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); + .get(consumer.metricsRegistry().metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP)); assertTrue((double) metric.metricValue() > 0); } @@ -636,12 +638,13 @@ public void testVerifyApplicationEventOnShutdown() { completeUnsubscribeApplicationEventSuccessfully(); doReturn(null).when(applicationEventHandler).addAndGet(any()); consumer.close(); - verify(applicationEventHandler).add(any(UnsubscribeEvent.class)); verify(applicationEventHandler).add(any(CommitOnCloseEvent.class)); + 
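// Note on the close() assertions here and in the tests that follow: with this patch the async consumer's
// shutdown path no longer issues an UnsubscribeEvent. close() enqueues a CommitOnCloseEvent and then blocks
// on a LeaveGroupOnCloseEvent, and the new testCloseLeavesGroup* cases verify that the leave-group event is
// still sent for a zero-duration close, when a rebalance callback raises an error, and when the calling
// thread is interrupted.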
verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); } - @Test - public void testUnsubscribeOnClose() { + @ParameterizedTest + @ValueSource(longs = {0, ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS}) + public void testCloseLeavesGroup(long timeoutMs) { SubscriptionState subscriptions = mock(SubscriptionState.class); consumer = spy(newConsumer( mock(FetchBuffer.class), @@ -651,34 +654,69 @@ public void testUnsubscribeOnClose() { "group-id", "client-id", false)); - completeUnsubscribeApplicationEventSuccessfully(); - consumer.close(Duration.ZERO); - verifyUnsubscribeEvent(subscriptions); + consumer.close(Duration.ofMillis(timeoutMs)); + verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); } @Test - public void testFailedPartitionRevocationOnClose() { + public void testCloseLeavesGroupDespiteOnPartitionsLostError() { // If rebalance listener failed to execute during close, we still send the leave group, // and proceed with closing the consumer. + Throwable rootError = new KafkaException("Intentional error"); + Set partitions = singleton(new TopicPartition("topic1", 0)); SubscriptionState subscriptions = mock(SubscriptionState.class); + when(subscriptions.assignedPartitions()).thenReturn(partitions); + ConsumerRebalanceListenerInvoker invoker = mock(ConsumerRebalanceListenerInvoker.class); + doAnswer(invocation -> rootError).when(invoker).invokePartitionsLost(any(SortedSet.class)); + consumer = spy(newConsumer( mock(FetchBuffer.class), new ConsumerInterceptors<>(Collections.emptyList()), + invoker, + subscriptions, + "group-id", + "client-id", + false)); + consumer.setGroupAssignmentSnapshot(partitions); + + Throwable t = assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); + assertNotNull(t.getCause()); + assertEquals(rootError, t.getCause()); + + verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); + } + + @ParameterizedTest + @ValueSource(longs = {0, ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS}) + public void testCloseLeavesGroupDespiteInterrupt(long timeoutMs) { + Set partitions = singleton(new TopicPartition("topic1", 0)); + SubscriptionState subscriptions = mock(SubscriptionState.class); + when(subscriptions.assignedPartitions()).thenReturn(partitions); + when(applicationEventHandler.addAndGet(any(CompletableApplicationEvent.class))).thenThrow(InterruptException.class); + consumer = spy(newConsumer( + mock(FetchBuffer.class), + mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, "group-id", "client-id", false)); - doThrow(new KafkaException()).when(consumer).processBackgroundEvents(any(), any(), any()); - assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); - verifyUnsubscribeEvent(subscriptions); - // Close operation should carry on even if the unsubscribe fails - verify(applicationEventHandler).close(any(Duration.class)); + + Duration timeout = Duration.ofMillis(timeoutMs); + + try { + assertThrows(InterruptException.class, () -> consumer.close(timeout)); + } finally { + Thread.interrupted(); + } + + verify(applicationEventHandler).add(any(CommitOnCloseEvent.class)); + verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); } @Test public void testCommitSyncAllConsumed() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer( 
mock(FetchBuffer.class), mock(ConsumerInterceptors.class), @@ -702,7 +740,7 @@ public void testCommitSyncAllConsumed() { @Test public void testAutoCommitSyncDisabled() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer( mock(FetchBuffer.class), mock(ConsumerInterceptors.class), @@ -1476,18 +1514,6 @@ public void testGroupRemoteAssignorUsedInConsumerProtocol() { assertFalse(config.unused().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); } - @Test - public void testPartitionAssignmentStrategyUnusedInAsyncConsumer() { - final Properties props = requiredConsumerConfig(); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroup1"); - props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name().toLowerCase(Locale.ROOT)); - props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "CooperativeStickyAssignor"); - final ConsumerConfig config = new ConsumerConfig(props); - consumer = newConsumer(config); - - assertTrue(config.unused().contains(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG)); - } - @Test public void testGroupIdNull() { final Properties props = requiredConsumerConfig(); @@ -1514,7 +1540,7 @@ public void testGroupIdNotNullAndValid() { @Test public void testEnsurePollEventSentOnConsumerPoll() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer( mock(FetchBuffer.class), new ConsumerInterceptors<>(Collections.emptyList()), @@ -1577,9 +1603,11 @@ public void testLongPollWaitIsLimited() { final OffsetAndMetadata nextOffsetAndMetadata = new OffsetAndMetadata(4, Optional.of(0), ""); // On the first iteration, return no data; on the second, return two records + Set partitions = singleton(tp); doAnswer(invocation -> { // Mock the subscription being assigned as the first fetch is collected - consumer.subscriptions().assignFromSubscribed(Collections.singleton(tp)); + consumer.subscriptions().assignFromSubscribed(partitions); + consumer.setGroupAssignmentSnapshot(partitions); return Fetch.empty(); }).doAnswer(invocation -> Fetch.forPartition(tp, records, true, nextOffsetAndMetadata) @@ -1593,7 +1621,7 @@ public void testLongPollWaitIsLimited() { assertEquals(Optional.of(0), returnedRecords.nextOffsets().get(tp).leaderEpoch()); assertEquals(singleton(topicName), consumer.subscription()); - assertEquals(singleton(tp), consumer.assignment()); + assertEquals(partitions, consumer.assignment()); } /** @@ -1741,7 +1769,7 @@ public void testSeekToBeginning() { CompletableApplicationEvent event = addAndGetLastEnqueuedEvent(); ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event); assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions())); - assertEquals(OffsetResetStrategy.EARLIEST, resetOffsetEvent.offsetResetStrategy()); + assertEquals(AutoOffsetResetStrategy.EARLIEST, resetOffsetEvent.offsetResetStrategy()); } @Test @@ -1768,19 +1796,150 @@ public void testSeekToEnd() { CompletableApplicationEvent event = addAndGetLastEnqueuedEvent(); ResetOffsetEvent resetOffsetEvent = assertInstanceOf(ResetOffsetEvent.class, event); assertEquals(topics, new HashSet<>(resetOffsetEvent.topicPartitions())); - assertEquals(OffsetResetStrategy.LATEST, 
resetOffsetEvent.offsetResetStrategy()); + assertEquals(AutoOffsetResetStrategy.LATEST, resetOffsetEvent.offsetResetStrategy()); } - private void verifyUnsubscribeEvent(SubscriptionState subscriptions) { - // Check that an unsubscribe event was generated, and that the consumer waited for it to - // complete processing background events. - verify(applicationEventHandler).add(any(UnsubscribeEvent.class)); - verify(consumer).processBackgroundEvents(any(), any(), any()); + @Test + public void testUpdatePatternSubscriptionEventGeneratedOnlyIfPatternUsed() { + consumer = newConsumer(); + doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class)); + when(applicationEventHandler.addAndGet(any(CheckAndUpdatePositionsEvent.class))).thenReturn(true); + doReturn(LeaderAndEpoch.noLeaderOrEpoch()).when(metadata).currentLeader(any()); + completeAssignmentChangeEventSuccessfully(); + completeTopicPatternSubscriptionChangeEventSuccessfully(); + completeUnsubscribeApplicationEventSuccessfully(); + + consumer.assign(singleton(new TopicPartition("topic1", 0))); + consumer.poll(Duration.ZERO); + verify(applicationEventHandler, never()).addAndGet(any(UpdatePatternSubscriptionEvent.class)); - // The consumer should not clear the assignment in the app thread. The unsubscribe - // event is the one responsible for updating the assignment in the background when it - // completes. - verify(subscriptions, never()).assignFromSubscribed(any()); + consumer.unsubscribe(); + + consumer.subscribe(Pattern.compile("t*")); + consumer.poll(Duration.ZERO); + verify(applicationEventHandler).addAndGet(any(UpdatePatternSubscriptionEvent.class)); + } + + @Test + public void testSubscribeToRe2JPatternValidation() { + consumer = newConsumer(); + + Throwable t = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe((SubscriptionPattern) null)); + assertEquals("Topic pattern to subscribe to cannot be null", t.getMessage()); + + t = assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern(""))); + assertEquals("Topic pattern to subscribe to cannot be empty", t.getMessage()); + + assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*"))); + + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"), null)); + assertDoesNotThrow(() -> consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class))); + } + + @Test + public void testSubscribeToRe2JPatternThrowsIfNoGroupId() { + consumer = newConsumer(requiredConsumerConfig()); + assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"))); + assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(new SubscriptionPattern("t*"), + mock(ConsumerRebalanceListener.class))); + } + + @Test + public void testSubscribeToRe2JPatternGeneratesEvent() { + consumer = newConsumer(); + completeTopicRe2JPatternSubscriptionChangeEventSuccessfully(); + + consumer.subscribe(new SubscriptionPattern("t*")); + verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class)); + + clearInvocations(applicationEventHandler); + consumer.subscribe(new SubscriptionPattern("t*"), mock(ConsumerRebalanceListener.class)); + verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class)); + } + + // SubscriptionPattern is supported as of ConsumerGroupHeartbeatRequest v1. 
Clients using subscribe + // (SubscribePattern) against older broker versions should get UnsupportedVersionException on poll after subscribe + @Test + public void testSubscribePatternAgainstBrokerNotSupportingRegex() throws InterruptedException { + final Properties props = requiredConsumerConfig(); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id"); + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + + final ConsumerConfig config = new ConsumerConfig(props); + + ConsumerMetadata metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false, + mock(SubscriptionState.class), new LogContext(), new ClusterResourceListeners()); + MockClient client = new MockClient(time, metadata); + MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(1, Map.of("topic1", 2), + Map.of("topic1", Uuid.randomUuid())); + client.updateMetadata(initialMetadata); + // ConsumerGroupHeartbeat v0 does not support broker-side regex resolution + client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.CONSUMER_GROUP_HEARTBEAT.id, (short) 0, (short) 0)); + + // Mock response to find coordinator + Node node = metadata.fetch().nodes().get(0); + client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, "group-id", node), node); + + // Mock HB response (needed so that the MockClient builds the request) + ConsumerGroupHeartbeatResponse result = + new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData() + .setMemberId("") + .setMemberEpoch(0)); + Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); + client.prepareResponseFrom(result, coordinator); + + SubscriptionState subscriptionState = mock(SubscriptionState.class); + + consumer = new AsyncKafkaConsumer<>( + new LogContext(), + time, + config, + new StringDeserializer(), + new StringDeserializer(), + client, + subscriptionState, + metadata + ); + completeTopicRe2JPatternSubscriptionChangeEventSuccessfully(); + + SubscriptionPattern pattern = new SubscriptionPattern("t*"); + consumer.subscribe(pattern); + when(subscriptionState.subscriptionPattern()).thenReturn(pattern); + TestUtils.waitForCondition(() -> { + try { + // The request is generated in the background thread so allow for that + // async operation to happen to detect the failure. 
+ consumer.poll(Duration.ZERO); + return false; + } catch (UnsupportedVersionException e) { + return true; + } + }, "Consumer did not throw the expected UnsupportedVersionException on poll"); + } + + @Test + public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() { + consumer = newConsumer( + mock(FetchBuffer.class), + mock(ConsumerInterceptors.class), + mock(ConsumerRebalanceListenerInvoker.class), + mock(SubscriptionState.class), + "group-id", + "client-id", + false); + Metrics metrics = consumer.metricsRegistry(); + AsyncConsumerMetrics kafkaConsumerMetrics = consumer.kafkaConsumerMetrics(); + + ConsumerRebalanceListenerCallbackNeededEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, Collections.emptySortedSet()); + event.setEnqueuedMs(time.milliseconds()); + backgroundEventQueue.add(event); + kafkaConsumerMetrics.recordBackgroundEventQueueSize(1); + + time.sleep(10); + consumer.processBackgroundEvents(); + assertEquals(0, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP)).metricValue()); + assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP)).metricValue()); + assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP)).metricValue()); } private Map mockTopicPartitionOffset() { @@ -1867,6 +2026,7 @@ private void completeFetchedCommittedOffsetApplicationEventExceptionally(Excepti private void completeUnsubscribeApplicationEventSuccessfully() { doAnswer(invocation -> { UnsubscribeEvent event = invocation.getArgument(0); + consumer.subscriptions().unsubscribe(); event.future().complete(null); return null; }).when(applicationEventHandler).add(ArgumentMatchers.isA(UnsubscribeEvent.class)); @@ -1875,7 +2035,8 @@ private void completeUnsubscribeApplicationEventSuccessfully() { private void completeAssignmentChangeEventSuccessfully() { doAnswer(invocation -> { AssignmentChangeEvent event = invocation.getArgument(0); - consumer.subscriptions().assignFromUser(new HashSet<>(event.partitions())); + HashSet partitions = new HashSet<>(event.partitions()); + consumer.subscriptions().assignFromUser(partitions); event.future().complete(null); return null; }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(AssignmentChangeEvent.class)); @@ -1899,6 +2060,15 @@ private void completeTopicPatternSubscriptionChangeEventSuccessfully() { }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicPatternSubscriptionChangeEvent.class)); } + private void completeTopicRe2JPatternSubscriptionChangeEventSuccessfully() { + doAnswer(invocation -> { + TopicRe2JPatternSubscriptionChangeEvent event = invocation.getArgument(0); + consumer.subscriptions().subscribe(event.pattern(), event.listener()); + event.future().complete(null); + return null; + }).when(applicationEventHandler).addAndGet(ArgumentMatchers.isA(TopicRe2JPatternSubscriptionChangeEvent.class)); + } + private void completeSeekUnvalidatedEventSuccessfully() { doAnswer(invocation -> { SeekUnvalidatedEvent event = invocation.getArgument(0); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategyTest.java new file mode 100644 index 0000000000000..25ff9073747e8 --- /dev/null +++ 
b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategyTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.requests.ListOffsetsRequest; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.time.Instant; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AutoOffsetResetStrategyTest { + + @Test + public void testFromString() { + assertEquals(AutoOffsetResetStrategy.EARLIEST, AutoOffsetResetStrategy.fromString("earliest")); + assertEquals(AutoOffsetResetStrategy.LATEST, AutoOffsetResetStrategy.fromString("latest")); + assertEquals(AutoOffsetResetStrategy.NONE, AutoOffsetResetStrategy.fromString("none")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("invalid")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration:invalid")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration:-PT1H")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration:")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("LATEST")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("EARLIEST")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("NONE")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("")); + assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString(null)); + + AutoOffsetResetStrategy strategy = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); + assertEquals("by_duration", strategy.name()); + } + + @Test + public void testValidator() { + AutoOffsetResetStrategy.Validator validator = new AutoOffsetResetStrategy.Validator(); + assertDoesNotThrow(() -> validator.ensureValid("test", "earliest")); + assertDoesNotThrow(() -> validator.ensureValid("test", "latest")); + assertDoesNotThrow(() -> validator.ensureValid("test", "none")); + assertDoesNotThrow(() -> validator.ensureValid("test", 
"by_duration:PT1H")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "invalid")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:invalid")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:-PT1H")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "LATEST")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "EARLIEST")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "NONE")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", null)); + } + + @Test + public void testEqualsAndHashCode() { + AutoOffsetResetStrategy earliest1 = AutoOffsetResetStrategy.fromString("earliest"); + AutoOffsetResetStrategy earliest2 = AutoOffsetResetStrategy.fromString("earliest"); + AutoOffsetResetStrategy latest1 = AutoOffsetResetStrategy.fromString("latest"); + + AutoOffsetResetStrategy duration1 = AutoOffsetResetStrategy.fromString("by_duration:P2D"); + AutoOffsetResetStrategy duration2 = AutoOffsetResetStrategy.fromString("by_duration:P2D"); + + assertEquals(earliest1, earliest2); + assertNotEquals(earliest1, latest1); + assertEquals(earliest1.hashCode(), earliest2.hashCode()); + assertNotEquals(earliest1.hashCode(), latest1.hashCode()); + + assertNotEquals(latest1, duration2); + assertEquals(duration1, duration2); + } + + @Test + public void testTimestamp() { + AutoOffsetResetStrategy earliest1 = AutoOffsetResetStrategy.fromString("earliest"); + AutoOffsetResetStrategy earliest2 = AutoOffsetResetStrategy.fromString("earliest"); + assertEquals(Optional.of(ListOffsetsRequest.EARLIEST_TIMESTAMP), earliest1.timestamp()); + assertEquals(earliest1, earliest2); + + AutoOffsetResetStrategy latest1 = AutoOffsetResetStrategy.fromString("latest"); + AutoOffsetResetStrategy latest2 = AutoOffsetResetStrategy.fromString("latest"); + assertEquals(Optional.of(ListOffsetsRequest.LATEST_TIMESTAMP), latest1.timestamp()); + assertEquals(latest1, latest2); + + AutoOffsetResetStrategy none1 = AutoOffsetResetStrategy.fromString("none"); + AutoOffsetResetStrategy none2 = AutoOffsetResetStrategy.fromString("none"); + assertFalse(none1.timestamp().isPresent()); + assertEquals(none1, none2); + + AutoOffsetResetStrategy byDuration1 = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); + Optional timestamp = byDuration1.timestamp(); + assertTrue(timestamp.isPresent()); + assertTrue(timestamp.get() <= Instant.now().toEpochMilli() - Duration.ofHours(1).toMillis()); + + AutoOffsetResetStrategy byDuration2 = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); + AutoOffsetResetStrategy byDuration3 = AutoOffsetResetStrategy.fromString("by_duration:PT2H"); + + assertEquals(byDuration1, byDuration2); + assertNotEquals(byDuration1, byDuration3); + } +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java new file mode 100644 index 0000000000000..63269b6f5542d --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java @@ -0,0 +1,65 @@ +/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; +import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; +import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.MockTime; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics.BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class BackgroundEventHandlerTest { + private final BlockingQueue backgroundEventsQueue = new LinkedBlockingQueue<>(); + + @Test + public void testRecordBackgroundEventQueueSize() { + try (Metrics metrics = new Metrics(); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics)) { + BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( + backgroundEventsQueue, + new MockTime(0), + asyncConsumerMetrics); + // add event + backgroundEventHandler.add(new ErrorEvent(new Throwable())); + assertEquals( + 1, + (double) metrics.metric( + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) + ).metricValue() + ); + + // drain event + backgroundEventHandler.drainEvents(); + assertEquals( + 0, + (double) metrics.metric( + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) + ).metricValue() + ); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 0ecd99afbd443..252d5a7ccbd08 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -20,14 +20,19 @@ import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import 
org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.GroupAuthorizationException; +import org.apache.kafka.common.errors.InvalidCommitOffsetSizeException; +import org.apache.kafka.common.errors.OffsetMetadataTooLarge; import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.errors.StaleMemberEpochException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.message.OffsetFetchRequestData; @@ -119,7 +124,7 @@ public class CommitRequestManagerTest { public void setup() { this.logContext = new LogContext(); this.time = new MockTime(0); - this.subscriptionState = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + this.subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); this.metadata = mock(ConsumerMetadata.class); this.coordinatorRequestManager = mock(CoordinatorRequestManager.class); this.offsetCommitCallbackInvoker = mock(OffsetCommitCallbackInvoker.class); @@ -779,7 +784,8 @@ public void testOffsetFetchRequestErroredRequests(final Errors error) { @ParameterizedTest @MethodSource("offsetFetchExceptionSupplier") - public void testOffsetFetchRequestTimeoutRequests(final Errors error) { + public void testOffsetFetchRequestTimeoutRequests(final Errors error, + final Class expectedExceptionClass) { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -800,10 +806,10 @@ public void testOffsetFetchRequestTimeoutRequests(final Errors error) { assertFalse(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); NetworkClientDelegate.PollResult poll = commitRequestManager.poll(time.milliseconds()); mimicResponse(error, poll); - futures.forEach(f -> assertFutureThrows(f, TimeoutException.class)); + futures.forEach(f -> assertFutureThrows(f, expectedExceptionClass)); assertTrue(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); } else { - futures.forEach(f -> assertFutureThrows(f, KafkaException.class)); + futures.forEach(f -> assertFutureThrows(f, expectedExceptionClass)); assertEmptyPendingRequests(commitRequestManager); } } @@ -966,7 +972,9 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { */ @ParameterizedTest @MethodSource("offsetCommitExceptionSupplier") - public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires(final Errors error) { + public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires( + final Errors error, + final Class expectedExceptionClass) { CommitRequestManager commitRequestManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -986,10 +994,7 @@ public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExp assertEquals(0, res.unsentRequests.size()); assertTrue(commitResult.isDone()); - if (error.exception() instanceof RetriableException) - assertFutureThrows(commitResult, TimeoutException.class); - else - assertFutureThrows(commitResult, KafkaException.class); + assertFutureThrows(commitResult, expectedExceptionClass); } /** @@ -1384,18 +1389,23 @@ private void testNonRetriable(final List 
offsetCommitExceptionSupplier() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), - Arguments.of(Errors.REQUEST_TIMED_OUT), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), - Arguments.of(Errors.STALE_MEMBER_EPOCH), - Arguments.of(Errors.UNKNOWN_MEMBER_ID)); + // Retriable errors should result in TimeoutException when retry time expires + Arguments.of(Errors.NOT_COORDINATOR, TimeoutException.class), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, TimeoutException.class), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, TimeoutException.class), + Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), + + // Non-retriable errors should result in their specific exceptions + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, OffsetMetadataTooLarge.class), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, InvalidCommitOffsetSizeException.class), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, TopicAuthorizationException.class), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), + Arguments.of(Errors.STALE_MEMBER_EPOCH, CommitFailedException.class), + + // Generic errors should result in KafkaException + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } /** @@ -1403,21 +1413,27 @@ private static Stream offsetCommitExceptionSupplier() { */ private static Stream offsetFetchExceptionSupplier() { return Stream.of( - Arguments.of(Errors.NOT_COORDINATOR), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), - Arguments.of(Errors.UNKNOWN_SERVER_ERROR), - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), - Arguments.of(Errors.REQUEST_TIMED_OUT), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), - Arguments.of(Errors.UNKNOWN_MEMBER_ID), + // Retriable errors should result in TimeoutException when retry time expires + Arguments.of(Errors.NOT_COORDINATOR, TimeoutException.class), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, TimeoutException.class), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, TimeoutException.class), + Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), + Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, TimeoutException.class), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), + + // Non-retriable errors should result in their specific exceptions + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, KafkaException.class), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, KafkaException.class), + + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, KafkaException.class), + Arguments.of(Errors.UNKNOWN_MEMBER_ID, UnknownMemberIdException.class), // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new // member epoch is received. Tested separately. 
- Arguments.of(Errors.STALE_MEMBER_EPOCH), - Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT)); + Arguments.of(Errors.STALE_MEMBER_EPOCH, StaleMemberEpochException.class), + + // Generic errors should result in KafkaException + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); } /** diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java index f7be3a58ffd7d..e12b0121fd493 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; @@ -216,7 +215,7 @@ public void testCorruptedMessage() { private CompletedFetch newCompletedFetch(long fetchOffset, FetchResponseData.PartitionData partitionData) { LogContext logContext = new LogContext(); - SubscriptionState subscriptions = new SubscriptionState(logContext, OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(logContext, AutoOffsetResetStrategy.NONE); FetchMetricsRegistry metricsRegistry = new FetchMetricsRegistry(); FetchMetricsManager metrics = new FetchMetricsManager(new Metrics(), metricsRegistry); FetchMetricsAggregator metricAggregator = new FetchMetricsAggregator(metrics, Collections.singleton(TP)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java index 7a39356a8c930..ba7798cbd5437 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; @@ -197,7 +196,7 @@ public ConsumerCoordinatorTest(final ConsumerPartitionAssignor.RebalanceProtocol @BeforeEach public void setup() { LogContext logContext = new LogContext(); - this.subscriptions = new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST); + this.subscriptions = new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST); this.metadata = new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false, subscriptions, logContext, new ClusterResourceListeners()); this.client = new MockClient(time, metadata); @@ -224,7 +223,7 @@ private GroupRebalanceConfig buildRebalanceConfig(Optional groupInstance groupInstanceId, retryBackoffMs, retryBackoffMaxMs, - !groupInstanceId.isPresent()); + groupInstanceId.isEmpty()); } @AfterEach @@ -678,7 +677,6 @@ public void testCoordinatorUnknownInUnsentCallbacksAfterCoordinatorDead() { .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) .setCommittedMetadata("") 
.setCommittedOffset(13L) - .setCommitTimestamp(0) )) ) ); @@ -3249,13 +3247,13 @@ public void testNoCoordinatorDiscoveryIfPartitionAwaitingReset() { assertTrue(coordinator.coordinatorUnknown()); subscriptions.assignFromUser(singleton(t1p)); - subscriptions.requestOffsetReset(t1p, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(t1p, AutoOffsetResetStrategy.EARLIEST); coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE)); assertEquals(Collections.emptySet(), subscriptions.initializingPartitions()); assertFalse(subscriptions.hasAllFetchPositions()); assertEquals(Collections.singleton(t1p), subscriptions.partitionsNeedingReset(time.milliseconds())); - assertEquals(OffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(t1p)); + assertEquals(AutoOffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(t1p)); assertTrue(coordinator.coordinatorUnknown()); } @@ -4137,7 +4135,7 @@ private static class RackAwareAssignor extends MockPartitionAssignor { @Override public Map> assign(Map partitionsPerTopic, Map subscriptions) { subscriptions.forEach((consumer, subscription) -> { - if (!subscription.rackId().isPresent()) + if (subscription.rackId().isEmpty()) throw new IllegalStateException("Rack id not provided in subscription for " + consumer); rackIds.add(subscription.rackId().get()); }); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java index 6aa9924769a25..1d340e3198d2d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager.HeartbeatRequestState; import org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.LocalAssignment; import org.apache.kafka.clients.consumer.internals.ConsumerHeartbeatRequestManager.HeartbeatState; @@ -27,8 +28,10 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.metrics.Metrics; @@ -52,6 +55,7 @@ import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; import java.util.Arrays; import java.util.Collection; @@ -62,6 +66,8 @@ import java.util.Set; import java.util.SortedSet; +import static org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager.CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG; +import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.REGEX_RESOLUTION_NOT_SUPPORTED_MSG; import static 
org.apache.kafka.common.utils.Utils.mkSortedSet; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -74,6 +80,7 @@ import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -418,6 +425,36 @@ public void testFailureOnFatalException() { verify(backgroundEventHandler).add(any()); } + @Test + public void testHeartbeatResponseErrorNotifiedToGroupManagerAfterErrorPropagated() { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + ClientResponse response = createHeartbeatResponse(result.unsentRequests.get(0), Errors.GROUP_AUTHORIZATION_FAILED); + result.unsentRequests.get(0).handler().onComplete(response); + + // The error should be propagated before notifying the group manager. This ensures that the app thread is aware + // of the HB error before the manager completes any ongoing unsubscribe. + InOrder inOrder = inOrder(backgroundEventHandler, membershipManager); + inOrder.verify(backgroundEventHandler).add(any(ErrorEvent.class)); + inOrder.verify(membershipManager).onHeartbeatFailure(false); + } + + @Test + public void testHeartbeatRequestFailureNotifiedToGroupManagerAfterErrorPropagated() { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + ClientResponse response = createHeartbeatResponse(result.unsentRequests.get(0), Errors.GROUP_AUTHORIZATION_FAILED); + result.unsentRequests.get(0).handler().onFailure(time.milliseconds(), new AuthenticationException("Fatal error in HB")); + + // The error should be propagated before notifying the group manager. This ensures that the app thread is aware + // of the HB error before the manager completes any ongoing unsubscribe. + InOrder inOrder = inOrder(backgroundEventHandler, membershipManager); + inOrder.verify(backgroundEventHandler).add(any(ErrorEvent.class)); + inOrder.verify(membershipManager).onHeartbeatFailure(false); + } + @Test public void testNoCoordinator() { when(coordinatorRequestManager.coordinator()).thenReturn(Optional.empty()); @@ -568,6 +605,61 @@ public void testHeartbeatResponseOnErrorHandling(final Errors error, final boole } } + /** + * This validates the UnsupportedApiVersion the client generates while building a HB if: + * 1. HB API is not supported. + * 2. Required HB API version is not available. 
+ */ + @ParameterizedTest + @ValueSource(strings = {CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG}) + public void testUnsupportedVersionFromBroker(String errorMsg) { + mockResponseWithException(new UnsupportedVersionException(errorMsg), true); + ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); + verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); + ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); + assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); + assertEquals(errorMsg, errorEvent.error().getMessage()); + clearInvocations(backgroundEventHandler); + } + + /** + * This validates the UnsupportedApiVersion the client generates while building a HB if: + * REGEX_RESOLUTION_NOT_SUPPORTED_MSG only generated on the client side. + */ + @ParameterizedTest + @ValueSource(strings = {CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG, REGEX_RESOLUTION_NOT_SUPPORTED_MSG}) + public void testUnsupportedVersionFromClient(String errorMsg) { + mockResponseWithException(new UnsupportedVersionException(errorMsg), false); + ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); + verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); + ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); + assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); + assertEquals(errorMsg, errorEvent.error().getMessage()); + clearInvocations(backgroundEventHandler); + } + + private void mockErrorResponse(Errors error, String exceptionCustomMsg) { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + + when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); + ClientResponse response = createHeartbeatResponse( + result.unsentRequests.get(0), error, exceptionCustomMsg); + result.unsentRequests.get(0).handler().onComplete(response); + } + + private void mockResponseWithException(UnsupportedVersionException exception, boolean isFromBroker) { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + + when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); + ClientResponse response = createHeartbeatResponseWithException( + result.unsentRequests.get(0), exception, isFromBroker); + result.unsentRequests.get(0).handler().onComplete(response); + } + private void assertNextHeartbeatTiming(long expectedTimeToNextHeartbeatMs) { long currentTimeMs = time.milliseconds(); assertEquals(expectedTimeToNextHeartbeatMs, heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs)); @@ -831,6 +923,61 @@ public void testPollOnCloseGeneratesRequestIfNeeded() { "No requests should be generated on close if the member is not leaving when closing the manager"); } + @Test + public void testRegexInHeartbeatLifecycle() { + heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); + createHeartbeatRequestStateWithZeroHeartbeatInterval(); + + // Initial heartbeat with regex + mockJoiningMemberData(null); + when(subscriptions.subscriptionPattern()).thenReturn(new SubscriptionPattern("t1.*")); + ConsumerGroupHeartbeatRequestData data = heartbeatState.buildRequestData(); + assertEquals("t1.*", data.subscribedTopicRegex()); + + // Regex not included in HB if not updated + 
when(membershipManager.state()).thenReturn(MemberState.STABLE); + data = heartbeatState.buildRequestData(); + assertNull(data.subscribedTopicRegex()); + + // Regex included in HB if updated + when(subscriptions.subscriptionPattern()).thenReturn(new SubscriptionPattern("t2.*")); + data = heartbeatState.buildRequestData(); + assertEquals("t2.*", data.subscribedTopicRegex()); + + // Empty regex included in HB to remove pattern subscription + when(subscriptions.subscriptionPattern()).thenReturn(null); + data = heartbeatState.buildRequestData(); + assertEquals("", data.subscribedTopicRegex()); + + // Regex not included in HB after pattern subscription removed + when(subscriptions.subscriptionPattern()).thenReturn(null); + data = heartbeatState.buildRequestData(); + assertNull(data.subscribedTopicRegex()); + } + + @Test + public void testRegexInJoiningHeartbeat() { + heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); + createHeartbeatRequestStateWithZeroHeartbeatInterval(); + + // Initial heartbeat with regex + mockJoiningMemberData(null); + when(subscriptions.subscriptionPattern()).thenReturn(new SubscriptionPattern("t1.*")); + ConsumerGroupHeartbeatRequestData data = heartbeatState.buildRequestData(); + assertEquals("t1.*", data.subscribedTopicRegex()); + + // Members unsubscribes from regex (empty regex included in HB) + when(subscriptions.subscriptionPattern()).thenReturn(null); + data = heartbeatState.buildRequestData(); + assertEquals("", data.subscribedTopicRegex()); + + // Member rejoins (ie. fenced) should not include regex field in HB + when(membershipManager.state()).thenReturn(MemberState.JOINING); + when(subscriptions.subscriptionPattern()).thenReturn(null); + data = heartbeatState.buildRequestData(); + assertNull(data.subscribedTopicRegex()); + } + private void assertHeartbeat(ConsumerHeartbeatRequestManager hrm, int nextPollMs) { NetworkClientDelegate.PollResult pollResult = hrm.poll(time.milliseconds()); assertEquals(1, pollResult.unsentRequests.size()); @@ -880,9 +1027,15 @@ private static Collection errorProvider() { Arguments.of(Errors.GROUP_MAX_SIZE_REACHED, true)); } + private ClientResponse createHeartbeatResponse(NetworkClientDelegate.UnsentRequest request, + Errors error) { + return createHeartbeatResponse(request, error, "stubbed error message"); + } + private ClientResponse createHeartbeatResponse( final NetworkClientDelegate.UnsentRequest request, - final Errors error + final Errors error, + final String msg ) { ConsumerGroupHeartbeatResponseData data = new ConsumerGroupHeartbeatResponseData() .setErrorCode(error.code()) @@ -890,7 +1043,7 @@ private ClientResponse createHeartbeatResponse( .setMemberId(DEFAULT_MEMBER_ID) .setMemberEpoch(DEFAULT_MEMBER_EPOCH); if (error != Errors.NONE) { - data.setErrorMessage("stubbed error message"); + data.setErrorMessage(msg); } ConsumerGroupHeartbeatResponse response = new ConsumerGroupHeartbeatResponse(data); return new ClientResponse( @@ -905,6 +1058,27 @@ private ClientResponse createHeartbeatResponse( response); } + private ClientResponse createHeartbeatResponseWithException( + final NetworkClientDelegate.UnsentRequest request, + final UnsupportedVersionException exception, + final boolean isFromBroker + ) { + ConsumerGroupHeartbeatResponse response = null; + if (isFromBroker) { + response = new ConsumerGroupHeartbeatResponse(null); + } + return new ClientResponse( + new RequestHeader(ApiKeys.CONSUMER_GROUP_HEARTBEAT, ApiKeys.CONSUMER_GROUP_HEARTBEAT.latestVersion(), "client-id", 
1), + request.handler(), + "0", + time.milliseconds(), + time.milliseconds(), + false, + exception, + null, + response); + } + private ConsumerConfig config() { Properties prop = new Properties(); prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -914,7 +1088,6 @@ private ConsumerConfig config() { prop.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS)); prop.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(DEFAULT_RETRY_BACKOFF_MS)); prop.setProperty(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, String.valueOf(DEFAULT_RETRY_BACKOFF_MAX_MS)); - prop.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_HEARTBEAT_INTERVAL_MS)); return new ConsumerConfig(prop); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java index 7faf4cc55c337..d42d81d7ce427 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java @@ -21,6 +21,7 @@ import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackNeededEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.ConsumerRebalanceMetricsManager; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceCallbackMetricsManager; import org.apache.kafka.common.KafkaException; @@ -28,6 +29,8 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData.Assignment; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData.TopicPartitions; @@ -45,6 +48,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.InOrder; import java.util.ArrayList; import java.util.Arrays; @@ -86,6 +90,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -93,6 +98,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +@SuppressWarnings("ClassDataAbstractionCoupling") public class ConsumerMembershipManagerTest { private static final String GROUP_ID = "test-group"; @@ -115,8 +121,8 @@ public void setup() { subscriptionState = mock(SubscriptionState.class); commitRequestManager = mock(CommitRequestManager.class); backgroundEventQueue = new LinkedBlockingQueue<>(); - backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); time = new MockTime(0); + 
backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue, time, mock(AsyncConsumerMetrics.class)); metrics = new Metrics(time); rebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics); @@ -1433,6 +1439,7 @@ public void testReconcilePartitionsRevokedNoAutoCommitNoCallbacks() { membershipManager.poll(time.milliseconds()); testRevocationOfAllPartitionsCompleted(membershipManager); + verify(subscriptionState, times(2)).markPendingRevocation(Set.of(new TopicPartition("topic1", 0))); } @Test @@ -1456,6 +1463,10 @@ public void testReconcilePartitionsRevokedWithSuccessfulAutoCommitNoCallbacks() // Complete commit request commitResult.complete(null); + InOrder inOrder = inOrder(subscriptionState, commitRequestManager); + inOrder.verify(subscriptionState).markPendingRevocation(Set.of(new TopicPartition("topic1", 0))); + inOrder.verify(commitRequestManager).maybeAutoCommitSyncBeforeRevocation(anyLong()); + inOrder.verify(subscriptionState).markPendingRevocation(Set.of(new TopicPartition("topic1", 0))); testRevocationOfAllPartitionsCompleted(membershipManager); } @@ -1480,6 +1491,7 @@ public void testReconcilePartitionsRevokedWithFailedAutoCommitCompletesRevocatio // Complete commit request commitResult.completeExceptionally(new KafkaException("Commit request failed with " + "non-retriable error")); + verify(subscriptionState, times(2)).markPendingRevocation(Set.of(new TopicPartition("topic1", 0))); testRevocationOfAllPartitionsCompleted(membershipManager); } @@ -1579,11 +1591,11 @@ public void testRevokePartitionsUsesTopicNamesLocalCacheWhenMetadataNotAvailable mockOwnedPartitionAndAssignmentReceived(membershipManager, topicId, topicName, Collections.emptyList()); // Member received assignment to reconcile; - receiveAssignment(topicId, Arrays.asList(0, 1), membershipManager); verifyReconciliationNotTriggered(membershipManager); membershipManager.poll(time.milliseconds()); + verify(subscriptionState).markPendingRevocation(Set.of()); // Member should complete reconciliation assertEquals(MemberState.ACKNOWLEDGING, membershipManager.state()); @@ -1607,6 +1619,7 @@ public void testRevokePartitionsUsesTopicNamesLocalCacheWhenMetadataNotAvailable receiveAssignment(topicId, Collections.singletonList(1), membershipManager); membershipManager.poll(time.milliseconds()); + verify(subscriptionState, times(2)).markPendingRevocation(Set.of(new TopicPartition(topicName, 0))); // Revocation should complete without requesting any metadata update given that the topic // received in target assignment should exist in local topic name cache. 
@@ -1728,6 +1741,12 @@ public void testListenerCallbacksBasic() { @Test public void testListenerCallbacksThrowsErrorOnPartitionsRevoked() { + testErrorsOnPartitionsRevoked(new WakeupException()); + testErrorsOnPartitionsRevoked(new InterruptException("Intentional onPartitionsRevoked() error")); + testErrorsOnPartitionsRevoked(new IllegalArgumentException("Intentional onPartitionsRevoked() error")); + } + + private void testErrorsOnPartitionsRevoked(RuntimeException error) { // Step 1: set up mocks String topicName = "topic1"; Uuid topicId = Uuid.randomUuid(); @@ -1735,7 +1754,7 @@ public void testListenerCallbacksThrowsErrorOnPartitionsRevoked() { ConsumerMembershipManager membershipManager = createMemberInStableState(); mockOwnedPartition(membershipManager, topicId, topicName); CounterConsumerRebalanceListener listener = new CounterConsumerRebalanceListener( - Optional.of(new IllegalArgumentException("Intentional onPartitionsRevoked() error")), + Optional.ofNullable(error), Optional.empty(), Optional.empty() ); @@ -1782,6 +1801,12 @@ public void testListenerCallbacksThrowsErrorOnPartitionsRevoked() { @Test public void testListenerCallbacksThrowsErrorOnPartitionsAssigned() { + testErrorsOnPartitionsAssigned(new WakeupException()); + testErrorsOnPartitionsAssigned(new InterruptException("Intentional error")); + testErrorsOnPartitionsAssigned(new IllegalArgumentException("Intentional error")); + } + + private void testErrorsOnPartitionsAssigned(RuntimeException error) { // Step 1: set up mocks ConsumerMembershipManager membershipManager = createMemberInStableState(); String topicName = "topic1"; @@ -1789,7 +1814,7 @@ public void testListenerCallbacksThrowsErrorOnPartitionsAssigned() { mockOwnedPartition(membershipManager, topicId, topicName); CounterConsumerRebalanceListener listener = new CounterConsumerRebalanceListener( Optional.empty(), - Optional.of(new IllegalArgumentException("Intentional onPartitionsAssigned() error")), + Optional.ofNullable(error), Optional.empty() ); ConsumerRebalanceListenerInvoker invoker = consumerRebalanceListenerInvoker(); @@ -1869,7 +1894,7 @@ public void testAddedPartitionsTemporarilyDisabledAwaitingOnPartitionsAssignedCa true ); - verify(subscriptionState).enablePartitionsAwaitingCallback(addedPartitions); + verify(subscriptionState).enablePartitionsAwaitingCallback(assignedPartitions); } @Test @@ -1905,12 +1930,14 @@ public void testAddedPartitionsNotEnabledAfterFailedOnPartitionsAssignedCallback @Test public void testOnPartitionsLostNoError() { - testOnPartitionsLost(Optional.empty()); + testOnPartitionsLost(null); } @Test public void testOnPartitionsLostError() { - testOnPartitionsLost(Optional.of(new KafkaException("Intentional error for test"))); + testOnPartitionsLost(new KafkaException("Intentional error for test")); + testOnPartitionsLost(new WakeupException()); + testOnPartitionsLost(new InterruptException("Intentional error for test")); } private void assertLeaveGroupDueToExpiredPollAndTransitionToStale(ConsumerMembershipManager membershipManager) { @@ -2044,7 +2071,7 @@ private void mockPartitionOwnedAndNewPartitionAdded(String topicName, receiveAssignment(topicId, Arrays.asList(partitionOwned, partitionAdded), membershipManager); } - private void testOnPartitionsLost(Optional lostError) { + private void testOnPartitionsLost(RuntimeException lostError) { // Step 1: set up mocks ConsumerMembershipManager membershipManager = createMemberInStableState(); String topicName = "topic1"; @@ -2053,7 +2080,7 @@ private void testOnPartitionsLost(Optional 
lostError) { CounterConsumerRebalanceListener listener = new CounterConsumerRebalanceListener( Optional.empty(), Optional.empty(), - lostError + Optional.ofNullable(lostError) ); ConsumerRebalanceListenerInvoker invoker = consumerRebalanceListenerInvoker(); @@ -2551,7 +2578,6 @@ private void testRevocationCompleted(ConsumerMembershipManager membershipManager assertEquals(assignmentByTopicId, membershipManager.currentAssignment().partitions); assertFalse(membershipManager.reconciliationInProgress()); - verify(subscriptionState).markPendingRevocation(anySet()); List expectedTopicPartitionAssignment = buildTopicPartitions(expectedCurrentAssignment); HashSet expectedSet = new HashSet<>(expectedTopicPartitionAssignment); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java index 1f8492f8e5934..949bdc9aa727d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.Node; @@ -65,7 +64,7 @@ public class ConsumerMetadataTest { private final Node node = new Node(1, "localhost", 9092); - private final SubscriptionState subscription = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final Time time = new MockTime(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java index a21001d510236..b5ab39e62c720 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java @@ -41,10 +41,12 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.time.Duration; import java.util.Collections; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -264,30 +266,45 @@ public void testMetadataFailurePropagated() { assertEquals(metadataException, exc); } + @Disabled("KAFKA-17554") @Test public void testFutureCompletionOutsidePoll() throws Exception { // Tests the scenario in which the request that is being awaited in one thread // is received and completed in another thread. 
+ + final CountDownLatch t1ThreadCountDownLatch = new CountDownLatch(1); + final CountDownLatch t2ThreadCountDownLatch = new CountDownLatch(2); final RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); consumerClient.pollNoWakeup(); // dequeue and send the request client.enableBlockingUntilWakeup(2); - Thread t1 = new Thread(() -> consumerClient.pollNoWakeup()); + Thread t1 = new Thread(() -> { + t1ThreadCountDownLatch.countDown(); + consumerClient.pollNoWakeup(); + t2ThreadCountDownLatch.countDown(); + }); + t1.start(); - // Sleep a little so that t1 is blocking in poll - Thread.sleep(50); - - Thread t2 = new Thread(() -> consumerClient.poll(future)); + Thread t2 = new Thread(() -> { + try { + t2ThreadCountDownLatch.await(); + consumerClient.poll(future); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); t2.start(); - - // Sleep a little so that t2 is awaiting the network client lock - Thread.sleep(50); - + // Simulate a network response and return from the poll in t1 client.respond(heartbeatResponse(Errors.NONE)); + // Wait for t1 to block in poll + t1ThreadCountDownLatch.await(); + client.wakeup(); + // while t1 is blocked in poll, t2 should be able to complete the future + t2ThreadCountDownLatch.countDown(); // Both threads should complete since t1 should wakeup t2 t1.join(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 0e85fe2d79950..520279fc8d454 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -19,6 +19,9 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; +import org.apache.kafka.clients.consumer.internals.events.PollEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; +import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -40,6 +43,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -53,7 +57,7 @@ public class ConsumerNetworkThreadTest { private final Time time; - private final BlockingQueue<ApplicationEvent> applicationEventsQueue; + private final BlockingQueue<ApplicationEvent> applicationEventQueue; private final ApplicationEventProcessor applicationEventProcessor; private final OffsetsRequestManager offsetsRequestManager; private final ConsumerHeartbeatRequestManager heartbeatRequestManager; @@ -62,6 +66,7 @@ public class ConsumerNetworkThreadTest { private final NetworkClientDelegate networkClientDelegate; private final RequestManagers requestManagers; private final CompletableEventReaper applicationEventReaper; + private final AsyncConsumerMetrics asyncConsumerMetrics; ConsumerNetworkThreadTest() { this.networkClientDelegate =
mock(NetworkClientDelegate.class); @@ -72,17 +77,19 @@ public class ConsumerNetworkThreadTest { this.applicationEventProcessor = mock(ApplicationEventProcessor.class); this.applicationEventReaper = mock(CompletableEventReaper.class); this.time = new MockTime(); - this.applicationEventsQueue = new LinkedBlockingQueue<>(); + this.applicationEventQueue = new LinkedBlockingQueue<>(); + this.asyncConsumerMetrics = mock(AsyncConsumerMetrics.class); LogContext logContext = new LogContext(); this.consumerNetworkThread = new ConsumerNetworkThread( logContext, time, - applicationEventsQueue, + applicationEventQueue, applicationEventReaper, () -> applicationEventProcessor, () -> networkClientDelegate, - () -> requestManagers + () -> requestManagers, + asyncConsumerMetrics ); } @@ -183,14 +190,18 @@ public void testMaximumTimeToWait() { public void testCleanupInvokesReaper() { LinkedList queue = new LinkedList<>(); when(networkClientDelegate.unsentRequests()).thenReturn(queue); + when(applicationEventReaper.reap(applicationEventQueue)).thenReturn(1L); consumerNetworkThread.cleanup(); - verify(applicationEventReaper).reap(applicationEventsQueue); + verify(applicationEventReaper).reap(applicationEventQueue); + verify(asyncConsumerMetrics).recordApplicationEventExpiredSize(1L); } @Test public void testRunOnceInvokesReaper() { + when(applicationEventReaper.reap(any(Long.class))).thenReturn(1L); consumerNetworkThread.runOnce(); verify(applicationEventReaper).reap(any(Long.class)); + verify(asyncConsumerMetrics).recordApplicationEventExpiredSize(1L); } @Test @@ -199,4 +210,82 @@ public void testSendUnsentRequests() { consumerNetworkThread.cleanup(); verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong()); } + + @Test + public void testRunOnceRecordTimeBetweenNetworkThreadPoll() { + try (Metrics metrics = new Metrics(); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( + new LogContext(), + time, + applicationEventQueue, + applicationEventReaper, + () -> applicationEventProcessor, + () -> networkClientDelegate, + () -> requestManagers, + asyncConsumerMetrics + )) { + consumerNetworkThread.initializeResources(); + + consumerNetworkThread.runOnce(); + time.sleep(10); + consumerNetworkThread.runOnce(); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + } + } + + @Test + public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime() { + try (Metrics metrics = new Metrics(); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( + new LogContext(), + time, + applicationEventQueue, + applicationEventReaper, + () -> applicationEventProcessor, + () -> networkClientDelegate, + () -> requestManagers, + asyncConsumerMetrics + )) { + consumerNetworkThread.initializeResources(); + + PollEvent event = new PollEvent(0); + event.setEnqueuedMs(time.milliseconds()); + applicationEventQueue.add(event); + asyncConsumerMetrics.recordApplicationEventQueueSize(1); + + time.sleep(10); + consumerNetworkThread.runOnce(); + assertEquals( + 0, + (double) metrics.metric( + metrics.metricName("application-event-queue-size", 
CONSUMER_METRIC_GROUP) + ).metricValue() + ); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocolTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocolTest.java index 07808f29806a5..b73576757e86e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocolTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocolTest.java @@ -298,8 +298,8 @@ public void serializeDeserializeConsumerProtocolSubscriptionAllVersions() { if (version >= 1) { assertEquals( Set.of( - new ConsumerProtocolSubscription.TopicPartition().setTopic("foo").setPartitions(Collections.singletonList(0)), - new ConsumerProtocolSubscription.TopicPartition().setTopic("bar").setPartitions(Collections.singletonList(0) + new ConsumerProtocolSubscription.TopicPartition().setTopic("foo").setPartitions(Collections.singletonList(0)), + new ConsumerProtocolSubscription.TopicPartition().setTopic("bar").setPartitions(Collections.singletonList(0) )), Set.copyOf(parsedSubscription.ownedPartitions()) ); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java index c003574a23f82..7e805dc3cd3b6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java @@ -28,16 +28,24 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.RequestHeader; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Collections; +import java.util.List; +import java.util.Objects; import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -75,6 +83,78 @@ public void testSuccessfulResponse() { assertEquals(Collections.emptyList(), pollResult.unsentRequests); } + /** + * This test mimics a client that has been disconnected from the coordinator. When the client remains disconnected + * from the coordinator for 60 seconds, the client will begin to emit a warning log every minute thereafter to + * alert the user about the ongoing disconnect status. The warning log includes the length of time of the ongoing + * disconnect: + * + * + * Consumer has been disconnected from the group coordinator for XXXXXms + * + * + *
+ * + * However, the logic used to calculate the length of the disconnect was not correct. This test exercises the + * disconnect logic, controlling the logging and system time, to ensure the warning message is correct. + * + * @see CoordinatorRequestManager#markCoordinatorUnknown(String, long) + */ + @Test + public void testMarkCoordinatorUnknownLoggingAccuracy() { + long oneMinute = 60000; + + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + // You'd be forgiven for assuming that a warning message would be logged at WARN, but + // markCoordinatorUnknown logs the warning at DEBUG. This is partly for historical parity with the + // ClassicKafkaConsumer. + appender.setClassLogger(CoordinatorRequestManager.class, Level.DEBUG); + CoordinatorRequestManager coordinatorRequestManager = setupCoordinatorManager(GROUP_ID); + assertFalse(coordinatorRequestManager.coordinator().isPresent()); + + // Step 1: mark the coordinator as disconnected right after creation of the CoordinatorRequestManager. + // Because the disconnect occurred immediately, no warning should be logged. + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + assertTrue(millisecondsFromLog(appender).isEmpty()); + + // Step 2: sleep for one minute and mark the coordinator unknown again. Then verify that the warning was + // logged and the reported time is accurate. + time.sleep(oneMinute); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + Optional<Long> firstLogMs = millisecondsFromLog(appender); + assertTrue(firstLogMs.isPresent()); + assertEquals(oneMinute, firstLogMs.get()); + + // Step 3: sleep for *another* minute, mark the coordinator unknown again, and verify the accuracy. + time.sleep(oneMinute); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + Optional<Long> secondLogMs = millisecondsFromLog(appender); + assertTrue(secondLogMs.isPresent()); + assertEquals(oneMinute * 2, secondLogMs.get()); + } + } + + private Optional<Long> millisecondsFromLog(LogCaptureAppender appender) { + Pattern pattern = Pattern.compile("\\s+(?<millis>\\d+)+ms"); + List<Long> milliseconds = appender.getMessages().stream() + .map(pattern::matcher) + .filter(Matcher::find) + .map(matcher -> matcher.group("millis")) + .filter(Objects::nonNull) + .map(millisString -> { + try { + return Long.parseLong(millisString); + } catch (NumberFormatException e) { + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + // Return the most recent log entry that matches the message in markCoordinatorUnknown, if present. + return milliseconds.isEmpty() ?
Optional.empty() : Optional.of(milliseconds.get(milliseconds.size() - 1)); + } + @Test public void testMarkCoordinatorUnknown() { CoordinatorRequestManager coordinatorManager = setupCoordinatorManager(GROUP_ID); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/EagerConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/EagerConsumerCoordinatorTest.java index d005318cb8d21..a4b8e457f3744 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/EagerConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/EagerConsumerCoordinatorTest.java @@ -22,4 +22,6 @@ public class EagerConsumerCoordinatorTest extends ConsumerCoordinatorTest { public EagerConsumerCoordinatorTest() { super(ConsumerPartitionAssignor.RebalanceProtocol.EAGER); } + + // @Flaky("KAFKA-15900") -> testOutdatedCoordinatorAssignment (in super class) } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java index 5960fd28fbf0b..68b9ecb528b65 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java @@ -214,7 +214,7 @@ public void testErrorInInitialize(int recordCount, RuntimeException expectedExce assignAndSeek(topicAPartition0); // Create a FetchCollector that fails on CompletedFetch initialization. - fetchCollector = new FetchCollector(logContext, + fetchCollector = new FetchCollector<>(logContext, metadata, subscriptions, fetchConfig, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java index c7daeb5334358..8dc50b1e66a39 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java @@ -24,6 +24,7 @@ import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -32,10 +33,12 @@ import org.junit.jupiter.api.Test; import java.util.Map; +import java.util.Set; import static org.apache.kafka.clients.consumer.internals.FetchMetricsManager.topicPartitionTags; import static org.apache.kafka.clients.consumer.internals.FetchMetricsManager.topicTags; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class FetchMetricsManagerTest { @@ -43,7 +46,6 @@ public class FetchMetricsManagerTest { private final Time time = new MockTime(1, 0, 0); private static final String TOPIC_NAME = "test"; - private static final TopicPartition TP = new TopicPartition(TOPIC_NAME, 0); private Metrics metrics; private FetchMetricsRegistry metricsRegistry; @@ -115,22 +117,43 @@ public void testBytesFetched() { } @Test + @SuppressWarnings("deprecation") public void testBytesFetchedTopic() { String topicName1 = TOPIC_NAME; - String topicName2 = "another-topic"; - Map tags1 = topicTags(topicName1); - Map tags2 = topicTags(topicName2); + String topicName2 = 
"another.topic"; + Map tags1 = Map.of("topic", topicName1); + Map tags2 = Map.of("topic", topicName2); + Map deprecatedTags = topicTags(topicName2); + int initialMetricsSize = metrics.metrics().size(); metricsManager.recordBytesFetched(topicName1, 2); + // 4 new metrics shall be registered. + assertEquals(4, metrics.metrics().size() - initialMetricsSize); metricsManager.recordBytesFetched(topicName2, 1); + // Another 8 metrics get registered as deprecated metrics should be reported for topicName2. + assertEquals(12, metrics.metrics().size() - initialMetricsSize); + time.sleep(metrics.config().timeWindowMs() + 1); metricsManager.recordBytesFetched(topicName1, 10); metricsManager.recordBytesFetched(topicName2, 5); + // Subsequent calls should not register new metrics. + assertEquals(12, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for topicName1. assertEquals(6, metricValue(metricsRegistry.topicFetchSizeAvg, tags1), EPSILON); assertEquals(10, metricValue(metricsRegistry.topicFetchSizeMax, tags1), EPSILON); + assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, tags1) > 0); + assertEquals(12, metricValue(metricsRegistry.topicBytesConsumedTotal, tags1), EPSILON); + // Validate metrics for topicName2. assertEquals(3, metricValue(metricsRegistry.topicFetchSizeAvg, tags2), EPSILON); assertEquals(5, metricValue(metricsRegistry.topicFetchSizeMax, tags2), EPSILON); + assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, tags2) > 0); + assertEquals(6, metricValue(metricsRegistry.topicBytesConsumedTotal, tags2), EPSILON); + // Validate metrics for deprecated topic. + assertEquals(3, metricValue(metricsRegistry.topicFetchSizeAvg, deprecatedTags), EPSILON); + assertEquals(5, metricValue(metricsRegistry.topicFetchSizeMax, deprecatedTags), EPSILON); + assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, deprecatedTags) > 0); + assertEquals(6, metricValue(metricsRegistry.topicBytesConsumedTotal, deprecatedTags), EPSILON); } @Test @@ -143,48 +166,216 @@ public void testRecordsFetched() { } @Test + @SuppressWarnings("deprecation") public void testRecordsFetchedTopic() { String topicName1 = TOPIC_NAME; - String topicName2 = "another-topic"; - Map tags1 = topicTags(topicName1); - Map tags2 = topicTags(topicName2); + String topicName2 = "another.topic"; + Map tags1 = Map.of("topic", topicName1); + Map tags2 = Map.of("topic", topicName2); + Map deprecatedTags = topicTags(topicName2); + int initialMetricsSize = metrics.metrics().size(); metricsManager.recordRecordsFetched(topicName1, 2); + // 3 new metrics shall be registered. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); metricsManager.recordRecordsFetched(topicName2, 1); + // Another 6 metrics get registered as deprecated metrics should be reported for topicName2. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + time.sleep(metrics.config().timeWindowMs() + 1); metricsManager.recordRecordsFetched(topicName1, 10); metricsManager.recordRecordsFetched(topicName2, 5); + // Subsequent calls should not register new metrics. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for topicName1. assertEquals(6, metricValue(metricsRegistry.topicRecordsPerRequestAvg, tags1), EPSILON); + assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, tags1) > 0); + assertEquals(12, metricValue(metricsRegistry.topicRecordsConsumedTotal, tags1), EPSILON); + // Validate metrics for topicName2. 
assertEquals(3, metricValue(metricsRegistry.topicRecordsPerRequestAvg, tags2), EPSILON); + assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, tags2) > 0); + assertEquals(6, metricValue(metricsRegistry.topicRecordsConsumedTotal, tags2), EPSILON); + // Validate metrics for deprecated topic. + assertEquals(3, metricValue(metricsRegistry.topicRecordsPerRequestAvg, deprecatedTags), EPSILON); + assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, deprecatedTags) > 0); + assertEquals(6, metricValue(metricsRegistry.topicRecordsConsumedTotal, deprecatedTags), EPSILON); } @Test + @SuppressWarnings("deprecation") public void testPartitionLag() { - Map tags = topicPartitionTags(TP); - metricsManager.recordPartitionLag(TP, 14); - metricsManager.recordPartitionLag(TP, 8); + TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); + TopicPartition tp2 = new TopicPartition("another.topic", 0); + + Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); + Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); + Map deprecatedTags = topicPartitionTags(tp2); + int initialMetricsSize = metrics.metrics().size(); + + metricsManager.recordPartitionLag(tp1, 14); + // 3 new metrics shall be registered. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + + metricsManager.recordPartitionLag(tp1, 8); time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLag(TP, 5); + metricsManager.recordPartitionLag(tp1, 5); + // Subsequent calls should not register new metrics. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for tp1. assertEquals(14, metricValue(metricsRegistry.recordsLagMax), EPSILON); - assertEquals(5, metricValue(metricsRegistry.partitionRecordsLag, tags), EPSILON); - assertEquals(14, metricValue(metricsRegistry.partitionRecordsLagMax, tags), EPSILON); - assertEquals(9, metricValue(metricsRegistry.partitionRecordsLagAvg, tags), EPSILON); + assertEquals(5, metricValue(metricsRegistry.partitionRecordsLag, tags1), EPSILON); + assertEquals(14, metricValue(metricsRegistry.partitionRecordsLagMax, tags1), EPSILON); + assertEquals(9, metricValue(metricsRegistry.partitionRecordsLagAvg, tags1), EPSILON); + + metricsManager.recordPartitionLag(tp2, 7); + // Another 6 metrics get registered as deprecated metrics should be reported for tp2. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + metricsManager.recordPartitionLag(tp2, 3); + time.sleep(metrics.config().timeWindowMs() + 1); + metricsManager.recordPartitionLag(tp2, 2); + + // Subsequent calls should not register new metrics. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for tp2. + assertEquals(7, metricValue(metricsRegistry.recordsLagMax), EPSILON); + assertEquals(2, metricValue(metricsRegistry.partitionRecordsLag, tags2), EPSILON); + assertEquals(7, metricValue(metricsRegistry.partitionRecordsLagMax, tags2), EPSILON); + assertEquals(4, metricValue(metricsRegistry.partitionRecordsLagAvg, tags2), EPSILON); + // Validate metrics for deprecated topic. 
+ assertEquals(2, metricValue(metricsRegistry.partitionRecordsLag, deprecatedTags), EPSILON); + assertEquals(7, metricValue(metricsRegistry.partitionRecordsLagMax, deprecatedTags), EPSILON); + assertEquals(4, metricValue(metricsRegistry.partitionRecordsLagAvg, deprecatedTags), EPSILON); } @Test + @SuppressWarnings("deprecation") public void testPartitionLead() { - Map tags = topicPartitionTags(TP); - metricsManager.recordPartitionLead(TP, 15); - metricsManager.recordPartitionLead(TP, 11); + TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); + TopicPartition tp2 = new TopicPartition("another.topic", 0); + + Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); + Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); + Map deprecatedTags = topicPartitionTags(tp2); + int initialMetricsSize = metrics.metrics().size(); + + metricsManager.recordPartitionLead(tp1, 15); + // 3 new metrics shall be registered. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + + metricsManager.recordPartitionLead(tp1, 11); time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLead(TP, 13); + metricsManager.recordPartitionLead(tp1, 13); + // Subsequent calls should not register new metrics. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for tp1. assertEquals(11, metricValue(metricsRegistry.recordsLeadMin), EPSILON); - assertEquals(13, metricValue(metricsRegistry.partitionRecordsLead, tags), EPSILON); - assertEquals(11, metricValue(metricsRegistry.partitionRecordsLeadMin, tags), EPSILON); - assertEquals(13, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags), EPSILON); + assertEquals(13, metricValue(metricsRegistry.partitionRecordsLead, tags1), EPSILON); + assertEquals(11, metricValue(metricsRegistry.partitionRecordsLeadMin, tags1), EPSILON); + assertEquals(13, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags1), EPSILON); + + metricsManager.recordPartitionLead(tp2, 18); + // Another 6 metrics get registered as deprecated metrics should be reported for tp2. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + + metricsManager.recordPartitionLead(tp2, 12); + time.sleep(metrics.config().timeWindowMs() + 1); + metricsManager.recordPartitionLead(tp2, 15); + + // Subsequent calls should not register new metrics. + assertEquals(9, metrics.metrics().size() - initialMetricsSize); + // Validate metrics for tp2. + assertEquals(12, metricValue(metricsRegistry.recordsLeadMin), EPSILON); + assertEquals(15, metricValue(metricsRegistry.partitionRecordsLead, tags2), EPSILON); + assertEquals(12, metricValue(metricsRegistry.partitionRecordsLeadMin, tags2), EPSILON); + assertEquals(15, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags2), EPSILON); + // Validate metrics for deprecated topic. 
+ assertEquals(15, metricValue(metricsRegistry.partitionRecordsLead, deprecatedTags), EPSILON); + assertEquals(12, metricValue(metricsRegistry.partitionRecordsLeadMin, deprecatedTags), EPSILON); + assertEquals(15, metricValue(metricsRegistry.partitionRecordsLeadAvg, deprecatedTags), EPSILON); + } + + @Test + @SuppressWarnings("deprecation") + public void testMaybeUpdateAssignment() { + TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); + TopicPartition tp2 = new TopicPartition("another.topic", 0); + TopicPartition tp3 = new TopicPartition("another.topic", 1); + int initialMetricsSize = metrics.metrics().size(); + + SubscriptionState subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); + subscriptionState.assignFromUser(Set.of(tp1)); + + metricsManager.maybeUpdateAssignment(subscriptionState); + // 1 new metrics shall be registered. + assertEquals(1, metrics.metrics().size() - initialMetricsSize); + + subscriptionState.assignFromUser(Set.of(tp1, tp2)); + subscriptionState.updatePreferredReadReplica(tp2, 1, () -> 0L); + metricsManager.maybeUpdateAssignment(subscriptionState); + // Another 2 metrics get registered as deprecated metrics should be reported for tp2. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + + Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); + Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); + Map deprecatedTags = topicPartitionTags(tp2); + // Validate preferred read replica metrics. + assertEquals(-1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, tags1), EPSILON); + assertEquals(1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, tags2), EPSILON); + assertEquals(1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, deprecatedTags), EPSILON); + + // Remove tp2 from subscription set. + subscriptionState.assignFromUser(Set.of(tp1, tp3)); + metricsManager.maybeUpdateAssignment(subscriptionState); + // Metrics count shall remain same as tp2 should be removed and tp3 gets added. + assertEquals(3, metrics.metrics().size() - initialMetricsSize); + + // Remove all partitions. + subscriptionState.assignFromUser(Set.of()); + metricsManager.maybeUpdateAssignment(subscriptionState); + // Metrics count shall be same as initial count as all new metrics shall be removed. + assertEquals(initialMetricsSize, metrics.metrics().size()); + } + + @Test + public void testMaybeUpdateAssignmentWithAdditionalRegisteredMetrics() { + TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); + TopicPartition tp2 = new TopicPartition("another.topic", 0); + TopicPartition tp3 = new TopicPartition("another.topic", 1); + + int initialMetricsSize = metrics.metrics().size(); + + metricsManager.recordPartitionLag(tp1, 14); + metricsManager.recordPartitionLead(tp1, 11); + metricsManager.recordPartitionLag(tp2, 5); + metricsManager.recordPartitionLead(tp2, 1); + metricsManager.recordPartitionLag(tp3, 4); + metricsManager.recordPartitionLead(tp3, 2); + + int additionalRegisteredMetricsSize = metrics.metrics().size(); + + SubscriptionState subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); + subscriptionState.assignFromUser(Set.of(tp1, tp2, tp3)); + metricsManager.maybeUpdateAssignment(subscriptionState); + + // 5 new metrics shall be registered. 
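testMaybeUpdateAssignment above checks the bookkeeping around assignment changes: a preferred-read-replica gauge is registered per assigned partition (reporting -1 until a replica is known) and every per-partition metric is removed once the partition leaves the assignment, so the registry returns to its initial size. A rough sketch of that register/remove cycle, again with assumed names and without the deprecated duplicates:

```java
// Illustrative register/remove bookkeeping for a per-partition gauge (assumed names only).
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.Metrics;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class AssignmentGaugeSketch {
    private static final String GROUP = "consumer-fetch-manager-metrics"; // assumed group name

    private final Metrics metrics = new Metrics();
    private final Map<TopicPartition, MetricName> registered = new HashMap<>();
    private final Map<TopicPartition, Integer> preferredReplica = new HashMap<>();

    void maybeUpdateAssignment(Set<TopicPartition> assignment) {
        // Drop gauges for partitions that are no longer assigned.
        registered.entrySet().removeIf(entry -> {
            if (!assignment.contains(entry.getKey())) {
                metrics.removeMetric(entry.getValue());
                preferredReplica.remove(entry.getKey());
                return true;
            }
            return false;
        });
        // Register a gauge for each newly assigned partition; -1 means "no preferred replica".
        for (TopicPartition tp : assignment) {
            registered.computeIfAbsent(tp, p -> {
                Map<String, String> tags = Map.of(
                        "topic", p.topic(), "partition", String.valueOf(p.partition()));
                MetricName name = metrics.metricName("preferred-read-replica", GROUP,
                        "Current preferred read replica", tags);
                metrics.addMetric(name, (Gauge<Integer>) (config, now) ->
                        preferredReplica.getOrDefault(p, -1));
                return name;
            });
        }
    }

    public static void main(String[] args) {
        AssignmentGaugeSketch sketch = new AssignmentGaugeSketch();
        TopicPartition tp = new TopicPartition("test", 0);
        sketch.maybeUpdateAssignment(Set.of(tp));  // one gauge registered, reads -1
        sketch.preferredReplica.put(tp, 1);        // a replica becomes known
        sketch.maybeUpdateAssignment(Set.of());    // partition unassigned: gauge removed
    }
}
```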
+ assertEquals(5, metrics.metrics().size() - additionalRegisteredMetricsSize); + + // Remove 1 partition which has deprecated metrics as well. + subscriptionState.assignFromUser(Set.of(tp1, tp2)); + metricsManager.maybeUpdateAssignment(subscriptionState); + // For tp2, 14 metrics will be unregistered. 3 for partition lag, 3 for partition lead, 1 for + // preferred read replica and similarly 7 deprecated metrics. Hence, we should have 9 metrics + // removed from additionalRegisteredMetricsSize. + assertEquals(9, additionalRegisteredMetricsSize - metrics.metrics().size()); + + // Remove all partitions. + subscriptionState.assignFromUser(Set.of()); + metricsManager.maybeUpdateAssignment(subscriptionState); + // Metrics count shall be same as initial count as all new metrics shall be removed. + assertEquals(initialMetricsSize, metrics.metrics().size()); } private void registerNodeLatencyMetric(String connectionId, MetricName nodeLatencyAvg, MetricName nodeLatencyMax) { @@ -209,4 +400,9 @@ private double metricValue(MetricName metricName) { return (Double) metric.metricValue(); } + private Integer readReplicaMetricValue(MetricNameTemplate name, Map tags) { + MetricName metricName = metrics.metricInstance(name, tags); + KafkaMetric metric = metrics.metric(metricName); + return (Integer) metric.metricValue(); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index 3e3f70a7443f0..8657dcfc1e99d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -24,12 +24,11 @@ import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NetworkClient; -import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.OffsetOutOfRangeException; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; @@ -42,7 +41,6 @@ import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; -import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.Header; @@ -161,7 +159,7 @@ public class FetchRequestManagerTest { private final String topicName = "test"; private final String groupId = "test-group"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap() { + private final Map topicIds = new HashMap<>() { { put(topicName, topicId); } @@ -1263,25 +1261,6 @@ public void testFetchNonContinuousRecords() { assertEquals(30L, consumerRecords.get(2).offset()); } - /** - * Test the case where the client makes a pre-v3 FetchRequest, but the server replies with only a partial - * request. 
This happens when a single message is larger than the per-partition limit. - */ - @Test - public void testFetchRequestWhenRecordTooLarge() { - try { - buildFetcher(); - - client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.FETCH.id, (short) 2, (short) 2)); - makeFetchRequestWithIncompleteRecord(); - assertThrows(RecordTooLargeException.class, this::collectFetch); - // the position should not advance since no data has been returned - assertEquals(0, subscriptions.position(tp0).offset); - } finally { - client.setNodeApiVersions(NodeApiVersions.create()); - } - } - /** * Test the case where the client makes a post KIP-74 FetchRequest, but the server replies with only a * partial request. For v3 and later FetchRequests, the implementation of KIP-74 changed the behavior @@ -1690,7 +1669,7 @@ public void testStaleOutOfRangeError() { @Test public void testFetchedRecordsAfterSeek() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); @@ -1711,7 +1690,7 @@ public void testFetchedRecordsAfterSeek() { @Test public void testFetchOffsetOutOfRangeException() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); @@ -1723,8 +1702,7 @@ public void testFetchOffsetOutOfRangeException() { assertFalse(subscriptions.isOffsetResetNeeded(tp0)); for (int i = 0; i < 2; i++) { - OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> - collectFetch()); + OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, this::collectFetch); assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet()); assertEquals(0L, e.offsetOutOfRangePartitions().get(tp0).longValue()); } @@ -1734,7 +1712,7 @@ public void testFetchOffsetOutOfRangeException() { public void testFetchPositionAfterException() { // verify the advancement in the next fetch offset equals to the number of fetched records when // some fetched partitions cause Exception. This ensures that consumer won't lose record upon exception - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0, tp1)); subscriptions.seek(tp0, 1); @@ -1780,7 +1758,7 @@ private void fetchRecordsInto(List> allFetchedRec @Test public void testCompletedFetchRemoval() { // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records. 
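testCompletedFetchRemoval pins down the retention rule for buffered fetches that raise an exception: a fetch that still carries undelivered records stays in the buffer so those records are not lost, and it is removed only once it is empty. A stripped-down sketch of that rule over a queue of completed fetches; the CompletedFetch shape here is invented for illustration.

```java
// Sketch of the removal rule for erroring completed fetches (illustrative types only).
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class CompletedFetchRemovalSketch {
    /** Minimal stand-in for a buffered per-partition fetch result. */
    record CompletedFetch(String partition, List<String> records, RuntimeException error) {
        boolean isEmpty() {
            return records.isEmpty();
        }
    }

    private final Deque<CompletedFetch> completedFetches = new ArrayDeque<>();

    void add(CompletedFetch fetch) {
        completedFetches.add(fetch);
    }

    /** Drain the head of the buffer, applying the rule the test asserts. */
    List<String> collect() {
        CompletedFetch head = completedFetches.peek();
        if (head == null) {
            return List.of();
        }
        if (head.error() != null) {
            // Only an *empty* erroring fetch is removed; one with records stays so they are not lost.
            if (head.isEmpty()) {
                completedFetches.poll();
            }
            throw head.error();
        }
        completedFetches.poll();
        return head.records();
    }
}
```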
- buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0, tp1, tp2, tp3)); @@ -1856,7 +1834,7 @@ public void testCompletedFetchRemoval() { @Test public void testSeekBeforeException() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0)); @@ -2046,7 +2024,7 @@ public void testFetcherLeadMetric() { @Test public void testReadCommittedLagMetric() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); assignFromUser(singleton(tp0)); @@ -2263,7 +2241,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { @Test public void testFetcherMetricsTemplates() { Map clientTags = Collections.singletonMap("client-id", "clientA"); - buildFetcher(new MetricConfig().tags(clientTags), OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(new MetricConfig().tags(clientTags), AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); // Fetch from topic to generate topic metrics @@ -2309,7 +2287,7 @@ private Map>> fetchRecords( @Test public void testSkippingAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2344,7 +2322,7 @@ public void testSkippingAbortedTransactions() { @Test public void testReturnCommittedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2380,7 +2358,7 @@ public void testReturnCommittedTransactions() { @Test public void testReadCommittedWithCommittedAndAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2456,7 +2434,7 @@ public void testReadCommittedWithCommittedAndAbortedTransactions() { @Test public void testMultipleAbortMarkers() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2505,7 +2483,7 @@ public void testMultipleAbortMarkers() { @Test public void testReadCommittedAbortMarkerWithNoData() { - buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), 
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2554,7 +2532,7 @@ public void testUpdatePositionWithLastRecordMissingFromBatch() { new SimpleRecord(null, "value".getBytes())); // Remove the last record to simulate compaction - MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) { + MemoryRecords.FilterResult result = records.filterTo(new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false); @@ -2564,7 +2542,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return record.key() != null; } - }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, ByteBuffer.allocate(1024), BufferSupplier.NO_CACHING); result.outputBuffer().flip(); MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer()); @@ -2622,7 +2600,7 @@ public void testUpdatePositionOnEmptyBatch() { @Test public void testReadCommittedWithCompactedTopic() { - buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2685,7 +2663,7 @@ public void testReadCommittedWithCompactedTopic() { @Test public void testReturnAbortedTransactionsInUncommittedMode() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2719,7 +2697,7 @@ public void testReturnAbortedTransactionsInUncommittedMode() { @Test public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); long currentOffset = 0; @@ -2833,7 +2811,7 @@ public void testConsumingViaIncrementalFetchRequests() { @Test public void testEmptyControlBatch() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 1; @@ -2961,7 +2939,7 @@ public void testSubscriptionPositionUpdatedWithEpoch() { @Test public void testPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3004,7 +2982,7 @@ public void testPreferredReadReplica() { @Test public void testFetchDisconnectedShouldClearPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new 
BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3037,7 +3015,7 @@ public void testFetchDisconnectedShouldClearPreferredReadReplica() { @Test public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3072,7 +3050,7 @@ public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned( @Test public void testFetchErrorShouldClearPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3107,7 +3085,7 @@ public void testFetchErrorShouldClearPreferredReadReplica() { @Test public void testPreferredReadReplicaOffsetError() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3205,7 +3183,7 @@ public void testCorruptMessageError() { public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInformation(Errors error) { // The test runs with 2 partitions where 1 partition is fetched without errors, and // 2nd partition faces errors due to leadership changes. - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED, Duration.ofMinutes(5).toMillis()); @@ -3298,7 +3276,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInformation(Errors error) { // The test runs with 2 partitions where 1 partition is fetched without errors, and // 2nd partition faces errors due to leadership changes. 
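The preferred-read-replica failure tests above share one pattern: seed a replica choice via a fetch response, inject a disconnect or error, and assert that the next fetch falls back to the leader unless the partition was unassigned in the meantime. The rule being exercised reduces to roughly the following; method and field names are placeholders rather than the real Fetcher code.

```java
// Sketch of the "fall back to the leader" rule the tests exercise (assumed, simplified names).
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

public class PreferredReadReplicaSketch {
    private final Map<String, Integer> preferredReplicaByPartition = new ConcurrentHashMap<>();

    /** Remember the preferred read replica returned in a fetch response. */
    void onFetchResponse(String partition, int preferredReplicaId) {
        preferredReplicaByPartition.put(partition, preferredReplicaId);
    }

    /** On a disconnect or fetch error, clear the choice only if the partition is still assigned. */
    void onFetchFailure(String partition, boolean stillAssigned) {
        if (stillAssigned) {
            preferredReplicaByPartition.remove(partition);
        }
    }

    /** The node to fetch from next: the preferred replica if one is known, otherwise the leader. */
    int nextFetchNode(String partition, int leaderId) {
        return Optional.ofNullable(preferredReplicaByPartition.get(partition)).orElse(leaderId);
    }

    public static void main(String[] args) {
        PreferredReadReplicaSketch sketch = new PreferredReadReplicaSketch();
        sketch.onFetchResponse("test-0", 1);
        System.out.println(sketch.nextFetchNode("test-0", 0)); // 1: preferred replica
        sketch.onFetchFailure("test-0", true);
        System.out.println(sketch.nextFetchNode("test-0", 0)); // 0: back to the leader
    }
}
```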
- buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED, Duration.ofMinutes(5).toMillis()); @@ -3581,7 +3559,7 @@ private Fetch collectFetch() { } private void buildFetcher(int maxPollRecords) { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), maxPollRecords, IsolationLevel.READ_UNCOMMITTED); } @@ -3591,11 +3569,11 @@ private void buildFetcher() { private void buildFetcher(Deserializer keyDeserializer, Deserializer valueDeserializer) { - buildFetcher(OffsetResetStrategy.EARLIEST, keyDeserializer, valueDeserializer, + buildFetcher(AutoOffsetResetStrategy.EARLIEST, keyDeserializer, valueDeserializer, Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); } - private void buildFetcher(OffsetResetStrategy offsetResetStrategy, + private void buildFetcher(AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, @@ -3605,7 +3583,7 @@ private void buildFetcher(OffsetResetStrategy offsetResetStrategy, } private void buildFetcher(MetricConfig metricConfig, - OffsetResetStrategy offsetResetStrategy, + AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, @@ -3614,7 +3592,7 @@ private void buildFetcher(MetricConfig metricConfig, } private void buildFetcher(MetricConfig metricConfig, - OffsetResetStrategy offsetResetStrategy, + AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, @@ -3702,7 +3680,7 @@ private void buildDependencies(MetricConfig metricConfig, properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); ConsumerConfig config = new ConsumerConfig(properties); - networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler)); + networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true)); } private List collectRecordOffsets(List> records) { @@ -3779,8 +3757,9 @@ public TestableNetworkClientDelegate(Time time, LogContext logContext, KafkaClient client, Metadata metadata, - BackgroundEventHandler backgroundEventHandler) { - super(time, config, logContext, client, metadata, backgroundEventHandler); + BackgroundEventHandler backgroundEventHandler, + boolean notifyMetadataErrorsViaErrorQueue) { + super(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, mock(AsyncConsumerMetrics.class)); } @Override diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index 6c29d3df82b88..ede973c5f9b4f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -27,7 +27,6 @@ import org.apache.kafka.clients.NodeApiVersions; import 
org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.OffsetOutOfRangeException; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; @@ -38,7 +37,6 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; -import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.Header; @@ -157,7 +155,7 @@ public class FetcherTest { private final String topicName = "test"; private final String groupId = "test-group"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap() { + private final Map topicIds = new HashMap<>() { { put(topicName, topicId); } @@ -1262,25 +1260,6 @@ public void testFetchNonContinuousRecords() { assertEquals(30L, consumerRecords.get(2).offset()); } - /** - * Test the case where the client makes a pre-v3 FetchRequest, but the server replies with only a partial - * request. This happens when a single message is larger than the per-partition limit. - */ - @Test - public void testFetchRequestWhenRecordTooLarge() { - try { - buildFetcher(); - - client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.FETCH.id, (short) 2, (short) 2)); - makeFetchRequestWithIncompleteRecord(); - assertThrows(RecordTooLargeException.class, this::collectFetch); - // the position should not advance since no data has been returned - assertEquals(0, subscriptions.position(tp0).offset); - } finally { - client.setNodeApiVersions(NodeApiVersions.create()); - } - } - /** * Test the case where the client makes a post KIP-74 FetchRequest, but the server replies with only a * partial request. For v3 and later FetchRequests, the implementation of KIP-74 changed the behavior @@ -1676,7 +1655,7 @@ public void testStaleOutOfRangeError() { @Test public void testFetchedRecordsAfterSeek() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); @@ -1697,7 +1676,7 @@ public void testFetchedRecordsAfterSeek() { @Test public void testFetchOffsetOutOfRangeException() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); @@ -1709,8 +1688,7 @@ public void testFetchOffsetOutOfRangeException() { assertFalse(subscriptions.isOffsetResetNeeded(tp0)); for (int i = 0; i < 2; i++) { - OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> - collectFetch()); + OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, this::collectFetch); assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet()); assertEquals(0L, e.offsetOutOfRangePartitions().get(tp0).longValue()); } @@ -1720,7 +1698,7 @@ public void testFetchOffsetOutOfRangeException() { public void testFetchPositionAfterException() { // verify the advancement in the next fetch offset equals to the number of fetched records when // some fetched partitions cause Exception. 
This ensures that consumer won't lose record upon exception - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0, tp1)); subscriptions.seek(tp0, 1); @@ -1766,7 +1744,7 @@ private void fetchRecordsInto(List> allFetchedRec @Test public void testCompletedFetchRemoval() { // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records. - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0, tp1, tp2, tp3)); @@ -1842,7 +1820,7 @@ public void testCompletedFetchRemoval() { @Test public void testSeekBeforeException() { - buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED); assignFromUser(Set.of(tp0)); @@ -2032,7 +2010,7 @@ public void testFetcherLeadMetric() { @Test public void testReadCommittedLagMetric() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); assignFromUser(singleton(tp0)); @@ -2249,7 +2227,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { @Test public void testFetcherMetricsTemplates() { Map clientTags = Collections.singletonMap("client-id", "clientA"); - buildFetcher(new MetricConfig().tags(clientTags), OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(new MetricConfig().tags(clientTags), AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); // Fetch from topic to generate topic metrics @@ -2295,7 +2273,7 @@ private Map>> fetchRecords( @Test public void testSkippingAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2330,7 +2308,7 @@ public void testSkippingAbortedTransactions() { @Test public void testReturnCommittedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2366,7 +2344,7 @@ public void testReturnCommittedTransactions() { @Test public void testReadCommittedWithCommittedAndAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2442,7 +2420,7 @@ public void testReadCommittedWithCommittedAndAbortedTransactions() { @Test public void testMultipleAbortMarkers() { - 
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2491,7 +2469,7 @@ public void testMultipleAbortMarkers() { @Test public void testReadCommittedAbortMarkerWithNoData() { - buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2540,7 +2518,7 @@ public void testUpdatePositionWithLastRecordMissingFromBatch() { new SimpleRecord(null, "value".getBytes())); // Remove the last record to simulate compaction - MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) { + MemoryRecords.FilterResult result = records.filterTo(new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false); @@ -2550,7 +2528,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return record.key() != null; } - }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, ByteBuffer.allocate(1024), BufferSupplier.NO_CACHING); result.outputBuffer().flip(); MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer()); @@ -2608,7 +2586,7 @@ public void testUpdatePositionOnEmptyBatch() { @Test public void testReadCommittedWithCompactedTopic() { - buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); @@ -2671,7 +2649,7 @@ public void testReadCommittedWithCompactedTopic() { @Test public void testReturnAbortedTransactionsInUncommittedMode() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 0; @@ -2705,7 +2683,7 @@ public void testReturnAbortedTransactionsInUncommittedMode() { @Test public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); long currentOffset = 0; @@ -2825,7 +2803,7 @@ public void testFetcherConcurrency() throws Exception { topicPartitions.add(new TopicPartition(topicName, i)); LogContext logContext = new LogContext(); - buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext); + buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST), logContext); IsolationLevel isolationLevel = IsolationLevel.READ_UNCOMMITTED; @@ -2849,7 +2827,7 @@ public void 
testFetcherConcurrency() throws Exception { true, // check crcs CommonClientConfigs.DEFAULT_CLIENT_RACK, isolationLevel); - fetcher = new Fetcher( + fetcher = new Fetcher<>( logContext, consumerClient, metadata, @@ -3032,7 +3010,7 @@ public void testFetcherSessionEpochUpdate() throws Exception { @Test public void testEmptyControlBatch() { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 1; @@ -3179,7 +3157,7 @@ public void testTruncationDetected() { builder.appendWithOffset(2L, 0L, "key".getBytes(), "value-3".getBytes()); MemoryRecords records = builder.build(); - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); assignFromUser(singleton(tp0)); @@ -3238,7 +3216,7 @@ public void testTruncationDetected() { @Test public void testPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3281,7 +3259,7 @@ public void testPreferredReadReplica() { @Test public void testFetchDisconnectedShouldClearPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3314,7 +3292,7 @@ public void testFetchDisconnectedShouldClearPreferredReadReplica() { @Test public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3349,7 +3327,7 @@ public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned( @Test public void testFetchErrorShouldClearPreferredReadReplica() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3384,7 +3362,7 @@ public void testFetchErrorShouldClearPreferredReadReplica() { @Test public void testPreferredReadReplicaOffsetError() { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), 
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis()); subscriptions.assignFromUser(singleton(tp0)); @@ -3481,7 +3459,7 @@ public void testCorruptMessageError() { public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInformation(Errors error) { // The test runs with 2 partitions where 1 partition is fetched without errors, and // 2nd partition faces errors due to leadership changes. - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED, Duration.ofMinutes(5).toMillis()); @@ -3574,7 +3552,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInformation(Errors error) { // The test runs with 2 partitions where 1 partition is fetched without errors, and // 2nd partition faces errors due to leadership changes. - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED, Duration.ofMinutes(5).toMillis()); @@ -3826,7 +3804,7 @@ private Fetch collectFetch() { } private void buildFetcher(int maxPollRecords) { - buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), + buildFetcher(AutoOffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), maxPollRecords, IsolationLevel.READ_UNCOMMITTED); } @@ -3836,11 +3814,11 @@ private void buildFetcher() { private void buildFetcher(Deserializer keyDeserializer, Deserializer valueDeserializer) { - buildFetcher(OffsetResetStrategy.EARLIEST, keyDeserializer, valueDeserializer, + buildFetcher(AutoOffsetResetStrategy.EARLIEST, keyDeserializer, valueDeserializer, Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED); } - private void buildFetcher(OffsetResetStrategy offsetResetStrategy, + private void buildFetcher(AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, @@ -3850,7 +3828,7 @@ private void buildFetcher(OffsetResetStrategy offsetResetStrategy, } private void buildFetcher(MetricConfig metricConfig, - OffsetResetStrategy offsetResetStrategy, + AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, @@ -3859,7 +3837,7 @@ private void buildFetcher(MetricConfig metricConfig, } private void buildFetcher(MetricConfig metricConfig, - OffsetResetStrategy offsetResetStrategy, + AutoOffsetResetStrategy offsetResetStrategy, Deserializer keyDeserializer, Deserializer valueDeserializer, int maxPollRecords, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java index 10e454499431f..81eb5187fecfb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java @@ -23,11 +23,13 @@ import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import 
org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.FindCoordinatorRequestData; +import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; @@ -41,17 +43,20 @@ import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedList; import java.util.Objects; import java.util.Optional; import java.util.Properties; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -78,7 +83,7 @@ public void setup() { @Test void testPollResultTimer() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() @@ -102,7 +107,7 @@ void testPollResultTimer() throws Exception { @Test public void testSuccessfulResponse() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); prepareFindCoordinatorResponse(Errors.NONE); @@ -116,7 +121,7 @@ public void testSuccessfulResponse() throws Exception { @Test public void testTimeoutBeforeSend() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { client.setUnreachable(mockNode(), REQUEST_TIMEOUT_MS); NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); ncd.add(unsentRequest); @@ -130,7 +135,7 @@ public void testTimeoutBeforeSend() throws Exception { @Test public void testTimeoutAfterSend() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); ncd.add(unsentRequest); ncd.poll(0, time.milliseconds()); @@ -164,7 +169,7 @@ public void testEnsureCorrectCompletionTimeOnComplete() { @Test 
public void testEnsureTimerSetOnAdd() { - NetworkClientDelegate ncd = newNetworkClientDelegate(); + NetworkClientDelegate ncd = newNetworkClientDelegate(false); NetworkClientDelegate.UnsentRequest findCoordRequest = newUnsentFindCoordinatorRequest(); assertNull(findCoordRequest.timer()); @@ -181,7 +186,7 @@ public void testEnsureTimerSetOnAdd() { @Test public void testHasAnyPendingRequests() throws Exception { - try (NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate()) { + try (NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); networkClientDelegate.add(unsentRequest); @@ -212,9 +217,24 @@ public void testPropagateMetadataError() { AuthenticationException authException = new AuthenticationException("Test Auth Exception"); doThrow(authException).when(metadata).maybeThrowAnyException(); - LinkedList backgroundEventQueue = new LinkedList<>(); - this.backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); - NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(); + NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false); + assertTrue(networkClientDelegate.getAndClearMetadataError().isEmpty()); + networkClientDelegate.poll(0, time.milliseconds()); + + Optional metadataError = networkClientDelegate.getAndClearMetadataError(); + assertTrue(metadataError.isPresent()); + assertInstanceOf(AuthenticationException.class, metadataError.get()); + assertEquals(authException.getMessage(), metadataError.get().getMessage()); + } + + @Test + public void testPropagateMetadataErrorWithErrorEvent() { + AuthenticationException authException = new AuthenticationException("Test Auth Exception"); + doThrow(authException).when(metadata).maybeThrowAnyException(); + + BlockingQueue backgroundEventQueue = new LinkedBlockingQueue<>(); + this.backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue, time, mock(AsyncConsumerMetrics.class)); + NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(true); assertEquals(0, backgroundEventQueue.size()); networkClientDelegate.poll(0, time.milliseconds()); @@ -226,19 +246,59 @@ public void testPropagateMetadataError() { assertEquals(authException, ((ErrorEvent) event).error()); } - public NetworkClientDelegate newNetworkClientDelegate() { + @Test + public void testRecordUnsentRequestsQueueTime() throws Exception { + try (Metrics metrics = new Metrics(); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); + NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false, asyncConsumerMetrics)) { + NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); + networkClientDelegate.add(unsentRequest); + asyncConsumerMetrics.recordUnsentRequestsQueueSize(1, time.milliseconds()); + + time.sleep(10); + long timeMs = time.milliseconds(); + networkClientDelegate.poll(0, timeMs); + assertEquals( + 0, + (double) metrics.metric( + metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + assertEquals( + 10, + (double) metrics.metric( + metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP) + ).metricValue() + ); + } + } + + public NetworkClientDelegate newNetworkClientDelegate(boolean 
notifyMetadataErrorsViaErrorQueue) { + return newNetworkClientDelegate(notifyMetadataErrorsViaErrorQueue, mock(AsyncConsumerMetrics.class)); + } + + public NetworkClientDelegate newNetworkClientDelegate(boolean notifyMetadataErrorsViaErrorQueue, AsyncConsumerMetrics asyncConsumerMetrics) { LogContext logContext = new LogContext(); Properties properties = new Properties(); properties.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.put(GROUP_ID_CONFIG, GROUP_ID); properties.put(REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_MS); - return new NetworkClientDelegate(this.time, + return new NetworkClientDelegate(time, new ConsumerConfig(properties), logContext, this.client, this.metadata, - this.backgroundEventHandler); + this.backgroundEventHandler, + notifyMetadataErrorsViaErrorQueue, + asyncConsumerMetrics + ); } public NetworkClientDelegate.UnsentRequest newUnsentFindCoordinatorRequest() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java index 4973624b0a0a7..96d6e5e0b3db9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.clients.consumer.LogTruncationException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; @@ -66,6 +65,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -94,12 +94,14 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class OffsetFetcherTest { private final String topicName = "test"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap() { + private final Map topicIds = new HashMap<>() { { put(topicName, topicId); } @@ -174,7 +176,7 @@ public void testUpdateFetchPositionResetToDefaultOffset() { public void testUpdateFetchPositionResetToLatestOffset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.updateMetadata(initialUpdateResponse); @@ -187,6 +189,26 @@ public void testUpdateFetchPositionResetToLatestOffset() { assertEquals(5, subscriptions.position(tp0).offset); } + @Test + public void testUpdateFetchPositionResetToDurationOffset() { + long timestamp = Instant.now().toEpochMilli(); + AutoOffsetResetStrategy durationStrategy = mock(AutoOffsetResetStrategy.class); + when(durationStrategy.timestamp()).thenReturn(Optional.of(timestamp)); + buildFetcher(durationStrategy); + assignFromUser(singleton(tp0)); + subscriptions.requestOffsetReset(tp0, durationStrategy); + + client.updateMetadata(initialUpdateResponse); + + client.prepareResponse(listOffsetRequestMatcher(timestamp), + 
listOffsetResponse(Errors.NONE, 1L, 5L)); + offsetFetcher.resetPositionsIfNeeded(); + consumerClient.pollNoWakeup(); + assertFalse(subscriptions.isOffsetResetNeeded(tp0)); + assertTrue(subscriptions.isFetchable(tp0)); + assertEquals(5, subscriptions.position(tp0).offset); + } + /** * Make sure the client behaves appropriately when receiving an exception for unavailable offsets */ @@ -194,7 +216,7 @@ public void testUpdateFetchPositionResetToLatestOffset() { public void testFetchOffsetErrors() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Fail with OFFSET_NOT_AVAILABLE client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, @@ -241,7 +263,7 @@ private void testListOffsetsSendsIsolationLevel(IsolationLevel isolationLevel) { buildFetcher(isolationLevel); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.prepareResponse(body -> { ListOffsetsRequest request = (ListOffsetsRequest) body; @@ -260,7 +282,7 @@ private void testListOffsetsSendsIsolationLevel(IsolationLevel isolationLevel) { public void testresetPositionsSkipsBlackedOutConnections() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); // Check that we skip sending the ListOffset request when the node is blacked out client.updateMetadata(initialUpdateResponse); @@ -270,7 +292,7 @@ public void testresetPositionsSkipsBlackedOutConnections() { assertEquals(0, consumerClient.pendingRequestCount()); consumerClient.pollNoWakeup(); assertTrue(subscriptions.isOffsetResetNeeded(tp0)); - assertEquals(OffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0)); + assertEquals(AutoOffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0)); time.sleep(500); client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.EARLIEST_TIMESTAMP), @@ -287,7 +309,7 @@ public void testresetPositionsSkipsBlackedOutConnections() { public void testUpdateFetchPositionResetToEarliestOffset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.EARLIEST_TIMESTAMP, validLeaderEpoch), listOffsetResponse(Errors.NONE, 1L, 5L)); @@ -303,7 +325,7 @@ public void testUpdateFetchPositionResetToEarliestOffset() { public void testresetPositionsMetadataRefresh() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // First fetch fails with stale metadata client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, @@ -340,7 +362,7 @@ public void testListOffsetNoUpdateMissingEpoch() { client.updateMetadata(metadataWithNoLeaderEpochs); // Return a ListOffsets response with leaderEpoch=1, we should ignore it - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP), listOffsetResponse(tp0, 
Errors.NONE, 1L, 5L, 1)); offsetFetcher.resetPositionsIfNeeded(); @@ -363,7 +385,7 @@ public void testListOffsetUpdateEpoch() { client.updateMetadata(metadataWithLeaderEpochs); // Reset offsets to trigger ListOffsets call - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Now we see a ListOffsets with leaderEpoch=2 epoch, we trigger a metadata update client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, 1), @@ -380,7 +402,7 @@ public void testListOffsetUpdateEpoch() { public void testUpdateFetchPositionDisconnect() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // First request gets a disconnect client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, @@ -416,7 +438,7 @@ public void testUpdateFetchPositionDisconnect() { public void testAssignmentChangeWithInFlightReset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Send the ListOffsets request to reset the position offsetFetcher.resetPositionsIfNeeded(); @@ -440,7 +462,7 @@ public void testAssignmentChangeWithInFlightReset() { public void testSeekWithInFlightReset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Send the ListOffsets request to reset the position offsetFetcher.resetPositionsIfNeeded(); @@ -462,7 +484,7 @@ public void testSeekWithInFlightReset() { private boolean listOffsetMatchesExpectedReset( TopicPartition tp, - OffsetResetStrategy strategy, + AutoOffsetResetStrategy strategy, AbstractRequest request ) { assertInstanceOf(ListOffsetsRequest.class, request); @@ -476,9 +498,9 @@ private boolean listOffsetMatchesExpectedReset( .map(ListOffsetsPartition::partitionIndex).collect(Collectors.toSet())); ListOffsetsPartition listPartition = listTopic.partitions().get(0); - if (strategy == OffsetResetStrategy.EARLIEST) { + if (strategy == AutoOffsetResetStrategy.EARLIEST) { assertEquals(ListOffsetsRequest.EARLIEST_TIMESTAMP, listPartition.timestamp()); - } else if (strategy == OffsetResetStrategy.LATEST) { + } else if (strategy == AutoOffsetResetStrategy.LATEST) { assertEquals(ListOffsetsRequest.LATEST_TIMESTAMP, listPartition.timestamp()); } return true; @@ -489,13 +511,13 @@ public void testEarlierOffsetResetArrivesLate() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); offsetFetcher.resetPositionsIfNeeded(); client.prepareResponse(req -> { - if (listOffsetMatchesExpectedReset(tp0, OffsetResetStrategy.EARLIEST, req)) { + if (listOffsetMatchesExpectedReset(tp0, AutoOffsetResetStrategy.EARLIEST, req)) { // Before the response is handled, we get a request to reset to the latest offset - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); return true; } else { return false; @@ -505,11 +527,11 @@ public void testEarlierOffsetResetArrivesLate() { // The list offset result should be ignored 
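testEarlierOffsetResetArrivesLate and the surrounding in-flight reset tests verify a single guard: a ListOffsets response may only complete a position reset if the strategy it was issued for is still the partition's current reset strategy; otherwise the response is discarded and a fresh request is sent. A compact sketch of that guard, with placeholder types standing in for the real SubscriptionState plumbing:

```java
// Sketch of the stale-reset guard the tests verify (types and names are illustrative).
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

public class ResetResponseGuardSketch {
    enum ResetStrategy { EARLIEST, LATEST }

    private final Map<String, ResetStrategy> pendingReset = new ConcurrentHashMap<>();
    private final Map<String, Long> position = new ConcurrentHashMap<>();

    void requestOffsetReset(String partition, ResetStrategy strategy) {
        pendingReset.put(partition, strategy);
    }

    /**
     * Apply a ListOffsets result only if the strategy that produced it is still the one requested.
     * A reset requested after the request was sent (e.g. EARLIEST -> LATEST) makes the result stale.
     */
    void onListOffsetsResponse(String partition, ResetStrategy requestedWith, long offset) {
        if (pendingReset.get(partition) == requestedWith) {
            position.put(partition, offset);
            pendingReset.remove(partition);
        } // else: discard; a new request will be sent for the current strategy
    }

    Optional<Long> position(String partition) {
        return Optional.ofNullable(position.get(partition));
    }

    public static void main(String[] args) {
        ResetResponseGuardSketch guard = new ResetResponseGuardSketch();
        guard.requestOffsetReset("test-0", ResetStrategy.EARLIEST);
        // Before the EARLIEST response arrives, the user asks for LATEST instead.
        guard.requestOffsetReset("test-0", ResetStrategy.LATEST);
        guard.onListOffsetsResponse("test-0", ResetStrategy.EARLIEST, 5L); // ignored: stale
        System.out.println(guard.position("test-0")); // Optional.empty
        guard.onListOffsetsResponse("test-0", ResetStrategy.LATEST, 10L);  // applied
        System.out.println(guard.position("test-0")); // Optional[10]
    }
}
```

The test's final assertions mirror this flow: the stale EARLIEST response leaves the reset pending, and only the LATEST response establishes the new position.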
assertTrue(subscriptions.isOffsetResetNeeded(tp0)); - assertEquals(OffsetResetStrategy.LATEST, subscriptions.resetStrategy(tp0)); + assertEquals(AutoOffsetResetStrategy.LATEST, subscriptions.resetStrategy(tp0)); offsetFetcher.resetPositionsIfNeeded(); client.prepareResponse( - req -> listOffsetMatchesExpectedReset(tp0, OffsetResetStrategy.LATEST, req), + req -> listOffsetMatchesExpectedReset(tp0, AutoOffsetResetStrategy.LATEST, req), listOffsetResponse(Errors.NONE, 1L, 10L) ); consumerClient.pollNoWakeup(); @@ -522,7 +544,7 @@ public void testEarlierOffsetResetArrivesLate() { public void testChangeResetWithInFlightReset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Send the ListOffsets request to reset the position offsetFetcher.resetPositionsIfNeeded(); @@ -531,7 +553,7 @@ public void testChangeResetWithInFlightReset() { assertTrue(client.hasInFlightRequests()); // Now we get a seek from the user - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); // The response returns and is discarded client.respond(listOffsetResponse(Errors.NONE, 1L, 5L)); @@ -540,14 +562,14 @@ public void testChangeResetWithInFlightReset() { assertFalse(client.hasPendingResponses()); assertFalse(client.hasInFlightRequests()); assertTrue(subscriptions.isOffsetResetNeeded(tp0)); - assertEquals(OffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0)); + assertEquals(AutoOffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0)); } @Test public void testIdempotentResetWithInFlightReset() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // Send the ListOffsets request to reset the position offsetFetcher.resetPositionsIfNeeded(); @@ -556,7 +578,7 @@ public void testIdempotentResetWithInFlightReset() { assertTrue(client.hasInFlightRequests()); // Now we get a seek from the user - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.respond(listOffsetResponse(Errors.NONE, 1L, 5L)); consumerClient.pollNoWakeup(); @@ -570,7 +592,7 @@ public void testIdempotentResetWithInFlightReset() { public void testResetOffsetsAuthorizationFailure() { buildFetcher(); assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); // First request gets a disconnect client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, @@ -638,7 +660,7 @@ public void testUpdateFetchPositionOfPausedPartitionsRequiringOffsetReset() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.pause(tp0); // paused partition does not have a valid position - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, validLeaderEpoch), listOffsetResponse(Errors.NONE, 1L, 10L)); @@ -717,7 +739,7 @@ public void testGetOffsetsFencedLeaderEpoch() { subscriptions.assignFromUser(singleton(tp0)); client.updateMetadata(initialUpdateResponse); - subscriptions.requestOffsetReset(tp0, 
OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.prepareResponse(listOffsetResponse(Errors.FENCED_LEADER_EPOCH, 1L, 5L)); offsetFetcher.resetPositionsIfNeeded(); @@ -846,7 +868,7 @@ public void testGetOffsetByTimeWithPartitionsRetryCouldTriggerMetadataUpdate() { public void testGetOffsetsUnknownLeaderEpoch() { buildFetcher(); subscriptions.assignFromUser(singleton(tp0)); - subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); + subscriptions.requestOffsetReset(tp0, AutoOffsetResetStrategy.LATEST); client.prepareResponse(listOffsetResponse(Errors.UNKNOWN_LEADER_EPOCH, 1L, 5L)); offsetFetcher.resetPositionsIfNeeded(); @@ -1091,43 +1113,6 @@ private void testGetOffsetsForTimesWithUnknownOffset() { assertNull(offsetAndTimestampMap.get(tp0)); } - @Test - public void testGetOffsetsForTimesWithUnknownOffsetV0() { - buildFetcher(); - // Empty map - assertTrue(offsetFetcher.offsetsForTimes(new HashMap<>(), time.timer(100L)).isEmpty()); - // Unknown Offset - client.reset(); - // Ensure metadata has both partition. - MetadataResponse initialMetadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds); - client.updateMetadata(initialMetadataUpdate); - // Force LIST_OFFSETS version 0 - Node node = metadata.fetch().nodes().get(0); - apiVersions.update(node.idString(), NodeApiVersions.create( - ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 0)); - - ListOffsetsResponseData data = new ListOffsetsResponseData() - .setThrottleTimeMs(0) - .setTopics(Collections.singletonList(new ListOffsetsTopicResponse() - .setName(tp0.topic()) - .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse() - .setPartitionIndex(tp0.partition()) - .setErrorCode(Errors.NONE.code()) - .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) - .setOldStyleOffsets(Collections.emptyList()))))); - - client.prepareResponseFrom(new ListOffsetsResponse(data), - metadata.fetch().leaderFor(tp0)); - - Map timestampToSearch = new HashMap<>(); - timestampToSearch.put(tp0, 0L); - Map offsetAndTimestampMap = - offsetFetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE)); - - assertTrue(offsetAndTimestampMap.containsKey(tp0)); - assertNull(offsetAndTimestampMap.get(tp0)); - } - @Test public void testOffsetValidationRequestGrouping() { buildFetcher(); @@ -1236,7 +1221,7 @@ public void testOffsetValidationSkippedForOldBroker() { IsolationLevel isolationLevel = IsolationLevel.READ_UNCOMMITTED; int maxPollRecords = Integer.MAX_VALUE; long metadataExpireMs = Long.MAX_VALUE; - OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.EARLIEST; + AutoOffsetResetStrategy offsetResetStrategy = AutoOffsetResetStrategy.EARLIEST; int minBytes = 1; int maxBytes = Integer.MAX_VALUE; int maxWaitMs = 0; @@ -1355,36 +1340,36 @@ public void testOffsetValidationSkippedForOldResponse() { @Test public void testOffsetValidationresetPositionForUndefinedEpochWithDefinedResetPolicy() { testOffsetValidationWithGivenEpochOffset( - UNDEFINED_EPOCH, 0L, OffsetResetStrategy.EARLIEST); + UNDEFINED_EPOCH, 0L, AutoOffsetResetStrategy.EARLIEST); } @Test public void testOffsetValidationresetPositionForUndefinedOffsetWithDefinedResetPolicy() { testOffsetValidationWithGivenEpochOffset( - 2, UNDEFINED_EPOCH_OFFSET, OffsetResetStrategy.EARLIEST); + 2, UNDEFINED_EPOCH_OFFSET, AutoOffsetResetStrategy.EARLIEST); } @Test public void testOffsetValidationresetPositionForUndefinedEpochWithUndefinedResetPolicy() { 
testOffsetValidationWithGivenEpochOffset( - UNDEFINED_EPOCH, 0L, OffsetResetStrategy.NONE); + UNDEFINED_EPOCH, 0L, AutoOffsetResetStrategy.NONE); } @Test public void testOffsetValidationresetPositionForUndefinedOffsetWithUndefinedResetPolicy() { testOffsetValidationWithGivenEpochOffset( - 2, UNDEFINED_EPOCH_OFFSET, OffsetResetStrategy.NONE); + 2, UNDEFINED_EPOCH_OFFSET, AutoOffsetResetStrategy.NONE); } @Test public void testOffsetValidationTriggerLogTruncationForBadOffsetWithUndefinedResetPolicy() { testOffsetValidationWithGivenEpochOffset( - 1, 1L, OffsetResetStrategy.NONE); + 1, 1L, AutoOffsetResetStrategy.NONE); } private void testOffsetValidationWithGivenEpochOffset(int leaderEpoch, long endOffset, - OffsetResetStrategy offsetResetStrategy) { + AutoOffsetResetStrategy offsetResetStrategy) { buildFetcher(offsetResetStrategy); assignFromUser(singleton(tp0)); @@ -1415,7 +1400,7 @@ private void testOffsetValidationWithGivenEpochOffset(int leaderEpoch, prepareOffsetsForLeaderEpochResponse(tp0, leaderEpoch, endOffset)); consumerClient.poll(time.timer(Duration.ZERO)); - if (offsetResetStrategy == OffsetResetStrategy.NONE) { + if (offsetResetStrategy == AutoOffsetResetStrategy.NONE) { LogTruncationException thrown = assertThrows(LogTruncationException.class, () -> offsetFetcher.validatePositionsIfNeeded()); assertEquals(singletonMap(tp0, initialOffset), thrown.offsetOutOfRangePartitions()); @@ -1690,16 +1675,16 @@ private void buildFetcher() { buildFetcher(IsolationLevel.READ_UNCOMMITTED); } - private void buildFetcher(OffsetResetStrategy offsetResetStrategy) { + private void buildFetcher(AutoOffsetResetStrategy offsetResetStrategy) { buildFetcher(new MetricConfig(), offsetResetStrategy, IsolationLevel.READ_UNCOMMITTED); } private void buildFetcher(IsolationLevel isolationLevel) { - buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, isolationLevel); + buildFetcher(new MetricConfig(), AutoOffsetResetStrategy.EARLIEST, isolationLevel); } private void buildFetcher(MetricConfig metricConfig, - OffsetResetStrategy offsetResetStrategy, + AutoOffsetResetStrategy offsetResetStrategy, IsolationLevel isolationLevel) { long metadataExpireMs = Long.MAX_VALUE; LogContext logContext = new LogContext(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java index 721f1560f562c..a48b32b43efb6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MockClient; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TopicAuthorizationException; @@ -147,11 +146,11 @@ public void testRetriableError() { } private OffsetsForLeaderEpochClient newOffsetClient() { - buildDependencies(OffsetResetStrategy.EARLIEST); + buildDependencies(AutoOffsetResetStrategy.EARLIEST); return new OffsetsForLeaderEpochClient(consumerClient, new LogContext()); } - private void buildDependencies(OffsetResetStrategy offsetResetStrategy) { + private void buildDependencies(AutoOffsetResetStrategy offsetResetStrategy) { LogContext logContext = new LogContext(); Time time = new MockTime(1); 
SubscriptionState subscriptions = new SubscriptionState(logContext, offsetResetStrategy); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java index 996ae05feb938..2f92740c41411 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.ClusterResource; import org.apache.kafka.common.IsolationLevel; @@ -287,6 +286,8 @@ public void testRequestFailsWithRetriableError_RetrySucceeds(Errors error) throw assertFalse(fetchOffsetsFuture.isDone()); assertEquals(1, requestManager.requestsToRetry()); assertEquals(0, requestManager.requestsToSend()); + // A retriable error should be followed by a metadata update request + verify(metadata).requestUpdate(false); // Cluster metadata update. Failed requests should be retried and succeed mockSuccessfulRequest(Collections.singletonMap(TEST_PARTITION_1, LEADER_1)); @@ -385,6 +386,8 @@ public void testRequestPartiallyFailsWithRetriableError_RetrySucceeds() throws E assertFalse(fetchOffsetsFuture.isDone()); assertEquals(1, requestManager.requestsToRetry()); assertEquals(0, requestManager.requestsToSend()); + // A retriable error should be followed by a metadata update request + verify(metadata).requestUpdate(false); // Cluster metadata update. Failed requests should be retried mockSuccessfulRequest(partitionLeaders); @@ -514,7 +517,7 @@ public void testResetPositionsSendNoRequestIfNoPartitionsNeedingReset() { public void testResetPositionsMissingLeader() { mockFailedRequest_MissingLeader(); when(subscriptionState.partitionsNeedingReset(time.milliseconds())).thenReturn(Collections.singleton(TEST_PARTITION_1)); - when(subscriptionState.resetStrategy(any())).thenReturn(OffsetResetStrategy.EARLIEST); + when(subscriptionState.resetStrategy(any())).thenReturn(AutoOffsetResetStrategy.EARLIEST); requestManager.resetPositionsIfNeeded(); verify(metadata).requestUpdate(true); assertEquals(0, requestManager.requestsToSend()); @@ -537,7 +540,7 @@ public void testResetPositionsSuccess_LeaderEpochInResponse() { @Test public void testResetOffsetsAuthorizationFailure() { when(subscriptionState.partitionsNeedingReset(time.milliseconds())).thenReturn(Collections.singleton(TEST_PARTITION_1)); - when(subscriptionState.resetStrategy(any())).thenReturn(OffsetResetStrategy.EARLIEST); + when(subscriptionState.resetStrategy(any())).thenReturn(AutoOffsetResetStrategy.EARLIEST); mockSuccessfulRequest(Collections.singletonMap(TEST_PARTITION_1, LEADER_1)); CompletableFuture resetResult = requestManager.resetPositionsIfNeeded(); @@ -844,7 +847,7 @@ private void mockSuccessfulBuildRequestForValidatingPositions(SubscriptionState. 
private void testResetPositionsSuccessWithLeaderEpoch(Metadata.LeaderAndEpoch leaderAndEpoch) { TopicPartition tp = TEST_PARTITION_1; Node leader = LEADER_1; - OffsetResetStrategy strategy = OffsetResetStrategy.EARLIEST; + AutoOffsetResetStrategy strategy = AutoOffsetResetStrategy.EARLIEST; long offset = 5L; when(subscriptionState.partitionsNeedingReset(time.milliseconds())).thenReturn(Collections.singleton(tp)); when(subscriptionState.resetStrategy(any())).thenReturn(strategy); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestFutureTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestFutureTest.java index e218f8109fc8e..2cc4485f460cc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestFutureTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestFutureTest.java @@ -182,7 +182,7 @@ public void listenersInvokedIfAddedBeforeAndAfterCompletion() { @Test public void testComposeSuccessCase() { RequestFuture future = new RequestFuture<>(); - RequestFuture composed = future.compose(new RequestFutureAdapter() { + RequestFuture composed = future.compose(new RequestFutureAdapter<>() { @Override public void onSuccess(String value, RequestFuture future) { future.complete(value.length()); @@ -199,7 +199,7 @@ public void onSuccess(String value, RequestFuture future) { @Test public void testComposeFailureCase() { RequestFuture future = new RequestFuture<>(); - RequestFuture composed = future.compose(new RequestFutureAdapter() { + RequestFuture composed = future.compose(new RequestFutureAdapter<>() { @Override public void onSuccess(String value, RequestFuture future) { future.complete(value.length()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index 6af9509d04b90..0e74a9768a843 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -24,10 +24,10 @@ import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackEvent; +import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.KafkaException; @@ -60,6 +60,7 @@ import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.ShareAcknowledgeResponse; +import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.Deserializer; @@ -96,8 +97,11 @@ import java.util.Optional; import java.util.Properties; import java.util.Set; +import java.util.concurrent.CompletableFuture; import 
java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.Collections.singleton; @@ -113,6 +117,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -124,13 +129,13 @@ public class ShareConsumeRequestManagerTest { private final String groupId = "test-group"; private final Uuid topicId = Uuid.randomUuid(); private final Uuid topicId2 = Uuid.randomUuid(); - private final Map topicIds = new HashMap() { + private final Map topicIds = new HashMap<>() { { put(topicName, topicId); put(topicName2, topicId2); } }; - private final Map topicPartitionCounts = new HashMap() { + private final Map topicPartitionCounts = new HashMap<>() { { put(topicName, 2); put(topicName2, 1); @@ -171,10 +176,8 @@ public void setup() { } private void assignFromSubscribed(Set partitions) { - partitions.forEach(partition -> { - subscriptions.subscribeToShareGroup(Collections.singleton(partition.topic())); - subscriptions.assignFromSubscribed(Collections.singleton(partition)); - }); + subscriptions.subscribeToShareGroup(partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet())); + subscriptions.assignFromSubscribed(partitions); client.updateMetadata(initialUpdateResponse); @@ -370,6 +373,64 @@ public void testCommitAsync() { completedAcknowledgements.clear(); } + @Test + public void testServerDisconnectedOnShareAcknowledge() throws InterruptedException { + buildRequestManager(); + // Enabling the config so that background event is sent when the acknowledgement response is received. 
+ shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + assignFromSubscribed(Collections.singleton(tp0)); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + fetchRecords(); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); + + assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(3L, AcknowledgeType.REJECT); + + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements2)); + + client.prepareResponse(null, true); + networkClientDelegate.poll(time.timer(0)); + + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Errors.UNKNOWN_SERVER_ERROR, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); + completedAcknowledgements.clear(); + + assertEquals(1, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); + + TestUtils.retryOnExceptionWithTimeout(() -> { + assertEquals(0, shareConsumeRequestManager.sendAcknowledgements()); + // We expect the remaining acknowledgements to be cleared due to share session epoch being set to 0. + assertNull(shareConsumeRequestManager.requestStates(0)); + // The callback for these unsent acknowledgements will be invoked with an error code. + assertEquals(Collections.singletonMap(tip0, acknowledgements2), completedAcknowledgements.get(0)); + assertEquals(Errors.SHARE_SESSION_NOT_FOUND, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); + }); + + // Attempt a normal fetch to check if nodesWithPendingRequests is empty. + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + } + @Test public void testAcknowledgeOnClose() { buildRequestManager(); @@ -490,6 +551,89 @@ public void testAcknowledgeOnCloseWithPendingCommitSync() { completedAcknowledgements.clear(); } + @Test + public void testResultHandlerOnCommitAsync() { + buildRequestManager(); + // Enabling the config so that background event is sent when the acknowledgement response is received. + shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); + + ShareConsumeRequestManager.ResultHandler resultHandler = shareConsumeRequestManager.buildResultHandler(null, Optional.empty()); + + // Passing null acknowledgements should mean we do not send the background event at all. 
+ resultHandler.complete(tip0, null, true); + assertEquals(0, completedAcknowledgements.size()); + + // Setting isCommitAsync to false should still not send any background event + // as we have initialized remainingResults to null. + resultHandler.complete(tip0, acknowledgements, false); + assertEquals(0, completedAcknowledgements.size()); + + // Sending non-null acknowledgements means we do send the background event + resultHandler.complete(tip0, acknowledgements, true); + assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); + } + + @Test + public void testResultHandlerOnCommitSync() { + buildRequestManager(); + // Enabling the config so that background event is sent when the acknowledgement response is received. + shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); + + final CompletableFuture> future = new CompletableFuture<>(); + + // Initializing resultCount to 3. + AtomicInteger resultCount = new AtomicInteger(3); + + ShareConsumeRequestManager.ResultHandler resultHandler = shareConsumeRequestManager.buildResultHandler(resultCount, Optional.of(future)); + + // We only send the background event after all results have been completed. + resultHandler.complete(tip0, acknowledgements, false); + assertEquals(0, completedAcknowledgements.size()); + assertFalse(future.isDone()); + + resultHandler.complete(t2ip0, null, false); + assertEquals(0, completedAcknowledgements.size()); + assertFalse(future.isDone()); + + // After third response is received, we send the background event. + resultHandler.complete(tip1, acknowledgements, false); + assertEquals(1, completedAcknowledgements.size()); + assertEquals(2, completedAcknowledgements.get(0).size()); + assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); + assertEquals(3, completedAcknowledgements.get(0).get(tip1).size()); + assertTrue(future.isDone()); + } + + @Test + public void testResultHandlerCompleteIfEmpty() { + buildRequestManager(); + + final CompletableFuture> future = new CompletableFuture<>(); + + // Initializing resultCount to 1. 
+ AtomicInteger resultCount = new AtomicInteger(1); + + ShareConsumeRequestManager.ResultHandler resultHandler = shareConsumeRequestManager.buildResultHandler(resultCount, Optional.of(future)); + + resultHandler.completeIfEmpty(); + assertFalse(future.isDone()); + + resultCount.decrementAndGet(); + + resultHandler.completeIfEmpty(); + assertTrue(future.isDone()); + } + @Test public void testBatchingAcknowledgeRequestStates() { buildRequestManager(); @@ -637,6 +781,52 @@ public void testRetryAcknowledgements() throws InterruptedException { assertEquals(0, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().peek().getIncompleteAcknowledgementsCount(tip0)); } + @Test + public void testPiggybackAcknowledgementsInFlight() { + buildRequestManager(); + + assignFromSubscribed(Collections.singleton(tp0)); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + + // Reading records from the share fetch buffer. + fetchRecords(); + + // Piggyback acknowledgements + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); + + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + assertEquals(2.0, + metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); + + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(3L, AcknowledgeType.ACCEPT); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements2)); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + fetchRecords(); + + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + assertEquals(3.0, + metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); + } + @Test public void testCommitAsyncWithSubscriptionChange() { buildRequestManager(); @@ -660,6 +850,13 @@ public void testCommitAsyncWithSubscriptionChange() { shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + + client.prepareResponse(fullAcknowledgeResponse(tip1, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + + // We should send a fetch to the newly subscribed partition. 
+ assertEquals(1, sendFetches()); + } @Test @@ -693,7 +890,130 @@ public void testShareFetchWithSubscriptionChange() { } @Test - public void testRetryAcknowledgementsWithLeaderChange() throws InterruptedException { + public void testShareFetchWithSubscriptionChangeMultipleNodes() { + buildRequestManager(); + + subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); + Set partitions = new HashSet<>(); + partitions.add(tp0); + partitions.add(tp1); + subscriptions.assignFromSubscribed(Collections.singletonList(tp0)); + + client.updateMetadata( + RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 2), + tp -> validLeaderEpoch, topicIds, false)); + Node nodeId0 = metadata.fetch().nodeById(0); + Node nodeId1 = metadata.fetch().nodeById(1); + Node tp0Leader = metadata.fetch().leaderFor(tp0); + Node tp1Leader = metadata.fetch().leaderFor(tp1); + + assertEquals(nodeId0, tp0Leader); + assertEquals(nodeId1, tp1Leader); + + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, emptyAcquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + acknowledgements.add(1L, AcknowledgeType.RELEASE); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + + // Send acknowledgements via ShareFetch + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); + fetchRecords(); + // Subscription changes. + subscriptions.assignFromSubscribed(Collections.singletonList(tp1)); + + NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); + assertEquals(2, pollResult.unsentRequests.size()); + + ShareFetchRequest.Builder builder1, builder2; + if (pollResult.unsentRequests.get(0).node().get() == nodeId0) { + builder1 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); + builder2 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(1).requestBuilder(); + assertEquals(nodeId1, pollResult.unsentRequests.get(1).node().get()); + } else { + builder1 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(1).requestBuilder(); + builder2 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); + assertEquals(nodeId0, pollResult.unsentRequests.get(1).node().get()); + assertEquals(nodeId1, pollResult.unsentRequests.get(0).node().get()); + } + + // Verify the builder data for node0. 
+ assertEquals(1, builder1.data().topics().size()); + assertEquals(tip0.topicId(), builder1.data().topics().get(0).topicId()); + assertEquals(1, builder1.data().topics().get(0).partitions().size()); + assertEquals(0, builder1.data().topics().get(0).partitions().get(0).partitionIndex()); + assertEquals(1, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().size()); + assertEquals(0L, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).firstOffset()); + assertEquals(2L, builder1.data().topics().get(0).partitions().get(0).acknowledgementBatches().get(0).lastOffset()); + + assertEquals(1, builder1.data().forgottenTopicsData().size()); + assertEquals(tip0.topicId(), builder1.data().forgottenTopicsData().get(0).topicId()); + assertEquals(1, builder1.data().forgottenTopicsData().get(0).partitions().size()); + assertEquals(0, builder1.data().forgottenTopicsData().get(0).partitions().get(0)); + + // Verify the builder data for node1. + assertEquals(1, builder2.data().topics().size()); + assertEquals(tip1.topicId(), builder2.data().topics().get(0).topicId()); + assertEquals(1, builder2.data().topics().get(0).partitions().size()); + assertEquals(1, builder2.data().topics().get(0).partitions().get(0).partitionIndex()); + } + + @Test + public void testShareFetchWithSubscriptionChangeMultipleNodesEmptyAcknowledgements() { + buildRequestManager(); + + subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); + subscriptions.assignFromSubscribed(Collections.singletonList(tp0)); + + client.updateMetadata( + RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 2), + tp -> validLeaderEpoch, topicIds, false)); + Node nodeId0 = metadata.fetch().nodeById(0); + Node nodeId1 = metadata.fetch().nodeById(1); + Node tp0Leader = metadata.fetch().leaderFor(tp0); + Node tp1Leader = metadata.fetch().leaderFor(tp1); + + assertEquals(nodeId0, tp0Leader); + assertEquals(nodeId1, tp1Leader); + + // Send the first ShareFetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + // Prepare an empty response + client.prepareResponse(fullFetchResponse(tip0, records, emptyAcquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + fetchRecords(); + + // Change the subscription. + subscriptions.assignFromSubscribed(Collections.singletonList(tp1)); + + + // Now we will be sending the request to node1 only as leader for tip1 is node1. + // We do not build the request for tip0 as there are no acknowledgements to send. 
+ NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); + assertEquals(1, pollResult.unsentRequests.size()); + assertEquals(nodeId1, pollResult.unsentRequests.get(0).node().get()); + + ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); + + assertEquals(1, builder.data().topics().size()); + assertEquals(tip1.topicId(), builder.data().topics().get(0).topicId()); + assertEquals(1, builder.data().topics().get(0).partitions().size()); + assertEquals(1, builder.data().topics().get(0).partitions().get(0).partitionIndex()); + assertEquals(0, builder.data().forgottenTopicsData().size()); + } + + @Test + public void testRetryAcknowledgementsWithLeaderChange() { buildRequestManager(); subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); @@ -794,6 +1114,60 @@ public void testCallbackHandlerConfig() throws InterruptedException { assertEquals(0, completedAcknowledgements.size()); } + @Test + public void testAcknowledgementCommitCallbackMultiplePartitionCommitAsync() { + buildRequestManager(); + shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + + Set partitions = new HashSet<>(); + partitions.add(tp0); + partitions.add(t2p0); + + assignFromSubscribed(partitions); + + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + LinkedHashMap partitionDataMap = new LinkedHashMap<>(); + partitionDataMap.put(tip0, partitionDataForFetch(tip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); + partitionDataMap.put(t2ip0, partitionDataForFetch(t2ip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); + client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList())); + + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(0L, AcknowledgeType.ACCEPT); + acknowledgements2.add(1L, AcknowledgeType.ACCEPT); + acknowledgements2.add(2L, AcknowledgeType.ACCEPT); + + Map acks = new HashMap<>(); + acks.put(tip0, acknowledgements); + acks.put(t2ip0, acknowledgements2); + + shareConsumeRequestManager.commitAsync(acks); + + assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + + Map errorsMap = new HashMap<>(); + errorsMap.put(tip0, Errors.NONE); + errorsMap.put(t2ip0, Errors.NONE); + client.prepareResponse(fullAcknowledgeResponse(errorsMap)); + + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + // Verifying that the acknowledgement commit callback is invoked for both the partitions. 
+ assertEquals(2, completedAcknowledgements.size()); + assertEquals(1, completedAcknowledgements.get(0).size()); + assertEquals(1, completedAcknowledgements.get(1).size()); + } + @Test public void testMultipleTopicsFetch() { buildRequestManager(); @@ -1091,7 +1465,7 @@ public void testFetchWithLastRecordMissingFromBatch() { new SimpleRecord(null, "value".getBytes())); // Remove the last record to simulate compaction - MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) { + MemoryRecords.FilterResult result = records.filterTo(new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false); @@ -1101,7 +1475,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return record.key() != null; } - }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, ByteBuffer.allocate(1024), BufferSupplier.NO_CACHING); result.outputBuffer().flip(); MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer()); @@ -1389,6 +1763,12 @@ private ShareAcknowledgeResponse fullAcknowledgeResponse(TopicIdPartition tp, Er return ShareAcknowledgeResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); } + private ShareAcknowledgeResponse fullAcknowledgeResponse(Map partitionErrorsMap) { + Map partitions = new HashMap<>(); + partitionErrorsMap.forEach((tip, error) -> partitions.put(tip, partitionDataForAcknowledge(tip, error))); + return ShareAcknowledgeResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); + } + private ShareAcknowledgeResponse fullAcknowledgeResponse(TopicIdPartition tp, Errors error, ShareAcknowledgeResponseData.LeaderIdAndEpoch currentLeader, @@ -1427,7 +1807,7 @@ private ShareAcknowledgeResponseData.PartitionData partitionDataForAcknowledge(T } /** - * Assert that the {@link ShareFetchCollector#collect(ShareFetchBuffer)} latest fetch} does not contain any + * Assert that the {@link ShareFetchCollector#collect(ShareFetchBuffer) latest fetch} does not contain any * {@link ShareFetch#records() user-visible records}, and is {@link ShareFetch#isEmpty() empty}. 
* * @param reason the reason to include for assertion methods such as {@link org.junit.jupiter.api.Assertions#assertTrue(boolean, String)} @@ -1464,7 +1844,7 @@ private void buildRequestManager(MetricConfig metricConfig, Deserializer keyDeserializer, Deserializer valueDeserializer) { LogContext logContext = new LogContext(); - SubscriptionState subscriptionState = new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST); + SubscriptionState subscriptionState = new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST); buildRequestManager(metricConfig, keyDeserializer, valueDeserializer, subscriptionState, logContext); } @@ -1494,7 +1874,7 @@ private void buildRequestManager(MetricConfig metricConfig, subscriptions, fetchConfig, deserializers); - BackgroundEventHandler backgroundEventHandler = new TestableBackgroundEventHandler(completedAcknowledgements); + BackgroundEventHandler backgroundEventHandler = new TestableBackgroundEventHandler(time, completedAcknowledgements); shareConsumeRequestManager = spy(new TestableShareConsumeRequestManager<>( logContext, groupId, @@ -1525,7 +1905,9 @@ private void buildDependencies(MetricConfig metricConfig, properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); ConsumerConfig config = new ConsumerConfig(properties); - networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, new BackgroundEventHandler(new LinkedBlockingQueue<>()))); + networkClientDelegate = spy(new TestableNetworkClientDelegate( + time, config, logContext, client, metadata, + new BackgroundEventHandler(new LinkedBlockingQueue<>(), time, mock(AsyncConsumerMetrics.class)), false)); } private class TestableShareConsumeRequestManager extends ShareConsumeRequestManager { @@ -1558,12 +1940,24 @@ private int sendFetches() { return pollResult.unsentRequests.size(); } + private NetworkClientDelegate.PollResult sendFetchesReturnPollResult() { + fetch(new HashMap<>()); + NetworkClientDelegate.PollResult pollResult = poll(time.milliseconds()); + networkClientDelegate.addAll(pollResult.unsentRequests); + return pollResult; + } + private int sendAcknowledgements() { NetworkClientDelegate.PollResult pollResult = poll(time.milliseconds()); networkClientDelegate.addAll(pollResult.unsentRequests); return pollResult.unsentRequests.size(); } + public ResultHandler buildResultHandler(final AtomicInteger remainingResults, + final Optional>> future) { + return new ResultHandler(remainingResults, future); + } + public Tuple requestStates(int nodeId) { return super.requestStates(nodeId); } @@ -1577,8 +1971,9 @@ public TestableNetworkClientDelegate(Time time, LogContext logContext, KafkaClient client, Metadata metadata, - BackgroundEventHandler backgroundEventHandler) { - super(time, config, logContext, client, metadata, backgroundEventHandler); + BackgroundEventHandler backgroundEventHandler, + boolean notifyMetadataErrorsViaErrorQueue) { + super(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue, mock(AsyncConsumerMetrics.class)); } @Override @@ -1673,8 +2068,8 @@ private void failUnsentRequests(Node node) { private static class TestableBackgroundEventHandler extends BackgroundEventHandler { List> completedAcknowledgements; - public TestableBackgroundEventHandler(List> completedAcknowledgements) { - super(new LinkedBlockingQueue<>()); + public 
TestableBackgroundEventHandler(Time time, List> completedAcknowledgements) { + super(new LinkedBlockingQueue<>(), time, mock(AsyncConsumerMetrics.class)); this.completedAcknowledgements = completedAcknowledgements; } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java index dc06875f7565d..04db229c8df35 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.consumer.AcknowledgementCommitCallback; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; @@ -123,7 +122,7 @@ private ShareConsumerImpl newConsumer(ConsumerConfig config) { new StringDeserializer(), new StringDeserializer(), time, - (a, b, c, d, e, f, g) -> applicationEventHandler, + (a, b, c, d, e, f, g, h) -> applicationEventHandler, a -> backgroundEventReaper, (a, b, c, d, e) -> fetchCollector, backgroundEventQueue @@ -169,7 +168,7 @@ private ShareConsumerImpl newConsumer( @Test public void testSuccessfulStartupShutdown() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); @@ -198,7 +197,7 @@ public void testFailConstructor() { @Test public void testWakeupBeforeCallingPoll() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); final String topicName = "foo"; @@ -216,7 +215,7 @@ public void testWakeupBeforeCallingPoll() { @Test public void testWakeupAfterEmptyFetch() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); final String topicName = "foo"; @@ -235,7 +234,7 @@ public void testWakeupAfterEmptyFetch() { @Test public void testWakeupAfterNonEmptyFetch() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); final String topicName = "foo"; @@ -262,7 +261,7 @@ public void testWakeupAfterNonEmptyFetch() { @Test public void testFailOnClosedConsumer() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); @@ -274,7 +273,7 @@ public void 
testFailOnClosedConsumer() { @Test public void testVerifyApplicationEventOnShutdown() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); @@ -336,7 +335,7 @@ public void testCompleteQuietly() { @Test public void testSubscribeGeneratesEvent() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); String topic = "topic1"; @@ -349,7 +348,7 @@ public void testSubscribeGeneratesEvent() { @Test public void testUnsubscribeGeneratesUnsubscribeEvent() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); @@ -361,7 +360,7 @@ public void testUnsubscribeGeneratesUnsubscribeEvent() { @Test public void testSubscribeToEmptyListActsAsUnsubscribe() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); @@ -461,7 +460,7 @@ private void testInvalidGroupId(final String groupId) { @Test public void testEnsurePollEventSentOnConsumerPoll() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer(subscriptions); final TopicPartition tp = new TopicPartition("topic", 0); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java index fb6a57ac0399e..893840de4c65c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java @@ -137,7 +137,7 @@ public void testErrorInInitialize(RuntimeException expectedException) { subscribeAndAssign(topicAPartition0); // Create a ShareFetchCollector that fails on ShareCompletedFetch initialization. 
- fetchCollector = new ShareFetchCollector(logContext, + fetchCollector = new ShareFetchCollector<>(logContext, metadata, subscriptions, fetchConfig, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java index 549720766858d..430cc3ae84fc8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.metrics.KafkaMetric; @@ -58,6 +59,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.apache.kafka.clients.consumer.internals.ShareHeartbeatRequestManager.SHARE_PROTOCOL_NOT_SUPPORTED_MSG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -67,6 +69,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -363,7 +366,7 @@ public void testNoCoordinator() { @ParameterizedTest @MethodSource("errorProvider") public void testHeartbeatResponseOnErrorHandling(final Errors error, final boolean isFatal) { - // Handling errors on the second heartbeat + // Handling errors on the second heartbeat time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); assertEquals(1, result.unsentRequests.size()); @@ -422,6 +425,46 @@ public void testHeartbeatResponseOnErrorHandling(final Errors error, final boole } } + @ParameterizedTest + @ValueSource(strings = {SHARE_PROTOCOL_NOT_SUPPORTED_MSG}) + public void testUnsupportedVersionGeneratedOnTheBroker(String errorMsg) { + mockResponseWithException(new UnsupportedVersionException(errorMsg), true); + + ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); + verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); + ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); + assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); + assertEquals(errorMsg, errorEvent.error().getMessage()); + clearInvocations(backgroundEventHandler); + } + + @ParameterizedTest + @ValueSource(strings = {SHARE_PROTOCOL_NOT_SUPPORTED_MSG}) + public void testUnsupportedVersionGeneratedOnTheClient(String errorMsg) { + mockResponseWithException(new UnsupportedVersionException(errorMsg), false); + + ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); + verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); + ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); + assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), 
errorEvent.error()); + assertEquals(errorMsg, errorEvent.error().getMessage()); + clearInvocations(backgroundEventHandler); + } + + private void mockResponseWithException(UnsupportedVersionException exception, boolean isFromBroker) { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + + // Manually completing the response to test error handling + when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); + ClientResponse response = createHeartbeatResponseWithException( + result.unsentRequests.get(0), + exception, + isFromBroker); + result.unsentRequests.get(0).handler().onComplete(response); + } + @Test public void testHeartbeatState() { mockJoiningMemberData(); @@ -646,6 +689,27 @@ private ClientResponse createHeartbeatResponse( response); } + private ClientResponse createHeartbeatResponseWithException( + final NetworkClientDelegate.UnsentRequest request, + final UnsupportedVersionException exception, + final boolean isFromClient + ) { + ShareGroupHeartbeatResponse response = null; + if (!isFromClient) { + response = new ShareGroupHeartbeatResponse(null); + } + return new ClientResponse( + new RequestHeader(ApiKeys.SHARE_GROUP_HEARTBEAT, ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion(), "client-id", 1), + request.handler(), + "0", + time.milliseconds(), + time.milliseconds(), + false, + exception, + null, + response); + } + private ConsumerConfig config() { Properties prop = new Properties(); prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -655,7 +719,6 @@ private ConsumerConfig config() { prop.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS)); prop.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(DEFAULT_RETRY_BACKOFF_MS)); prop.setProperty(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, String.valueOf(DEFAULT_RETRY_BACKOFF_MAX_MS)); - prop.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_HEARTBEAT_INTERVAL_MS)); return new ConsumerConfig(prop); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java index 36e482507141e..7c4c5684bcce0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java @@ -1100,6 +1100,7 @@ public void testRevokePartitionsUsesTopicNamesLocalCacheWhenMetadataNotAvailable verifyReconciliationNotTriggered(membershipManager); membershipManager.poll(time.milliseconds()); + verify(subscriptionState).markPendingRevocation(Set.of()); // Member should complete reconciliation assertEquals(MemberState.ACKNOWLEDGING, membershipManager.state()); @@ -1123,6 +1124,7 @@ public void testRevokePartitionsUsesTopicNamesLocalCacheWhenMetadataNotAvailable receiveAssignment(topicId, Collections.singletonList(1), membershipManager); membershipManager.poll(time.milliseconds()); + verify(subscriptionState, times(2)).markPendingRevocation(Set.of(new TopicPartition(topicName, 0))); // Revocation should complete without requesting any metadata update given that the topic // received in target assignment should exist in local topic name cache. 
@@ -1423,7 +1425,6 @@ private void testRevocationCompleted(ShareMembershipManager membershipManager, assertEquals(assignmentByTopicId, membershipManager.currentAssignment().partitions); assertFalse(membershipManager.reconciliationInProgress()); - verify(subscriptionState).markPendingRevocation(anySet()); List expectedTopicPartitionAssignment = buildTopicPartitions(expectedCurrentAssignment); HashSet expectedSet = new HashSet<>(expectedTopicPartitionAssignment); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java index df8a61a750384..0ce2f349f98ee 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicIdPartition; @@ -41,6 +42,7 @@ import static org.apache.kafka.common.requests.ShareRequestMetadata.INITIAL_EPOCH; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -415,6 +417,39 @@ public void testAddNewIdAfterTopicRemovedFromSession() { assertEquals(2, requestData3.shareSessionEpoch(), "Did not have the correct session epoch"); } + @Test + public void testNextAcknowledgementsClearedOnInvalidRequest() { + String groupId = "G1"; + Uuid memberId = Uuid.randomUuid(); + ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + + Map topicIds = new HashMap<>(); + Map topicNames = new HashMap<>(); + Uuid fooId = addTopicId(topicIds, topicNames, "foo"); + TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + + handler.addPartitionToFetch(foo0, acknowledgements); + + // As we start with a ShareAcknowledge on epoch 0, we expect a null response. + assertNull(handler.newShareAcknowledgeBuilder(groupId, fetchConfig)); + + // Attempt a new ShareFetch + TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); + handler.addPartitionToFetch(foo1, null); + ShareFetchRequestData requestData = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); + + // We should have cleared the unsent acknowledgements before this ShareFetch. + assertEquals(0, requestData.topics().get(0).partitions().get(0).acknowledgementBatches().size()); + + ArrayList expectedToSend1 = new ArrayList<>(); + expectedToSend1.add(new TopicIdPartition(fooId, 1, "foo")); + assertListEquals(expectedToSend1, reqFetchList(requestData, topicNames)); + assertEquals(memberId.toString(), requestData.memberId()); + } + private Uuid addTopicId(Map topicIds, Map topicNames, String name) { // If the same topic name is added more than once, the latest mapping will be in the // topicIds, but all mappings will be in topicNames. This is needed in the replace tests. 
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java index 3bde4367cac84..f697990b54425 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java @@ -21,7 +21,7 @@ import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.SubscriptionState.LogTruncation; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; @@ -53,7 +53,7 @@ public class SubscriptionStateTest { - private SubscriptionState state = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + private SubscriptionState state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final String topic = "test"; private final String topic1 = "test1"; private final TopicPartition tp0 = new TopicPartition(topic, 0); @@ -398,6 +398,40 @@ public void patternSubscription() { assertEquals(2, state.subscription().size(), "Expected subscribed topics count is incorrect"); } + @Test + public void testSubscribeToRe2JPattern() { + String pattern = "t.*"; + state.subscribe(new SubscriptionPattern(pattern), Optional.of(rebalanceListener)); + assertTrue(state.toString().contains("type=AUTO_PATTERN_RE2J")); + assertTrue(state.toString().contains("subscribedPattern=" + pattern)); + } + + @Test + public void testMixedPatternSubscriptionNotAllowed() { + state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); + assertThrows(IllegalStateException.class, () -> state.subscribe(new SubscriptionPattern("t.*"), + Optional.of(rebalanceListener))); + + state.unsubscribe(); + + state.subscribe(new SubscriptionPattern("t.*"), Optional.of(rebalanceListener)); + assertThrows(IllegalStateException.class, () -> state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener))); + } + + @Test + public void testSubscriptionPattern() { + SubscriptionPattern pattern = new SubscriptionPattern("t.*"); + state.subscribe(pattern, Optional.of(rebalanceListener)); + assertTrue(state.hasRe2JPatternSubscription()); + assertEquals(pattern, state.subscriptionPattern()); + assertTrue(state.hasAutoAssignedPartitions()); + + state.unsubscribe(); + assertFalse(state.hasRe2JPatternSubscription()); + assertNull(state.subscriptionPattern()); + } + + @Test public void unsubscribeUserAssignment() { state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); @@ -587,7 +621,7 @@ public void testOffsetResetWhileAwaitingValidation() { new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); assertTrue(state.awaitingValidation(tp0)); - state.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + state.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); assertFalse(state.awaitingValidation(tp0)); assertTrue(state.isOffsetResetNeeded(tp0)); } @@ -734,7 +768,7 @@ public void testTruncationDetectionWithResetPolicy() { @Test public void testTruncationDetectionWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); - state = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); 
+ state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; @@ -764,7 +798,7 @@ public void testTruncationDetectionWithoutResetPolicy() { @Test public void testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); - state = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; @@ -783,13 +817,13 @@ public void testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { assertEquals(Optional.empty(), truncationOpt); assertFalse(state.awaitingValidation(tp0)); assertTrue(state.isOffsetResetNeeded(tp0)); - assertEquals(OffsetResetStrategy.EARLIEST, state.resetStrategy(tp0)); + assertEquals(AutoOffsetResetStrategy.EARLIEST, state.resetStrategy(tp0)); } @Test public void testTruncationDetectionUnknownDivergentOffsetWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); - state = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); + state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; @@ -841,7 +875,7 @@ public void resetOffsetNoValidation() { state.assignFromUser(Collections.singleton(tp0)); // Reset offsets - state.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + state.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); // Attempt to validate with older API version, should do nothing ApiVersions oldApis = new ApiVersions(); @@ -866,7 +900,7 @@ public void resetOffsetNoValidation() { assertFalse(state.isOffsetResetNeeded(tp0)); // Reset again, and complete it with a seek that would normally require validation - state.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); + state.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(10), new Metadata.LeaderAndEpoch( Optional.of(broker1), Optional.of(2)))); // We are now in AWAIT_VALIDATION diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java index e5b2833b154b5..ce2aa86e3e26e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.MockClient; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; @@ -59,7 +58,7 @@ public class TopicMetadataFetcherTest { private final String topicName = "test"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap() { + private final Map topicIds = new HashMap<>() { { put(topicName, topicId); } @@ -240,7 +239,7 @@ private void buildFetcher() { long retryBackoffMs = 100; long retryBackoffMaxMs = 1000; LogContext logContext = new LogContext(); - SubscriptionState subscriptionState = new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST); + SubscriptionState subscriptionState = new 
SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST); buildDependencies(metricConfig, metadataExpireMs, subscriptionState, logContext); topicMetadataFetcher = new TopicMetadataFetcher(logContext, consumerClient, retryBackoffMs, retryBackoffMaxMs); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java index a16f9612c741d..911c028f728b7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java @@ -19,7 +19,8 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.SubscriptionPattern; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.CommitRequestManager; import org.apache.kafka.clients.consumer.internals.ConsumerHeartbeatRequestManager; import org.apache.kafka.clients.consumer.internals.ConsumerMembershipManager; @@ -65,6 +66,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -192,7 +194,7 @@ public void testAssignmentChangeEventWithException() { @Test public void testResetOffsetEvent() { Collection tp = Collections.singleton(new TopicPartition("topic", 0)); - OffsetResetStrategy strategy = OffsetResetStrategy.LATEST; + AutoOffsetResetStrategy strategy = AutoOffsetResetStrategy.LATEST; ResetOffsetEvent event = new ResetOffsetEvent(tp, strategy, 12345); setupProcessor(false); @@ -288,7 +290,7 @@ tp2, new OffsetAndMetadata(20L, Optional.of(3), "") @Test public void testTopicSubscriptionChangeEventWithIllegalSubscriptionState() { - subscriptionState = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); Optional listener = Optional.of(new MockRebalanceListener()); TopicSubscriptionChangeEvent event = new TopicSubscriptionChangeEvent( Set.of("topic1", "topic2"), listener, 12345); @@ -333,9 +335,33 @@ public void testTopicPatternSubscriptionChangeEvent() { assertDoesNotThrow(() -> event.future().get()); } + @Test + public void testTopicPatternSubscriptionTriggersJoin() { + TopicPatternSubscriptionChangeEvent event = new TopicPatternSubscriptionChangeEvent( + Pattern.compile("topic.*"), Optional.of(new MockRebalanceListener()), 12345); + setupProcessor(true); + Cluster cluster = mock(Cluster.class); + when(metadata.fetch()).thenReturn(cluster); + when(heartbeatRequestManager.membershipManager()).thenReturn(membershipManager); + + // Initial subscription where no topics match the pattern. Membership manager + // should still be notified so it joins if not in the group (with empty subscription). 
+ when(subscriptionState.subscribeFromPattern(any())).thenReturn(false); + processor.process(event); + verify(membershipManager).onSubscriptionUpdated(); + + clearInvocations(membershipManager); + + // Subscription where some topics match so subscription is updated. Membership manager + // should be notified so it joins if not in the group. + when(subscriptionState.subscribeFromPattern(any())).thenReturn(true); + processor.process(event); + verify(membershipManager).onSubscriptionUpdated(); + } + @Test public void testTopicPatternSubscriptionChangeEventWithIllegalSubscriptionState() { - subscriptionState = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); Optional listener = Optional.of(new MockRebalanceListener()); TopicPatternSubscriptionChangeEvent event = new TopicPatternSubscriptionChangeEvent( Pattern.compile("topic.*"), listener, 12345); @@ -355,11 +381,10 @@ public void testUpdatePatternSubscriptionEventOnlyTakesEffectWhenMetadataHasNewV UpdatePatternSubscriptionEvent event1 = new UpdatePatternSubscriptionEvent(12345); setupProcessor(true); - + when(subscriptionState.hasPatternSubscription()).thenReturn(true); when(metadata.updateVersion()).thenReturn(0); processor.process(event1); - verify(subscriptionState, never()).hasPatternSubscription(); assertDoesNotThrow(() -> event1.future().get()); Cluster cluster = mock(Cluster.class); @@ -377,13 +402,46 @@ public void testUpdatePatternSubscriptionEventOnlyTakesEffectWhenMetadataHasNewV UpdatePatternSubscriptionEvent event2 = new UpdatePatternSubscriptionEvent(12345); processor.process(event2); verify(metadata).requestUpdateForNewTopics(); - verify(subscriptionState).hasPatternSubscription(); verify(subscriptionState).subscribeFromPattern(topics); assertEquals(1, processor.metadataVersionSnapshot()); verify(membershipManager).onSubscriptionUpdated(); assertDoesNotThrow(() -> event2.future().get()); } + @Test + public void testR2JPatternSubscriptionEventSuccess() { + SubscriptionPattern pattern = new SubscriptionPattern("t*"); + Optional listener = Optional.of(mock(ConsumerRebalanceListener.class)); + TopicRe2JPatternSubscriptionChangeEvent event = + new TopicRe2JPatternSubscriptionChangeEvent(pattern, listener, 12345); + + setupProcessor(true); + processor.process(event); + + verify(subscriptionState).subscribe(pattern, listener); + verify(subscriptionState, never()).subscribeFromPattern(any()); + verify(membershipManager).onSubscriptionUpdated(); + assertDoesNotThrow(() -> event.future().get()); + } + + @Test + public void testR2JPatternSubscriptionEventFailureWithMixedSubscriptionType() { + SubscriptionPattern pattern = new SubscriptionPattern("t*"); + Optional listener = Optional.of(mock(ConsumerRebalanceListener.class)); + TopicRe2JPatternSubscriptionChangeEvent event = + new TopicRe2JPatternSubscriptionChangeEvent(pattern, listener, 12345); + Exception mixedSubscriptionError = new IllegalStateException("Subscription to topics, partitions and " + + "pattern are mutually exclusive"); + doThrow(mixedSubscriptionError).when(subscriptionState).subscribe(pattern, listener); + + setupProcessor(true); + processor.process(event); + + verify(subscriptionState).subscribe(pattern, listener); + Exception thrown = assertFutureThrows(event.future(), mixedSubscriptionError.getClass()); + assertEquals(mixedSubscriptionError, thrown); + } + @ParameterizedTest @MethodSource("offsetsGenerator") public void testSyncCommitEvent(Optional> 
offsets) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaperTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaperTest.java index eabcb8773e17f..71e44631ee8c8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaperTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaperTest.java @@ -49,7 +49,7 @@ public void testExpired() { // Without any time passing, we check the reaper and verify that the event is not done amd is still // being tracked. - reaper.reap(time.milliseconds()); + assertEquals(0, reaper.reap(time.milliseconds())); assertFalse(event.future().isDone()); assertEquals(1, reaper.size()); @@ -62,7 +62,7 @@ public void testExpired() { // Call the reaper and validate that the event is now "done" (expired), the correct exception type is // thrown, and the event is no longer tracked. - reaper.reap(time.milliseconds()); + assertEquals(1, reaper.reap(time.milliseconds())); assertTrue(event.future().isDone()); assertThrows(TimeoutException.class, () -> ConsumerUtils.getResult(event.future())); assertEquals(0, reaper.size()); @@ -77,7 +77,7 @@ public void testCompleted() { // Without any time passing, we check the reaper and verify that the event is not done amd is still // being tracked. - reaper.reap(time.milliseconds()); + assertEquals(0, reaper.reap(time.milliseconds())); assertFalse(event.future().isDone()); assertEquals(1, reaper.size()); @@ -91,7 +91,7 @@ public void testCompleted() { time.sleep(timeoutMs + 1); // Call the reaper and validate that the event is not considered expired, but is still no longer tracked. - reaper.reap(time.milliseconds()); + assertEquals(0, reaper.reap(time.milliseconds())); assertTrue(event.future().isDone()); assertNull(ConsumerUtils.getResult(event.future())); assertEquals(0, reaper.size()); @@ -108,7 +108,7 @@ public void testCompletedAndExpired() { // Without any time passing, we check the reaper and verify that the event is not done amd is still // being tracked. - reaper.reap(time.milliseconds()); + assertEquals(0, reaper.reap(time.milliseconds())); assertFalse(event1.future().isDone()); assertFalse(event2.future().isDone()); assertEquals(2, reaper.size()); @@ -124,7 +124,7 @@ public void testCompletedAndExpired() { // Validate that the first (completed) event is not expired, but the second one is expired. In either case, // both should be completed and neither should be tracked anymore. - reaper.reap(time.milliseconds()); + assertEquals(1, reaper.reap(time.milliseconds())); assertTrue(event1.future().isDone()); assertTrue(event2.future().isDone()); assertNull(ConsumerUtils.getResult(event1.future())); @@ -150,7 +150,7 @@ public void testIncompleteQueue() { assertEquals(2, queue.size()); // Go ahead and reap the incomplete from the queue. - reaper.reap(queue); + assertEquals(1, reaper.reap(queue)); // The first event was completed, so we didn't expire it in the reaper. assertTrue(event1.future().isDone()); @@ -186,7 +186,7 @@ public void testIncompleteTracked() { assertEquals(2, reaper.size()); // Go ahead and reap the incomplete events. Both sets should be zero after that. 
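
Annotation: throughout CompletableEventReaperTest the bare reaper.reap(...) calls are replaced with assertions on the returned count. A hedged sketch of the contract those assertions appear to rely on, as a simplified hypothetical reaper (not the real CompletableEventReaper):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    import org.apache.kafka.common.errors.TimeoutException;

    // Sketch: reap() completes overdue, still-incomplete events exceptionally, drops
    // everything that is done from tracking, and returns how many events it expired.
    class ReaperSketch {
        private static final class Tracked {
            final CompletableFuture<?> future;
            final long deadlineMs;
            Tracked(CompletableFuture<?> future, long deadlineMs) {
                this.future = future;
                this.deadlineMs = deadlineMs;
            }
        }

        private final List<Tracked> tracked = new ArrayList<>();

        void add(CompletableFuture<?> future, long deadlineMs) {
            tracked.add(new Tracked(future, deadlineMs));
        }

        long reap(long nowMs) {
            long expired = 0;
            Iterator<Tracked> it = tracked.iterator();
            while (it.hasNext()) {
                Tracked t = it.next();
                if (!t.future.isDone() && t.deadlineMs <= nowMs) {
                    t.future.completeExceptionally(new TimeoutException("event expired"));
                    expired++;
                }
                if (t.future.isDone()) {
                    it.remove();   // completed and expired events alike stop being tracked
                }
            }
            return expired;
        }
    }
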
- reaper.reap(queue); + assertEquals(1, reaper.reap(queue)); assertEquals(0, reaper.size()); assertEquals(0, queue.size()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java new file mode 100644 index 0000000000000..2913bcfad70f1 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals.metrics; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Metrics; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.HashSet; + +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AsyncConsumerMetricsTest { + private static final long METRIC_VALUE = 123L; + + private final Metrics metrics = new Metrics(); + private AsyncConsumerMetrics consumerMetrics; + + @AfterEach + public void tearDown() { + if (consumerMetrics != null) { + consumerMetrics.close(); + } + metrics.close(); + } + + @Test + public void shouldMetricNames() { + // create + consumerMetrics = new AsyncConsumerMetrics(metrics); + HashSet expectedMetrics = new HashSet<>(Arrays.asList( + metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP), + metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP), + metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP) + )); + expectedMetrics.forEach( + metricName -> assertTrue( + metrics.metrics().containsKey(metricName), + "Missing metric: " + metricName + ) + ); + + HashSet expectedConsumerMetrics = new HashSet<>(Arrays.asList( + metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-processing-time-avg", 
CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-processing-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-processing-time-max", CONSUMER_METRIC_GROUP) + )); + expectedConsumerMetrics.forEach( + metricName -> assertTrue( + metrics.metrics().containsKey(metricName), + "Missing metric: " + metricName + ) + ); + + // close + consumerMetrics.close(); + expectedMetrics.forEach( + metricName -> assertFalse( + metrics.metrics().containsKey(metricName), + "Metric present after close: " + metricName + ) + ); + expectedConsumerMetrics.forEach( + metricName -> assertFalse( + metrics.metrics().containsKey(metricName), + "Metric present after close: " + metricName + ) + ); + } + + @Test + public void shouldRecordTimeBetweenNetworkThreadPoll() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordTimeBetweenNetworkThreadPoll(METRIC_VALUE); + + // Then: + assertMetricValue("time-between-network-thread-poll-avg"); + assertMetricValue("time-between-network-thread-poll-max"); + } + + @Test + public void shouldRecordApplicationEventQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordApplicationEventQueueSize(10); + + // Then: + assertEquals( + metrics.metric( + metrics.metricName( + "application-event-queue-size", + CONSUMER_METRIC_GROUP + ) + ).metricValue(), + (double) 10 + ); + } + + @Test + public void shouldRecordApplicationEventQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordApplicationEventQueueTime(METRIC_VALUE); + + // Then: + assertMetricValue("application-event-queue-time-avg"); + assertMetricValue("application-event-queue-time-max"); + } + + @Test + public void shouldRecordApplicationEventQueueProcessingTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordApplicationEventQueueProcessingTime(METRIC_VALUE); + + // Then: + assertMetricValue("application-event-queue-processing-time-avg"); + assertMetricValue("application-event-queue-processing-time-max"); + } + + @Test + public void shouldRecordUnsentRequestsQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordUnsentRequestsQueueSize(10, 100); + + // Then: + assertEquals( + metrics.metric( + metrics.metricName( + "unsent-requests-queue-size", + CONSUMER_METRIC_GROUP + ) + ).metricValue(), + (double) 10 + ); + } + + @Test + public void shouldRecordUnsentRequestsQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordUnsentRequestsQueueTime(METRIC_VALUE); + + // Then: + assertMetricValue("unsent-requests-queue-time-avg"); + assertMetricValue("unsent-requests-queue-time-max"); + } + + @Test + public void shouldRecordBackgroundEventQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordBackgroundEventQueueSize(10); + + 
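
Annotation: the AsyncConsumerMetricsTest cases in this new file all follow the same shape: record a value through the metrics object, then read the corresponding -avg/-max (or gauge) metric back by name and compare its metricValue(). A small standalone sketch of that pattern against the public Metrics API; the sensor and metric names below are illustrative, not the ones the consumer actually registers:

    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.metrics.Sensor;
    import org.apache.kafka.common.metrics.stats.Avg;
    import org.apache.kafka.common.metrics.stats.Max;

    public class QueueTimeMetricSketch {
        public static void main(String[] args) {
            try (Metrics metrics = new Metrics()) {
                String group = "consumer-metrics";
                Sensor sensor = metrics.sensor("event-queue-time");
                sensor.add(metrics.metricName("event-queue-time-avg", group), new Avg());
                sensor.add(metrics.metricName("event-queue-time-max", group), new Max());

                // "When": record a sample, as the test helpers do.
                sensor.record(123L);

                // "Then": read the metric back by name and inspect its value.
                MetricName avg = metrics.metricName("event-queue-time-avg", group);
                System.out.println(metrics.metric(avg).metricValue()); // 123.0
            }
        }
    }
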
// Then: + assertEquals( + metrics.metric( + metrics.metricName( + "background-event-queue-size", + CONSUMER_METRIC_GROUP + ) + ).metricValue(), + (double) 10 + ); + } + + @Test + public void shouldRecordBackgroundEventQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordBackgroundEventQueueTime(METRIC_VALUE); + + // Then: + assertMetricValue("background-event-queue-time-avg"); + assertMetricValue("background-event-queue-time-max"); + } + + @Test + public void shouldRecordBackgroundEventQueueProcessingTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); + // When: + consumerMetrics.recordBackgroundEventQueueProcessingTime(METRIC_VALUE); + + // Then: + assertMetricValue("background-event-queue-processing-time-avg"); + assertMetricValue("background-event-queue-processing-time-avg"); + } + + private void assertMetricValue(final String name) { + assertEquals( + metrics.metric( + metrics.metricName( + name, + CONSUMER_METRIC_GROUP + ) + ).metricValue(), + (double) METRIC_VALUE + ); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 6db988e3d2224..0f497b7936d4c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.producer; +import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.LeastLoadedNode; @@ -50,10 +51,14 @@ import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; +import org.apache.kafka.common.message.ApiVersionsResponseData; import org.apache.kafka.common.message.EndTxnResponseData; import org.apache.kafka.common.message.InitProducerIdResponseData; import org.apache.kafka.common.message.TxnOffsetCommitRequestData; import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; @@ -79,6 +84,7 @@ import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetrySender; import org.apache.kafka.common.utils.KafkaThread; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -88,12 +94,14 @@ import org.apache.kafka.test.MockSerializer; import org.apache.kafka.test.TestUtils; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.CallsRealMethods; import java.lang.management.ManagementFactory; @@ -144,6 +152,7 @@ import static 
org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.notNull; +import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.never; @@ -188,7 +197,7 @@ private static KafkaProducer kafkaProducer(Map conf ProducerInterceptors interceptors, Time time) { return new KafkaProducer<>(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)), - keySerializer, valueSerializer, metadata, kafkaClient, interceptors, time); + keySerializer, valueSerializer, metadata, kafkaClient, interceptors, new ApiVersions(), time); } @BeforeEach @@ -412,21 +421,16 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), "max.in.flight.requests.per.connection should be overwritten"); - Properties validProps2 = new Properties() {{ + Properties invalidProps1 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); }}; - config = new ProducerConfig(validProps2); - assertFalse( - config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), - "idempotence should be disabled when `max.in.flight.requests.per.connection` is greater than 5 and " + - "`enable.idempotence` config is unset."); - assertEquals( - 6, - config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), - "`max.in.flight.requests.per.connection` should be set with overridden value"); - Properties invalidProps = new Properties() {{ + ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(invalidProps1)); + assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + + " must be set to at most 5. 
Current value is 6.", configException.getMessage()); + + Properties invalidProps2 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); @@ -434,10 +438,10 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps), + () -> new ProducerConfig(invalidProps2), "Cannot set a transactional.id without also enabling idempotence"); - Properties invalidProps2 = new Properties() {{ + Properties invalidProps3 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); // explicitly enabling idempotence should still throw exception @@ -445,17 +449,17 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps2), + () -> new ProducerConfig(invalidProps3), "Must set max.in.flight.requests.per.connection to at most 5 when using the idempotent producer."); - Properties invalidProps3 = new Properties() {{ + Properties invalidProps4 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps3), + () -> new ProducerConfig(invalidProps4), "Must set retries to non-zero when using the idempotent producer."); } @@ -753,9 +757,9 @@ public LeastLoadedNode leastLoadedNode(long now) { } }; - return new KafkaProducer( + return new KafkaProducer<>( new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, new StringSerializer(), new StringSerializer())), - new StringSerializer(), new StringSerializer(), metadata, mockClient, null, time) { + new StringSerializer(), new StringSerializer(), metadata, mockClient, null, new ApiVersions(), time) { @Override Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadata metadata) { // give Sender its own Metadata instance so that we can isolate Metadata calls from KafkaProducer @@ -1452,7 +1456,7 @@ public void testCommitTransactionWithRecordTooLargeException() throws Exception client.prepareResponse(endTxnResponse(Errors.NONE)); producer.beginTransaction(); - TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class); + TestUtils.assertFutureThrows(producer.send(largeRecord), RecordTooLargeException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @@ -1489,7 +1493,7 @@ public void testCommitTransactionWithMetadataTimeoutForMissingTopic() throws Exc producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureError(producer.send(record), TimeoutException.class); + TestUtils.assertFutureThrows(producer.send(record), TimeoutException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @@ -1526,7 +1530,7 @@ public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() thr producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureError(producer.send(record), TimeoutException.class); + TestUtils.assertFutureThrows(producer.send(record), TimeoutException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @@ -1565,7 +1569,7 @@ public void testCommitTransactionWithSendToInvalidTopic() throws Exception { 
producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureError(producer.send(record), InvalidTopicException.class); + TestUtils.assertFutureThrows(producer.send(record), InvalidTopicException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @@ -1607,6 +1611,121 @@ public void testSendTxnOffsetsWithGroupId() { } } + @Test + public void testSendTxnOffsetsWithGroupIdTransactionV2() { + Properties properties = new Properties(); + properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); + properties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000); + properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); + properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + + Time time = new MockTime(1); + MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)); + ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE); + + MockClient client = new MockClient(time, metadata); + client.updateMetadata(initialUpdateResponse); + + Node node = metadata.fetch().nodes().get(0); + client.setNodeApiVersions(NodeApiVersions.create()); + NodeApiVersions nodeApiVersions = new NodeApiVersions(NodeApiVersions.create().allSupportedApiVersions().values(), + Arrays.asList(new ApiVersionsResponseData.SupportedFeatureKey() + .setName("transaction.version") + .setMaxVersion((short) 2) + .setMinVersion((short) 0)), + Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() + .setName("transaction.version") + .setMaxVersionLevel((short) 2) + .setMinVersionLevel((short) 2)), + 0); + client.setNodeApiVersions(nodeApiVersions); + ApiVersions apiVersions = new ApiVersions(); + apiVersions.update(NODE.idString(), nodeApiVersions); + + client.throttle(node, 5000); + + client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE)); + client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); + client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE)); + String groupId = "group"; + client.prepareResponse(request -> + ((TxnOffsetCommitRequest) request).data().groupId().equals(groupId), + txnOffsetsCommitResponse(Collections.singletonMap( + new TopicPartition("topic", 0), Errors.NONE))); + client.prepareResponse(endTxnResponse(Errors.NONE)); + + try (KafkaProducer producer = new KafkaProducer<>( + new ProducerConfig(properties), new StringSerializer(), new StringSerializer(), metadata, client, + new ProducerInterceptors<>(Collections.emptyList()), apiVersions, time)) { + producer.initTransactions(); + producer.beginTransaction(); + producer.sendOffsetsToTransaction(Collections.singletonMap( + new TopicPartition("topic", 0), + new OffsetAndMetadata(5L)), + new ConsumerGroupMetadata(groupId)); + producer.commitTransaction(); + } + } + + @Test + public void testTransactionV2Produce() throws Exception { + StringSerializer serializer = new StringSerializer(); + KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); + + String topic = "foo"; + TopicPartition topicPartition = new TopicPartition(topic, 0); + Cluster cluster = TestUtils.singletonCluster(topic, 1); + + when(ctx.sender.isRunning()).thenReturn(true); + when(ctx.metadata.fetch()).thenReturn(cluster); + + long timestamp = ctx.time.milliseconds(); + ProducerRecord record = new 
ProducerRecord<>(topic, 0, timestamp, "key", "value"); + + Properties props = new Properties(); + props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + props.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some-txn"); + props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + ProducerConfig config = new ProducerConfig(props); + + Time time = new MockTime(1); + MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1)); + ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE); + MockClient client = new MockClient(time, metadata); + client.updateMetadata(initialUpdateResponse); + NodeApiVersions nodeApiVersions = new NodeApiVersions(NodeApiVersions.create().allSupportedApiVersions().values(), + Arrays.asList(new ApiVersionsResponseData.SupportedFeatureKey() + .setName("transaction.version") + .setMaxVersion((short) 2) + .setMinVersion((short) 0)), + Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() + .setName("transaction.version") + .setMaxVersionLevel((short) 2) + .setMinVersionLevel((short) 2)), + 0); + client.setNodeApiVersions(nodeApiVersions); + ApiVersions apiVersions = new ApiVersions(); + apiVersions.update(NODE.idString(), nodeApiVersions); + + ProducerInterceptors interceptor = new ProducerInterceptors<>(Collections.emptyList()); + + client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some-txn", NODE)); + client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); + client.prepareResponse(produceResponse(topicPartition, 1L, Errors.NONE, 0, 1)); + client.prepareResponse(endTxnResponse(Errors.NONE)); + + try (KafkaProducer producer = new KafkaProducer<>( + config, new StringSerializer(), new StringSerializer(), metadata, client, interceptor, apiVersions, time) + ) { + producer.initTransactions(); + producer.beginTransaction(); + producer.send(record).get(); + producer.commitTransaction(); + } + } + private void assertDurationAtLeast(KafkaProducer producer, String name, double floor) { getAndAssertDurationAtLeast(producer, name, floor); } @@ -1887,7 +2006,7 @@ public void testSendToInvalidTopic() throws Exception { assertEquals(Collections.singleton(invalidTopicName), metadata.fetch().invalidTopics(), "Cluster has incorrect invalid topic list."); - TestUtils.assertFutureError(future, InvalidTopicException.class); + TestUtils.assertFutureThrows(future, InvalidTopicException.class); producer.close(Duration.ofMillis(0)); } @@ -2093,7 +2212,7 @@ public void testUnusedConfigs() { assertTrue(config.unused().contains(SslConfigs.SSL_PROTOCOL_CONFIG)); try (KafkaProducer producer = new KafkaProducer<>(config, null, null, - null, null, null, Time.SYSTEM)) { + null, null, null, null, Time.SYSTEM)) { assertTrue(config.unused().contains(SslConfigs.SSL_PROTOCOL_CONFIG)); } } @@ -2152,6 +2271,34 @@ public void testCallbackAndInterceptorHandleError() { } } + @Test + public void shouldNotInvokeFlushInCallback() { + Map configs = new HashMap<>(); + configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); + // only test in idempotence disabled producer for simplicity + configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false); + + Time time = new MockTime(1); + MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)); + ProducerMetadata metadata = 
newMetadata(0, 0, Long.MAX_VALUE); + + MockClient client = new MockClient(time, metadata); + client.updateMetadata(initialUpdateResponse); + AtomicReference kafkaException = new AtomicReference<>(); + + try (Producer producer = kafkaProducer(configs, new StringSerializer(), + new StringSerializer(), metadata, client, null, time)) { + producer.send( + new ProducerRecord<>("topic", "value"), + (recordMetadata, exception) -> kafkaException.set(assertThrows(KafkaException.class, producer::flush)) + ); + } + + assertNotNull(kafkaException.get()); + assertEquals("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock.", + kafkaException.get().getMessage()); + } + @Test public void negativePartitionShouldThrow() { Map configs = new HashMap<>(); @@ -2194,40 +2341,6 @@ public void testPartitionAddedToTransaction() throws Exception { } } - @SuppressWarnings("deprecation") - @Test - public void testPartitionAddedToTransactionAfterFullBatchRetry() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - String topic = "foo"; - TopicPartition topicPartition0 = new TopicPartition(topic, 0); - TopicPartition topicPartition1 = new TopicPartition(topic, 1); - Cluster cluster = TestUtils.singletonCluster(topic, 2); - - when(ctx.sender.isRunning()).thenReturn(true); - when(ctx.metadata.fetch()).thenReturn(cluster); - - long timestamp = ctx.time.milliseconds(); - ProducerRecord record = new ProducerRecord<>(topic, null, timestamp, "key", "value"); - - FutureRecordMetadata future = expectAppendWithAbortForNewBatch( - ctx, - record, - topicPartition0, - topicPartition1, - cluster - ); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - assertEquals(future, producer.send(record)); - assertFalse(future.isDone()); - verify(ctx.partitioner).onNewBatch(topic, cluster, 0); - verify(ctx.transactionManager, never()).maybeAddPartition(topicPartition0); - verify(ctx.transactionManager).maybeAddPartition(topicPartition1); - } - } - private FutureRecordMetadata expectAppend( KafkaProducerTestContext ctx, ProducerRecord record, @@ -2266,7 +2379,6 @@ private FutureRecordMetadata expectAppend( eq(Record.EMPTY_HEADERS), // 5 any(RecordAccumulator.AppendCallbacks.class), // 6 <-- anyLong(), - eq(true), anyLong(), any() )).thenAnswer(invocation -> { @@ -2277,96 +2389,12 @@ private FutureRecordMetadata expectAppend( futureRecordMetadata, false, false, - false, 0); }); return futureRecordMetadata; } - private FutureRecordMetadata expectAppendWithAbortForNewBatch( - KafkaProducerTestContext ctx, - ProducerRecord record, - TopicPartition initialSelectedPartition, - TopicPartition retrySelectedPartition, - Cluster cluster - ) throws InterruptedException { - byte[] serializedKey = ctx.serializer.serialize(topic, record.key()); - byte[] serializedValue = ctx.serializer.serialize(topic, record.value()); - long timestamp = record.timestamp() == null ? 
ctx.time.milliseconds() : record.timestamp(); - - ProduceRequestResult requestResult = new ProduceRequestResult(retrySelectedPartition); - FutureRecordMetadata futureRecordMetadata = new FutureRecordMetadata( - requestResult, - 0, - timestamp, - serializedKey.length, - serializedValue.length, - ctx.time - ); - - when(ctx.partitioner.partition( - initialSelectedPartition.topic(), - record.key(), - serializedKey, - record.value(), - serializedValue, - cluster - )).thenReturn(initialSelectedPartition.partition()) - .thenReturn(retrySelectedPartition.partition()); - - when(ctx.accumulator.append( - eq(initialSelectedPartition.topic()), // 0 - eq(initialSelectedPartition.partition()), // 1 - eq(timestamp), // 2 - eq(serializedKey), // 3 - eq(serializedValue), // 4 - eq(Record.EMPTY_HEADERS), // 5 - any(RecordAccumulator.AppendCallbacks.class), // 6 <-- - anyLong(), - eq(true), // abortOnNewBatch - anyLong(), - any() - )).thenAnswer(invocation -> { - RecordAccumulator.AppendCallbacks callbacks = - (RecordAccumulator.AppendCallbacks) invocation.getArguments()[6]; - callbacks.setPartition(initialSelectedPartition.partition()); - return new RecordAccumulator.RecordAppendResult( - null, - false, - false, - true, - 0); - }); - - when(ctx.accumulator.append( - eq(retrySelectedPartition.topic()), // 0 - eq(retrySelectedPartition.partition()), // 1 - eq(timestamp), // 2 - eq(serializedKey), // 3 - eq(serializedValue), // 4 - eq(Record.EMPTY_HEADERS), // 5 - any(RecordAccumulator.AppendCallbacks.class), // 6 <-- - anyLong(), - eq(false), // abortOnNewBatch - anyLong(), - any() - )).thenAnswer(invocation -> { - RecordAccumulator.AppendCallbacks callbacks = - (RecordAccumulator.AppendCallbacks) invocation.getArguments()[6]; - callbacks.setPartition(retrySelectedPartition.partition()); - return new RecordAccumulator.RecordAppendResult( - futureRecordMetadata, - false, - true, - false, - 0); - }); - - return futureRecordMetadata; - } - - private static final List CLIENT_IDS = new ArrayList<>(); public static class SerializerForClientId implements Serializer { @@ -2547,4 +2575,116 @@ void testDeliveryTimeoutAndLingerMsConfig() { assertDoesNotThrow(() -> new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer()).close()); } + @SuppressWarnings("deprecation") + private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, int logStartOffset) { + ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); + Map partResp = singletonMap(tp, resp); + return new ProduceResponse(partResp, throttleTimeMs); + } + + @Test + public void testSubscribingCustomMetricsDoesntAffectProducerMetrics() { + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + + Map customMetrics = customMetrics(); + customMetrics.forEach((name, metric) -> producer.registerMetricForSubscription(metric)); + + Map producerMetrics = producer.metrics(); + customMetrics.forEach((name, metric) -> assertFalse(producerMetrics.containsKey(name))); + } + + @Test + public void testUnSubscribingNonExisingMetricsDoesntCauseError() { + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + + Map customMetrics = 
customMetrics(); + //Metrics never registered but removed should not cause an error + customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> producer.unregisterMetricFromSubscription(metric))); + } + + @Test + public void testSubscribingCustomMetricsWithSameNameDoesntAffectProducerMetrics() { + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(KafkaProducer.class, Level.DEBUG); + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetricToAdd = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.registerMetricForSubscription(existingMetricToAdd); + final String expectedMessage = String.format("Skipping registration for metric %s. Existing producer metrics cannot be overwritten.", existingMetricToAdd.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); + } + } + + @Test + public void testUnsubscribingCustomMetricWithSameNameAsExistingMetricDoesntAffectProducerMetric() { + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + appender.setClassLogger(KafkaProducer.class, Level.DEBUG); + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetricToRemove = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.unregisterMetricFromSubscription(existingMetricToRemove); + final String expectedMessage = String.format("Skipping unregistration for metric %s. 
Existing producer metrics cannot be removed.", existingMetricToRemove.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); + } + } + + @Test + public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingProducerMetric() { + try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { + ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); + clientTelemetryReporter.configure(any()); + mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); + + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.registerMetricForSubscription(existingMetric); + // This test would fail without the check as the exising metric is registered in the producer on startup + Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); + } + } + + @Test + public void testShouldNotCallMetricReporterMetricRemovalWithExistingProducerMetric() { + try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { + ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); + clientTelemetryReporter.configure(any()); + mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); + + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.unregisterMetricFromSubscription(existingMetric); + // This test would fail without the check as the exising metric is registered in the consumer on startup + Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric); + } + } + + + private Map customMetrics() { + MetricConfig metricConfig = new MetricConfig(); + Object lock = new Object(); + MetricName metricNameOne = new MetricName("metricOne", "stream-metrics", "description for metric one", new HashMap<>()); + MetricName metricNameTwo = new MetricName("metricTwo", "stream-metrics", "description for metric two", new HashMap<>()); + + KafkaMetric streamClientMetricOne = new KafkaMetric(lock, metricNameOne, (Measurable) (m, now) -> 1.0, metricConfig, Time.SYSTEM); + KafkaMetric streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM); + return Map.of(metricNameOne, streamClientMetricOne, metricNameTwo, streamClientMetricTwo); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java index d27a297d30a9b..6ec8164c26805 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java @@ -59,7 +59,7 @@ public class MockProducerTest { private final String groupId = "group"; private void 
buildMockProducer(boolean autoComplete) { - this.producer = new MockProducer<>(autoComplete, new MockSerializer(), new MockSerializer()); + this.producer = new MockProducer<>(Cluster.empty(), autoComplete, null, new MockSerializer(), new MockSerializer()); } @AfterEach @@ -87,10 +87,16 @@ public void testPartitioner() throws Exception { PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null); Cluster cluster = new Cluster(null, new ArrayList<>(0), asList(partitionInfo0, partitionInfo1), Collections.emptySet(), Collections.emptySet()); - MockProducer producer = new MockProducer<>(cluster, true, new StringSerializer(), new StringSerializer()); + MockProducer producer = new MockProducer<>( + cluster, + true, + new org.apache.kafka.clients.producer.RoundRobinPartitioner(), + new StringSerializer(), + new StringSerializer() + ); ProducerRecord record = new ProducerRecord<>(topic, "key", "value"); Future metadata = producer.send(record); - assertEquals(1, metadata.get().partition(), "Partition should be correct"); + assertEquals(0, metadata.get().partition(), "Partition should be correct"); producer.clear(); assertEquals(0, producer.history().size(), "Clear should erase our history"); producer.close(); @@ -393,14 +399,14 @@ public void shouldPublishConsumerGroupOffsetsOnlyAfterCommitIfTransactionsAreEna producer.beginTransaction(); String group1 = "g1"; - Map group1Commit = new HashMap() { + Map group1Commit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); } }; String group2 = "g2"; - Map group2Commit = new HashMap() { + Map group2Commit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(101L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(21L, null)); @@ -419,15 +425,6 @@ public void shouldPublishConsumerGroupOffsetsOnlyAfterCommitIfTransactionsAreEna assertEquals(Collections.singletonList(expectedResult), producer.consumerGroupOffsetsHistory()); } - @Deprecated - @Test - public void shouldThrowOnNullConsumerGroupIdWhenSendOffsetsToTransaction() { - buildMockProducer(true); - producer.initTransactions(); - producer.beginTransaction(); - assertThrows(NullPointerException.class, () -> producer.sendOffsetsToTransaction(Collections.emptyMap(), (String) null)); - } - @Test public void shouldThrowOnNullConsumerGroupMetadataWhenSendOffsetsToTransaction() { buildMockProducer(true); @@ -436,16 +433,6 @@ public void shouldThrowOnNullConsumerGroupMetadataWhenSendOffsetsToTransaction() assertThrows(NullPointerException.class, () -> producer.sendOffsetsToTransaction(Collections.emptyMap(), new ConsumerGroupMetadata(null))); } - @Deprecated - @Test - public void shouldIgnoreEmptyOffsetsWhenSendOffsetsToTransactionByGroupId() { - buildMockProducer(true); - producer.initTransactions(); - producer.beginTransaction(); - producer.sendOffsetsToTransaction(Collections.emptyMap(), "groupId"); - assertFalse(producer.sentOffsets()); - } - @Test public void shouldIgnoreEmptyOffsetsWhenSendOffsetsToTransactionByGroupMetadata() { buildMockProducer(true); @@ -454,25 +441,7 @@ public void shouldIgnoreEmptyOffsetsWhenSendOffsetsToTransactionByGroupMetadata( producer.sendOffsetsToTransaction(Collections.emptyMap(), new ConsumerGroupMetadata("groupId")); assertFalse(producer.sentOffsets()); } - - @Deprecated - @Test - public void shouldAddOffsetsWhenSendOffsetsToTransactionByGroupId() { - buildMockProducer(true); - producer.initTransactions(); - 
producer.beginTransaction(); - - assertFalse(producer.sentOffsets()); - - Map groupCommit = new HashMap() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - } - }; - producer.sendOffsetsToTransaction(groupCommit, "groupId"); - assertTrue(producer.sentOffsets()); - } - + @Test public void shouldAddOffsetsWhenSendOffsetsToTransactionByGroupMetadata() { buildMockProducer(true); @@ -481,7 +450,7 @@ public void shouldAddOffsetsWhenSendOffsetsToTransactionByGroupMetadata() { assertFalse(producer.sentOffsets()); - Map groupCommit = new HashMap() { + Map groupCommit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); } @@ -498,7 +467,7 @@ public void shouldResetSentOffsetsFlagOnlyWhenBeginningNewTransaction() { assertFalse(producer.sentOffsets()); - Map groupCommit = new HashMap() { + Map groupCommit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); } @@ -525,13 +494,13 @@ public void shouldPublishLatestAndCumulativeConsumerGroupOffsetsOnlyAfterCommitI producer.beginTransaction(); String group = "g"; - Map groupCommit1 = new HashMap() { + Map groupCommit1 = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); } }; - Map groupCommit2 = new HashMap() { + Map groupCommit2 = new HashMap<>() { { put(new TopicPartition(topic, 1), new OffsetAndMetadata(101L, null)); put(new TopicPartition(topic, 2), new OffsetAndMetadata(21L, null)); @@ -543,7 +512,7 @@ public void shouldPublishLatestAndCumulativeConsumerGroupOffsetsOnlyAfterCommitI assertTrue(producer.consumerGroupOffsetsHistory().isEmpty()); Map> expectedResult = new HashMap<>(); - expectedResult.put(group, new HashMap() { + expectedResult.put(group, new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(101L, null)); @@ -562,7 +531,7 @@ public void shouldDropConsumerGroupOffsetsOnAbortIfTransactionsAreEnabled() { producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap() { + Map groupCommit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); @@ -591,7 +560,7 @@ public void shouldPreserveOffsetsFromCommitByGroupIdOnAbortIfTransactionsAreEnab producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap() { + Map groupCommit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); @@ -616,7 +585,7 @@ public void shouldPreserveOffsetsFromCommitByGroupMetadataOnAbortIfTransactionsA producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap() { + Map groupCommit = new HashMap<>() { { put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); @@ -628,7 +597,7 @@ public void shouldPreserveOffsetsFromCommitByGroupMetadataOnAbortIfTransactionsA producer.beginTransaction(); String group2 = "g2"; - Map groupCommit2 = new HashMap() { + Map groupCommit2 = new HashMap<>() { { put(new TopicPartition(topic, 2), new OffsetAndMetadata(53L, null)); put(new TopicPartition(topic, 3), new OffsetAndMetadata(84L, null)); @@ -717,7 +686,7 @@ public void shouldNotThrowOnFlushProducerIfProducerIsFenced() { @Test 
@SuppressWarnings("unchecked") public void shouldThrowClassCastException() { - try (MockProducer customProducer = new MockProducer<>(true, new IntegerSerializer(), new StringSerializer())) { + try (MockProducer customProducer = new MockProducer<>(Cluster.empty(), true, null, new IntegerSerializer(), new StringSerializer())) { assertThrows(ClassCastException.class, () -> customProducer.send(new ProducerRecord(topic, "key1", "value1"))); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java index f3ec9ca96c2ee..830711c0e5449 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java @@ -30,6 +30,7 @@ import java.util.Locale; import java.util.Map; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -106,7 +107,7 @@ public void testDefaultMetadataRecoveryStrategy() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); final ProducerConfig producerConfig = new ProducerConfig(configs); - assertEquals(MetadataRecoveryStrategy.NONE.name, producerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); + assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, producerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @Test @@ -129,4 +130,19 @@ public void testCaseInsensitiveSecurityProtocol() { final ProducerConfig producerConfig = new ProducerConfig(configs); assertEquals(saslSslLowerCase, producerConfig.originals().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } + + @Test + void testUpperboundCheckOfEnableIdempotence() { + String inFlightConnection = "6"; + final Map configs = new HashMap<>(); + configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); + configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); + configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, inFlightConnection); + ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); + assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + + " must be set to at most 5. 
Current value is " + inFlightConnection + ".", configException.getMessage()); + + configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); + assertDoesNotThrow(() -> new ProducerConfig(configs)); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/RoundRobinPartitionerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/RoundRobinPartitionerTest.java index 37f35b0a5a31f..8c6d0a33d21b3 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/RoundRobinPartitionerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/RoundRobinPartitionerTest.java @@ -96,35 +96,4 @@ public void testRoundRobinWithKeyBytes() { assertEquals(10, partitionCount.get(1).intValue()); assertEquals(10, partitionCount.get(2).intValue()); } - - @Test - public void testRoundRobinWithNullKeyBytes() { - final String topicA = "topicA"; - final String topicB = "topicB"; - - List allPartitions = asList(new PartitionInfo(topicA, 0, NODES[0], NODES, NODES), - new PartitionInfo(topicA, 1, NODES[1], NODES, NODES), new PartitionInfo(topicA, 2, NODES[2], NODES, NODES), - new PartitionInfo(topicB, 0, NODES[0], NODES, NODES)); - Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, - Collections.emptySet(), Collections.emptySet()); - - final Map partitionCount = new HashMap<>(); - - Partitioner partitioner = new RoundRobinPartitioner(); - for (int i = 0; i < 30; ++i) { - int partition = partitioner.partition(topicA, null, null, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(topicB, null, null, null, null, testCluster); - } - } - - assertEquals(10, partitionCount.get(0).intValue()); - assertEquals(10, partitionCount.get(1).intValue()); - assertEquals(10, partitionCount.get(2).intValue()); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/UniformStickyPartitionerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/UniformStickyPartitionerTest.java deleted file mode 100644 index dbaa9dcdecd84..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/producer/UniformStickyPartitionerTest.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.clients.producer; - -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.PartitionInfo; - -import org.junit.jupiter.api.Test; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Arrays.asList; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class UniformStickyPartitionerTest { - private static final Node[] NODES = new Node[] { - new Node(0, "localhost", 99), - new Node(1, "localhost", 100), - new Node(2, "localhost", 101) - }; - - private static final String TOPIC_A = "TOPIC_A"; - private static final String TOPIC_B = "TOPIC_B"; - - @SuppressWarnings("deprecation") - @Test - public void testRoundRobinWithUnavailablePartitions() { - // Intentionally make the partition list not in partition order to test the edge - // cases. - List partitions = asList( - new PartitionInfo("test", 1, null, NODES, NODES), - new PartitionInfo("test", 2, NODES[1], NODES, NODES), - new PartitionInfo("test", 0, NODES[0], NODES, NODES)); - // When there are some unavailable partitions, we want to make sure that (1) we - // always pick an available partition, - // and (2) the available partitions are selected in a sticky way. - int countForPart0 = 0; - int countForPart2 = 0; - int part = 0; - Partitioner partitioner = new UniformStickyPartitioner(); - Cluster cluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), partitions, - Collections.emptySet(), Collections.emptySet()); - for (int i = 0; i < 50; i++) { - part = partitioner.partition("test", null, null, null, null, cluster); - assertTrue(part == 0 || part == 2, "We should never choose a leader-less node in round robin"); - if (part == 0) - countForPart0++; - else - countForPart2++; - } - // Simulates switching the sticky partition on a new batch. - partitioner.onNewBatch("test", cluster, part); - for (int i = 1; i <= 50; i++) { - part = partitioner.partition("test", null, null, null, null, cluster); - assertTrue(part == 0 || part == 2, "We should never choose a leader-less node in round robin"); - if (part == 0) - countForPart0++; - else - countForPart2++; - } - assertEquals(countForPart0, countForPart2, "The distribution between two available partitions should be even"); - } - - @SuppressWarnings("deprecation") - @Test - public void testRoundRobinWithKeyBytes() { - List allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES), - new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES), new PartitionInfo(TOPIC_A, 2, NODES[1], NODES, NODES), - new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES)); - Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, - Collections.emptySet(), Collections.emptySet()); - - final Map partitionCount = new HashMap<>(); - - final byte[] keyBytes = "key".getBytes(); - int partition = 0; - Partitioner partitioner = new UniformStickyPartitioner(); - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster); - } - } - // Simulate a batch filling up and switching the sticky partition. 
- partitioner.onNewBatch(TOPIC_A, testCluster, partition); - partitioner.onNewBatch(TOPIC_B, testCluster, 0); - - // Save old partition to ensure that the wrong partition does not trigger a new batch. - int oldPart = partition; - - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster); - } - } - - int newPart = partition; - - // Attempt to switch the partition with the wrong previous partition. Sticky partition should not change. - partitioner.onNewBatch(TOPIC_A, testCluster, oldPart); - - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster); - } - } - - assertEquals(30, partitionCount.get(oldPart).intValue()); - assertEquals(60, partitionCount.get(newPart).intValue()); - } - - @SuppressWarnings("deprecation") - @Test - public void testRoundRobinWithNullKeyBytes() { - List allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES), - new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES), new PartitionInfo(TOPIC_A, 2, NODES[1], NODES, NODES), - new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES)); - Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, - Collections.emptySet(), Collections.emptySet()); - - final Map partitionCount = new HashMap<>(); - - int partition = 0; - Partitioner partitioner = new UniformStickyPartitioner(); - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, null, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, null, null, null, testCluster); - } - } - // Simulate a batch filling up and switching the sticky partition. - partitioner.onNewBatch(TOPIC_A, testCluster, partition); - partitioner.onNewBatch(TOPIC_B, testCluster, 0); - - // Save old partition to ensure that the wrong partition does not trigger a new batch. - int oldPart = partition; - - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, null, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, null, null, null, testCluster); - } - } - - int newPart = partition; - - // Attempt to switch the partition with the wrong previous partition. Sticky partition should not change. 
- partitioner.onNewBatch(TOPIC_A, testCluster, oldPart); - - for (int i = 0; i < 30; ++i) { - partition = partitioner.partition(TOPIC_A, null, null, null, null, testCluster); - Integer count = partitionCount.get(partition); - if (null == count) - count = 0; - partitionCount.put(partition, count + 1); - - if (i % 5 == 0) { - partitioner.partition(TOPIC_B, null, null, null, null, testCluster); - } - } - - assertEquals(30, partitionCount.get(oldPart).intValue()); - assertEquals(60, partitionCount.get(newPart).intValue()); - } -} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/DefaultPartitionerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/DefaultPartitionerTest.java deleted file mode 100644 index 353e07b366af6..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/DefaultPartitionerTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.producer.internals; - -import org.apache.kafka.clients.producer.Partitioner; -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.PartitionInfo; - -import org.junit.jupiter.api.Test; - -import java.util.Collections; -import java.util.List; - -import static java.util.Arrays.asList; -import static org.junit.jupiter.api.Assertions.assertEquals; - -public class DefaultPartitionerTest { - private static final byte[] KEY_BYTES = "key".getBytes(); - private static final Node[] NODES = new Node[] { - new Node(0, "localhost", 99), - new Node(1, "localhost", 100), - new Node(12, "localhost", 101) - }; - private static final String TOPIC = "test"; - // Intentionally make the partition list not in partition order to test the edge cases. 
- private static final List PARTITIONS = asList(new PartitionInfo(TOPIC, 1, null, NODES, NODES), - new PartitionInfo(TOPIC, 2, NODES[1], NODES, NODES), - new PartitionInfo(TOPIC, 0, NODES[0], NODES, NODES)); - - @Test - public void testKeyPartitionIsStable() { - @SuppressWarnings("deprecation") - final Partitioner partitioner = new DefaultPartitioner(); - final Cluster cluster = new Cluster("clusterId", asList(NODES), PARTITIONS, - Collections.emptySet(), Collections.emptySet()); - int partition = partitioner.partition(TOPIC, null, KEY_BYTES, null, null, cluster); - assertEquals(partition, partitioner.partition(TOPIC, null, KEY_BYTES, null, null, cluster), "Same key should yield same partition"); - } -} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java index 3d8f132a487bd..ff50033b388a6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.clients.MetadataSnapshot; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.producer.Callback; -import org.apache.kafka.clients.producer.Partitioner; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; @@ -29,7 +28,6 @@ import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; @@ -80,7 +78,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -164,18 +161,18 @@ public void testDrainBatches() throws Exception { // initial data - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // drain batches from 2 nodes: node1 => tp1, node2 => tp3, because the max request size is full after the first batch drained Map> batches1 = 
accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches1, tp1, tp3); // add record for tp1, tp3 - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // drain batches from 2 nodes: node1 => tp2, node2 => tp4, because the max request size is full after the first batch drained // The drain index should start from next topic partition, that is, node1 => tp2, node2 => tp4 @@ -187,18 +184,18 @@ public void testDrainBatches() throws Exception { verifyTopicPartitionInBatches(batches3, tp1, tp3); // add record for tp2, tp3, tp4 and mute the tp4 - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.mutePartition(tp4); // drain batches from 2 nodes: node1 => tp2, node2 => tp3 (because tp4 is muted) Map> batches4 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches4, tp2, tp3); // add record for tp1, tp2, tp3, and unmute tp4 - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); + accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.unmutePartition(tp4); // set maxSize as a max value, so that the all partitions in 2 nodes should be drained: node1 => [tp1, tp2], node2 => [tp3, tp4] Map> batches5 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), Integer.MAX_VALUE, 0); @@ -232,7 +229,7 @@ public void testFull() throws Exception { int appends = expectedNumAppends(batchSize); for (int i = 0; i < appends; i++) { // append to the first batch - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), metadataCache.cluster()); + accum.append(topic, partition1, 
0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), metadataCache.cluster()); Deque partitionBatches = accum.getDeque(tp1); assertEquals(1, partitionBatches.size()); @@ -243,7 +240,7 @@ public void testFull() throws Exception { // this append doesn't fit in the first batch, so a new batch is created and the first batch is closed - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), metadataCache.cluster()); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), metadataCache.cluster()); Deque partitionBatches = accum.getDeque(tp1); assertEquals(2, partitionBatches.size()); Iterator partitionBatchesIterator = partitionBatches.iterator(); @@ -278,7 +275,7 @@ private void testAppendLarge(Compression compression) throws Exception { byte[] value = new byte[2 * batchSize]; RecordAccumulator accum = createTestRecordAccumulator( batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compression, 0); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), metadataCache.cluster()); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), metadataCache.cluster()); assertEquals(Collections.singleton(node1), accum.ready(metadataCache, time.milliseconds()).readyNodes, "Our partition's leader should be ready"); Deque batches = accum.getDeque(tp1); @@ -316,7 +313,7 @@ private void testAppendLargeOldMessageFormat(Compression compression) throws Exc RecordAccumulator accum = createTestRecordAccumulator( batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compression, 0); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), metadataCache.cluster()); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), metadataCache.cluster()); assertEquals(Collections.singleton(node1), accum.ready(metadataCache, time.milliseconds()).readyNodes, "Our partition's leader should be ready"); Deque batches = accum.getDeque(tp1); @@ -340,7 +337,7 @@ public void testLinger() throws Exception { int lingerMs = 10; RecordAccumulator accum = createTestRecordAccumulator( 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, Compression.NONE, lingerMs); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(0, accum.ready(metadataCache, time.milliseconds()).readyNodes.size(), "No partitions should be ready"); time.sleep(10); assertEquals(Collections.singleton(node1), accum.ready(metadataCache, time.milliseconds()).readyNodes, "Our partition's leader should be ready"); @@ -363,7 +360,7 @@ public void testPartialDrain() throws Exception { List partitions = asList(tp1, tp2); for (TopicPartition tp : partitions) { for (int i = 0; i < appends; i++) - accum.append(tp.topic(), tp.partition(), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(tp.topic(), tp.partition(), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); } assertEquals(Collections.singleton(node1), accum.ready(metadataCache, 
time.milliseconds()).readyNodes, "Partition's leader should be ready"); @@ -384,7 +381,7 @@ public void testStressfulSituation() throws Exception { threads.add(new Thread(() -> { for (int j = 0; j < msgs; j++) { try { - accum.append(topic, j % numParts, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, j % numParts, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); } catch (Exception e) { e.printStackTrace(); } @@ -427,7 +424,7 @@ public void testNextReadyCheckDelay() throws Exception { // Partition on node1 only for (int i = 0; i < appends; i++) - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds()); assertEquals(0, result.readyNodes.size(), "No nodes should be ready."); assertEquals(lingerMs, result.nextReadyCheckDelayMs, "Next check time should be the linger time"); @@ -436,14 +433,14 @@ public void testNextReadyCheckDelay() throws Exception { // Add partition on node2 only for (int i = 0; i < appends; i++) - accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); result = accum.ready(metadataCache, time.milliseconds()); assertEquals(0, result.readyNodes.size(), "No nodes should be ready."); assertEquals(lingerMs / 2, result.nextReadyCheckDelayMs, "Next check time should be defined by node1, half remaining linger time"); // Add data for another partition on node1, enough to make data sendable immediately for (int i = 0; i < appends + 1; i++) - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); result = accum.ready(metadataCache, time.milliseconds()); assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready"); // Note this can actually be < linger time because it may use delays from partitions that aren't sendable @@ -467,7 +464,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); long now = time.milliseconds(); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, now + lingerMs + 1); assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready"); Map> batches = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1); @@ -479,7 +476,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, accum.reenqueue(batches.get(0).get(0), now); // Put message for partition 1 into accumulator - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, 
partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); result = accum.ready(metadataCache, now + lingerMs + 1); assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready"); @@ -533,7 +530,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, long now = time.milliseconds(); long initial = now; - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // No backoff for initial attempt Map> batches = drainAndCheckBatchAmount(metadataCache, node1, accum, now + lingerMs + 1, 1); @@ -594,7 +591,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, long now = time.milliseconds(); long initial = now; - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // No backoff for initial attempt Map> batches = drainAndCheckBatchAmount(metadataCache, node1, accum, now + lingerMs + 1, 1); @@ -651,7 +648,7 @@ public void testFlush() throws Exception { 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, Compression.NONE, lingerMs); for (int i = 0; i < 100; i++) { - accum.append(topic, i % 3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, i % 3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); assertTrue(accum.hasIncomplete()); } RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds()); @@ -687,7 +684,7 @@ private void delayedInterrupt(final Thread thread, final long delayMs) { public void testAwaitFlushComplete() throws Exception { RecordAccumulator accum = createTestRecordAccumulator( 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, Compression.NONE, Integer.MAX_VALUE); - accum.append(topic, 0, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, 0, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.beginFlush(); assertTrue(accum.flushInProgress()); @@ -720,7 +717,7 @@ public void setPartition(int partition) { } } for (int i = 0; i < numRecords; i++) - accum.append(topic, i % 3, 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, i % 3, 0L, key, value, null, new TestCallback(), maxBlockTimeMs, time.milliseconds(), cluster); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map> drained = accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); @@ -765,7 +762,7 @@ public void setPartition(int partition) { } } for (int i = 0; i < numRecords; i++) - accum.append(topic, i % 3, 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, i % 3, 0L, key, value, null, new TestCallback(), maxBlockTimeMs, time.milliseconds(), cluster); RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map> drained = 
accum.drain(metadataCache, result.readyNodes, Integer.MAX_VALUE, @@ -804,7 +801,7 @@ private void doExpireBatchSingle(int deliveryTimeoutMs) throws InterruptedExcept for (Boolean mute: muteStates) { if (time.milliseconds() < System.currentTimeMillis()) time.setCurrentTimeMs(System.currentTimeMillis()); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(0, accum.ready(metadataCache, time.milliseconds()).readyNodes.size(), "No partition should be ready."); time.sleep(lingerMs); @@ -853,11 +850,11 @@ public void testExpiredBatches() throws InterruptedException { // Test batches not in retry for (int i = 0; i < appends; i++) { - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(0, accum.ready(metadataCache, time.milliseconds()).readyNodes.size(), "No partitions should be ready."); } // Make the batches ready due to batch full - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); Set readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); // Advance the clock to expire the batch. @@ -887,7 +884,7 @@ public void testExpiredBatches() throws InterruptedException { // Test batches in retry. // Create a retried batch - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); time.sleep(lingerMs); readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); @@ -911,7 +908,7 @@ public void testExpiredBatches() throws InterruptedException { assertEquals(0, expiredBatches.size(), "All batches should have been expired."); // Test that when being throttled muted batches are expired before the throttle time is over. 
- accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); time.sleep(lingerMs); readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); @@ -944,7 +941,7 @@ public void testMutedPartitions() throws InterruptedException { batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, Compression.NONE, 10); int appends = expectedNumAppends(batchSize); for (int i = 0; i < appends; i++) { - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(0, accum.ready(metadataCache, now).readyNodes.size(), "No partitions should be ready."); } time.sleep(2000); @@ -970,26 +967,6 @@ public void testMutedPartitions() throws InterruptedException { assertFalse(drained.get(node1.id()).isEmpty(), "The batch should have been drained."); } - @Test - public void testIdempotenceWithOldMagic() { - // Simulate talking to an older broker, ie. one which supports a lower magic. - ApiVersions apiVersions = new ApiVersions(); - int batchSize = 1025; - int deliveryTimeoutMs = 3200; - int lingerMs = 10; - long retryBackoffMs = 100L; - long totalSize = 10 * batchSize; - String metricGrpName = "producer-metrics"; - - apiVersions.update("foobar", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2)); - TransactionManager transactionManager = new TransactionManager(new LogContext(), null, 0, retryBackoffMs, apiVersions); - RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, - Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager, - new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); - assertThrows(UnsupportedVersionException.class, - () -> accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster)); - } - @Test public void testRecordsDrainedWhenTransactionCompleting() throws Exception { int batchSize = 1025; @@ -1004,16 +981,16 @@ public void testRecordsDrainedWhenTransactionCompleting() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(12345L, (short) 5); Mockito.when(transactionManager.producerIdAndEpoch()).thenReturn(producerIdAndEpoch); Mockito.when(transactionManager.isSendToPartitionAllowed(tp1)).thenReturn(true); - Mockito.when(transactionManager.isPartitionAdded(tp1)).thenReturn(true); + Mockito.when(transactionManager.transactionContainsPartition(tp1)).thenReturn(true); Mockito.when(transactionManager.firstInFlightSequence(tp1)).thenReturn(0); // Initially, the transaction is still in progress, so we should respect the linger. 
Mockito.when(transactionManager.isCompleting()).thenReturn(false); accumulator.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, - false, time.milliseconds(), cluster); + time.milliseconds(), cluster); accumulator.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, - false, time.milliseconds(), cluster); + time.milliseconds(), cluster); assertTrue(accumulator.hasUndrained()); RecordAccumulator.ReadyCheckResult firstResult = accumulator.ready(metadataCache, time.milliseconds()); @@ -1128,7 +1105,7 @@ public void testSplitFrequency() throws InterruptedException { int dice = random.nextInt(100); byte[] value = (dice < goodCompRatioPercentage) ? bytesWithGoodCompression(random) : bytesWithPoorCompression(random, 100); - accum.append(topic, partition1, 0L, null, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, null, value, Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); BatchDrainedResult result = completeOrSplitBatches(accum, batchSize); numSplit += result.numSplit; numBatches += result.numBatches; @@ -1151,7 +1128,7 @@ public void testSoonToExpireBatchesArePickedUpForExpiry() throws InterruptedExce RecordAccumulator accum = createTestRecordAccumulator( batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, Compression.NONE, lingerMs); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); Set readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; Map> drained = accum.drain(metadataCache, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(drained.isEmpty()); @@ -1166,7 +1143,7 @@ public void testSoonToExpireBatchesArePickedUpForExpiry() throws InterruptedExce //assertTrue(accum.soonToExpireInFlightBatches().isEmpty()); // Queue another batch and advance clock such that batch expiry time is earlier than request timeout. - accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); + accum.append(topic, partition2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); time.sleep(lingerMs * 4); // Now drain and check that accumulator picked up the drained batch because its expiry is soon. @@ -1191,7 +1168,7 @@ public void testExpiredBatchesRetry() throws InterruptedException { // Test batches in retry. 
for (Boolean mute : muteStates) { - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); time.sleep(lingerMs); readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); @@ -1213,79 +1190,6 @@ public void testExpiredBatchesRetry() throws InterruptedException { } } - @SuppressWarnings("deprecation") - @Test - public void testStickyBatches() throws Exception { - long now = time.milliseconds(); - - // Test case assumes that the records do not fill the batch completely - int batchSize = 1025; - - Partitioner partitioner = new DefaultPartitioner(); - RecordAccumulator accum = createTestRecordAccumulator(3200, - batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10L * batchSize, Compression.NONE, 10); - int expectedAppends = expectedNumAppendsNoKey(batchSize); - - // Create first batch - int partition = partitioner.partition(topic, null, null, "value", value, cluster); - accum.append(topic, partition, 0L, null, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - int appends = 1; - - boolean switchPartition = false; - while (!switchPartition) { - // Append to the first batch - partition = partitioner.partition(topic, null, null, "value", value, cluster); - RecordAccumulator.RecordAppendResult result = accum.append(topic, partition, 0L, null, - value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, true, time.milliseconds(), cluster); - Deque partitionBatches1 = accum.getDeque(tp1); - Deque partitionBatches2 = accum.getDeque(tp2); - Deque partitionBatches3 = accum.getDeque(tp3); - int numBatches = (partitionBatches1 == null ? 0 : partitionBatches1.size()) + (partitionBatches2 == null ? 0 : partitionBatches2.size()) + (partitionBatches3 == null ? 0 : partitionBatches3.size()); - // Only one batch is created because the partition is sticky. - assertEquals(1, numBatches); - - switchPartition = result.abortForNewBatch; - // We only appended if we do not retry. - if (!switchPartition) { - appends++; - assertEquals(0, accum.ready(metadataCache, now).readyNodes.size(), "No partitions should be ready."); - } - } - - // Batch should be full. - assertEquals(1, accum.ready(metadataCache, time.milliseconds()).readyNodes.size()); - assertEquals(appends, expectedAppends); - switchPartition = false; - - // KafkaProducer would call this method in this case, make second batch - partitioner.onNewBatch(topic, cluster, partition); - partition = partitioner.partition(topic, null, null, "value", value, cluster); - accum.append(topic, partition, 0L, null, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), cluster); - appends++; - - // These appends all go into the second batch - while (!switchPartition) { - partition = partitioner.partition(topic, null, null, "value", value, cluster); - RecordAccumulator.RecordAppendResult result = accum.append(topic, partition, 0L, null, value, - Record.EMPTY_HEADERS, null, maxBlockTimeMs, true, time.milliseconds(), cluster); - Deque partitionBatches1 = accum.getDeque(tp1); - Deque partitionBatches2 = accum.getDeque(tp2); - Deque partitionBatches3 = accum.getDeque(tp3); - int numBatches = (partitionBatches1 == null ? 0 : partitionBatches1.size()) + (partitionBatches2 == null ? 0 : partitionBatches2.size()) + (partitionBatches3 == null ? 
0 : partitionBatches3.size()); - // Only two batches because the new partition is also sticky. - assertEquals(2, numBatches); - - switchPartition = result.abortForNewBatch; - // We only appended if we do not retry. - if (!switchPartition) { - appends++; - } - } - - // There should be two full batches now. - assertEquals(appends, 2 * expectedAppends); - } - @Test public void testUniformBuiltInPartitioner() throws Exception { @@ -1317,7 +1221,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { // Produce small record, we should switch to first partition. accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, value, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition1, partition.get()); assertEquals(1, mockRandom.get()); @@ -1326,28 +1230,28 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { // because of incomplete batch. byte[] largeValue = new byte[batchSize]; accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition1, partition.get()); assertEquals(1, mockRandom.get()); // Produce large record, we should switch to next partition as we complete // previous batch and exceeded sticky limit. accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition2, partition.get()); assertEquals(2, mockRandom.get()); // Produce large record, we should switch to next partition as we complete // previous batch and exceeded sticky limit. accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition3, partition.get()); assertEquals(3, mockRandom.get()); // Produce large record, we should switch to next partition as we complete // previous batch and exceeded sticky limit. accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition1, partition.get()); assertEquals(4, mockRandom.get()); } @@ -1379,7 +1283,7 @@ BuiltInPartitioner createBuiltInPartitioner(LogContext logContext, String topic, for (int c = queueSizes[i]; c-- > 0; ) { // Add large records to each partition, so that each record creates a batch. accum.append(topic, i, 0L, null, largeValue, Record.EMPTY_HEADERS, - null, maxBlockTimeMs, false, time.milliseconds(), cluster); + null, maxBlockTimeMs, time.milliseconds(), cluster); } assertEquals(queueSizes[i], accum.getDeque(new TopicPartition(topic, i)).size()); } @@ -1404,7 +1308,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { // Prime built-in partitioner so that it'd switch on every record, as switching only // happens after the "sticky" limit is exceeded. 
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); // Issue a certain number of partition calls to validate that the partitions would be // distributed with frequencies that are reciprocal to the queue sizes. The number of @@ -1416,7 +1320,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { for (int i = 0; i < numberOfIterations; i++) { accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); ++frequencies[partition.get()]; } @@ -1433,11 +1337,11 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { // Do one append, because partition gets switched after append. accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); for (int c = 10; c-- > 0; ) { accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS, - callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster); + callbacks, maxBlockTimeMs, time.milliseconds(), cluster); assertEquals(partition3, partition.get()); } @@ -1457,7 +1361,7 @@ public void testBuiltInPartitionerFractionalBatches() throws Exception { // Produce about 2/3 of the batch size. for (int recCount = batchSize * 2 / 3 / valSize; recCount-- > 0; ) { accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0, null, value, Record.EMPTY_HEADERS, - null, maxBlockTimeMs, false, time.milliseconds(), cluster); + null, maxBlockTimeMs, time.milliseconds(), cluster); } // Advance the time to make the batch ready. @@ -1500,7 +1404,7 @@ deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, // Create 1 batch(batchA) to be produced to partition1. long now = time.milliseconds(); - accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, now, cluster); + accum.append(topic, partition1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, now, cluster); // 1st attempt(not a retry) to produce batchA, it should be ready & drained to be produced. { @@ -1620,7 +1524,7 @@ private int prepareSplitBatches(RecordAccumulator accum, long seed, int recordSi CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); // Append 20 records of 100 bytes size with poor compression ratio should make the batch too big. 
for (int i = 0; i < numRecords; i++) { - accum.append(topic, partition1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster); + accum.append(topic, partition1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0, time.milliseconds(), cluster); } RecordAccumulator.ReadyCheckResult result = accum.ready(metadataCache, time.milliseconds()); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java index 2297109b14836..3dd612b0d7982 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java @@ -206,91 +206,6 @@ public void testSimple() throws Exception { assertEquals(offset, future.get().offset()); } - @Test - public void testMessageFormatDownConversion() throws Exception { - // this test case verifies the behavior when the version of the produce request supported by the - // broker changes after the record set is created - - long offset = 0; - - // start off support produce request v3 - apiVersions.update("0", NodeApiVersions.create()); - - Future future = appendToAccumulator(tp0, 0L, "key", "value"); - - // now the partition leader supports only v2 - apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2)); - - client.prepareResponse(body -> { - ProduceRequest request = (ProduceRequest) body; - if (request.version() != 2) - return false; - - MemoryRecords records = partitionRecords(request).get(tp0); - return records != null && - records.sizeInBytes() > 0 && - records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1); - }, produceResponse(tp0, offset, Errors.NONE, 0)); - - sender.runOnce(); // connect - sender.runOnce(); // send produce request - - assertTrue(future.isDone(), "Request should be completed"); - assertEquals(offset, future.get().offset()); - } - - @SuppressWarnings("deprecation") - @Test - public void testDownConversionForMismatchedMagicValues() throws Exception { - // it can happen that we construct a record set with mismatching magic values (perhaps - // because the partition leader changed after the record set was initially constructed) - // in this case, we down-convert record sets with newer magic values to match the oldest - // created record set - - long offset = 0; - - // start off support produce request v3 - apiVersions.update("0", NodeApiVersions.create()); - - Future future1 = appendToAccumulator(tp0, 0L, "key", "value"); - - // now the partition leader supports only v2 - apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2)); - - Future future2 = appendToAccumulator(tp1, 0L, "key", "value"); - - // start off support produce request v3 - apiVersions.update("0", NodeApiVersions.create()); - - ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(Errors.NONE, offset, RecordBatch.NO_TIMESTAMP, 100); - Map partResp = new HashMap<>(); - partResp.put(tp0, resp); - partResp.put(tp1, resp); - ProduceResponse produceResponse = new ProduceResponse(partResp, 0); - - client.prepareResponse(body -> { - ProduceRequest request = (ProduceRequest) body; - if (request.version() != 2) - return false; - - Map recordsMap = partitionRecords(request); - if (recordsMap.size() != 2) - return false; - - for (MemoryRecords records : recordsMap.values()) { - if 
(records == null || records.sizeInBytes() == 0 || !records.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1)) - return false; - } - return true; - }, produceResponse); - - sender.runOnce(); // connect - sender.runOnce(); // send produce request - - assertTrue(future1.isDone(), "Request should be completed"); - assertTrue(future2.isDone(), "Request should be completed"); - } - /* * Send multiple requests. Verify that the client side quota metrics have the right values */ @@ -319,7 +234,7 @@ time, true, new ApiVersions(), throttleTimeSensor, logContext, for (int i = 1; i <= 3; i++) { int throttleTimeMs = 100 * i; - ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData() + ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks((short) 1) .setTimeoutMs(1000)); @@ -492,7 +407,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { expiryCallbackCount.incrementAndGet(); try { accumulator.append(tp1.topic(), tp1.partition(), 0L, key, value, - Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds(), metadataCache.cluster()); + Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), metadataCache.cluster()); } catch (InterruptedException e) { throw new RuntimeException("Unexpected interruption", e); } @@ -503,7 +418,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { final long nowMs = time.milliseconds(); for (int i = 0; i < messagesPerBatch; i++) - accumulator.append(tp1.topic(), tp1.partition(), 0L, key, value, null, callbacks, maxBlockTimeMs, false, nowMs, metadataCache.cluster()); + accumulator.append(tp1.topic(), tp1.partition(), 0L, key, value, null, callbacks, maxBlockTimeMs, nowMs, metadataCache.cluster()); // Advance the clock to expire the first batch. time.sleep(10000); @@ -567,6 +482,46 @@ public void testMetadataTopicExpiry() throws Exception { assertTrue(future.isDone(), "Request should be completed"); } + @Test + public void senderThreadShouldNotGetStuckWhenThrottledAndAddingPartitionsToTxn() { + // We want MockClient#poll() to advance time so that eventually the backoff expires. + try { + client.advanceTimeDuringPoll(true); + + ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); + apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); + TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); + + setupWithTransactionState(txnManager); + doInitTransactions(txnManager, producerIdAndEpoch); + + int throttleTimeMs = 1000; + long startTime = time.milliseconds(); + Node nodeToThrottle = metadata.fetch().nodeById(0); + client.throttle(nodeToThrottle, throttleTimeMs); + + // Verify node is throttled a little bit. In real-life Apache Kafka, we observe that this can happen + // as done above by throttling or with a disconnect / backoff. + long currentPollDelay = client.pollDelayMs(nodeToThrottle, startTime); + assertEquals(currentPollDelay, throttleTimeMs); + + txnManager.beginTransaction(); + txnManager.maybeAddPartition(tp0); + + assertFalse(txnManager.hasInFlightRequest()); + sender.runOnce(); + assertTrue(txnManager.hasInFlightRequest()); + + long totalTimeToRunOnce = time.milliseconds() - startTime; + + // It should have blocked roughly only the backoffTimeMs and some change. 
+ assertTrue(totalTimeToRunOnce < REQUEST_TIMEOUT); + + } finally { + client.advanceTimeDuringPoll(false); + } + } + @Test public void testNodeLatencyStats() throws Exception { try (Metrics m = new Metrics()) { @@ -2435,9 +2390,9 @@ private void testSplitBatchAndSend(TransactionManager txnManager, long nowMs = time.milliseconds(); Cluster cluster = TestUtils.singletonCluster(); Future f1 = - accumulator.append(tp.topic(), tp.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, false, nowMs, cluster).future; + accumulator.append(tp.topic(), tp.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; Future f2 = - accumulator.append(tp.topic(), tp.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, false, nowMs, cluster).future; + accumulator.append(tp.topic(), tp.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; sender.runOnce(); // connect sender.runOnce(); // send produce request @@ -2577,15 +2532,17 @@ public void testRecordErrorPropagatedToApplication() throws InterruptedException FutureRecordMetadata future = futureEntry.getValue(); assertTrue(future.isDone()); - KafkaException exception = TestUtils.assertFutureThrows(future, KafkaException.class); Integer index = futureEntry.getKey(); if (index == 0 || index == 2) { + InvalidRecordException exception = TestUtils.assertFutureThrows(future, InvalidRecordException.class); assertInstanceOf(InvalidRecordException.class, exception); assertEquals(index.toString(), exception.getMessage()); } else if (index == 3) { + InvalidRecordException exception = TestUtils.assertFutureThrows(future, InvalidRecordException.class); assertInstanceOf(InvalidRecordException.class, exception); assertEquals(Errors.INVALID_RECORD.message(), exception.getMessage()); } else { + KafkaException exception = TestUtils.assertFutureThrows(future, KafkaException.class); assertEquals(KafkaException.class, exception.getClass()); } } @@ -2870,7 +2827,7 @@ public void testAwaitPendingRecordsBeforeCommittingTransaction() throws Exceptio private void addPartitionToTxn(Sender sender, TransactionManager txnManager, TopicPartition tp) { txnManager.maybeAddPartition(tp); client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp, Errors.NONE))); - runUntil(sender, () -> txnManager.isPartitionAdded(tp)); + runUntil(sender, () -> txnManager.transactionContainsPartition(tp)); assertFalse(txnManager.hasInFlightRequest()); } @@ -3600,7 +3557,7 @@ private FutureRecordMetadata appendToAccumulator(TopicPartition tp) throws Inter private FutureRecordMetadata appendToAccumulator(TopicPartition tp, long timestamp, String key, String value) throws InterruptedException { return accumulator.append(tp.topic(), tp.partition(), timestamp, key.getBytes(), value.getBytes(), Record.EMPTY_HEADERS, - null, MAX_BLOCK_TIMEOUT, false, time.milliseconds(), TestUtils.singletonCluster()).future; + null, MAX_BLOCK_TIMEOUT, time.milliseconds(), TestUtils.singletonCluster()).future; } @SuppressWarnings("deprecation") diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/StickyPartitionCacheTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/StickyPartitionCacheTest.java deleted file mode 100644 index 8f8f1c06bd988..0000000000000 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/StickyPartitionCacheTest.java 
+++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.clients.producer.internals; - -import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.PartitionInfo; - -import org.junit.jupiter.api.Test; - -import java.util.Collections; -import java.util.List; - -import static java.util.Arrays.asList; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; - -public class StickyPartitionCacheTest { - private static final Node[] NODES = new Node[] { - new Node(0, "localhost", 99), - new Node(1, "localhost", 100), - new Node(2, "localhost", 101), - new Node(11, "localhost", 102) - }; - static final String TOPIC_A = "topicA"; - static final String TOPIC_B = "topicB"; - static final String TOPIC_C = "topicC"; - - @Test - public void testStickyPartitionCache() { - List allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES), - new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES), - new PartitionInfo(TOPIC_A, 2, NODES[2], NODES, NODES), - new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES) - ); - Cluster testCluster = new Cluster("clusterId", asList(NODES), allPartitions, - Collections.emptySet(), Collections.emptySet()); - StickyPartitionCache stickyPartitionCache = new StickyPartitionCache(); - - int partA = stickyPartitionCache.partition(TOPIC_A, testCluster); - assertEquals(partA, stickyPartitionCache.partition(TOPIC_A, testCluster)); - - int partB = stickyPartitionCache.partition(TOPIC_B, testCluster); - assertEquals(partB, stickyPartitionCache.partition(TOPIC_B, testCluster)); - - int changedPartA = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA); - assertEquals(changedPartA, stickyPartitionCache.partition(TOPIC_A, testCluster)); - assertNotEquals(partA, changedPartA); - int changedPartA2 = stickyPartitionCache.partition(TOPIC_A, testCluster); - assertEquals(changedPartA2, changedPartA); - - // We do not want to change partitions because the previous partition does not match the current sticky one. - int changedPartA3 = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA); - assertEquals(changedPartA3, changedPartA2); - - // Check that we can still use the partitioner when there is only one partition - int changedPartB = stickyPartitionCache.nextPartition(TOPIC_B, testCluster, partB); - assertEquals(changedPartB, stickyPartitionCache.partition(TOPIC_B, testCluster)); - } - - @Test - public void unavailablePartitionsTest() { - // Partition 1 in topic A and partition 0 in topic B are unavailable partitions. 
- List allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES), - new PartitionInfo(TOPIC_A, 1, null, NODES, NODES), - new PartitionInfo(TOPIC_A, 2, NODES[2], NODES, NODES), - new PartitionInfo(TOPIC_B, 0, null, NODES, NODES), - new PartitionInfo(TOPIC_B, 1, NODES[0], NODES, NODES), - new PartitionInfo(TOPIC_C, 0, null, NODES, NODES) - ); - - Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, - Collections.emptySet(), Collections.emptySet()); - StickyPartitionCache stickyPartitionCache = new StickyPartitionCache(); - - // Assure we never choose partition 1 because it is unavailable. - int partA = stickyPartitionCache.partition(TOPIC_A, testCluster); - assertNotEquals(1, partA); - for (int aPartitions = 0; aPartitions < 100; aPartitions++) { - partA = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA); - assertNotEquals(1, stickyPartitionCache.partition(TOPIC_A, testCluster)); - } - - // Assure we always choose partition 1 for topic B. - int partB = stickyPartitionCache.partition(TOPIC_B, testCluster); - assertEquals(1, partB); - for (int bPartitions = 0; bPartitions < 100; bPartitions++) { - partB = stickyPartitionCache.nextPartition(TOPIC_B, testCluster, partB); - assertEquals(1, stickyPartitionCache.partition(TOPIC_B, testCluster)); - } - - // Assure that we still choose the partition when there are no partitions available. - int partC = stickyPartitionCache.partition(TOPIC_C, testCluster); - assertEquals(0, partC); - partC = stickyPartitionCache.nextPartition(TOPIC_C, testCluster, partC); - assertEquals(0, partC); - } -} diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index 76e5717024af8..579c8d7fbb251 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -36,6 +36,7 @@ import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TransactionAbortableException; +import org.apache.kafka.common.errors.TransactionAbortedException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.errors.UnsupportedVersionException; @@ -172,12 +173,15 @@ private void initializeTransactionManager(Optional transactionalId, bool new ApiVersion() .setApiKey(ApiKeys.PRODUCE.id) .setMinVersion((short) 0) - .setMaxVersion((short) 7)), + .setMaxVersion(transactionV2Enabled ? ApiKeys.PRODUCE.latestVersion() : (short) 11), + new ApiVersion() + .setApiKey(ApiKeys.TXN_OFFSET_COMMIT.id) + .setMinVersion((short) 0) + .setMaxVersion(transactionV2Enabled ? ApiKeys.TXN_OFFSET_COMMIT.latestVersion() : (short) 4)), Arrays.asList(new ApiVersionsResponseData.SupportedFeatureKey() .setName("transaction.version") .setMaxVersion(transactionV2Enabled ? (short) 2 : (short) 1) .setMinVersion((short) 0)), - false, Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() .setName("transaction.version") .setMaxVersionLevel(transactionV2Enabled ? 
(short) 2 : (short) 1) @@ -230,7 +234,7 @@ public void testEndTxnNotSentIfIncompleteBatches() { transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxn(tp0, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); transactionManager.beginCommit(); assertNull(transactionManager.nextRequest(true)); @@ -293,7 +297,7 @@ public void testHasOngoingTransactionSuccessfulAbort() { runUntil(transactionManager::hasOngoingTransaction); prepareAddPartitionsToTxn(partition, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); transactionManager.beginAbort(); assertTrue(transactionManager.hasOngoingTransaction()); @@ -317,7 +321,7 @@ public void testHasOngoingTransactionSuccessfulCommit() { assertTrue(transactionManager.hasOngoingTransaction()); prepareAddPartitionsToTxn(partition, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); transactionManager.beginCommit(); assertTrue(transactionManager.hasOngoingTransaction()); @@ -341,7 +345,7 @@ public void testHasOngoingTransactionAbortableError() { assertTrue(transactionManager.hasOngoingTransaction()); prepareAddPartitionsToTxn(partition, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); transactionManager.transitionToAbortableError(new KafkaException()); assertTrue(transactionManager.hasOngoingTransaction()); @@ -368,7 +372,7 @@ public void testHasOngoingTransactionFatalError() { assertTrue(transactionManager.hasOngoingTransaction()); prepareAddPartitionsToTxn(partition, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); transactionManager.transitionToFatalError(new KafkaException()); assertFalse(transactionManager.hasOngoingTransaction()); @@ -382,20 +386,40 @@ public void testMaybeAddPartitionToTransaction() { transactionManager.maybeAddPartition(partition); assertTrue(transactionManager.hasPartitionsToAdd()); - assertFalse(transactionManager.isPartitionAdded(partition)); + assertFalse(transactionManager.transactionContainsPartition(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.NONE); assertTrue(transactionManager.hasPartitionsToAdd()); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); assertFalse(transactionManager.hasPartitionsToAdd()); assertFalse(transactionManager.isPartitionPendingAdd(partition)); // adding the partition again should not have any effect transactionManager.maybeAddPartition(partition); assertFalse(transactionManager.hasPartitionsToAdd()); - assertTrue(transactionManager.isPartitionAdded(partition)); + assertTrue(transactionManager.transactionContainsPartition(partition)); + assertFalse(transactionManager.isPartitionPendingAdd(partition)); + } + + @Test + public void testMaybeAddPartitionToTransactionInTransactionV2() { + initializeTransactionManager(Optional.of(transactionalId), true); + TopicPartition partition = new TopicPartition("foo", 0); + doInitTransactions(); + transactionManager.beginTransaction(); + + 
transactionManager.maybeAddPartition(partition); + // In V2, the maybeAddPartition should not add the partition to the pending list. + assertFalse(transactionManager.hasPartitionsToAdd()); + assertTrue(transactionManager.transactionContainsPartition(partition)); + assertFalse(transactionManager.isPartitionPendingAdd(partition)); + + // Adding the partition again should not have any effect + transactionManager.maybeAddPartition(partition); + assertFalse(transactionManager.hasPartitionsToAdd()); + assertTrue(transactionManager.transactionContainsPartition(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); } @@ -407,7 +431,7 @@ public void testAddPartitionToTransactionOverridesRetryBackoffForConcurrentTrans transactionManager.maybeAddPartition(partition); assertTrue(transactionManager.hasPartitionsToAdd()); - assertFalse(transactionManager.isPartitionAdded(partition)); + assertFalse(transactionManager.transactionContainsPartition(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.CONCURRENT_TRANSACTIONS); @@ -426,7 +450,7 @@ public void testAddPartitionToTransactionRetainsRetryBackoffForRegularRetriableE transactionManager.maybeAddPartition(partition); assertTrue(transactionManager.hasPartitionsToAdd()); - assertFalse(transactionManager.isPartitionAdded(partition)); + assertFalse(transactionManager.transactionContainsPartition(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.COORDINATOR_NOT_AVAILABLE); @@ -445,11 +469,11 @@ public void testAddPartitionToTransactionRetainsRetryBackoffWhenPartitionsAlread transactionManager.maybeAddPartition(partition); assertTrue(transactionManager.hasPartitionsToAdd()); - assertFalse(transactionManager.isPartitionAdded(partition)); + assertFalse(transactionManager.transactionContainsPartition(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(partition)); + runUntil(() -> transactionManager.transactionContainsPartition(partition)); TopicPartition otherPartition = new TopicPartition("foo", 1); transactionManager.maybeAddPartition(otherPartition); @@ -678,7 +702,7 @@ public void testBatchCompletedAfterProducerReset(boolean transactionV2Enabled) { assertEquals(2, transactionManager.sequenceNumber(tp0)); // The producerId might be reset due to a failure on another partition - transactionManager.requestEpochBumpForPartition(tp1); + transactionManager.requestIdempotentEpochBumpForPartition(tp1); transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); initializeIdempotentProducerId(producerId + 1, (short) 0); @@ -723,7 +747,7 @@ MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), thi assertEquals(0, transactionManager.sequenceNumber(tp0)); Future responseFuture1 = accumulator.append(tp0.topic(), tp0.partition(), time.milliseconds(), - "1".getBytes(), "1".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT, false, time.milliseconds(), + "1".getBytes(), "1".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT, time.milliseconds(), TestUtils.singletonCluster()).future; sender.runOnce(); assertEquals(1, transactionManager.sequenceNumber(tp0)); @@ -754,7 +778,7 @@ MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), thi assertEquals(0, transactionManager.sequenceNumber(tp0)); Future responseFuture2 = 
accumulator.append(tp0.topic(), tp0.partition(), time.milliseconds(), - "2".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT, false, time.milliseconds(), + "2".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT, time.milliseconds(), TestUtils.singletonCluster()).future; sender.runOnce(); sender.runOnce(); @@ -780,6 +804,21 @@ private ProducerBatch writeIdempotentBatchWithValue(TransactionManager manager, return batch; } + private ProducerBatch writeTransactionalBatchWithValue( + TransactionManager manager, + TopicPartition tp, + String value + ) { + manager.maybeUpdateProducerIdAndEpoch(tp); + int seq = manager.sequenceNumber(tp); + manager.incrementSequenceNumber(tp, 1); + ProducerBatch batch = batchWithValue(tp, value); + batch.setProducerState(manager.producerIdAndEpoch(), seq, true); + manager.addInFlightBatch(batch); + batch.close(); + return batch; + } + private ProducerBatch batchWithValue(TopicPartition tp, String value) { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(64), Compression.NONE, TimestampType.CREATE_TIME, 0L); @@ -814,7 +853,7 @@ public void testProducerIdReset(boolean transactionV2Enabled) { transactionManager.incrementSequenceNumber(tp1, 3); assertEquals(transactionManager.sequenceNumber(tp1), 3); - transactionManager.requestEpochBumpForPartition(tp0); + transactionManager.requestIdempotentEpochBumpForPartition(tp0); transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); assertEquals(transactionManager.sequenceNumber(tp0), 0); assertEquals(transactionManager.sequenceNumber(tp1), 3); @@ -890,7 +929,6 @@ public void testTransactionManagerEnablesV2() { .setName("transaction.version") .setMaxVersion((short) 2) .setMinVersion((short) 0)), - false, Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() .setName("transaction.version") .setMaxVersionLevel((short) 2) @@ -904,22 +942,121 @@ public void testTransactionManagerEnablesV2() { assertTrue(transactionManager.hasOngoingTransaction()); prepareAddPartitionsToTxn(tp1, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(tp1)); + runUntil(() -> transactionManager.transactionContainsPartition(tp1)); TransactionalRequestResult retryResult = transactionManager.beginCommit(); assertTrue(transactionManager.hasOngoingTransaction()); - assertFalse(transactionManager.isTransactionV2Enabled()); + assertTrue(transactionManager.isTransactionV2Enabled()); prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch); + prepareInitPidResponse(Errors.NONE, false, producerId, (short) (epoch + 1)); runUntil(() -> !transactionManager.hasOngoingTransaction()); runUntil(retryResult::isCompleted); retryResult.await(); runUntil(retryResult::isAcked); assertFalse(transactionManager.hasOngoingTransaction()); - // After restart the transaction, the V2 is enabled. + // After restart the transaction, the V2 is still enabled and epoch is bumped. 
transactionManager.beginTransaction(); assertTrue(transactionManager.isTransactionV2Enabled()); + assertEquals(epoch + 1, transactionManager.producerIdAndEpoch().epoch); + } + + @Test + public void testTransactionV2AddPartitionAndOffsets() throws InterruptedException { + initializeTransactionManager(Optional.of(transactionalId), true); + doInitTransactions(); + + transactionManager.beginTransaction(); + + Future responseFuture = appendToAccumulator(tp0); + + assertFalse(responseFuture.isDone()); + + prepareProduceResponse(Errors.NONE, producerId, epoch); + transactionManager.maybeAddPartition(tp0); + assertTrue(transactionManager.transactionContainsPartition(tp0)); + assertTrue(transactionManager.isSendToPartitionAllowed(tp0)); + assertFalse(responseFuture.isDone()); + runUntil(responseFuture::isDone); + + // Now, test adding the offsets. + Map offsets = new HashMap<>(); + offsets.put(tp1, new OffsetAndMetadata(1)); + + TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction( + offsets, new ConsumerGroupMetadata(consumerGroupId)); + + assertTrue(transactionManager.hasPendingOffsetCommits()); + + // the result doesn't complete until TxnOffsetCommit returns + assertFalse(addOffsetsResult.isCompleted()); + + Map txnOffsetCommitResponse = new HashMap<>(); + txnOffsetCommitResponse.put(tp1, Errors.NONE); + + prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId); + prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse); + + assertNull(transactionManager.coordinator(CoordinatorType.GROUP)); + runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null); + assertTrue(transactionManager.hasPendingOffsetCommits()); + + runUntil(() -> !transactionManager.hasPendingOffsetCommits()); + // We should only be done after both RPCs complete. 
+ assertTrue(addOffsetsResult.isCompleted()); + + transactionManager.beginCommit(); + prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch, producerId, (short) (epoch + 1), false); + runUntil(() -> !transactionManager.hasOngoingTransaction()); + assertFalse(transactionManager.isCompleting()); + } + + @Test + public void testTransactionManagerDisablesV2() { + Metrics metrics = new Metrics(time); + + apiVersions.update("0", new NodeApiVersions(Arrays.asList( + new ApiVersion() + .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3), + new ApiVersion() + .setApiKey(ApiKeys.PRODUCE.id) + .setMinVersion((short) 5) + .setMaxVersion((short) (ProduceRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 + 1)), + new ApiVersion() + .setApiKey(ApiKeys.TXN_OFFSET_COMMIT.id) + .setMinVersion((short) 1) + .setMaxVersion((short) (TxnOffsetCommitRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 + 1))), + Arrays.asList(new ApiVersionsResponseData.SupportedFeatureKey() + .setName("transaction.version") + .setMaxVersion((short) 1) + .setMinVersion((short) 0)), + Arrays.asList(new ApiVersionsResponseData.FinalizedFeatureKey() + .setName("transaction.version") + .setMaxVersionLevel((short) 1) + .setMinVersionLevel((short) 1)), + 0)); + this.transactionManager = new TransactionManager(logContext, transactionalId, + transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions); + + int batchSize = 16 * 1024; + int deliveryTimeoutMs = 3000; + long totalSize = 1024 * 1024; + String metricGrpName = "producer-metrics"; + + this.brokerNode = new Node(0, "localhost", 2211); + this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 0L, 0L, + deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager, + new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); + + this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true, + MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), this.time, REQUEST_TIMEOUT, + 50, transactionManager, apiVersions); + + doInitTransactions(); + assertFalse(transactionManager.isTransactionV2Enabled()); } @Test @@ -1276,6 +1413,22 @@ public void testGroupAuthorizationFailureInTxnOffsetCommit() { assertAbortableError(GroupAuthorizationException.class); } + @Test + public void testFatalErrorWhenProduceResponseWithInvalidPidMapping() throws InterruptedException { + initializeTransactionManager(Optional.of(transactionalId), true); + doInitTransactions(); + + transactionManager.beginTransaction(); + Future responseFuture = appendToAccumulator(tp0); + transactionManager.maybeAddPartition(tp0); + assertFalse(responseFuture.isDone()); + + prepareProduceResponse(Errors.INVALID_PRODUCER_ID_MAPPING, producerId, epoch); + assertFalse(responseFuture.isDone()); + runUntil(responseFuture::isDone); + assertTrue(transactionManager.hasFatalError()); + } + @Test public void testTransactionalIdAuthorizationFailureInAddOffsetsToTxn() { final TopicPartition tp = new TopicPartition("foo", 0); @@ -1365,8 +1518,8 @@ public void testTopicAuthorizationFailureInAddPartitions() throws InterruptedExc assertInstanceOf(TopicAuthorizationException.class, transactionManager.lastError()); assertFalse(transactionManager.isPartitionPendingAdd(tp0)); assertFalse(transactionManager.isPartitionPendingAdd(tp1)); - assertFalse(transactionManager.isPartitionAdded(tp0)); - assertFalse(transactionManager.isPartitionAdded(tp1)); + 
assertFalse(transactionManager.transactionContainsPartition(tp0)); + assertFalse(transactionManager.transactionContainsPartition(tp1)); assertFalse(transactionManager.hasPartitionsToAdd()); TopicAuthorizationException exception = (TopicAuthorizationException) transactionManager.lastError(); @@ -1374,8 +1527,8 @@ public void testTopicAuthorizationFailureInAddPartitions() throws InterruptedExc assertAbortableError(TopicAuthorizationException.class); sender.runOnce(); - TestUtils.assertFutureThrows(firstPartitionAppend, KafkaException.class); - TestUtils.assertFutureThrows(secondPartitionAppend, KafkaException.class); + TestUtils.assertFutureThrows(firstPartitionAppend, TransactionAbortedException.class); + TestUtils.assertFutureThrows(secondPartitionAppend, TransactionAbortedException.class); } @Test @@ -1422,8 +1575,8 @@ public void testCommitWithTopicAuthorizationFailureInAddPartitionsInFlight() thr // the pending transaction commit. sender.runOnce(); assertTrue(commitResult.isCompleted()); - TestUtils.assertFutureThrows(firstPartitionAppend, KafkaException.class); - TestUtils.assertFutureThrows(secondPartitionAppend, KafkaException.class); + TestUtils.assertFutureThrows(firstPartitionAppend, TopicAuthorizationException.class); + TestUtils.assertFutureThrows(secondPartitionAppend, TopicAuthorizationException.class); assertInstanceOf(TopicAuthorizationException.class, commitResult.error()); } @@ -1461,7 +1614,7 @@ public void testRecoveryFromAbortableErrorTransactionNotStarted() throws Excepti responseFuture = appendToAccumulator(tp0); prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE)); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertFalse(transactionManager.hasPartitionsToAdd()); transactionManager.beginCommit(); @@ -1482,7 +1635,7 @@ public void testRetryAbortTransactionAfterTimeout() throws Exception { prepareAddPartitionsToTxn(tp0, Errors.NONE); appendToAccumulator(tp0); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); TransactionalRequestResult result = transactionManager.beginAbort(); assertThrows(TimeoutException.class, () -> result.await(0, TimeUnit.MILLISECONDS)); @@ -1516,7 +1669,7 @@ public void testRetryCommitTransactionAfterTimeout() throws Exception { prepareProduceResponse(Errors.NONE, producerId, epoch); appendToAccumulator(tp0); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); TransactionalRequestResult result = transactionManager.beginCommit(); assertThrows(TimeoutException.class, () -> result.await(0, TimeUnit.MILLISECONDS)); @@ -1582,14 +1735,14 @@ public void testRecoveryFromAbortableErrorTransactionStarted() throws Exception prepareAddPartitionsToTxn(tp0, Errors.NONE); Future authorizedTopicProduceFuture = appendToAccumulator(unauthorizedPartition); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); transactionManager.maybeAddPartition(unauthorizedPartition); Future unauthorizedTopicProduceFuture = appendToAccumulator(unauthorizedPartition); prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED)); runUntil(transactionManager::hasAbortableError); - assertTrue(transactionManager.isPartitionAdded(tp0)); - assertFalse(transactionManager.isPartitionAdded(unauthorizedPartition)); + 
assertTrue(transactionManager.transactionContainsPartition(tp0)); + assertFalse(transactionManager.transactionContainsPartition(unauthorizedPartition)); assertFalse(authorizedTopicProduceFuture.isDone()); assertFalse(unauthorizedTopicProduceFuture.isDone()); @@ -1612,7 +1765,7 @@ public void testRecoveryFromAbortableErrorTransactionStarted() throws Exception FutureRecordMetadata nextTransactionFuture = appendToAccumulator(tp0); prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE)); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertFalse(transactionManager.hasPartitionsToAdd()); transactionManager.beginCommit(); @@ -1635,7 +1788,7 @@ public void testRecoveryFromAbortableErrorProduceRequestInRetry() throws Excepti prepareAddPartitionsToTxn(tp0, Errors.NONE); Future authorizedTopicProduceFuture = appendToAccumulator(tp0); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); accumulator.beginFlush(); prepareProduceResponse(Errors.REQUEST_TIMED_OUT, producerId, epoch); @@ -1647,8 +1800,8 @@ public void testRecoveryFromAbortableErrorProduceRequestInRetry() throws Excepti Future unauthorizedTopicProduceFuture = appendToAccumulator(unauthorizedPartition); prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED)); runUntil(transactionManager::hasAbortableError); - assertTrue(transactionManager.isPartitionAdded(tp0)); - assertFalse(transactionManager.isPartitionAdded(unauthorizedPartition)); + assertTrue(transactionManager.transactionContainsPartition(tp0)); + assertFalse(transactionManager.transactionContainsPartition(unauthorizedPartition)); assertFalse(authorizedTopicProduceFuture.isDone()); prepareProduceResponse(Errors.NONE, producerId, epoch); @@ -1676,7 +1829,7 @@ public void testRecoveryFromAbortableErrorProduceRequestInRetry() throws Excepti FutureRecordMetadata nextTransactionFuture = appendToAccumulator(tp0); prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE)); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertFalse(transactionManager.hasPartitionsToAdd()); transactionManager.beginCommit(); @@ -1800,10 +1953,10 @@ public void testMultipleAddPartitionsPerForOneProduce() throws InterruptedExcept @ParameterizedTest @EnumSource(names = { - "UNKNOWN_TOPIC_OR_PARTITION", - "REQUEST_TIMED_OUT", - "COORDINATOR_LOAD_IN_PROGRESS", - "CONCURRENT_TRANSACTIONS" + "UNKNOWN_TOPIC_OR_PARTITION", + "REQUEST_TIMED_OUT", + "COORDINATOR_LOAD_IN_PROGRESS", + "CONCURRENT_TRANSACTIONS" }) public void testRetriableErrors(Errors error) { // Ensure FindCoordinator retries. @@ -2220,7 +2373,7 @@ public void testCancelUnsentAddPartitionsAndProduceOnAbort() throws InterruptedE assertTrue(abortResult.isSuccessful()); assertTrue(transactionManager.isReady()); // make sure we are ready for a transaction now. - TestUtils.assertFutureThrows(responseFuture, KafkaException.class); + TestUtils.assertFutureThrows(responseFuture, TransactionAbortedException.class); } @Test @@ -2246,7 +2399,7 @@ public void testAbortResendsAddPartitionErrorIfRetried() throws InterruptedExcep assertTrue(abortResult.isSuccessful()); assertTrue(transactionManager.isReady()); // make sure we are ready for a transaction now. 
- TestUtils.assertFutureThrows(responseFuture, KafkaException.class); + TestUtils.assertFutureThrows(responseFuture, TransactionAbortedException.class); } @Test @@ -2806,7 +2959,7 @@ public void testDropCommitOnBatchExpiry() throws InterruptedException { @Test public void testTransitionToFatalErrorWhenRetriedBatchIsExpired() throws InterruptedException { - apiVersions.update("0", NodeApiVersions.create(Arrays.asList( + apiVersions.update("0", new NodeApiVersions(Arrays.asList( new ApiVersion() .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) .setMinVersion((short) 0) @@ -2814,7 +2967,10 @@ public void testTransitionToFatalErrorWhenRetriedBatchIsExpired() throws Interru new ApiVersion() .setApiKey(ApiKeys.PRODUCE.id) .setMinVersion((short) 0) - .setMaxVersion((short) 7)))); + .setMaxVersion((short) 7)), + Collections.emptyList(), + Collections.emptyList(), + 0)); doInitTransactions(); @@ -2948,7 +3104,7 @@ public void testNoProducerIdResetAfterLastInFlightBatchSucceeds(boolean transact @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testEpochBumpAfterLastInflightBatchFails(boolean transactionV2Enabled) { + public void testEpochBumpAfterLastInFlightBatchFailsIdempotentProducer(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, epoch); initializeIdempotentProducerId(producerId, epoch); @@ -2980,6 +3136,39 @@ public void testEpochBumpAfterLastInflightBatchFails(boolean transactionV2Enable assertEquals(0, transactionManager.sequenceNumber(tp0)); } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testMaybeResolveSequencesTransactionalProducer(boolean transactionV2Enabled) throws Exception { + initializeTransactionManager(Optional.of(transactionalId), transactionV2Enabled); + + // Initialize transaction with initial producer ID and epoch. + doInitTransactions(producerId, epoch); + + transactionManager.beginTransaction(); + transactionManager.maybeAddPartition(tp0); + prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); + + ProducerBatch b1 = writeTransactionalBatchWithValue(transactionManager, tp0, "1"); + assertEquals(Integer.valueOf(1), transactionManager.sequenceNumber(tp0)); + + transactionManager.markSequenceUnresolved(b1); + assertTrue(transactionManager.hasUnresolvedSequences()); + + transactionManager.handleFailedBatch(b1, new TimeoutException(), false); + // Call maybeResolveSequences to trigger resolution logic + transactionManager.maybeResolveSequences(); + + // Verify the type of error state the transaction is in. 
+ if (transactionManager.isTransactionV2Enabled() || transactionManager.needToTriggerEpochBumpFromClient()) { + // Expected to throw an abortable error when epoch bumping is allowed + assertTrue(transactionManager.hasAbortableError()); + } else { + // Expected to throw a fatal error when epoch bumping is not allowed + assertTrue(transactionManager.hasFatalError()); + } + } + @Test public void testEpochUpdateAfterBumpFromEndTxnResponseInV2() throws InterruptedException { initializeTransactionManager(Optional.of(transactionalId), true); @@ -2989,8 +3178,6 @@ public void testEpochUpdateAfterBumpFromEndTxnResponseInV2() throws InterruptedE transactionManager.beginTransaction(); transactionManager.maybeAddPartition(tp0); - prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); // Append record with initial producer ID and epoch. Future responseFuture = appendToAccumulator(tp0); @@ -3017,12 +3204,9 @@ public void testProducerIdAndEpochUpdateAfterOverflowFromEndTxnResponseInV2() th doInitTransactions(producerId, epoch); transactionManager.beginTransaction(); - transactionManager.maybeAddPartition(tp0); - prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); - // Append record with initial producer ID and epoch Future responseFuture = appendToAccumulator(tp0); + transactionManager.maybeAddPartition(tp0); prepareProduceResponse(Errors.NONE, producerId, epoch); runUntil(responseFuture::isDone); @@ -3065,7 +3249,7 @@ public void testNoFailedBatchHandlingWhenTxnManagerIsInFatalError(boolean transa @Test public void testAbortTransactionAndReuseSequenceNumberOnError() throws InterruptedException { - apiVersions.update("0", NodeApiVersions.create(Arrays.asList( + apiVersions.update("0", new NodeApiVersions(Arrays.asList( new ApiVersion() .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) .setMinVersion((short) 0) @@ -3077,8 +3261,10 @@ public void testAbortTransactionAndReuseSequenceNumberOnError() throws Interrupt new ApiVersion() .setApiKey(ApiKeys.PRODUCE.id) .setMinVersion((short) 0) - .setMaxVersion((short) 7) - ))); + .setMaxVersion((short) 7)), + Collections.emptyList(), + Collections.emptyList(), + 0)); doInitTransactions(); @@ -3088,7 +3274,7 @@ public void testAbortTransactionAndReuseSequenceNumberOnError() throws Interrupt Future responseFuture0 = appendToAccumulator(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); prepareProduceResponse(Errors.NONE, producerId, epoch); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); // Send AddPartitionsRequest + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); // Send AddPartitionsRequest runUntil(responseFuture0::isDone); Future responseFuture1 = appendToAccumulator(tp0); @@ -3112,7 +3298,7 @@ public void testAbortTransactionAndReuseSequenceNumberOnError() throws Interrupt transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); // Send AddPartitionsRequest + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); // Send AddPartitionsRequest assertEquals(2, transactionManager.sequenceNumber(tp0)); } @@ -3123,7 +3309,7 @@ public void testAbortTransactionAndResetSequenceNumberOnUnknownProducerId() thro // where the sequence number is reset on an UnknownProducerId error, allowing subsequent transactions to // append to the log 
successfully // Set the EndTxn version such that sequence is not reset on every end txn. - apiVersions.update("0", NodeApiVersions.create(Arrays.asList( + apiVersions.update("0", new NodeApiVersions(Arrays.asList( new ApiVersion() .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) .setMinVersion((short) 0) @@ -3135,8 +3321,10 @@ public void testAbortTransactionAndResetSequenceNumberOnUnknownProducerId() thro new ApiVersion() .setApiKey(ApiKeys.END_TXN.id) .setMinVersion((short) 0) - .setMaxVersion((short) 4) - ))); + .setMaxVersion((short) 4)), + Collections.emptyList(), + Collections.emptyList(), + 0)); doInitTransactions(); @@ -3147,14 +3335,14 @@ public void testAbortTransactionAndResetSequenceNumberOnUnknownProducerId() thro prepareAddPartitionsToTxnResponse(Errors.NONE, tp1, epoch, producerId); prepareProduceResponse(Errors.NONE, producerId, epoch, tp1); runUntil(successPartitionResponseFuture::isDone); - assertTrue(transactionManager.isPartitionAdded(tp1)); + assertTrue(transactionManager.transactionContainsPartition(tp1)); transactionManager.maybeAddPartition(tp0); Future responseFuture0 = appendToAccumulator(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); prepareProduceResponse(Errors.NONE, producerId, epoch); runUntil(responseFuture0::isDone); - assertTrue(transactionManager.isPartitionAdded(tp0)); + assertTrue(transactionManager.transactionContainsPartition(tp0)); Future responseFuture1 = appendToAccumulator(tp0); prepareProduceResponse(Errors.NONE, producerId, epoch); @@ -3178,7 +3366,7 @@ public void testAbortTransactionAndResetSequenceNumberOnUnknownProducerId() thro transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertEquals(0, transactionManager.sequenceNumber(tp0)); assertEquals(1, transactionManager.sequenceNumber(tp1)); @@ -3196,7 +3384,7 @@ public void testBumpTransactionalEpochOnAbortableError(boolean transactionV2Enab transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); Future responseFuture0 = appendToAccumulator(tp0); prepareProduceResponse(Errors.NONE, producerId, initialEpoch); @@ -3226,7 +3414,7 @@ public void testBumpTransactionalEpochOnAbortableError(boolean transactionV2Enab transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, bumpedEpoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertEquals(0, transactionManager.sequenceNumber(tp0)); } @@ -3242,7 +3430,7 @@ public void testBumpTransactionalEpochOnUnknownProducerIdError() throws Interrup transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); Future responseFuture0 = appendToAccumulator(tp0); prepareProduceResponse(Errors.NONE, producerId, initialEpoch); @@ -3273,7 +3461,7 @@ public void testBumpTransactionalEpochOnUnknownProducerIdError() throws Interrup transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, bumpedEpoch, 
producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertEquals(0, transactionManager.sequenceNumber(tp0)); } @@ -3289,7 +3477,7 @@ public void testBumpTransactionalEpochOnTimeout() throws InterruptedException { transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); Future responseFuture0 = appendToAccumulator(tp0); prepareProduceResponse(Errors.NONE, producerId, initialEpoch); @@ -3332,7 +3520,7 @@ public void testBumpTransactionalEpochOnTimeout() throws InterruptedException { transactionManager.maybeAddPartition(tp0); prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, bumpedEpoch, producerId); - runUntil(() -> transactionManager.isPartitionAdded(tp0)); + runUntil(() -> transactionManager.transactionContainsPartition(tp0)); assertEquals(0, transactionManager.sequenceNumber(tp0)); } @@ -3346,7 +3534,7 @@ public void testBumpTransactionalEpochOnRecoverableAddPartitionRequestError() { transactionManager.beginTransaction(); transactionManager.maybeAddPartition(tp0); - prepareAddPartitionsToTxnResponse(Errors.INVALID_PRODUCER_ID_MAPPING, tp0, initialEpoch, producerId); + prepareAddPartitionsToTxnResponse(Errors.UNKNOWN_PRODUCER_ID, tp0, initialEpoch, producerId); runUntil(transactionManager::hasAbortableError); TransactionalRequestResult abortResult = transactionManager.beginAbort(); @@ -3378,7 +3566,7 @@ public void testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError() thro offsets.put(tp0, new OffsetAndMetadata(1)); transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId)); assertFalse(transactionManager.hasPendingOffsetCommits()); - prepareAddOffsetsToTxnResponse(Errors.INVALID_PRODUCER_ID_MAPPING, consumerGroupId, producerId, initialEpoch); + prepareAddOffsetsToTxnResponse(Errors.UNKNOWN_PRODUCER_ID, consumerGroupId, producerId, initialEpoch); runUntil(transactionManager::hasAbortableError); // Send AddOffsetsRequest TransactionalRequestResult abortResult = transactionManager.beginAbort(); @@ -3506,13 +3694,13 @@ public void testRetryCommitTransactionAfterAbortTimeout() { } @Test - public void testCanBumpEpochDuringCoordinatorDisconnect() { + public void testNeedToTriggerEpochBumpFromClientDuringCoordinatorDisconnect() { doInitTransactions(0, (short) 0); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); - assertTrue(transactionManager.canBumpEpoch()); + assertTrue(transactionManager.needToTriggerEpochBumpFromClient()); apiVersions.remove(transactionManager.coordinator(CoordinatorType.TRANSACTION).idString()); - assertTrue(transactionManager.canBumpEpoch()); + assertTrue(transactionManager.needToTriggerEpochBumpFromClient()); } @ParameterizedTest @@ -3649,7 +3837,7 @@ public void testForegroundInvalidStateTransitionIsRecoverable() { assertTrue(transactionManager.hasOngoingTransaction()); prepareAddPartitionsToTxn(tp1, Errors.NONE); - runUntil(() -> transactionManager.isPartitionAdded(tp1)); + runUntil(() -> transactionManager.transactionContainsPartition(tp1)); TransactionalRequestResult retryResult = transactionManager.beginCommit(); assertTrue(transactionManager.hasOngoingTransaction()); @@ -3785,7 +3973,7 @@ public void testTransactionAbortableExceptionInTxnOffsetCommit() { private FutureRecordMetadata 
appendToAccumulator(TopicPartition tp) throws InterruptedException { final long nowMs = time.milliseconds(); return accumulator.append(tp.topic(), tp.partition(), nowMs, "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, - null, MAX_BLOCK_TIMEOUT, false, nowMs, TestUtils.singletonCluster()).future; + null, MAX_BLOCK_TIMEOUT, nowMs, TestUtils.singletonCluster()).future; } private void verifyCommitOrAbortTransactionRetriable(TransactionResult firstTransactionResult, @@ -4128,6 +4316,7 @@ private void doInitTransactions(long producerId, short epoch) { prepareInitPidResponse(Errors.NONE, false, producerId, epoch); runUntil(transactionManager::hasProducerId); + transactionManager.maybeUpdateTransactionV2Enabled(true); result.await(); assertTrue(result.isSuccessful()); diff --git a/clients/src/test/java/org/apache/kafka/common/UuidTest.java b/clients/src/test/java/org/apache/kafka/common/UuidTest.java index f5067a953cd0d..65316469c69e2 100644 --- a/clients/src/test/java/org/apache/kafka/common/UuidTest.java +++ b/clients/src/test/java/org/apache/kafka/common/UuidTest.java @@ -77,7 +77,7 @@ public void testStringConversion() { assertEquals(Uuid.fromString(zeroIdString), Uuid.ZERO_UUID); } - @RepeatedTest(100) + @RepeatedTest(value = 100, name = RepeatedTest.LONG_DISPLAY_NAME) public void testRandomUuid() { Uuid randomID = Uuid.randomUuid(); diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java index 9509d398305b4..bbd2268e7cb8f 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java @@ -40,7 +40,7 @@ class EnvVarConfigProviderTest { @BeforeEach public void setup() { - Map testEnvVars = new HashMap() { + Map testEnvVars = new HashMap<>() { { put("test_var1", "value1"); put("secret_var2", "value2"); diff --git a/clients/src/test/java/org/apache/kafka/common/message/ApiMessageTypeTest.java b/clients/src/test/java/org/apache/kafka/common/message/ApiMessageTypeTest.java index 10d95134399a7..4728e67f71987 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/ApiMessageTypeTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/ApiMessageTypeTest.java @@ -29,6 +29,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -97,21 +98,27 @@ public void testHeaderVersion() { assertEquals((short) 1, ApiMessageType.CREATE_TOPICS.responseHeaderVersion((short) 5)); } - /** - * Kafka currently supports direct upgrades from 0.8 to the latest version. As such, it has to support all apis - * starting from version 0 and we must have schemas from the oldest version to the latest. 
- */ @Test public void testAllVersionsHaveSchemas() { for (ApiMessageType type : ApiMessageType.values()) { - assertEquals(0, type.lowestSupportedVersion()); + assertTrue(type.lowestSupportedVersion() >= 0); assertEquals(type.requestSchemas().length, type.responseSchemas().length, "request and response schemas must be the same length for " + type.name()); - for (Schema schema : type.requestSchemas()) - assertNotNull(schema); - for (Schema schema : type.responseSchemas()) - assertNotNull(schema); + for (int i = 0; i < type.requestSchemas().length; ++i) { + Schema schema = type.requestSchemas()[i]; + if (i >= type.lowestSupportedVersion()) + assertNotNull(schema); + else + assertNull(schema); + } + for (int i = 0; i < type.responseSchemas().length; ++i) { + Schema schema = type.responseSchemas()[i]; + if (i >= type.lowestSupportedVersion()) + assertNotNull(schema); + else + assertNull(schema); + } assertEquals(type.highestSupportedVersion(true) + 1, type.requestSchemas().length); } diff --git a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java index 6f2a54a8d07b5..a64eada22d90f 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java @@ -82,7 +82,7 @@ public final class MessageTest { private final String memberId = "memberId"; private final String instanceId = "instanceId"; - private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7); + private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(1, 2, 3, 4, 5, 6, 7); @Test public void testAddOffsetsToTxnVersions() throws Exception { @@ -199,21 +199,17 @@ public void testListOffsetsRequestVersions() throws Exception { public void testListOffsetsResponseVersions() throws Exception { ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse() .setErrorCode(Errors.NONE.code()) - .setPartitionIndex(0) - .setOldStyleOffsets(Collections.singletonList(321L)); + .setPartitionIndex(0); List topics = Collections.singletonList(new ListOffsetsTopicResponse() .setName("topic") .setPartitions(Collections.singletonList(partition))); Supplier response = () -> new ListOffsetsResponseData() .setTopics(topics); - for (short version : ApiKeys.LIST_OFFSETS.allVersions()) { + for (short version = ApiKeys.LIST_OFFSETS.oldestVersion(); version <= ApiKeys.LIST_OFFSETS.latestVersion(); ++version) { ListOffsetsResponseData responseData = response.get(); - if (version > 0) { - responseData.topics().get(0).partitions().get(0) - .setOldStyleOffsets(Collections.emptyList()) - .setOffset(456L) - .setTimestamp(123L); - } + responseData.topics().get(0).partitions().get(0) + .setOffset(456L) + .setTimestamp(123L); if (version > 1) { responseData.setThrottleTimeMs(1000); } @@ -399,7 +395,8 @@ public void testOffsetForLeaderEpochVersions() throws Exception { .setPartitions(singletonList(partitionDataNoCurrentEpoch))); testAllMessageRoundTrips(data); - testAllMessageRoundTripsBeforeVersion((short) 2, partitionDataWithCurrentEpoch, partitionDataNoCurrentEpoch); + short lowestVersion = ApiKeys.OFFSET_FOR_LEADER_EPOCH.oldestVersion(); + testAllMessageRoundTripsBetweenVersions(lowestVersion, (short) 2, partitionDataWithCurrentEpoch, partitionDataNoCurrentEpoch); testAllMessageRoundTripsFromVersion((short) 2, partitionDataWithCurrentEpoch); // Version 3 adds the optional replica Id field @@ -475,22 +472,13 @@ public void 
testOffsetCommitRequestVersions() throws Exception { .setCommittedLeaderEpoch(10) .setCommittedMetadata(metadata) .setCommittedOffset(offset) - .setCommitTimestamp(20) )))) .setRetentionTimeMs(20); for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { OffsetCommitRequestData requestData = request.get(); - if (version < 1) { - requestData.setMemberId(""); - requestData.setGenerationIdOrMemberEpoch(-1); - } - if (version != 1) { - requestData.topics().get(0).partitions().get(0).setCommitTimestamp(-1); - } - - if (version < 2 || version > 4) { + if (version > 4) { requestData.setRetentionTimeMs(-1); } @@ -502,9 +490,7 @@ public void testOffsetCommitRequestVersions() throws Exception { requestData.setGroupInstanceId(null); } - if (version == 1) { - testEquivalentMessageRoundTrip(version, requestData); - } else if (version >= 2 && version <= 4) { + if (version >= 2 && version <= 4) { testAllMessageRoundTripsBetweenVersions(version, (short) 5, requestData, requestData); } else { testAllMessageRoundTripsFromVersion(version, requestData); @@ -627,7 +613,7 @@ public void testTxnOffsetCommitResponseVersions() throws Exception { } @Test - public void testOffsetFetchV0ToV7() throws Exception { + public void testOffsetFetchV1ToV7() throws Exception { String groupId = "groupId"; String topicName = "topic"; @@ -655,15 +641,15 @@ public void testOffsetFetchV0ToV7() throws Exception { for (int version : listOfVersionsNonBatchOffsetFetch) { final short finalVersion = (short) version; if (version < 2) { - assertThrows(NullPointerException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7(finalVersion, allPartitionData)); + assertThrows(NullPointerException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, allPartitionData)); } else { - testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7((short) version, allPartitionData); + testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, allPartitionData); } if (version < 7) { - assertThrows(UnsupportedVersionException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7(finalVersion, requireStableData)); + assertThrows(UnsupportedVersionException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData)); } else { - testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7(finalVersion, requireStableData); + testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData); } } @@ -695,17 +681,17 @@ public void testOffsetFetchV0ToV7() throws Exception { responseData.topics().get(0).partitions().get(0).setCommittedLeaderEpoch(-1); } - testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7((short) version, responseData); + testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, responseData); } } private void testAllMessageRoundTripsOffsetFetchV0ToV7(Message message) throws Exception { testDuplication(message); - testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7(message.lowestSupportedVersion(), message); + testAllMessageRoundTripsOffsetFetchFromVersionToV7(message.lowestSupportedVersion(), message); } - private void testAllMessageRoundTripsOffsetFetchFromVersionV0ToV7(short fromVersion, - Message message) throws Exception { + private void testAllMessageRoundTripsOffsetFetchFromVersionToV7(short fromVersion, + Message message) throws Exception { for (short version = fromVersion; version <= 7; version++) { testEquivalentMessageRoundTrip(version, message); } @@ -1131,15 +1117,16 @@ public void testMessageVersions() { @Test public 
void testDefaultValues() { - verifyWriteRaisesUve((short) 0, "validateOnly", - new CreateTopicsRequestData().setValidateOnly(true)); - verifyWriteSucceeds((short) 0, - new CreateTopicsRequestData().setValidateOnly(false)); - verifyWriteSucceeds((short) 0, + verifyWriteSucceeds((short) 2, new OffsetCommitRequestData().setRetentionTimeMs(123)); + verifyWriteRaisesUve((short) 5, "forgotten", new FetchRequestData().setForgottenTopicsData(singletonList( new FetchRequestData.ForgottenTopic().setTopic("foo")))); + verifyWriteSucceeds((short) 5, new FetchRequestData()); + verifyWriteSucceeds((short) 7, + new FetchRequestData().setForgottenTopicsData(singletonList( + new FetchRequestData.ForgottenTopic().setTopic("foo")))); } @Test @@ -1167,8 +1154,6 @@ public void testWriteNullForNonNullableFieldRaisesException() { for (short version : ApiKeys.CREATE_TOPICS.allVersions()) { verifyWriteRaisesNpe(version, createTopics); } - MetadataRequestData metadata = new MetadataRequestData().setTopics(null); - verifyWriteRaisesNpe((short) 0, metadata); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java index a171520b2c3ee..1dbc579db0375 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.junit.jupiter.api.Test; @@ -98,6 +99,29 @@ public void testToStringWithNullStructs() { message.toString(); } + /** + * Regression test for KAFKA-18199. Tests that the size of the varint encoding a tagged nullable + * struct's size is calculated correctly. + */ + @Test + public void testTaggedStructSize() { + NullableStructMessageData message = new NullableStructMessageData() + .setNullableStruct(null) + .setNullableStruct2(null) + .setNullableStruct3(null) + .setNullableStruct4(new NullableStructMessageData.MyStruct4() + .setMyInt(4) + .setMyString(new String(new char[121]))); + + // We want the struct to be 127 bytes long, so that the varint encoding of its size is one + // short of overflowing into a two-byte representation. An extra byte is added to the + // nullable struct size to account for the is-not-null flag. 
+ assertEquals(127, message.nullableStruct4().size(new ObjectSerializationCache(), (short) 2)); + + NullableStructMessageData newMessage = roundTrip(message, (short) 2); + assertEquals(message, newMessage); + } + private NullableStructMessageData deserialize(ByteBuffer buf, short version) { NullableStructMessageData message = new NullableStructMessageData(); message.read(new ByteBufferAccessor(buf.duplicate()), version); @@ -110,6 +134,8 @@ private ByteBuffer serialize(NullableStructMessageData message, short version) { private NullableStructMessageData roundTrip(NullableStructMessageData message, short version) { ByteBuffer buffer = serialize(message, version); + // Check size calculation + assertEquals(buffer.remaining(), message.size(new ObjectSerializationCache(), version)); return deserialize(buffer.duplicate(), version); } } diff --git a/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java index 9fc49f9506054..341e327cda904 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java @@ -360,6 +360,8 @@ private SimpleExampleMessageData roundTripSerde( short version ) { ByteBuffer buf = MessageUtil.toByteBuffer(message, version); + // Check size calculation + assertEquals(buf.remaining(), message.size(new ObjectSerializationCache(), version)); return deserialize(buf.duplicate(), version); } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java index f1d60d83bf216..f10670f8ab26c 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java @@ -177,23 +177,4 @@ public void testJmxPrefix() throws Exception { metrics.close(); } } - - @Test - public void testDeprecatedJmxPrefixWithDefaultMetrics() throws Exception { - @SuppressWarnings("deprecation") - JmxReporter reporter = new JmxReporter("my-prefix"); - - // for backwards compatibility, ensure prefix does not get overridden by the default empty namespace in metricscontext - MetricConfig metricConfig = new MetricConfig(); - Metrics metrics = new Metrics(metricConfig, new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM); - - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - try { - Sensor sensor = metrics.sensor("my-sensor"); - sensor.add(metrics.metricName("pack.bean1.avg", "grp1"), new Avg()); - assertEquals("my-prefix", server.getObjectInstance(new ObjectName("my-prefix:type=grp1")).getObjectName().getDomain()); - } finally { - metrics.close(); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/stats/SampledStatTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/stats/SampledStatTest.java index 680d7651c081f..180ed58b48c67 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/stats/SampledStatTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/stats/SampledStatTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.utils.Time; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import java.util.List; @@ -41,7 +40,6 @@ public void setup() { } @Test - @DisplayName("Sample should be purged if doesn't overlap the 
window") public void testSampleIsPurgedIfDoesntOverlap() { MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2); @@ -50,11 +48,10 @@ public void testSampleIsPurgedIfDoesntOverlap() { time.sleep(2500); double numSamples = stat.measure(config, time.milliseconds()); - assertEquals(0, numSamples); + assertEquals(0, numSamples, "Sample should be purged if doesn't overlap the window"); } @Test - @DisplayName("Sample should be kept if overlaps the window") public void testSampleIsKeptIfOverlaps() { MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2); @@ -63,11 +60,10 @@ public void testSampleIsKeptIfOverlaps() { time.sleep(1500); double numSamples = stat.measure(config, time.milliseconds()); - assertEquals(1, numSamples); + assertEquals(1, numSamples, "Sample should be kept if overlaps the window"); } @Test - @DisplayName("Sample should be kept if overlaps the window and is n+1") public void testSampleIsKeptIfOverlapsAndExtra() { MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2); @@ -80,7 +76,7 @@ public void testSampleIsKeptIfOverlapsAndExtra() { stat.record(config, 1, time.milliseconds()); double numSamples = stat.measure(config, time.milliseconds()); - assertEquals(3, numSamples); + assertEquals(3, numSamples, "Sample should be kept if overlaps the window and is n+1"); } // Creates a sample with events at the start and at the end. Positions clock at the end. diff --git a/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java b/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java index ad94ae1dcd9f7..eae6fb0b0a036 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SaslChannelBuilderTest.java @@ -138,9 +138,8 @@ public void testNativeGssapiCredentials() throws Exception { */ @Test public void testClientChannelBuilderWithBrokerConfigs() throws Exception { - Map configs = new HashMap<>(); CertStores certStores = new CertStores(false, "client", "localhost"); - configs.putAll(certStores.getTrustingConfig(certStores)); + Map configs = new HashMap<>(certStores.getTrustingConfig(certStores)); configs.put(SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka"); configs.putAll(new ConfigDef().withClientSaslSupport().parse(configs)); for (Field field : BrokerSecurityConfigs.class.getFields()) { @@ -167,7 +166,7 @@ public void testClientChannelBuilderWithBrokerConfigs() throws Exception { private SaslChannelBuilder createGssapiChannelBuilder(Map jaasContexts, GSSManager gssManager) { SaslChannelBuilder channelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts, SecurityProtocol.SASL_PLAINTEXT, new ListenerName("GSSAPI"), false, "GSSAPI", - true, null, null, null, Time.SYSTEM, new LogContext(), defaultApiVersionsSupplier()) { + null, null, null, Time.SYSTEM, new LogContext(), defaultApiVersionsSupplier()) { @Override protected GSSManager gssManager() { @@ -206,7 +205,7 @@ private SaslChannelBuilder createChannelBuilder(SecurityProtocol securityProtoco JaasContext jaasContext = new JaasContext("jaasContext", JaasContext.Type.SERVER, jaasConfig, null); Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); return new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, new ListenerName(saslMechanism), - false, saslMechanism, true, null, + false, saslMechanism, null, null, null, Time.SYSTEM, new LogContext(), 
defaultApiVersionsSupplier()); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index b4c73d64d38c4..347f76135866d 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -891,7 +891,7 @@ private KafkaMetric getMetric(String name, Map tags) throws Exce .filter(entry -> entry.getKey().name().equals(name) && entry.getKey().tags().equals(tags)) .findFirst(); - if (!metric.isPresent()) + if (metric.isEmpty()) throw new Exception(String.format("Could not find metric called %s with tags %s", name, tags.toString())); return metric.get().getValue(); @@ -907,8 +907,8 @@ public void testLowestPriorityChannel() throws Exception { } assertNotNull(selector.lowestPriorityChannel()); for (int i = conns - 1; i >= 0; i--) { - if (i != 2) - assertEquals("", blockingRequest(String.valueOf(i), "")); + if (i != 2) + assertEquals("", blockingRequest(String.valueOf(i), "")); time.sleep(10); } assertEquals("2", selector.lowestPriorityChannel().id()); @@ -1112,7 +1112,7 @@ private KafkaMetric getMetric(String name) throws Exception { Optional> metric = metrics.metrics().entrySet().stream() .filter(entry -> entry.getKey().name().equals(name)) .findFirst(); - if (!metric.isPresent()) + if (metric.isEmpty()) throw new Exception(String.format("Could not find metric called %s", name)); return metric.get().getValue(); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java index a1b8213d12962..b1a7aa5bc1fa9 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslVersionsTransportLayerTest.java @@ -76,7 +76,7 @@ public static Stream parameters() { * Tests that connection success with the default TLS version. * Note that debug mode for javax.net.ssl can be enabled via {@code System.setProperty("javax.net.debug", "ssl:handshake");} */ - @ParameterizedTest(name = "tlsServerProtocol = {0}, tlsClientProtocol = {1}") + @ParameterizedTest(name = "testTlsDefaults(tlsServerProtocol = {0}, tlsClientProtocol = {1})") @MethodSource("parameters") public void testTlsDefaults(List serverProtocols, List clientProtocols) throws Exception { // Create certificates for use by client and server. Add server cert to client truststore and vice versa. 
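Note on the display-name change just above: the hunk embeds the test method name in the @ParameterizedTest name template so each generated case is self-identifying in test reports. As a point of reference only, here is a minimal JUnit 5 sketch of that naming convention; the class, method, and values below are illustrative and not taken from this patch.

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import static org.junit.jupiter.api.Assertions.assertTrue;

class DisplayNameConventionExample {
    // Embedding the method name in the name template (alongside the {0} argument
    // placeholder) keeps parameterized cases recognizable when report tooling shows
    // only the rendered display name, mirroring the rename in the hunk above.
    @ParameterizedTest(name = "testIsPositive(value = {0})")
    @ValueSource(ints = {1, 2, 3})
    void testIsPositive(int value) {
        assertTrue(value > 0);
    }
}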
diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java index 832d276a02d01..4461108713c07 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.compress.GzipCompression; import org.apache.kafka.common.header.Header; @@ -425,22 +424,6 @@ public void testFormatConversionWithPartialMessage() throws IOException { Records messageV0 = slice.downConvert(RecordBatch.MAGIC_VALUE_V0, 0, time).records(); assertTrue(batches(messageV0).isEmpty(), "No message should be there"); assertEquals(size - 1, messageV0.sizeInBytes(), "There should be " + (size - 1) + " bytes"); - - // Lazy down-conversion will not return any messages for a partial input batch - TopicPartition tp = new TopicPartition("topic-1", 0); - LazyDownConversionRecords lazyRecords = new LazyDownConversionRecords(tp, slice, RecordBatch.MAGIC_VALUE_V0, 0, Time.SYSTEM); - Iterator> it = lazyRecords.iterator(16 * 1024L); - assertFalse(it.hasNext(), "No messages should be returned"); - } - - @Test - public void testFormatConversionWithNoMessages() { - TopicPartition tp = new TopicPartition("topic-1", 0); - LazyDownConversionRecords lazyRecords = new LazyDownConversionRecords(tp, MemoryRecords.EMPTY, RecordBatch.MAGIC_VALUE_V0, - 0, Time.SYSTEM); - assertEquals(0, lazyRecords.sizeInBytes()); - Iterator> it = lazyRecords.iterator(16 * 1024L); - assertFalse(it.hasNext(), "No messages should be returned"); } @Test @@ -637,23 +620,6 @@ private void downConvertAndVerifyRecords(List initialRecords, convertedRecords.add(fileRecords.downConvert(toMagic, firstOffset, time).records()); verifyConvertedRecords(initialRecords, initialOffsets, convertedRecords, compression, toMagic); convertedRecords.clear(); - - // Test the lazy down-conversion path - List maximumReadSize = asList(16L * 1024L, - (long) fileRecords.sizeInBytes(), - (long) fileRecords.sizeInBytes() - 1, - (long) fileRecords.sizeInBytes() / 4, - maxBatchSize + 1, - 1L); - for (long readSize : maximumReadSize) { - TopicPartition tp = new TopicPartition("topic-1", 0); - LazyDownConversionRecords lazyRecords = new LazyDownConversionRecords(tp, fileRecords, toMagic, firstOffset, Time.SYSTEM); - Iterator> it = lazyRecords.iterator(readSize); - while (it.hasNext()) - convertedRecords.add(it.next().records()); - verifyConvertedRecords(initialRecords, initialOffsets, convertedRecords, compression, toMagic); - convertedRecords.clear(); - } } private void verifyConvertedRecords(List initialRecords, diff --git a/clients/src/test/java/org/apache/kafka/common/record/LazyDownConversionRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/LazyDownConversionRecordsTest.java deleted file mode 100644 index 59ac60e3a80df..0000000000000 --- a/clients/src/test/java/org/apache/kafka/common/record/LazyDownConversionRecordsTest.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.common.record; - -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.compress.Compression; -import org.apache.kafka.common.header.Header; -import org.apache.kafka.common.header.internals.RecordHeader; -import org.apache.kafka.common.network.TransferableChannel; -import org.apache.kafka.common.utils.Time; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import static java.util.Arrays.asList; -import static org.apache.kafka.common.utils.Utils.utf8; -import static org.apache.kafka.test.TestUtils.tempFile; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class LazyDownConversionRecordsTest { - - /** - * Test the lazy down-conversion path in the presence of commit markers. When converting to V0 or V1, these batches - * are dropped. If there happen to be no more batches left to convert, we must get an overflow message batch after - * conversion. - */ - @Test - public void testConversionOfCommitMarker() throws IOException { - MemoryRecords recordsToConvert = MemoryRecords.withEndTransactionMarker(0, Time.SYSTEM.milliseconds(), RecordBatch.NO_PARTITION_LEADER_EPOCH, - 1, (short) 1, new EndTransactionMarker(ControlRecordType.COMMIT, 0)); - MemoryRecords convertedRecords = convertRecords(recordsToConvert, (byte) 1, recordsToConvert.sizeInBytes()); - ByteBuffer buffer = convertedRecords.buffer(); - - // read the offset and the batch length - buffer.getLong(); - int sizeOfConvertedRecords = buffer.getInt(); - - // assert we got an overflow message batch - assertTrue(sizeOfConvertedRecords > buffer.limit()); - assertFalse(convertedRecords.batchIterator().hasNext()); - } - - private static Collection parameters() { - List arguments = new ArrayList<>(); - for (byte toMagic = RecordBatch.MAGIC_VALUE_V0; toMagic <= RecordBatch.CURRENT_MAGIC_VALUE; toMagic++) { - for (boolean overflow : asList(true, false)) { - arguments.add(Arguments.of(CompressionType.NONE, toMagic, overflow)); - arguments.add(Arguments.of(CompressionType.GZIP, toMagic, overflow)); - } - } - return arguments; - } - - /** - * Test the lazy down-conversion path. - * - * If `overflow` is true, the number of bytes we want to convert is much larger - * than the number of bytes we get after conversion. This causes overflow message batch(es) to be appended towards the - * end of the converted output. 
- */ - @ParameterizedTest(name = "compressionType={0}, toMagic={1}, overflow={2}") - @MethodSource("parameters") - public void testConversion(CompressionType compressionType, byte toMagic, boolean overflow) throws IOException { - doTestConversion(compressionType, toMagic, overflow); - } - - private void doTestConversion(CompressionType compressionType, byte toMagic, boolean testConversionOverflow) throws IOException { - List offsets = asList(0L, 2L, 3L, 9L, 11L, 15L, 16L, 17L, 22L, 24L); - - Header[] headers = {new RecordHeader("headerKey1", "headerValue1".getBytes()), - new RecordHeader("headerKey2", "headerValue2".getBytes()), - new RecordHeader("headerKey3", "headerValue3".getBytes())}; - - List records = asList( - new SimpleRecord(1L, "k1".getBytes(), "hello".getBytes()), - new SimpleRecord(2L, "k2".getBytes(), "goodbye".getBytes()), - new SimpleRecord(3L, "k3".getBytes(), "hello again".getBytes()), - new SimpleRecord(4L, "k4".getBytes(), "goodbye for now".getBytes()), - new SimpleRecord(5L, "k5".getBytes(), "hello again".getBytes()), - new SimpleRecord(6L, "k6".getBytes(), "I sense indecision".getBytes()), - new SimpleRecord(7L, "k7".getBytes(), "what now".getBytes()), - new SimpleRecord(8L, "k8".getBytes(), "running out".getBytes(), headers), - new SimpleRecord(9L, "k9".getBytes(), "ok, almost done".getBytes()), - new SimpleRecord(10L, "k10".getBytes(), "finally".getBytes(), headers)); - assertEquals(offsets.size(), records.size(), "incorrect test setup"); - - ByteBuffer buffer = ByteBuffer.allocate(1024); - Compression compression = Compression.of(compressionType).build(); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, - TimestampType.CREATE_TIME, 0L); - for (int i = 0; i < 3; i++) - builder.appendWithOffset(offsets.get(i), records.get(i)); - builder.close(); - - builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, - 0L); - for (int i = 3; i < 6; i++) - builder.appendWithOffset(offsets.get(i), records.get(i)); - builder.close(); - - builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, - 0L); - for (int i = 6; i < 10; i++) - builder.appendWithOffset(offsets.get(i), records.get(i)); - builder.close(); - buffer.flip(); - - MemoryRecords recordsToConvert = MemoryRecords.readableRecords(buffer); - int numBytesToConvert = recordsToConvert.sizeInBytes(); - if (testConversionOverflow) - numBytesToConvert *= 2; - - MemoryRecords convertedRecords = convertRecords(recordsToConvert, toMagic, numBytesToConvert); - verifyDownConvertedRecords(records, offsets, convertedRecords, compressionType, toMagic); - } - - private static MemoryRecords convertRecords(MemoryRecords recordsToConvert, byte toMagic, int bytesToConvert) throws IOException { - try (FileRecords inputRecords = FileRecords.open(tempFile())) { - inputRecords.append(recordsToConvert); - inputRecords.flush(); - - LazyDownConversionRecords lazyRecords = new LazyDownConversionRecords(new TopicPartition("test", 1), - inputRecords, toMagic, 0L, Time.SYSTEM); - LazyDownConversionRecordsSend lazySend = lazyRecords.toSend(); - File outputFile = tempFile(); - ByteBuffer convertedRecordsBuffer; - try (TransferableChannel channel = toTransferableChannel(FileChannel.open(outputFile.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE))) { - int written = 0; - while (written < bytesToConvert) written += lazySend.writeTo(channel, written, bytesToConvert - written); 
- try (FileRecords convertedRecords = FileRecords.open(outputFile, true, written, false)) { - convertedRecordsBuffer = ByteBuffer.allocate(convertedRecords.sizeInBytes()); - convertedRecords.readInto(convertedRecordsBuffer, 0); - } - } - return MemoryRecords.readableRecords(convertedRecordsBuffer); - } - } - - private static TransferableChannel toTransferableChannel(FileChannel channel) { - return new TransferableChannel() { - - @Override - public boolean hasPendingWrites() { - return false; - } - - @Override - public long transferFrom(FileChannel fileChannel, long position, long count) throws IOException { - return fileChannel.transferTo(position, count, channel); - } - - @Override - public boolean isOpen() { - return channel.isOpen(); - } - - @Override - public void close() throws IOException { - channel.close(); - } - - @Override - public int write(ByteBuffer src) throws IOException { - return channel.write(src); - } - - @Override - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - return channel.write(srcs, offset, length); - } - - @Override - public long write(ByteBuffer[] srcs) throws IOException { - return channel.write(srcs); - } - }; - } - - private static void verifyDownConvertedRecords(List initialRecords, - List initialOffsets, - MemoryRecords downConvertedRecords, - CompressionType compressionType, - byte toMagic) { - int i = 0; - for (RecordBatch batch : downConvertedRecords.batches()) { - assertTrue(batch.magic() <= toMagic, "Magic byte should be lower than or equal to " + toMagic); - if (batch.magic() == RecordBatch.MAGIC_VALUE_V0) - assertEquals(TimestampType.NO_TIMESTAMP_TYPE, batch.timestampType()); - else - assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); - assertEquals(compressionType, batch.compressionType(), "Compression type should not be affected by conversion"); - for (Record record : batch) { - assertTrue(record.hasMagic(batch.magic()), "Inner record should have magic " + toMagic); - assertEquals(initialOffsets.get(i).longValue(), record.offset(), "Offset should not change"); - assertEquals(utf8(initialRecords.get(i).key()), utf8(record.key()), "Key should not change"); - assertEquals(utf8(initialRecords.get(i).value()), utf8(record.value()), "Value should not change"); - assertFalse(record.hasTimestampType(TimestampType.LOG_APPEND_TIME)); - if (batch.magic() == RecordBatch.MAGIC_VALUE_V0) { - assertEquals(RecordBatch.NO_TIMESTAMP, record.timestamp()); - assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); - assertTrue(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); - } else if (batch.magic() == RecordBatch.MAGIC_VALUE_V1) { - assertEquals(initialRecords.get(i).timestamp(), record.timestamp(), "Timestamp should not change"); - assertTrue(record.hasTimestampType(TimestampType.CREATE_TIME)); - assertFalse(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); - } else { - assertEquals(initialRecords.get(i).timestamp(), record.timestamp(), "Timestamp should not change"); - assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); - assertFalse(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); - assertArrayEquals(initialRecords.get(i).headers(), record.headers(), "Headers should not change"); - } - i += 1; - } - } - assertEquals(initialOffsets.size(), i); - } -} diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java index 80a77d647b474..3818976e423fd 100644 --- 
a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common.record; -import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.header.internals.RecordHeaders; @@ -291,8 +290,7 @@ public void testFilterToPreservesPartitionLeaderEpoch(Args args) { builder.append(12L, null, "c".getBytes()); ByteBuffer filtered = ByteBuffer.allocate(2048); - builder.build().filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), filtered, - Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + builder.build().filterTo(new RetainNonNullKeysFilter(), filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); @@ -332,7 +330,7 @@ public void testFilterToEmptyBatchRetention(Args args) { builder.close(); MemoryRecords records = builder.build(); ByteBuffer filtered = ByteBuffer.allocate(2048); - MemoryRecords.FilterResult filterResult = records.filterTo(new TopicPartition("foo", 0), + MemoryRecords.FilterResult filterResult = records.filterTo( new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { @@ -345,7 +343,7 @@ protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { // delete the records return false; } - }, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, filtered, BufferSupplier.NO_CACHING); // Verify filter result assertEquals(numRecords, filterResult.messagesRead()); @@ -394,7 +392,7 @@ public void testEmptyBatchRetention() { ByteBuffer filtered = ByteBuffer.allocate(2048); MemoryRecords records = MemoryRecords.readableRecords(buffer); - MemoryRecords.FilterResult filterResult = records.filterTo(new TopicPartition("foo", 0), + MemoryRecords.FilterResult filterResult = records.filterTo( new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { @@ -406,7 +404,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return false; } - }, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, filtered, BufferSupplier.NO_CACHING); // Verify filter result assertEquals(0, filterResult.messagesRead()); @@ -442,7 +440,7 @@ public void testEmptyBatchDeletion() { ByteBuffer filtered = ByteBuffer.allocate(2048); MemoryRecords records = MemoryRecords.readableRecords(buffer); - MemoryRecords.FilterResult filterResult = records.filterTo(new TopicPartition("foo", 0), + MemoryRecords.FilterResult filterResult = records.filterTo( new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { @@ -453,7 +451,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return false; } - }, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, filtered, BufferSupplier.NO_CACHING); // Verify filter result assertEquals(0, filterResult.outputBuffer().position()); @@ -529,7 +527,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { return new BatchRetentionResult(BatchRetention.RETAIN_EMPTY, false); } }; - 
builder.build().filterTo(new TopicPartition("random", 0), recordFilter, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + builder.build().filterTo(recordFilter, filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); @@ -618,7 +616,7 @@ public void testFilterToBatchDiscard(Args args) { buffer.flip(); ByteBuffer filtered = ByteBuffer.allocate(2048); - MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new MemoryRecords.RecordFilter(0, 0) { + MemoryRecords.readableRecords(buffer).filterTo(new MemoryRecords.RecordFilter(0, 0) { @Override protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { // discard the second and fourth batches @@ -631,7 +629,7 @@ protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { return true; } - }, filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + }, filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); @@ -667,8 +665,7 @@ public void testFilterToAlreadyCompactedLog(Args args) { buffer.flip(); ByteBuffer filtered = ByteBuffer.allocate(2048); - MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), - filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + MemoryRecords.readableRecords(buffer).filterTo(new RetainNonNullKeysFilter(), filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); @@ -743,8 +740,7 @@ public void testFilterToPreservesProducerInfo(Args args) { buffer.flip(); ByteBuffer filtered = ByteBuffer.allocate(2048); - MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), - filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + MemoryRecords.readableRecords(buffer).filterTo(new RetainNonNullKeysFilter(), filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); @@ -835,9 +831,8 @@ public void testFilterToWithUndersizedBuffer(Args args) { while (buffer.hasRemaining()) { output.rewind(); - MemoryRecords.FilterResult result = MemoryRecords.readableRecords(buffer) - .filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), output, Integer.MAX_VALUE, - BufferSupplier.NO_CACHING); + MemoryRecords.FilterResult result = MemoryRecords.readableRecords(buffer).filterTo( + new RetainNonNullKeysFilter(), output, BufferSupplier.NO_CACHING); buffer.position(buffer.position() + result.bytesRead()); result.outputBuffer().flip(); @@ -884,8 +879,7 @@ public void testFilterTo(Args args) { ByteBuffer filtered = ByteBuffer.allocate(2048); MemoryRecords.FilterResult result = MemoryRecords.readableRecords(buffer).filterTo( - new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), filtered, Integer.MAX_VALUE, - BufferSupplier.NO_CACHING); + new RetainNonNullKeysFilter(), filtered, BufferSupplier.NO_CACHING); filtered.flip(); @@ -928,14 +922,14 @@ public void testFilterTo(Args args) { RecordBatch batch = batches.get(i); assertEquals(expectedStartOffsets.get(i).longValue(), batch.baseOffset()); assertEquals(expectedEndOffsets.get(i).longValue(), batch.lastOffset()); - assertEquals(magic, batch.magic()); + assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic()); assertEquals(compression.type(), 
batch.compressionType()); if (magic >= RecordBatch.MAGIC_VALUE_V1) { assertEquals(expectedMaxTimestamps.get(i).longValue(), batch.maxTimestamp()); assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); } else { assertEquals(RecordBatch.NO_TIMESTAMP, batch.maxTimestamp()); - assertEquals(TimestampType.NO_TIMESTAMP_TYPE, batch.timestampType()); + assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); } } @@ -1003,8 +997,7 @@ public void testFilterToPreservesLogAppendTime(Args args) { buffer.flip(); ByteBuffer filtered = ByteBuffer.allocate(2048); - MemoryRecords.readableRecords(buffer).filterTo(new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), - filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); + MemoryRecords.readableRecords(buffer).filterTo(new RetainNonNullKeysFilter(), filtered, BufferSupplier.NO_CACHING); filtered.flip(); MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AlterPartitionRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AlterPartitionRequestTest.java index d5e4e39fd9915..16900fc5dac53 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AlterPartitionRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AlterPartitionRequestTest.java @@ -61,7 +61,7 @@ public void testBuildAlterPartitionRequest(short version) { request.topics().add(topicData); - AlterPartitionRequest.Builder builder = new AlterPartitionRequest.Builder(request, version > 1); + AlterPartitionRequest.Builder builder = new AlterPartitionRequest.Builder(request); AlterPartitionRequest alterPartitionRequest = builder.build(version); assertEquals(1, alterPartitionRequest.data().topics().size()); assertEquals(1, alterPartitionRequest.data().topics().get(0).partitions().size()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java index bfb327e3de07c..f87ad0fbf54ba 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java @@ -23,11 +23,8 @@ import org.apache.kafka.common.message.ApiMessageType.ListenerType; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection; -import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey; import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestUtils; @@ -88,8 +85,8 @@ public void shouldHaveCorrectDefaultApiVersionsResponse(ApiMessageType.ListenerT public void shouldHaveCommonlyAgreedApiVersionResponseWithControllerOnForwardableAPIs() { final ApiKeys forwardableAPIKey = ApiKeys.CREATE_ACLS; final ApiKeys nonForwardableAPIKey = ApiKeys.JOIN_GROUP; - final short minVersion = 0; - final short maxVersion = 1; + final short minVersion = 2; + final short maxVersion = 3; Map activeControllerApiVersions = Utils.mkMap( Utils.mkEntry(forwardableAPIKey, new ApiVersion() .setApiKey(forwardableAPIKey.id) @@ -103,7 +100,6 @@ public void 
shouldHaveCommonlyAgreedApiVersionResponseWithControllerOnForwardabl ApiVersionCollection commonResponse = ApiVersionsResponse.intersectForwardableApis( ApiMessageType.ListenerType.ZK_BROKER, - RecordVersion.current(), activeControllerApiVersions, true, false @@ -115,63 +111,12 @@ public void shouldHaveCommonlyAgreedApiVersionResponseWithControllerOnForwardabl ApiKeys.JOIN_GROUP.latestVersion(), commonResponse); } - @Test - public void shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue() { - ApiVersionsResponse response = new ApiVersionsResponse.Builder(). - setThrottleTimeMs(10). - setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.V1, - ListenerType.ZK_BROKER, - true, - true)). - setSupportedFeatures(Features.emptySupportedFeatures()). - setFinalizedFeatures(Collections.emptyMap()). - setFinalizedFeaturesEpoch(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH). - build(); - verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); - assertEquals(10, response.throttleTimeMs()); - assertTrue(response.data().supportedFeatures().isEmpty()); - assertTrue(response.data().finalizedFeatures().isEmpty()); - assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch()); - } - - @Test - public void shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() { - ApiVersionsResponse response = new ApiVersionsResponse.Builder(). - setThrottleTimeMs(10). - setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.V1, - ListenerType.ZK_BROKER, - true, - true)). - setSupportedFeatures(Features.supportedFeatures( - Utils.mkMap(Utils.mkEntry("feature", new SupportedVersionRange((short) 1, (short) 4))))). - setFinalizedFeatures(Utils.mkMap(Utils.mkEntry("feature", (short) 3))). - setFinalizedFeaturesEpoch(10L). - build(); - - verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); - assertEquals(10, response.throttleTimeMs()); - assertEquals(1, response.data().supportedFeatures().size()); - SupportedFeatureKey sKey = response.data().supportedFeatures().find("feature"); - assertNotNull(sKey); - assertEquals(1, sKey.minVersion()); - assertEquals(4, sKey.maxVersion()); - assertEquals(1, response.data().finalizedFeatures().size()); - FinalizedFeatureKey fKey = response.data().finalizedFeatures().find("feature"); - assertNotNull(fKey); - assertEquals(3, fKey.minVersionLevel()); - assertEquals(3, fKey.maxVersionLevel()); - assertEquals(10, response.data().finalizedFeaturesEpoch()); - } - @ParameterizedTest @EnumSource(names = {"ZK_BROKER", "BROKER"}) - public void shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle(ListenerType listenerType) { + public void shouldReturnAllKeysWhenThrottleMsIsDefaultThrottle(ListenerType listenerType) { ApiVersionsResponse response = new ApiVersionsResponse.Builder(). setThrottleTimeMs(AbstractResponse.DEFAULT_THROTTLE_TIME). setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.current(), listenerType, true, true)). @@ -191,7 +136,6 @@ public void shouldCreateApiResponseWithTelemetryWhenEnabled() { ApiVersionsResponse response = new ApiVersionsResponse.Builder(). setThrottleTimeMs(10). setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.V1, ListenerType.BROKER, true, true)). @@ -207,7 +151,6 @@ public void shouldNotCreateApiResponseWithTelemetryWhenDisabled() { ApiVersionsResponse response = new ApiVersionsResponse.Builder(). setThrottleTimeMs(10). 
setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.V1, ListenerType.BROKER, true, false)). @@ -223,7 +166,6 @@ public void testMetadataQuorumApisAreDisabled() { ApiVersionsResponse response = new ApiVersionsResponse.Builder(). setThrottleTimeMs(AbstractResponse.DEFAULT_THROTTLE_TIME). setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.current(), ListenerType.ZK_BROKER, true, true)). @@ -278,7 +220,6 @@ public void testAlterV0Features(boolean alterV0Features) { new SupportedVersionRange((short) 0, (short) 1))); ApiVersionsResponse response = new ApiVersionsResponse.Builder(). setApiVersions(ApiVersionsResponse.filterApis( - RecordVersion.current(), ListenerType.BROKER, true, true)). diff --git a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java index 4334b2cba5a29..1714c053a9bab 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java @@ -60,17 +60,7 @@ public void shouldThrowOnV0IfNotLiteral() { @Test public void shouldThrowOnIfUnknown() { - assertThrows(IllegalArgumentException.class, () -> new CreateAclsRequest(data(UNKNOWN_ACL1), V0)); - } - - @Test - public void shouldRoundTripV0() { - final CreateAclsRequest original = new CreateAclsRequest(data(LITERAL_ACL1, LITERAL_ACL2), V0); - final ByteBuffer buffer = original.serialize(); - - final CreateAclsRequest result = CreateAclsRequest.parse(buffer, V0); - - assertRequestEquals(original, result); + assertThrows(IllegalArgumentException.class, () -> new CreateAclsRequest(data(UNKNOWN_ACL1), V1)); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java index 6a7d2ff94f760..0f7cc66de850e 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java @@ -62,32 +62,6 @@ public void shouldThrowOnUnknownElements() { assertThrows(IllegalArgumentException.class, () -> new DeleteAclsRequest.Builder(requestData(UNKNOWN_FILTER)).build(V1)); } - @Test - public void shouldRoundTripLiteralV0() { - final DeleteAclsRequest original = new DeleteAclsRequest.Builder(requestData(LITERAL_FILTER)).build(V0); - final ByteBuffer buffer = original.serialize(); - - final DeleteAclsRequest result = DeleteAclsRequest.parse(buffer, V0); - - assertRequestEquals(original, result); - } - - @Test - public void shouldRoundTripAnyV0AsLiteral() { - final DeleteAclsRequest original = new DeleteAclsRequest.Builder(requestData(ANY_FILTER)).build(V0); - final DeleteAclsRequest expected = new DeleteAclsRequest.Builder(requestData( - new AclBindingFilter(new ResourcePatternFilter( - ANY_FILTER.patternFilter().resourceType(), - ANY_FILTER.patternFilter().name(), - PatternType.LITERAL), - ANY_FILTER.entryFilter())) - ).build(V0); - - final DeleteAclsRequest result = DeleteAclsRequest.parse(original.serialize(), V0); - - assertRequestEquals(expected, result); - } - @Test public void shouldRoundTripV1() { final DeleteAclsRequest original = new DeleteAclsRequest.Builder( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java index 
01a81179bf090..a4abf88c83703 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.acl.AclPermissionType; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.DeleteAclsResponseData; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl; @@ -36,7 +35,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class DeleteAclsResponseTest { - private static final short V0 = 0; private static final short V1 = 1; private static final DeleteAclsMatchingAcl LITERAL_ACL1 = new DeleteAclsMatchingAcl() @@ -84,15 +82,6 @@ public class DeleteAclsResponseTest { private static final DeleteAclsFilterResult UNKNOWN_RESPONSE = new DeleteAclsFilterResult().setMatchingAcls(singletonList( UNKNOWN_ACL)); - @Test - public void shouldThrowOnV0IfNotLiteral() { - assertThrows(UnsupportedVersionException.class, () -> new DeleteAclsResponse( - new DeleteAclsResponseData() - .setThrottleTimeMs(10) - .setFilterResults(singletonList(PREFIXED_RESPONSE)), - V0)); - } - @Test public void shouldThrowOnIfUnknown() { assertThrows(IllegalArgumentException.class, () -> new DeleteAclsResponse( @@ -102,19 +91,6 @@ public void shouldThrowOnIfUnknown() { V1)); } - @Test - public void shouldRoundTripV0() { - final DeleteAclsResponse original = new DeleteAclsResponse( - new DeleteAclsResponseData() - .setThrottleTimeMs(10) - .setFilterResults(singletonList(LITERAL_RESPONSE)), - V0); - final ByteBuffer buffer = original.serialize(V0); - - final DeleteAclsResponse result = DeleteAclsResponse.parse(buffer, V0); - assertEquals(original.filterResults(), result.filterResults()); - } - @Test public void shouldRoundTripV1() { final DeleteAclsResponse original = new DeleteAclsResponse( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java index 922cbd8bbf251..e09c1eee72155 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.acl.AclBindingFilter; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.acl.AclPermissionType; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.resource.ResourceType; @@ -32,7 +31,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class DescribeAclsRequestTest { - private static final short V0 = 0; private static final short V1 = 1; private static final AclBindingFilter LITERAL_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.TOPIC, "foo", PatternType.LITERAL), @@ -47,43 +45,9 @@ public class DescribeAclsRequestTest { private static final AclBindingFilter UNKNOWN_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.UNKNOWN, "foo", PatternType.LITERAL), new AccessControlEntryFilter("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.DENY)); - 
@Test - public void shouldThrowOnV0IfPrefixed() { - assertThrows(UnsupportedVersionException.class, () -> new DescribeAclsRequest.Builder(PREFIXED_FILTER).build(V0)); - } - @Test public void shouldThrowIfUnknown() { - assertThrows(IllegalArgumentException.class, () -> new DescribeAclsRequest.Builder(UNKNOWN_FILTER).build(V0)); - } - - @Test - public void shouldRoundTripLiteralV0() { - final DescribeAclsRequest original = new DescribeAclsRequest.Builder(LITERAL_FILTER).build(V0); - final DescribeAclsRequest result = DescribeAclsRequest.parse(original.serialize(), V0); - - assertRequestEquals(original, result); - } - - @Test - public void shouldRoundTripAnyV0AsLiteral() { - final DescribeAclsRequest original = new DescribeAclsRequest.Builder(ANY_FILTER).build(V0); - final DescribeAclsRequest expected = new DescribeAclsRequest.Builder( - new AclBindingFilter(new ResourcePatternFilter( - ANY_FILTER.patternFilter().resourceType(), - ANY_FILTER.patternFilter().name(), - PatternType.LITERAL), - ANY_FILTER.entryFilter())).build(V0); - - final DescribeAclsRequest result = DescribeAclsRequest.parse(original.serialize(), V0); - assertRequestEquals(expected, result); - } - - @Test - public void shouldRoundTripLiteralV1() { - final DescribeAclsRequest original = new DescribeAclsRequest.Builder(LITERAL_FILTER).build(V1); - final DescribeAclsRequest result = DescribeAclsRequest.parse(original.serialize(), V1); - assertRequestEquals(original, result); + assertThrows(IllegalArgumentException.class, () -> new DescribeAclsRequest.Builder(UNKNOWN_FILTER).build(V1)); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java index 25dcf2e3d1976..243b3a80e6f29 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.acl.AclBinding; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.acl.AclPermissionType; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.DescribeAclsResponseData; import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription; import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource; @@ -43,7 +42,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class DescribeAclsResponseTest { - private static final short V0 = 0; private static final short V1 = 1; private static final AclDescription ALLOW_CREATE_ACL = buildAclDescription( @@ -82,30 +80,10 @@ public class DescribeAclsResponseTest { PatternType.LITERAL, Collections.singletonList(DENY_READ_ACL)); - @Test - public void shouldThrowOnV0IfNotLiteral() { - assertThrows(UnsupportedVersionException.class, - () -> buildResponse(10, Errors.NONE, Collections.singletonList(PREFIXED_ACL1)).serialize(V0)); - } - @Test public void shouldThrowIfUnknown() { assertThrows(IllegalArgumentException.class, - () -> buildResponse(10, Errors.NONE, Collections.singletonList(UNKNOWN_ACL)).serialize(V0)); - } - - @Test - public void shouldRoundTripV0() { - List resources = Arrays.asList(LITERAL_ACL1, LITERAL_ACL2); - final DescribeAclsResponse original = buildResponse(10, Errors.NONE, resources); - final ByteBuffer buffer = original.serialize(V0); - - final DescribeAclsResponse result = 
DescribeAclsResponse.parse(buffer, V0); - assertResponseEquals(original, result); - - final DescribeAclsResponse result2 = buildResponse(10, Errors.NONE, DescribeAclsResponse.aclsResources( - DescribeAclsResponse.aclBindings(resources))); - assertResponseEquals(original, result2); + () -> buildResponse(10, Errors.NONE, Collections.singletonList(UNKNOWN_ACL)).serialize(V1)); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java index a330190895790..60d10a689394c 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java @@ -19,15 +19,12 @@ import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.JoinGroupRequestData; -import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; import java.util.Arrays; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; @@ -68,20 +65,4 @@ public void testRequestVersionCompatibilityFailBuild() { .setProtocolType("consumer") ).build((short) 4)); } - - @Test - public void testRebalanceTimeoutDefaultsToSessionTimeoutV0() { - int sessionTimeoutMs = 30000; - short version = 0; - - ByteBuffer buffer = MessageUtil.toByteBuffer(new JoinGroupRequestData() - .setGroupId("groupId") - .setMemberId("consumerId") - .setProtocolType("consumer") - .setSessionTimeoutMs(sessionTimeoutMs), version); - - JoinGroupRequest request = JoinGroupRequest.parse(buffer, version); - assertEquals(sessionTimeoutMs, request.data().sessionTimeoutMs()); - assertEquals(sessionTimeoutMs, request.data().rebalanceTimeoutMs()); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java index 5bb1186b36ece..fd1d585206497 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java @@ -54,7 +54,7 @@ public void testDuplicatePartitions() { ListOffsetsRequestData data = new ListOffsetsRequestData() .setTopics(topics) .setReplicaId(-1); - ListOffsetsRequest request = ListOffsetsRequest.parse(MessageUtil.toByteBuffer(data, (short) 0), (short) 0); + ListOffsetsRequest request = ListOffsetsRequest.parse(MessageUtil.toByteBuffer(data, (short) 1), (short) 1); assertEquals(Collections.singleton(new TopicPartition("topic", 0)), request.duplicatePartitions()); assertEquals(0, data.timeoutMs()); // default value } @@ -93,47 +93,15 @@ public void testGetErrorResponse() { } } - @Test - public void testGetErrorResponseV0() { - List topics = Collections.singletonList( - new ListOffsetsTopic() - .setName("topic") - .setPartitions(Collections.singletonList( - new ListOffsetsPartition() - .setPartitionIndex(0)))); - ListOffsetsRequest request = ListOffsetsRequest.Builder - .forConsumer(true, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(topics) - .build((short) 0); - ListOffsetsResponse response = (ListOffsetsResponse) request.getErrorResponse(0, 
Errors.NOT_LEADER_OR_FOLLOWER.exception()); - - List v = Collections.singletonList( - new ListOffsetsTopicResponse() - .setName("topic") - .setPartitions(Collections.singletonList( - new ListOffsetsPartitionResponse() - .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()) - .setOldStyleOffsets(Collections.emptyList()) - .setPartitionIndex(0)))); - ListOffsetsResponseData data = new ListOffsetsResponseData() - .setThrottleTimeMs(0) - .setTopics(v); - ListOffsetsResponse expectedResponse = new ListOffsetsResponse(data); - assertEquals(expectedResponse.data().topics(), response.data().topics()); - assertEquals(expectedResponse.throttleTimeMs(), response.throttleTimeMs()); - } - @Test public void testToListOffsetsTopics() { ListOffsetsPartition lop0 = new ListOffsetsPartition() .setPartitionIndex(0) .setCurrentLeaderEpoch(1) - .setMaxNumOffsets(2) .setTimestamp(123L); ListOffsetsPartition lop1 = new ListOffsetsPartition() .setPartitionIndex(1) .setCurrentLeaderEpoch(3) - .setMaxNumOffsets(4) .setTimestamp(567L); Map timestampsToSearch = new HashMap<>(); timestampsToSearch.put(new TopicPartition("topic", 0), lop0); @@ -174,4 +142,4 @@ public void testListOffsetsRequestOldestVersion() { assertEquals((short) 8, requireEarliestLocalTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 9, requireTieredStorageTimestampRequestBuilder.oldestAllowedVersion()); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/common/requests/MetadataRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/MetadataRequestTest.java index 117d0ced9bae7..c28b54fd398cf 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/MetadataRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/MetadataRequestTest.java @@ -30,23 +30,13 @@ import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; public class MetadataRequestTest { @Test - public void testEmptyMeansAllTopicsV0() { - MetadataRequestData data = new MetadataRequestData(); - MetadataRequest parsedRequest = new MetadataRequest(data, (short) 0); - assertTrue(parsedRequest.isAllTopics()); - assertNull(parsedRequest.topics()); - } - - @Test - public void testEmptyMeansEmptyForVersionsAboveV0() { - for (int i = 1; i < MetadataRequestData.SCHEMAS.length; i++) { + public void testEmptyMeansEmptyForAllVersions() { + for (int i = ApiKeys.METADATA.oldestVersion(); i < MetadataRequestData.SCHEMAS.length; i++) { MetadataRequestData data = new MetadataRequestData(); data.setAllowAutoTopicCreation(true); MetadataRequest parsedRequest = new MetadataRequest(data, (short) i); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java index d4247aca7fda7..161a4dd5f1192 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java @@ -92,7 +92,7 @@ public void testConstructor() { OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(data); - for (short version : ApiKeys.TXN_OFFSET_COMMIT.allVersions()) { + for (short 
version : ApiKeys.OFFSET_COMMIT.allVersions()) { OffsetCommitRequest request = builder.build(version); assertEquals(expectedOffsets, request.offsets()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java index c85d26dac5756..d0ef79b4479e8 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java @@ -390,16 +390,16 @@ public void testUseDefaultLeaderEpochV0ToV7() { .setErrorCode(Errors.NOT_COORDINATOR.code()) .setThrottleTimeMs(throttleTimeMs) .setTopics(Collections.singletonList( - new OffsetFetchResponseTopic() - .setName(topicOne) - .setPartitions(Collections.singletonList( - new OffsetFetchResponsePartition() - .setPartitionIndex(partitionOne) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - .setMetadata(metadata)) - )) + new OffsetFetchResponseTopic() + .setName(topicOne) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartition() + .setPartitionIndex(partitionOne) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + .setMetadata(metadata)) + )) ); assertEquals(expectedData, response.data()); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequestTest.java index 6a239e162abd7..1f96663be7d78 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequestTest.java @@ -42,18 +42,13 @@ public void testForConsumerRequiresVersion3() { } @Test - public void testDefaultReplicaId() { - for (short version : ApiKeys.OFFSET_FOR_LEADER_EPOCH.allVersions()) { - int replicaId = 1; - OffsetsForLeaderEpochRequest.Builder builder = OffsetsForLeaderEpochRequest.Builder.forFollower( - version, new OffsetForLeaderTopicCollection(), replicaId); - OffsetsForLeaderEpochRequest request = builder.build(); - OffsetsForLeaderEpochRequest parsed = OffsetsForLeaderEpochRequest.parse(request.serialize(), version); - if (version < 3) - assertEquals(OffsetsForLeaderEpochRequest.DEBUGGING_REPLICA_ID, parsed.replicaId()); - else - assertEquals(replicaId, parsed.replicaId()); - } + public void testForFollower() { + short version = 4; + int replicaId = 1; + OffsetsForLeaderEpochRequest.Builder builder = OffsetsForLeaderEpochRequest.Builder.forFollower( + new OffsetForLeaderTopicCollection(), replicaId); + OffsetsForLeaderEpochRequest request = builder.build(); + OffsetsForLeaderEpochRequest parsed = OffsetsForLeaderEpochRequest.parse(request.serialize(), version); + assertEquals(replicaId, parsed.replicaId()); } - } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java index f171eececc426..ecb8869c38bd2 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.common.record.MemoryRecords; import 
org.apache.kafka.common.record.MemoryRecordsBuilder; import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.record.TimestampType; @@ -50,18 +49,18 @@ public class ProduceRequestTest { @Test public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() { final MemoryRecords memoryRecords = MemoryRecords.withTransactionalRecords(0, Compression.NONE, 1L, - (short) 1, 1, 1, simpleRecord); - - final ProduceRequest request = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("topic") - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(1) - .setRecords(memoryRecords)))).iterator())) - .setAcks((short) -1) - .setTimeoutMs(10)).build(); + (short) 1, 1, 1, simpleRecord); + + final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("topic") + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(1) + .setRecords(memoryRecords)))).iterator())) + .setAcks((short) -1) + .setTimeoutMs(10)).build(); assertTrue(RequestUtils.hasTransactionalRecords(request)); } @@ -80,52 +79,35 @@ public void shouldNotBeFlaggedAsIdempotentWhenRecordsNotIdempotent() { @Test public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() { final MemoryRecords memoryRecords = MemoryRecords.withIdempotentRecords(1, Compression.NONE, 1L, - (short) 1, 1, 1, simpleRecord); - final ProduceRequest request = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("topic") - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(1) - .setRecords(memoryRecords)))).iterator())) - .setAcks((short) -1) - .setTimeoutMs(10)).build(); + (short) 1, 1, 1, simpleRecord); + final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("topic") + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(1) + .setRecords(memoryRecords)))).iterator())) + .setAcks((short) -1) + .setTimeoutMs(10)).build(); assertTrue(RequestTestUtils.hasIdempotentRecords(request)); } @Test - public void testBuildWithOldMessageFormat() { + public void testBuildWithCurrentMessageFormat() { ByteBuffer buffer = ByteBuffer.allocate(256); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, Compression.NONE, - TimestampType.CREATE_TIME, 0L); + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, + Compression.NONE, TimestampType.CREATE_TIME, 0L); builder.append(10L, null, "a".getBytes()); - ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V1, + ProduceRequest.Builder requestBuilder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new 
ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData().setName("test").setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) .iterator())) .setAcks((short) 1) - .setTimeoutMs(5000)); - assertEquals(2, requestBuilder.oldestAllowedVersion()); - assertEquals(2, requestBuilder.latestAllowedVersion()); - } - - @Test - public void testBuildWithCurrentMessageFormat() { - ByteBuffer buffer = ByteBuffer.allocate(256); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, - Compression.NONE, TimestampType.CREATE_TIME, 0L); - builder.append(10L, null, "a".getBytes()); - ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, - new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData().setName("test").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) - .iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000)); + .setTimeoutMs(5000), + false); assertEquals(3, requestBuilder.oldestAllowedVersion()); assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion()); } @@ -144,31 +126,31 @@ public void testV3AndAboveShouldContainOnlyOneRecordBatch() { buffer.flip(); - ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(MemoryRecords.readableRecords(buffer))))).iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000)); + ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(MemoryRecords.readableRecords(buffer))))).iterator())) + .setAcks((short) 1) + .setTimeoutMs(5000)); assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class); } @Test public void testV3AndAboveCannotHaveNoRecordBatches() { - ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(MemoryRecords.EMPTY)))).iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000)); + ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(MemoryRecords.EMPTY)))).iterator())) + .setAcks((short) 1) + .setTimeoutMs(5000)); assertThrowsForAllVersions(requestBuilder, 
InvalidRecordException.class); } @@ -176,19 +158,19 @@ public void testV3AndAboveCannotHaveNoRecordBatches() { public void testV3AndAboveCannotUseMagicV0() { ByteBuffer buffer = ByteBuffer.allocate(256); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, Compression.NONE, - TimestampType.NO_TIMESTAMP_TYPE, 0L); + TimestampType.NO_TIMESTAMP_TYPE, 0L); builder.append(10L, null, "a".getBytes()); - ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(builder.build())))).iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000)); + ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(builder.build())))).iterator())) + .setAcks((short) 1) + .setTimeoutMs(5000)); assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class); } @@ -196,19 +178,19 @@ public void testV3AndAboveCannotUseMagicV0() { public void testV3AndAboveCannotUseMagicV1() { ByteBuffer buffer = ByteBuffer.allocate(256); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, Compression.NONE, - TimestampType.CREATE_TIME, 0L); + TimestampType.CREATE_TIME, 0L); builder.append(10L, null, "a".getBytes()); - ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(builder.build())))) - .iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000)); + ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(builder.build())))) + .iterator())) + .setAcks((short) 1) + .setTimeoutMs(5000)); assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class); } @@ -220,15 +202,15 @@ public void testV6AndBelowCannotUseZStdCompression() { builder.append(10L, null, "a".getBytes()); ProduceRequestData produceData = new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("test") - .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(0) - .setRecords(builder.build())))) - .iterator())) - .setAcks((short) 1) - .setTimeoutMs(1000); + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(Collections.singletonList(new 
ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(builder.build())))) + .iterator())) + .setAcks((short) 1) + .setTimeoutMs(1000); // Can't create ProduceRequest instance with version within [3, 7) for (short version = 3; version < 7; version++) { @@ -237,7 +219,7 @@ public void testV6AndBelowCannotUseZStdCompression() { } // Works fine with current version (>= 7) - ProduceRequest.forCurrentMagic(produceData); + ProduceRequest.builder(produceData); } @Test @@ -247,20 +229,21 @@ public void testMixedTransactionalData() { final int sequence = 10; final MemoryRecords nonTxnRecords = MemoryRecords.withRecords(Compression.NONE, - new SimpleRecord("foo".getBytes())); + new SimpleRecord("foo".getBytes())); final MemoryRecords txnRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, - producerEpoch, sequence, new SimpleRecord("bar".getBytes())); - - ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, - new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) - .iterator())) - .setAcks((short) -1) - .setTimeoutMs(5000)); + producerEpoch, sequence, new SimpleRecord("bar".getBytes())); + + ProduceRequest.Builder builder = ProduceRequest.builder( + new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) + .iterator())) + .setAcks((short) -1) + .setTimeoutMs(5000), + true); final ProduceRequest request = builder.build(); assertTrue(RequestUtils.hasTransactionalRecords(request)); assertTrue(RequestTestUtils.hasIdempotentRecords(request)); @@ -273,20 +256,21 @@ public void testMixedIdempotentData() { final int sequence = 10; final MemoryRecords nonIdempotentRecords = MemoryRecords.withRecords(Compression.NONE, - new SimpleRecord("foo".getBytes())); + new SimpleRecord("foo".getBytes())); final MemoryRecords idempotentRecords = MemoryRecords.withIdempotentRecords(Compression.NONE, producerId, - producerEpoch, sequence, new SimpleRecord("bar".getBytes())); - - ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordVersion.current().value, - new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), - new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) - .iterator())) - .setAcks((short) -1) - .setTimeoutMs(5000)); + producerEpoch, sequence, new SimpleRecord("bar".getBytes())); + + 
ProduceRequest.Builder builder = ProduceRequest.builder( + new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) + .iterator())) + .setAcks((short) -1) + .setTimeoutMs(5000), + true); final ProduceRequest request = builder.build(); assertFalse(RequestUtils.hasTransactionalRecords(request)); @@ -300,15 +284,15 @@ private static void assertThrowsForAllVersions(ProduceRequ } private ProduceRequest createNonIdempotentNonTransactionalRecords() { - return ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("topic") - .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(1) - .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord))))) - .iterator())) - .setAcks((short) -1) - .setTimeoutMs(10)).build(); + return ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName("topic") + .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() + .setIndex(1) + .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord))))) + .iterator())) + .setAcks((short) -1) + .setTimeoutMs(10)).build(); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java index 42e1d03f03469..2c4f1c792244f 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java @@ -19,13 +19,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.ProduceResponseData; -import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -39,36 +37,6 @@ public class ProduceResponseTest { - @SuppressWarnings("deprecation") - @Test - public void produceResponseV5Test() { - Map responseData = new HashMap<>(); - TopicPartition tp0 = new TopicPartition("test", 0); - responseData.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); - - ProduceResponse v5Response = new ProduceResponse(responseData, 10); - short version = 5; - - ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(v5Response, version, 0); - - ResponseHeader.parse(buffer, ApiKeys.PRODUCE.responseHeaderVersion(version)); // throw away. 
- ProduceResponse v5FromBytes = (ProduceResponse) AbstractResponse.parseResponse(ApiKeys.PRODUCE, buffer, version); - - assertEquals(1, v5FromBytes.data().responses().size()); - ProduceResponseData.TopicProduceResponse topicProduceResponse = v5FromBytes.data().responses().iterator().next(); - assertEquals(1, topicProduceResponse.partitionResponses().size()); - ProduceResponseData.PartitionProduceResponse partitionProduceResponse = topicProduceResponse.partitionResponses().iterator().next(); - TopicPartition tp = new TopicPartition(topicProduceResponse.name(), partitionProduceResponse.index()); - assertEquals(tp0, tp); - - assertEquals(100, partitionProduceResponse.logStartOffset()); - assertEquals(10000, partitionProduceResponse.baseOffset()); - assertEquals(RecordBatch.NO_TIMESTAMP, partitionProduceResponse.logAppendTimeMs()); - assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode())); - assertNull(partitionProduceResponse.errorMessage()); - assertTrue(partitionProduceResponse.recordErrors().isEmpty()); - } - @SuppressWarnings("deprecation") @Test public void produceResponseVersionTest() { diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java index 06cab34920e1f..aad3be459a682 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java @@ -117,7 +117,7 @@ KafkaPrincipal.ANONYMOUS, new ListenerName("ssl"), SecurityProtocol.SASL_SSL, @Test public void testInvalidRequestForImplicitHashCollection() throws UnknownHostException { - short version = (short) 5; // choose a version with fixed length encoding, for simplicity + short version = (short) 7; // choose a version with fixed length encoding, for simplicity ByteBuffer corruptBuffer = produceRequest(version); // corrupt the length of the topics array corruptBuffer.putInt(8, (Integer.MAX_VALUE - 1) / 2); @@ -134,7 +134,7 @@ KafkaPrincipal.ANONYMOUS, new ListenerName("ssl"), SecurityProtocol.SASL_SSL, @Test public void testInvalidRequestForArrayList() throws UnknownHostException { - short version = (short) 5; // choose a version with fixed length encoding, for simplicity + short version = (short) 7; // choose a version with fixed length encoding, for simplicity ByteBuffer corruptBuffer = produceRequest(version); // corrupt the length of the partitions array corruptBuffer.putInt(17, Integer.MAX_VALUE); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index 0154e6e2c9e0d..49b3179fa407d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -18,9 +18,9 @@ import org.apache.kafka.common.ConsumerGroupState; import org.apache.kafka.common.ElectionType; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -136,6 +136,8 @@ import org.apache.kafka.common.message.DescribeProducersResponseData; import org.apache.kafka.common.message.DescribeQuorumRequestData; import 
org.apache.kafka.common.message.DescribeQuorumResponseData; +import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData; +import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData; import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; import org.apache.kafka.common.message.DescribeTransactionsRequestData; @@ -151,7 +153,6 @@ import org.apache.kafka.common.message.EnvelopeResponseData; import org.apache.kafka.common.message.ExpireDelegationTokenRequestData; import org.apache.kafka.common.message.ExpireDelegationTokenResponseData; -import org.apache.kafka.common.message.FetchRequestData; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.message.FetchSnapshotRequestData; import org.apache.kafka.common.message.FetchSnapshotResponseData; @@ -234,6 +235,10 @@ import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState; import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState; import org.apache.kafka.common.message.StopReplicaResponseData; +import org.apache.kafka.common.message.StreamsGroupDescribeRequestData; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; +import org.apache.kafka.common.message.StreamsGroupHeartbeatRequestData; +import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; import org.apache.kafka.common.message.SyncGroupRequestData; import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment; import org.apache.kafka.common.message.SyncGroupResponseData; @@ -379,7 +384,7 @@ public void testSerializationSpecialCases() { checkResponse(createFetchResponse(Errors.FETCH_SESSION_ID_NOT_FOUND, 123), (short) 7); checkOlderFetchVersions(); // Metadata - checkRequest(MetadataRequest.Builder.allTopics().build((short) 2)); + checkRequest(MetadataRequest.Builder.allTopics().build((short) 4)); // OffsetFetch checkRequest(createOffsetFetchRequestWithMultipleGroups((short) 8, true)); checkRequest(createOffsetFetchRequestWithMultipleGroups((short) 8, false)); @@ -413,12 +418,11 @@ public void testSerializationSpecialCases() { checkRequest(createTxnOffsetCommitRequestWithAutoDowngrade()); checkErrorResponse(createTxnOffsetCommitRequestWithAutoDowngrade(), unknownServerException); // DescribeAcls - checkErrorResponse(createDescribeAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled.")); - checkErrorResponse(createCreateAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled.")); + checkErrorResponse(createDescribeAclsRequest((short) 1), new SecurityDisabledException("Security is not enabled.")); + checkErrorResponse(createCreateAclsRequest((short) 1), new SecurityDisabledException("Security is not enabled.")); // DeleteAcls - checkErrorResponse(createDeleteAclsRequest((short) 0), new SecurityDisabledException("Security is not enabled.")); + checkErrorResponse(createDeleteAclsRequest((short) 1), new SecurityDisabledException("Security is not enabled.")); // DescribeConfigs - checkRequest(createDescribeConfigsRequestWithConfigEntries((short) 0)); checkRequest(createDescribeConfigsRequestWithConfigEntries((short) 1)); checkRequest(createDescribeConfigsRequestWithDocumentation((short) 1)); checkRequest(createDescribeConfigsRequestWithDocumentation((short) 2)); @@ -478,7 +482,7 @@ public void testProduceRequestPartitionSize() { Compression.NONE, new 
SimpleRecord("woot".getBytes())); MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, new SimpleRecord("woot".getBytes()), new SimpleRecord("woot".getBytes())); - ProduceRequest request = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2, + ProduceRequest request = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(asList( new ProduceRequestData.TopicProduceData().setName(tp0.topic()).setPartitionData( @@ -488,8 +492,9 @@ public void testProduceRequestPartitionSize() { .iterator())) .setAcks((short) 1) .setTimeoutMs(5000) - .setTransactionalId("transactionalId")) - .build((short) 3); + .setTransactionalId("transactionalId"), + true) + .build((short) 7); assertEquals(2, request.partitionSizes().size()); assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0)); assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1)); @@ -545,33 +550,13 @@ public void produceRequestGetErrorResponseTest() { @Test public void fetchResponseVersionTest() { - LinkedHashMap responseData = new LinkedHashMap<>(); Uuid id = Uuid.randomUuid(); - Map topicNames = Collections.singletonMap(id, "test"); - TopicPartition tp = new TopicPartition("test", 0); - MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10)); FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData() .setPartitionIndex(0) .setHighWatermark(1000000) .setLogStartOffset(-1) .setRecords(records); - - // Use zero UUID since we are comparing with old request versions - responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, tp), partitionData); - - LinkedHashMap tpResponseData = new LinkedHashMap<>(); - tpResponseData.put(tp, partitionData); - - FetchResponse v0Response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, responseData); - FetchResponse v1Response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData); - FetchResponse v0Deserialized = FetchResponse.parse(v0Response.serialize((short) 0), (short) 0); - FetchResponse v1Deserialized = FetchResponse.parse(v1Response.serialize((short) 1), (short) 1); - assertEquals(0, v0Deserialized.throttleTimeMs(), "Throttle time must be zero"); - assertEquals(10, v1Deserialized.throttleTimeMs(), "Throttle time must be 10"); - assertEquals(tpResponseData, v0Deserialized.responseData(topicNames, (short) 0), "Response data does not match"); - assertEquals(tpResponseData, v1Deserialized.responseData(topicNames, (short) 1), "Response data does not match"); - LinkedHashMap idResponseData = new LinkedHashMap<>(); idResponseData.put(new TopicIdPartition(id, new TopicPartition("test", 0)), new FetchResponseData.PartitionData() @@ -689,14 +674,6 @@ public void testCreateTopicRequestV3FailsIfNoPartitionsOrReplicas() { assertTrue(exception.getMessage().contains("[foo, bar]")); } - @Test - public void testFetchRequestMaxBytesOldVersions() { - final short version = 1; - FetchRequest fr = createFetchRequest(version); - FetchRequest fr2 = FetchRequest.parse(fr.serialize(), version); - assertEquals(fr2.maxBytes(), fr.maxBytes()); - } - @Test public void testFetchRequestIsolationLevel() { FetchRequest request = createFetchRequest((short) 4, IsolationLevel.READ_COMMITTED); @@ -723,24 +700,6 @@ public void testFetchRequestWithMetadata() { assertEquals(request.isolationLevel(), deserialized.isolationLevel()); } - @Test - public void testFetchRequestCompat() { - Map fetchData = new HashMap<>(); - fetchData.put(new 
TopicPartition("test", 0), new FetchRequest.PartitionData(Uuid.ZERO_UUID, 100, 2, 100, Optional.of(42))); - FetchRequest req = FetchRequest.Builder - .forConsumer((short) 2, 100, 100, fetchData) - .metadata(new FetchMetadata(10, 20)) - .isolationLevel(IsolationLevel.READ_COMMITTED) - .build((short) 2); - - FetchRequestData data = req.data(); - ObjectSerializationCache cache = new ObjectSerializationCache(); - int size = data.size(cache, (short) 2); - - ByteBufferAccessor writer = new ByteBufferAccessor(ByteBuffer.allocate(size)); - data.write(writer, cache, (short) 2); - } - @Test public void testSerializeWithHeader() { CreatableTopicCollection topicsToCreate = new CreatableTopicCollection(1); @@ -788,14 +747,6 @@ public void testSerializeWithInconsistentHeaderVersion() { assertThrows(IllegalArgumentException.class, () -> createTopicsRequest.serializeWithHeader(requestHeader)); } - @Test - public void testJoinGroupRequestV0RebalanceTimeout() { - final short version = 0; - JoinGroupRequest jgr = createJoinGroupRequest(version); - JoinGroupRequest jgr2 = JoinGroupRequest.parse(jgr.serialize(), version); - assertEquals(jgr2.data().rebalanceTimeoutMs(), jgr.data().rebalanceTimeoutMs()); - } - @Test public void testOffsetFetchRequestBuilderToStringV0ToV7() { List stableFlags = asList(true, false); @@ -1058,7 +1009,7 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case DELETE_TOPICS: return createDeleteTopicsRequest(version); case DELETE_RECORDS: return createDeleteRecordsRequest(version); case INIT_PRODUCER_ID: return createInitPidRequest(version); - case OFFSET_FOR_LEADER_EPOCH: return createLeaderEpochRequestForReplica(version, 1); + case OFFSET_FOR_LEADER_EPOCH: return createLeaderEpochRequestForReplica(1); case ADD_PARTITIONS_TO_TXN: return createAddPartitionsToTxnRequest(version); case ADD_OFFSETS_TO_TXN: return createAddOffsetsToTxnRequest(version); case END_TXN: return createEndTxnRequest(version); @@ -1123,6 +1074,9 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case WRITE_SHARE_GROUP_STATE: return createWriteShareGroupStateRequest(version); case DELETE_SHARE_GROUP_STATE: return createDeleteShareGroupStateRequest(version); case READ_SHARE_GROUP_STATE_SUMMARY: return createReadShareGroupStateSummaryRequest(version); + case STREAMS_GROUP_HEARTBEAT: return createStreamsGroupHeartbeatRequest(version); + case STREAMS_GROUP_DESCRIBE: return createStreamsGroupDescribeRequest(version); + case DESCRIBE_SHARE_GROUP_OFFSETS: return createDescribeShareGroupOffsetsRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1181,7 +1135,7 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ALTER_CLIENT_QUOTAS: return createAlterClientQuotasResponse(); case DESCRIBE_USER_SCRAM_CREDENTIALS: return createDescribeUserScramCredentialsResponse(); case ALTER_USER_SCRAM_CREDENTIALS: return createAlterUserScramCredentialsResponse(); - case VOTE: return createVoteResponse(); + case VOTE: return createVoteResponse(version); case BEGIN_QUORUM_EPOCH: return createBeginQuorumEpochResponse(); case END_QUORUM_EPOCH: return createEndQuorumEpochResponse(); case DESCRIBE_QUORUM: return createDescribeQuorumResponse(); @@ -1217,6 +1171,9 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case WRITE_SHARE_GROUP_STATE: return createWriteShareGroupStateResponse(); case DELETE_SHARE_GROUP_STATE: return createDeleteShareGroupStateResponse(); case READ_SHARE_GROUP_STATE_SUMMARY: return 
createReadShareGroupStateSummaryResponse(); + case STREAMS_GROUP_HEARTBEAT: return createStreamsGroupHeartbeatResponse(); + case STREAMS_GROUP_DESCRIBE: return createStreamsGroupDescribeResponse(); + case DESCRIBE_SHARE_GROUP_OFFSETS: return createDescribeShareGroupOffsetsResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1487,7 +1444,7 @@ private ShareGroupDescribeResponse createShareGroupDescribeResponse() { .setGroupId("group") .setErrorCode((short) 0) .setErrorMessage(Errors.forCode((short) 0).message()) - .setGroupState(ShareGroupState.EMPTY.toString()) + .setGroupState(GroupState.EMPTY.toString()) .setMembers(new ArrayList<>(0)) )) .setThrottleTimeMs(1000); @@ -1700,29 +1657,34 @@ private BeginQuorumEpochResponse createBeginQuorumEpochResponse() { } private VoteRequest createVoteRequest(short version) { + VoteRequestData.PartitionData partitionData = new VoteRequestData.PartitionData() + .setPartitionIndex(0) + .setReplicaEpoch(1) + .setReplicaId(2) + .setLastOffset(3L) + .setLastOffsetEpoch(4); + if (version >= 2) { + partitionData.setPreVote(true); + } VoteRequestData data = new VoteRequestData() .setClusterId("clusterId") .setTopics(singletonList(new VoteRequestData.TopicData() - .setPartitions(singletonList(new VoteRequestData.PartitionData() - .setPartitionIndex(0) - .setCandidateEpoch(1) - .setCandidateId(2) - .setLastOffset(3L) - .setLastOffsetEpoch(4))) + .setPartitions(singletonList(partitionData)) .setTopicName("topic1"))); return new VoteRequest.Builder(data).build(version); } - private VoteResponse createVoteResponse() { + private VoteResponse createVoteResponse(short version) { + VoteResponseData.PartitionData partitionData = new VoteResponseData.PartitionData() + .setErrorCode(Errors.NONE.code()) + .setLeaderEpoch(0) + .setPartitionIndex(1) + .setLeaderId(2) + .setVoteGranted(false); VoteResponseData data = new VoteResponseData() .setErrorCode(Errors.NONE.code()) .setTopics(singletonList(new VoteResponseData.TopicData() - .setPartitions(singletonList(new VoteResponseData.PartitionData() - .setErrorCode(Errors.NONE.code()) - .setLeaderEpoch(0) - .setPartitionIndex(1) - .setLeaderId(2) - .setVoteGranted(false))))); + .setPartitions(singletonList(partitionData)))); return new VoteResponse(data); } @@ -1789,7 +1751,7 @@ private AlterPartitionRequest createAlterPartitionRequest(short version) { .setTopicName("topic1") .setTopicId(Uuid.randomUuid()) .setPartitions(singletonList(partitionData)))); - return new AlterPartitionRequest.Builder(data, version >= 1).build(version); + return new AlterPartitionRequest.Builder(data).build(version); } private AlterPartitionResponse createAlterPartitionResponse(int version) { @@ -2187,9 +2149,9 @@ private JoinGroupRequest createJoinGroupRequest(short version) { JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection( Collections.singleton( - new JoinGroupRequestData.JoinGroupRequestProtocol() - .setName("consumer-range") - .setMetadata(new byte[0])).iterator() + new JoinGroupRequestData.JoinGroupRequestProtocol() + .setName("consumer-range") + .setMetadata(new byte[0])).iterator() ); JoinGroupRequestData data = new JoinGroupRequestData() @@ -2346,19 +2308,7 @@ private DeleteGroupsResponse createDeleteGroupsResponse() { } private ListOffsetsRequest createListOffsetRequest(short version) { - if (version == 0) { - ListOffsetsTopic topic = new ListOffsetsTopic() - .setName("test") - .setPartitions(singletonList(new 
ListOffsetsPartition() - .setPartitionIndex(0) - .setTimestamp(1000000L) - .setMaxNumOffsets(10) - .setCurrentLeaderEpoch(5))); - return ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(singletonList(topic)) - .build(version); - } else if (version == 1) { + if (version == 1) { ListOffsetsTopic topic = new ListOffsetsTopic() .setName("test") .setPartitions(singletonList(new ListOffsetsPartition() @@ -2388,16 +2338,7 @@ private ListOffsetsRequest createListOffsetRequest(short version) { } private ListOffsetsResponse createListOffsetResponse(short version) { - if (version == 0) { - ListOffsetsResponseData data = new ListOffsetsResponseData() - .setTopics(singletonList(new ListOffsetsTopicResponse() - .setName("test") - .setPartitions(singletonList(new ListOffsetsPartitionResponse() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - .setOldStyleOffsets(singletonList(100L)))))); - return new ListOffsetsResponse(data); - } else if (version >= 1 && version <= LIST_OFFSETS.latestVersion()) { + if (version >= 1 && version <= LIST_OFFSETS.latestVersion()) { ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) @@ -2558,22 +2499,9 @@ private OffsetFetchResponse createOffsetFetchResponse(short version) { } private ProduceRequest createProduceRequest(short version) { - if (version < 2) { - MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); - ProduceRequestData data = new ProduceRequestData() - .setAcks((short) -1) - .setTimeoutMs(123) - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList( - new ProduceRequestData.TopicProduceData() - .setName("topic1") - .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(1) - .setRecords(records)))).iterator())); - return new ProduceRequest.Builder(version, version, data).build(version); - } - byte magic = version == 2 ? RecordBatch.MAGIC_VALUE_V1 : RecordBatch.MAGIC_VALUE_V2; - MemoryRecords records = MemoryRecords.withRecords(magic, Compression.NONE, new SimpleRecord("woot".getBytes())); - return ProduceRequest.forMagic(magic, + MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, + new SimpleRecord("woot".getBytes())); + return ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList( new ProduceRequestData.TopicProduceData() @@ -2583,7 +2511,8 @@ private ProduceRequest createProduceRequest(short version) { .setRecords(records)))).iterator())) .setAcks((short) 1) .setTimeoutMs(5000) - .setTransactionalId(version >= 3 ? "transactionalId" : null)) + .setTransactionalId(version >= 3 ? 
"transactionalId" : null), + true) .build(version); } @@ -2980,9 +2909,9 @@ private OffsetsForLeaderEpochRequest createLeaderEpochRequestForConsumer() { return OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build(); } - private OffsetsForLeaderEpochRequest createLeaderEpochRequestForReplica(short version, int replicaId) { + private OffsetsForLeaderEpochRequest createLeaderEpochRequestForReplica(int replicaId) { OffsetForLeaderTopicCollection epochs = createOffsetForLeaderTopicCollection(); - return OffsetsForLeaderEpochRequest.Builder.forFollower(version, epochs, replicaId).build(); + return OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, replicaId).build(); } private OffsetsForLeaderEpochResponse createLeaderEpochResponse() { @@ -3085,7 +3014,7 @@ private EndTxnResponse createEndTxnResponse() { private WriteTxnMarkersRequest createWriteTxnMarkersRequest(short version) { List partitions = singletonList(new TopicPartition("topic", 73)); WriteTxnMarkersRequest.TxnMarkerEntry txnMarkerEntry = new WriteTxnMarkersRequest.TxnMarkerEntry(21L, (short) 42, 73, TransactionResult.ABORT, partitions); - return new WriteTxnMarkersRequest.Builder(WRITE_TXN_MARKERS.latestVersion(), singletonList(txnMarkerEntry)).build(version); + return new WriteTxnMarkersRequest.Builder(singletonList(txnMarkerEntry)).build(version); } private WriteTxnMarkersResponse createWriteTxnMarkersResponse() { @@ -3108,7 +3037,18 @@ private TxnOffsetCommitRequest createTxnOffsetCommitRequest(short version) { "groupId", 21L, (short) 42, - offsets).build(); + offsets, + false).build(); + } else if (version < 5) { + return new TxnOffsetCommitRequest.Builder("transactionalId", + "groupId", + 21L, + (short) 42, + offsets, + "member", + 2, + Optional.of("instance"), + false).build(version); } else { return new TxnOffsetCommitRequest.Builder("transactionalId", "groupId", @@ -3117,7 +3057,8 @@ private TxnOffsetCommitRequest createTxnOffsetCommitRequest(short version) { offsets, "member", 2, - Optional.of("instance")).build(version); + Optional.of("instance"), + true).build(version); } } @@ -3135,7 +3076,8 @@ private TxnOffsetCommitRequest createTxnOffsetCommitRequestWithAutoDowngrade() { offsets, "member", 2, - Optional.of("instance")).build(); + Optional.of("instance"), + false).build(); } private TxnOffsetCommitResponse createTxnOffsetCommitResponse() { @@ -4015,6 +3957,59 @@ private ReadShareGroupStateSummaryResponse createReadShareGroupStateSummaryRespo return new ReadShareGroupStateSummaryResponse(data); } + private DescribeShareGroupOffsetsRequest createDescribeShareGroupOffsetsRequest(short version) { + DescribeShareGroupOffsetsRequestData data = new DescribeShareGroupOffsetsRequestData() + .setGroupId("group") + .setTopics(Collections.singletonList(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestTopic() + .setTopicName("topic-1") + .setPartitions(Collections.singletonList(0)))); + return new DescribeShareGroupOffsetsRequest.Builder(data).build(version); + } + + private DescribeShareGroupOffsetsResponse createDescribeShareGroupOffsetsResponse() { + DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData() + .setResponses(Collections.singletonList(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic() + .setTopicName("group") + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition() + .setPartitionIndex(0) + 
.setErrorCode(Errors.NONE.code()) + .setStartOffset(0) + .setLeaderEpoch(0))))); + return new DescribeShareGroupOffsetsResponse(data); + } + + private AbstractRequest createStreamsGroupDescribeRequest(final short version) { + return new StreamsGroupDescribeRequest.Builder(new StreamsGroupDescribeRequestData() + .setGroupIds(Collections.singletonList("group")) + .setIncludeAuthorizedOperations(false)).build(version); + } + + private AbstractRequest createStreamsGroupHeartbeatRequest(final short version) { + return new StreamsGroupHeartbeatRequest.Builder(new StreamsGroupHeartbeatRequestData()).build(version); + } + + private AbstractResponse createStreamsGroupDescribeResponse() { + StreamsGroupDescribeResponseData data = new StreamsGroupDescribeResponseData() + .setGroups(Collections.singletonList( + new StreamsGroupDescribeResponseData.DescribedGroup() + .setGroupId("group") + .setErrorCode((short) 0) + .setErrorMessage(Errors.forCode((short) 0).message()) + .setGroupState("EMPTY") + .setGroupEpoch(0) + .setAssignmentEpoch(0) + .setMembers(new ArrayList<>(0)) + .setTopology(null) + )) + .setThrottleTimeMs(1000); + return new StreamsGroupDescribeResponse(data); + } + + private AbstractResponse createStreamsGroupHeartbeatResponse() { + return new StreamsGroupHeartbeatResponse(new StreamsGroupHeartbeatResponseData()); + } + @Test public void testInvalidSaslHandShakeRequest() { AbstractRequest request = new SaslHandshakeRequest.Builder( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestUtilsTest.java new file mode 100644 index 0000000000000..ec7789f1a0263 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestUtilsTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; +import org.apache.kafka.common.errors.DisconnectException; +import org.apache.kafka.common.errors.MismatchedEndpointTypeException; +import org.apache.kafka.common.errors.SecurityDisabledException; +import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; +import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; +import org.apache.kafka.common.errors.UnsupportedVersionException; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class RequestUtilsTest { + @Test + public void testIsFatalException() { + assertTrue(RequestUtils.isFatalException(new AuthenticationException(""))); + assertTrue(RequestUtils.isFatalException(new AuthorizationException(""))); + assertTrue(RequestUtils.isFatalException(new MismatchedEndpointTypeException(""))); + assertTrue(RequestUtils.isFatalException(new SecurityDisabledException(""))); + assertTrue(RequestUtils.isFatalException(new UnsupportedEndpointTypeException(""))); + assertTrue(RequestUtils.isFatalException(new UnsupportedForMessageFormatException(""))); + assertTrue(RequestUtils.isFatalException(new UnsupportedVersionException(""))); + + // retriable exceptions + assertFalse(RequestUtils.isFatalException(new DisconnectException(""))); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java index abb27b2a1fdf8..2f4063fd4c001 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitRequestTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition; import org.apache.kafka.common.message.TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic; import org.apache.kafka.common.message.TxnOffsetCommitResponseData; @@ -35,7 +36,9 @@ import java.util.Optional; import static org.apache.kafka.common.requests.TxnOffsetCommitRequest.getErrorResponse; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class TxnOffsetCommitRequestTest extends OffsetCommitRequestTest { @@ -67,7 +70,8 @@ public void setUp() { groupId, producerId, producerEpoch, - OFFSETS + OFFSETS, + true ); int generationId = 5; @@ -79,14 +83,14 @@ public void setUp() { OFFSETS, memberId, generationId, - Optional.of(groupInstanceId) + Optional.of(groupInstanceId), + true ); } @Test @Override public void testConstructor() { - Map errorsMap = new HashMap<>(); errorsMap.put(new TopicPartition(topicOne, partitionOne), Errors.NOT_COORDINATOR); errorsMap.put(new TopicPartition(topicTwo, partitionTwo), Errors.NOT_COORDINATOR); @@ -151,4 +155,18 @@ public void testGetErrorResponse() { assertEquals(expectedResponse, getErrorResponse(builderWithGroupMetadata.data, Errors.UNKNOWN_MEMBER_ID)); } + + @Test + public void testVersionSupportForGroupMetadata() { + for (short 
version : ApiKeys.TXN_OFFSET_COMMIT.allVersions()) { + assertDoesNotThrow(() -> builder.build(version)); + if (version >= 3) { + assertDoesNotThrow(() -> builderWithGroupMetadata.build(version)); + } else { + assertEquals("Broker doesn't support group metadata commit API on version " + version + + ", minimum supported request version is 3 which requires brokers to be on version 2.5 or above.", + assertThrows(UnsupportedVersionException.class, () -> builderWithGroupMetadata.build(version)).getMessage()); + } + } + } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java index f4fec2ac3d64f..045ac932cf920 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersRequestTest.java @@ -51,7 +51,7 @@ public void setUp() { @Test public void testConstructor() { - WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), markers); + WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(markers); for (short version : ApiKeys.WRITE_TXN_MARKERS.allVersions()) { WriteTxnMarkersRequest request = builder.build(version); assertEquals(1, request.markers().size()); @@ -66,7 +66,7 @@ public void testConstructor() { @Test public void testGetErrorResponse() { - WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), markers); + WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(markers); for (short version : ApiKeys.WRITE_TXN_MARKERS.allVersions()) { WriteTxnMarkersRequest request = builder.build(version); WriteTxnMarkersResponse errorResponse = diff --git a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java index 49989348f849f..59b08fc147691 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java @@ -189,6 +189,10 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { String jaasConfigProp1 = "com.sun.security.auth.module.JndiLoginModule required;"; assertThrows(IllegalArgumentException.class, () -> configurationEntry(JaasContext.Type.CLIENT, jaasConfigProp1)); + //test LdapLoginModule is not allowed by default + String jaasConfigProp2 = "com.sun.security.auth.module.LdapLoginModule required;"; + assertThrows(IllegalArgumentException.class, () -> configurationEntry(JaasContext.Type.CLIENT, jaasConfigProp2)); + //test ListenerName Override writeConfiguration(Arrays.asList( "KafkaServer { test.LoginModuleDefault required; };", @@ -197,11 +201,19 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { assertThrows(IllegalArgumentException.class, () -> JaasContext.loadServerContext(new ListenerName("plaintext"), "SOME-MECHANISM", Collections.emptyMap())); + //test ListenerName Override + writeConfiguration(Arrays.asList( + "KafkaServer { test.LoginModuleDefault required; };", + "plaintext.KafkaServer { com.sun.security.auth.module.LdapLoginModule requisite; };" + )); + assertThrows(IllegalArgumentException.class, () -> JaasContext.loadServerContext(new ListenerName("plaintext"), + "SOME-MECHANISM", Collections.emptyMap())); + //test 
org.apache.kafka.disallowed.login.modules system property with multiple modules System.setProperty(DISALLOWED_LOGIN_MODULES_CONFIG, " com.ibm.security.auth.module.LdapLoginModule , com.ibm.security.auth.module.Krb5LoginModule "); - String jaasConfigProp2 = "com.ibm.security.auth.module.LdapLoginModule required;"; - assertThrows(IllegalArgumentException.class, () -> configurationEntry(JaasContext.Type.CLIENT, jaasConfigProp2)); + String jaasConfigProp3 = "com.ibm.security.auth.module.LdapLoginModule required;"; + assertThrows(IllegalArgumentException.class, () -> configurationEntry(JaasContext.Type.CLIENT, jaasConfigProp3)); //test ListenerName Override writeConfiguration(Arrays.asList( @@ -216,6 +228,7 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { System.setProperty(DISALLOWED_LOGIN_MODULES_CONFIG, ""); checkConfiguration("com.sun.security.auth.module.JndiLoginModule", LoginModuleControlFlag.REQUIRED, new HashMap<>()); + checkConfiguration("com.sun.security.auth.module.LdapLoginModule", LoginModuleControlFlag.REQUIRED, new HashMap<>()); //test ListenerName Override writeConfiguration(Arrays.asList( @@ -227,6 +240,17 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { assertEquals(1, context.configurationEntries().size()); checkEntry(context.configurationEntries().get(0), "com.sun.security.auth.module.JndiLoginModule", LoginModuleControlFlag.REQUISITE, Collections.emptyMap()); + + //test ListenerName Override + writeConfiguration(Arrays.asList( + "KafkaServer { com.sun.security.auth.module.LdapLoginModule required; };", + "plaintext.KafkaServer { com.sun.security.auth.module.LdapLoginModule requisite; };" + )); + context = JaasContext.loadServerContext(new ListenerName("plaintext"), + "SOME-MECHANISM", Collections.emptyMap()); + assertEquals(1, context.configurationEntries().size()); + checkEntry(context.configurationEntries().get(0), "com.sun.security.auth.module.LdapLoginModule", + LoginModuleControlFlag.REQUISITE, Collections.emptyMap()); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java index 7efe97908b82f..75b9001f98cff 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorFailureDelayTest.java @@ -201,8 +201,7 @@ private void createSelector(SecurityProtocol securityProtocol, Map configs = new TestSecurityConfig(saslClientConfigs).values(); this.channelBuilder = new AlternateSaslChannelBuilder(ConnectionMode.CLIENT, Collections.singletonMap(saslMechanism, JaasContext.loadClientContext(configs)), securityProtocol, null, - false, saslMechanism, true, credentialCache, null, time); + false, saslMechanism, credentialCache, null, time); this.channelBuilder.configure(configs); // initial authentication must succeed this.selector = NetworkTestUtils.createSelector(channelBuilder, time); @@ -1958,7 +1957,7 @@ private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityPr }; SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts, - securityProtocol, listenerName, false, saslMechanism, true, + securityProtocol, listenerName, false, saslMechanism, credentialCache, null, null, time, new LogContext(), apiVersionSupplier); 
serverChannelBuilder.configure(saslServerConfigs); @@ -1999,7 +1998,7 @@ private NioEchoServer startServerWithoutSaslAuthenticateHeader(final SecurityPro }; SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts, - securityProtocol, listenerName, false, saslMechanism, true, + securityProtocol, listenerName, false, saslMechanism, credentialCache, null, null, time, new LogContext(), apiVersionSupplier) { @Override protected SaslServerAuthenticator buildServerAuthenticator(Map configs, @@ -2034,7 +2033,7 @@ private void createClientConnectionWithoutSaslAuthenticateHeader(final SecurityP final Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); SaslChannelBuilder clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, - securityProtocol, listenerName, false, saslMechanism, true, + securityProtocol, listenerName, false, saslMechanism, null, null, null, time, new LogContext(), null) { @Override @@ -2047,7 +2046,7 @@ protected SaslClientAuthenticator buildClientAuthenticator(Map config Subject subject) { return new SaslClientAuthenticator(configs, callbackHandler, id, subject, - servicePrincipal, serverHost, saslMechanism, true, + servicePrincipal, serverHost, saslMechanism, transportLayer, time, new LogContext()) { @Override protected SaslHandshakeRequest createSaslHandshakeRequest(short version) { @@ -2167,8 +2166,7 @@ private void createSelector(SecurityProtocol securityProtocol, Map jaasContexts, SecurityProtocol securityProtocol, ListenerName listenerName, boolean isInterBrokerListener, - String clientSaslMechanism, boolean handshakeRequestEnable, CredentialCache credentialCache, + String clientSaslMechanism, CredentialCache credentialCache, DelegationTokenCache tokenCache, Time time) { super(connectionMode, jaasContexts, securityProtocol, listenerName, isInterBrokerListener, clientSaslMechanism, - handshakeRequestEnable, credentialCache, tokenCache, null, time, new LogContext(), + credentialCache, tokenCache, null, time, new LogContext(), version -> TestUtils.defaultApiVersionsResponse(ApiMessageType.ListenerType.ZK_BROKER)); } @@ -2585,10 +2583,10 @@ protected SaslClientAuthenticator buildClientAuthenticator(Map config TransportLayer transportLayer, Subject subject) { if (++numInvocations == 1) return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, serverHost, - "DIGEST-MD5", true, transportLayer, time, new LogContext()); + "DIGEST-MD5", transportLayer, time, new LogContext()); else return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, serverHost, - "PLAIN", true, transportLayer, time, new LogContext()) { + "PLAIN", transportLayer, time, new LogContext()) { @Override protected SaslHandshakeRequest createSaslHandshakeRequest(short version) { return new SaslHandshakeRequest.Builder( diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticatorTest.java index 6778e3357f3a8..81df34f85f4b9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticatorTest.java @@ -17,9 +17,10 @@ package org.apache.kafka.common.security.authenticator; import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; -import 
org.apache.kafka.common.errors.IllegalSaslStateException; +import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.message.ApiMessageType; +import org.apache.kafka.common.message.RequestHeaderData; import org.apache.kafka.common.message.SaslAuthenticateRequestData; import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.network.ChannelBuilders; @@ -63,6 +64,7 @@ import java.nio.Buffer; import java.nio.ByteBuffer; import java.time.Duration; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -77,7 +79,6 @@ import static org.apache.kafka.common.security.scram.internals.ScramMechanism.SCRAM_SHA_256; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.eq; @@ -107,7 +108,7 @@ public void testOversizeRequest() throws IOException { } @Test - public void testUnexpectedRequestType() throws IOException { + public void testUnexpectedRequestTypeWithValidRequestHeader() throws IOException { TransportLayer transportLayer = mock(TransportLayer.class); Map configs = Collections.singletonMap(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList(SCRAM_SHA_256.mechanismName())); @@ -126,13 +127,35 @@ public void testUnexpectedRequestType() throws IOException { return headerBuffer.remaining(); }); - try { - authenticator.authenticate(); - fail("Expected authenticate() to raise an exception"); - } catch (IllegalSaslStateException e) { - // expected exception - } + assertThrows(InvalidRequestException.class, () -> authenticator.authenticate()); + verify(transportLayer, times(2)).read(any(ByteBuffer.class)); + } + + @Test + public void testInvalidRequestHeader() throws IOException { + TransportLayer transportLayer = mock(TransportLayer.class); + Map configs = Collections.singletonMap(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, + Collections.singletonList(SCRAM_SHA_256.mechanismName())); + SaslServerAuthenticator authenticator = setupAuthenticator(configs, transportLayer, + SCRAM_SHA_256.mechanismName(), new DefaultChannelMetadataRegistry()); + + short invalidApiKeyId = (short) (Arrays.stream(ApiKeys.values()).mapToInt(k -> k.id).max().getAsInt() + 1); + ByteBuffer headerBuffer = RequestTestUtils.serializeRequestHeader(new RequestHeader( + new RequestHeaderData() + .setRequestApiKey(invalidApiKeyId) + .setRequestApiVersion((short) 0), + (short) 2)); + + when(transportLayer.read(any(ByteBuffer.class))).then(invocation -> { + invocation.getArgument(0).putInt(headerBuffer.remaining()); + return 4; + }).then(invocation -> { + // serialize only the request header. 
the authenticator should not parse beyond this + invocation.getArgument(0).put(headerBuffer.duplicate()); + return headerBuffer.remaining(); + }); + assertThrows(InvalidRequestException.class, () -> authenticator.authenticate()); verify(transportLayer, times(2)).read(any(ByteBuffer.class)); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java index 3e9ea7f4db11d..94f4f1fc8c49e 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java @@ -42,7 +42,7 @@ public class OAuthBearerSaslClientTest { - private static final Map TEST_PROPERTIES = new LinkedHashMap() { + private static final Map TEST_PROPERTIES = new LinkedHashMap<>() { { put("One", "1"); put("Two", "2"); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java index d50fd99a10dea..581a72a52072b 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServerTest.java @@ -52,14 +52,10 @@ public class OAuthBearerSaslServerTest { private static final String USER = "user"; - private static final Map CONFIGS; - static { - String jaasConfigText = "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule Required" + private static final String JAAS_CONFIG_TEXT = "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule Required" + " unsecuredLoginStringClaim_sub=\"" + USER + "\";"; - Map tmp = new HashMap<>(); - tmp.put(SaslConfigs.SASL_JAAS_CONFIG, new Password(jaasConfigText)); - CONFIGS = Collections.unmodifiableMap(tmp); - } + private static final Map CONFIGS = Map.of(SaslConfigs.SASL_JAAS_CONFIG, new Password(JAAS_CONFIG_TEXT)); + private static final AuthenticateCallbackHandler LOGIN_CALLBACK_HANDLER; static { LOGIN_CALLBACK_HANDLER = new OAuthBearerUnsecuredLoginCallbackHandler(); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java index 87ec3a4bf681a..0adaf34bbbeea 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java @@ -38,25 +38,25 @@ protected AccessTokenValidator createAccessTokenValidator() throws Exception { @Test public void testNull() throws Exception { AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(null), "Empty JWT provided"); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(null), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testEmptyString() throws Exception { AccessTokenValidator validator = createAccessTokenValidator(); - 
assertThrowsWithMessage(ValidateException.class, () -> validator.validate(""), "Empty JWT provided"); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(""), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testWhitespace() throws Exception { AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(" "), "Empty JWT provided"); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(" "), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test public void testEmptySections() throws Exception { AccessTokenValidator validator = createAccessTokenValidator(); - assertThrowsWithMessage(ValidateException.class, () -> validator.validate(".."), "Malformed JWT provided"); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(".."), "Malformed JWT provided; expected three sections (header, payload, and signature)"); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java index b3eb3a026b70c..8b1c5a370652e 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.UnsupportedEncodingException; import java.net.HttpURLConnection; import java.nio.charset.StandardCharsets; import java.util.Random; @@ -172,19 +171,19 @@ public void testParseAccessTokenInvalidJson() { } @Test - public void testFormatAuthorizationHeader() throws UnsupportedEncodingException { + public void testFormatAuthorizationHeader() { assertAuthorizationHeader("id", "secret", false, "Basic aWQ6c2VjcmV0"); } @Test - public void testFormatAuthorizationHeaderEncoding() throws UnsupportedEncodingException { + public void testFormatAuthorizationHeaderEncoding() { // according to RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. 
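The authorization-header assertions in this test pin down two encoding rules: RFC 7617 requires the standard (non-URL-safe) Base64 alphabet for the `Basic` credentials, and RFC 6749 section 2.3.1 requires the client id and secret to be form-urlencoded first when that option is enabled. Below is a sketch of that behaviour using a hypothetical standalone helper, not the private method under test.

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative re-implementation of the behaviour the test asserts;
// not the actual HttpAccessTokenRetriever code.
final class BasicAuthHeader {
    static String format(String clientId, String clientSecret, boolean urlencode) {
        if (urlencode) {
            // RFC 6749 section 2.3.1: client id and secret are form-urlencoded first.
            clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8);
            clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8);
        }
        String joined = clientId + ":" + clientSecret;
        // RFC 7617 requires the standard Base64 alphabet, not the URL-safe one;
        // using the URL-safe encoder is the regression covered by KAFKA-14496.
        return "Basic " + Base64.getEncoder().encodeToString(joined.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        System.out.println(format("id", "secret", false)); // prints: Basic aWQ6c2VjcmV0
    }
}
```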
assertAuthorizationHeader("SOME_RANDOM_LONG_USER_01234", "9Q|0`8i~ute-n9ksjLWb\\50\"AX@UUED5E", false, "Basic U09NRV9SQU5ET01fTE9OR19VU0VSXzAxMjM0OjlRfDBgOGl+dXRlLW45a3NqTFdiXDUwIkFYQFVVRUQ1RQ=="); // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 assertAuthorizationHeader("user!@~'", "secret-(*)!", true, "Basic dXNlciUyMSU0MCU3RSUyNzpzZWNyZXQtJTI4KiUyOSUyMQ=="); } - private void assertAuthorizationHeader(String clientId, String clientSecret, boolean urlencode, String expected) throws UnsupportedEncodingException { + private void assertAuthorizationHeader(String clientId, String clientSecret, boolean urlencode, String expected) { String actual = HttpAccessTokenRetriever.formatAuthorizationHeader(clientId, clientSecret, urlencode); assertEquals(expected, actual, String.format("Expected the HTTP Authorization header generated for client ID \"%s\" and client secret \"%s\" to match", clientId, clientSecret)); } @@ -203,14 +202,14 @@ public void testFormatAuthorizationHeaderMissingValues() { } @Test - public void testFormatRequestBody() throws IOException { + public void testFormatRequestBody() { String expected = "grant_type=client_credentials&scope=scope"; String actual = HttpAccessTokenRetriever.formatRequestBody("scope"); assertEquals(expected, actual); } @Test - public void testFormatRequestBodyWithEscaped() throws IOException { + public void testFormatRequestBodyWithEscaped() { String questionMark = "%3F"; String exclamationMark = "%21"; @@ -224,7 +223,7 @@ public void testFormatRequestBodyWithEscaped() throws IOException { } @Test - public void testFormatRequestBodyMissingValues() throws IOException { + public void testFormatRequestBodyMissingValues() { String expected = "grant_type=client_credentials"; String actual = HttpAccessTokenRetriever.formatRequestBody(null); assertEquals(expected, actual); diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java index 84e06f2381ddf..d697dd46ead88 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredValidatorCallbackHandlerTest.java @@ -28,7 +28,6 @@ import java.util.Base64; import java.util.Base64.Encoder; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import javax.security.auth.callback.Callback; @@ -50,25 +49,16 @@ public class OAuthBearerUnsecuredValidatorCallbackHandlerTest { private static final String TOO_EARLY_EXPIRATION_TIME_CLAIM_TEXT = expClaimText(0); private static final String ISSUED_AT_CLAIM_TEXT = claimOrHeaderText("iat", MOCK_TIME.milliseconds() / 1000.0); private static final String SCOPE_CLAIM_TEXT = claimOrHeaderText("scope", "scope1"); - private static final Map MODULE_OPTIONS_MAP_NO_SCOPE_REQUIRED; - static { - Map tmp = new HashMap<>(); - tmp.put("unsecuredValidatorPrincipalClaimName", "principal"); - tmp.put("unsecuredValidatorAllowableClockSkewMs", "1"); - MODULE_OPTIONS_MAP_NO_SCOPE_REQUIRED = Collections.unmodifiableMap(tmp); - } - private static final Map MODULE_OPTIONS_MAP_REQUIRE_EXISTING_SCOPE; - static { - Map tmp = new HashMap<>(); - 
tmp.put("unsecuredValidatorRequiredScope", "scope1"); - MODULE_OPTIONS_MAP_REQUIRE_EXISTING_SCOPE = Collections.unmodifiableMap(tmp); - } - private static final Map MODULE_OPTIONS_MAP_REQUIRE_ADDITIONAL_SCOPE; - static { - Map tmp = new HashMap<>(); - tmp.put("unsecuredValidatorRequiredScope", "scope1 scope2"); - MODULE_OPTIONS_MAP_REQUIRE_ADDITIONAL_SCOPE = Collections.unmodifiableMap(tmp); - } + private static final Map MODULE_OPTIONS_MAP_NO_SCOPE_REQUIRED = Map.of( + "unsecuredValidatorPrincipalClaimName", "principal", + "unsecuredValidatorAllowableClockSkewMs", "1"); + + private static final Map MODULE_OPTIONS_MAP_REQUIRE_EXISTING_SCOPE = Map.of( + "unsecuredValidatorRequiredScope", "scope1"); + + private static final Map MODULE_OPTIONS_MAP_REQUIRE_ADDITIONAL_SCOPE = Map.of( + "unsecuredValidatorRequiredScope", "scope1 scope2"); + @Test public void validToken() { diff --git a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java index 94b95b0cfdf9e..e113957d404fb 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/scram/internals/ScramSaslServerTest.java @@ -123,6 +123,28 @@ public void validateFailedNonceExchange() throws SaslException { "Failure message: " + saslException.getMessage()); } + @Test + public void validateFailedNonceExchangeWithPrependingClientNonce() throws SaslException { + ScramSaslServer spySaslServer = Mockito.spy(saslServer); + byte[] clientFirstMsgBytes = clientFirstMessage(USER_A, USER_A); + ClientFirstMessage clientFirstMessage = new ClientFirstMessage(clientFirstMsgBytes); + + byte[] serverFirstMsgBytes = spySaslServer.evaluateResponse(clientFirstMsgBytes); + ServerFirstMessage serverFirstMessage = new ServerFirstMessage(serverFirstMsgBytes); + assertTrue(serverFirstMessage.nonce().startsWith(clientFirstMessage.nonce()), + "Nonce in server message should start with client first message's nonce"); + + //send client final message with nonce prepended with clientFirstMessage's nonce + byte[] clientFinalMessage = clientFinalMessage(clientFirstMessage.nonce() + serverFirstMessage.nonce()); + Mockito.doNothing() + .when(spySaslServer).verifyClientProof(Mockito.any(ScramMessages.ClientFinalMessage.class)); + SaslException saslException = assertThrows(SaslException.class, + () -> spySaslServer.evaluateResponse(clientFinalMessage)); + assertEquals("Invalid client nonce in the final client message.", + saslException.getMessage(), + "Failure message: " + saslException.getMessage()); + } + private byte[] clientFirstMessage(String userName, String authorizationId) { String nonce = formatter.secureRandomString(); String authorizationField = authorizationId != null ? 
"a=" + authorizationId : ""; diff --git a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java index 70e5d80e35474..521a0f19415ad 100644 --- a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java @@ -45,7 +45,7 @@ public class SerializationTest { private final String topic = "testTopic"; - private final Map, List> testData = new HashMap, List>() { + private final Map, List> testData = new HashMap<>() { { put(String.class, Arrays.asList(null, "my string")); put(Short.class, Arrays.asList(null, (short) 32767, (short) -32768)); diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java index 066e9ff74de48..b708b4eeb602d 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java @@ -63,7 +63,6 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -238,7 +237,7 @@ public void testTelemetrySenderTimeToNextUpdate() { assertEquals(Long.MAX_VALUE, telemetrySender.timeToNextUpdate(100)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATED)); - assertThrows(IllegalStateException.class, () -> telemetrySender.timeToNextUpdate(100)); + assertEquals(Long.MAX_VALUE, telemetrySender.timeToNextUpdate(100)); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java index 1eb65dbe8a8dd..4c214005eac80 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java @@ -20,6 +20,7 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.zip.CRC32C; import java.util.zip.Checksum; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -36,7 +37,7 @@ public void testUpdateByteBuffer() { private void doTestUpdateByteBuffer(byte[] bytes, ByteBuffer buffer) { buffer.put(bytes); buffer.flip(); - Checksum bufferCrc = Crc32C.create(); + Checksum bufferCrc = new CRC32C(); Checksums.update(bufferCrc, buffer, buffer.remaining()); assertEquals(Crc32C.compute(bytes, 0, bytes.length), bufferCrc.getValue()); assertEquals(0, buffer.position()); @@ -55,8 +56,8 @@ public void testUpdateInt() { final ByteBuffer buffer = ByteBuffer.allocate(4); buffer.putInt(value); - Checksum crc1 = Crc32C.create(); - Checksum crc2 = Crc32C.create(); + Checksum crc1 = new CRC32C(); + Checksum crc2 = new CRC32C(); Checksums.updateInt(crc1, value); crc2.update(buffer.array(), buffer.arrayOffset(), 4); @@ -70,8 +71,8 @@ public void testUpdateLong() { final ByteBuffer buffer = ByteBuffer.allocate(8); buffer.putLong(value); - Checksum crc1 = Crc32C.create(); - Checksum crc2 = Crc32C.create(); + Checksum crc1 = new CRC32C(); + Checksum crc2 = new CRC32C(); 
Checksums.updateLong(crc1, value); crc2.update(buffer.array(), buffer.arrayOffset(), 8); @@ -84,7 +85,7 @@ private void doTestUpdateByteBufferWithOffsetPosition(byte[] bytes, ByteBuffer b buffer.flip(); buffer.position(offset); - Checksum bufferCrc = Crc32C.create(); + Checksum bufferCrc = new CRC32C(); Checksums.update(bufferCrc, buffer, buffer.remaining()); assertEquals(Crc32C.compute(bytes, offset, buffer.remaining()), bufferCrc.getValue()); assertEquals(offset, buffer.position()); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java index 2c6d148e3a1e2..b8e5d1daaa417 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java @@ -18,31 +18,10 @@ import org.junit.jupiter.api.Test; -import java.util.zip.Checksum; - import static org.junit.jupiter.api.Assertions.assertEquals; public class Crc32CTest { - @Test - public void testUpdate() { - final byte[] bytes = "Any String you want".getBytes(); - final int len = bytes.length; - - Checksum crc1 = Crc32C.create(); - Checksum crc2 = Crc32C.create(); - Checksum crc3 = Crc32C.create(); - - crc1.update(bytes, 0, len); - for (byte b : bytes) - crc2.update(b); - crc3.update(bytes, 0, len / 2); - crc3.update(bytes, len / 2, len - len / 2); - - assertEquals(crc1.getValue(), crc2.getValue(), "Crc values should be the same"); - assertEquals(crc1.getValue(), crc3.getValue(), "Crc values should be the same"); - } - @Test public void testValue() { final byte[] bytes = "Some String".getBytes(); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/LogCaptureAppender.java b/clients/src/test/java/org/apache/kafka/common/utils/LogCaptureAppender.java index 1194b9a5de212..2c303a337211b 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/LogCaptureAppender.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/LogCaptureAppender.java @@ -16,33 +16,30 @@ */ package org.apache.kafka.common.utils; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; - +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.config.Property; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.UUID; import java.util.stream.Collectors; -public class LogCaptureAppender extends AppenderSkeleton implements AutoCloseable { - private final List events = new LinkedList<>(); - private final List logLevelChanges = new LinkedList<>(); - - public static class LogLevelChange { - - public LogLevelChange(final Level originalLevel, final Class clazz) { - this.originalLevel = originalLevel; - this.clazz = clazz; - } - - private final Level originalLevel; - - private final Class clazz; - - } - +public class LogCaptureAppender extends AbstractAppender implements AutoCloseable { + private final List events = new LinkedList<>(); + private final Map, Level> logLevelChanges = new HashMap<>(); + private final List loggers = new 
ArrayList<>(); + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") public static class Event { private final String level; @@ -74,31 +71,44 @@ public Optional getThrowableClassName() { } } + public LogCaptureAppender() { + super("LogCaptureAppender-" + UUID.randomUUID(), null, null, true, Property.EMPTY_ARRAY); + } + public static LogCaptureAppender createAndRegister() { final LogCaptureAppender logCaptureAppender = new LogCaptureAppender(); - Logger.getRootLogger().addAppender(logCaptureAppender); + Logger logger = LogManager.getRootLogger(); + logCaptureAppender.addToLogger(logger); return logCaptureAppender; } public static LogCaptureAppender createAndRegister(final Class clazz) { final LogCaptureAppender logCaptureAppender = new LogCaptureAppender(); - Logger.getLogger(clazz).addAppender(logCaptureAppender); + Logger logger = LogManager.getLogger(clazz); + logCaptureAppender.addToLogger(logger); return logCaptureAppender; } - public void setClassLogger(final Class clazz, Level level) { - logLevelChanges.add(new LogLevelChange(Logger.getLogger(clazz).getLevel(), clazz)); - Logger.getLogger(clazz).setLevel(level); + public void addToLogger(Logger logger) { + org.apache.logging.log4j.core.Logger coreLogger = (org.apache.logging.log4j.core.Logger) logger; + this.start(); + coreLogger.addAppender(this); + loggers.add(coreLogger); } - public static void unregister(final LogCaptureAppender logCaptureAppender) { - Logger.getRootLogger().removeAppender(logCaptureAppender); + public void setClassLogger(final Class clazz, Level level) { + if (!logLevelChanges.containsKey(clazz)) { + Level currentLevel = LogManager.getLogger(clazz).getLevel(); + logLevelChanges.put(clazz, currentLevel); + } + + Configurator.setLevel(clazz.getName(), level); } @Override - protected void append(final LoggingEvent event) { + public void append(final LogEvent event) { synchronized (events) { - events.add(event); + events.add(event.toImmutable()); } } @@ -110,10 +120,10 @@ public List getMessages(String level) { } public List getMessages() { - final LinkedList result = new LinkedList<>(); + final List result = new LinkedList<>(); synchronized (events) { - for (final LoggingEvent event : events) { - result.add(event.getRenderedMessage()); + for (final LogEvent event : events) { + result.add(event.getMessage().getFormattedMessage()); } } return result; @@ -122,25 +132,26 @@ public List getMessages() { public List getEvents() { final LinkedList result = new LinkedList<>(); synchronized (events) { - for (final LoggingEvent event : events) { - final String[] throwableStrRep = event.getThrowableStrRep(); + for (final LogEvent event : events) { + final Throwable throwable = event.getThrown(); final Optional throwableString; final Optional throwableClassName; - if (throwableStrRep == null) { + if (throwable == null) { throwableString = Optional.empty(); throwableClassName = Optional.empty(); } else { - final StringBuilder throwableStringBuilder = new StringBuilder(); - - for (final String s : throwableStrRep) { - throwableStringBuilder.append(s); - } - - throwableString = Optional.of(throwableStringBuilder.toString()); - throwableClassName = Optional.of(event.getThrowableInformation().getThrowable().getClass().getName()); + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + throwable.printStackTrace(printWriter); + throwableString = Optional.of(stringWriter.toString()); + throwableClassName = Optional.of(throwable.getClass().getName()); } - result.add(new 
Event(event.getLevel().toString(), event.getRenderedMessage(), throwableString, throwableClassName)); + result.add(new Event( + event.getLevel().toString(), + event.getMessage().getFormattedMessage(), + throwableString, + throwableClassName)); } } return result; @@ -148,15 +159,21 @@ public List getEvents() { @Override public void close() { - for (final LogLevelChange logLevelChange : logLevelChanges) { - Logger.getLogger(logLevelChange.clazz).setLevel(logLevelChange.originalLevel); + for (Map.Entry, Level> entry : logLevelChanges.entrySet()) { + Class clazz = entry.getKey(); + Level originalLevel = entry.getValue(); + Configurator.setLevel(clazz.getName(), originalLevel); } logLevelChanges.clear(); - unregister(this); + + unregister(); } - @Override - public boolean requiresLayout() { - return false; + public void unregister() { + for (org.apache.logging.log4j.core.Logger logger : loggers) { + logger.removeAppender(this); + } + loggers.clear(); + this.stop(); } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 8928159526cfc..16fc6af154b20 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -770,9 +770,7 @@ public void testRecursiveDeleteWithDeletedFile() throws IOException { when(mockIterator.next()).thenReturn(rootDir.toPath()).thenReturn(subDir.toPath()); when(mockIterator.hasNext()).thenReturn(true).thenReturn(true).thenReturn(false); - assertDoesNotThrow(() -> { - Utils.delete(spyRootFile); - }); + assertDoesNotThrow(() -> Utils.delete(spyRootFile)); assertFalse(Files.exists(rootDir.toPath())); assertFalse(Files.exists(subDir.toPath())); } diff --git a/clients/src/test/java/org/apache/kafka/test/TestUtils.java b/clients/src/test/java/org/apache/kafka/test/TestUtils.java index f1c529394bedc..5878a640d3665 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestUtils.java @@ -31,7 +31,6 @@ import org.apache.kafka.common.network.Send; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.record.UnalignedRecords; import org.apache.kafka.common.requests.ApiVersionsResponse; import org.apache.kafka.common.requests.ByteBufferChannel; @@ -74,7 +73,6 @@ import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -279,6 +277,23 @@ public static File tempDirectory(final Path parent, String prefix) { return file; } + /** + * Create a random log directory in the format - used for Kafka partition logs. + * It is the responsibility of the caller to set up a shutdown hook for deletion of the directory. 
+ */ + public static File randomPartitionLogDir(File parentDir) { + int attempts = 1000; + while (attempts > 0) { + File f = new File(parentDir, "kafka-" + RANDOM.nextInt(1000000)); + if (f.mkdir()) { + f.deleteOnExit(); + return f; + } + attempts--; + } + throw new RuntimeException("Failed to create directory after 1000 attempts"); + } + public static Properties producerConfig(final String bootstrapServers, final Class keySerializer, final Class valueSerializer, @@ -552,8 +567,10 @@ public static Set generateRandomTopicPartitions(int numTopic, in */ public static T assertFutureThrows(Future future, Class exceptionCauseClass) { ExecutionException exception = assertThrows(ExecutionException.class, future::get); - assertInstanceOf(exceptionCauseClass, exception.getCause(), - "Unexpected exception cause " + exception.getCause()); + Throwable cause = exception.getCause(); + assertEquals(exceptionCauseClass, cause.getClass(), + "Expected a " + exceptionCauseClass.getSimpleName() + " exception, but got " + + cause.getClass().getSimpleName()); return exceptionCauseClass.cast(exception.getCause()); } @@ -566,19 +583,6 @@ public static void assertFutureThrows( assertEquals(expectedMessage, receivedException.getMessage()); } - public static void assertFutureError(Future future, Class exceptionClass) - throws InterruptedException { - try { - future.get(); - fail("Expected a " + exceptionClass.getSimpleName() + " exception, but got success."); - } catch (ExecutionException ee) { - Throwable cause = ee.getCause(); - assertEquals(exceptionClass, cause.getClass(), - "Expected a " + exceptionClass.getSimpleName() + " exception, but got " + - cause.getClass().getSimpleName()); - } - } - public static ApiKeys apiKeyFrom(NetworkReceive networkReceive) { return RequestHeader.parse(networkReceive.payload().duplicate()).apiKey(); } @@ -660,7 +664,7 @@ public static ApiVersionsResponse defaultApiVersionsResponse( ) { return createApiVersionsResponse( throttleTimeMs, - ApiVersionsResponse.filterApis(RecordVersion.current(), listenerType, true, true), + ApiVersionsResponse.filterApis(listenerType, true, true), Features.emptySupportedFeatures(), false ); @@ -673,7 +677,7 @@ public static ApiVersionsResponse defaultApiVersionsResponse( ) { return createApiVersionsResponse( throttleTimeMs, - ApiVersionsResponse.filterApis(RecordVersion.current(), listenerType, enableUnstableLastVersion, true), + ApiVersionsResponse.filterApis(listenerType, enableUnstableLastVersion, true), Features.emptySupportedFeatures(), false ); diff --git a/clients/src/test/resources/log4j.properties b/clients/src/test/resources/log4j.properties deleted file mode 100644 index 0992580eca1d8..0000000000000 --- a/clients/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=OFF, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka=ERROR -# We are testing for a particular INFO log message in CommonNameLoggingTrustManagerFactoryWrapper -log4j.logger.org.apache.kafka.common.security.ssl.CommonNameLoggingTrustManagerFactoryWrapper=INFO diff --git a/clients/src/test/resources/log4j2.yaml b/clients/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..bfe8b3835a0c6 --- /dev/null +++ b/clients/src/test/resources/log4j2.yaml @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: OFF + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: ERROR + # We are testing for a particular INFO log message in CommonNameLoggingTrustManagerFactoryWrapper + - name: org.apache.kafka.common.security.ssl.CommonNameLoggingTrustManagerFactoryWrapper + level: INFO diff --git a/committer-tools/kafka-merge-pr.py b/committer-tools/kafka-merge-pr.py index 63439f3e0e408..d649181d74600 100755 --- a/committer-tools/kafka-merge-pr.py +++ b/committer-tools/kafka-merge-pr.py @@ -70,7 +70,7 @@ DEV_BRANCH_NAME = "trunk" -DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.0.0") +DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.1.0") ORIGINAL_HEAD = "" diff --git a/config/connect-log4j.properties b/config/connect-log4j.properties deleted file mode 100644 index 979cb3869f952..0000000000000 --- a/config/connect-log4j.properties +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
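The LogCaptureAppender rewrite above, the new log4j2.yaml test resource, and the connect-log4j.properties removal that follows are all part of the Log4j 1.x to Log4j 2 migration. Here is a usage sketch for the migrated test appender; the class under test and the expected message are placeholders, and the appender itself is a test-only utility from the diff above.

```java
import org.apache.kafka.common.utils.LogCaptureAppender;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Illustrative use of the migrated LogCaptureAppender in a test;
// logger, class name and message are placeholders.
public class LogCaptureSketch {
    private static final Logger LOG = LogManager.getLogger(LogCaptureSketch.class);

    public static void main(String[] args) {
        try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(LogCaptureSketch.class)) {
            // Temporarily lower the level; close() restores the recorded original level.
            appender.setClassLogger(LogCaptureSketch.class, Level.DEBUG);
            LOG.debug("something interesting happened");

            boolean seen = appender.getMessages().stream()
                    .anyMatch(m -> m.contains("something interesting"));
            if (!seen)
                throw new AssertionError("expected the debug message to be captured");
        } // close() also detaches the appender from every logger it was added to
    }
}
```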
- -log4j.rootLogger=INFO, stdout, connectAppender - -# Send the logs to the console. -# -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout - -# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the -# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed -# and copied in the same directory but with a filename that ends in the `DatePattern` option. -# -log4j.appender.connectAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.connectAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log -log4j.appender.connectAppender.layout=org.apache.log4j.PatternLayout - -# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information -# in the log messages, where appropriate. This makes it easier to identify those log messages that apply to a -# specific connector. -# -connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n - -log4j.appender.stdout.layout.ConversionPattern=${connect.log.pattern} -log4j.appender.connectAppender.layout.ConversionPattern=${connect.log.pattern} diff --git a/config/connect-log4j2.yaml b/config/connect-log4j2.yaml new file mode 100644 index 0000000000000..89a9a96736587 --- /dev/null +++ b/config/connect-log4j2.yaml @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "kafka.logs.dir" + value: "." + - name: "logPattern" + value: "[%d] %p %X{connector.context}%m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + RollingFile: + - name: ConnectAppender + fileName: "${sys:kafka.logs.dir}/connect.log" + filePattern: "${sys:kafka.logs.dir}/connect-%d{yyyy-MM-dd-HH}.log" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + - ref: ConnectAppender diff --git a/config/kraft/broker.properties b/config/kraft/broker.properties index c1b1d084dbc08..61a536c9b3071 100644 --- a/config/kraft/broker.properties +++ b/config/kraft/broker.properties @@ -13,11 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. -# - ############################# Server Basics ############################# # The role of this server. 
Setting this puts us in KRaft mode diff --git a/config/kraft/controller.properties b/config/kraft/controller.properties index c1d9ff7868201..84963c95701d1 100644 --- a/config/kraft/controller.properties +++ b/config/kraft/controller.properties @@ -13,11 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. -# - ############################# Server Basics ############################# # The role of this server. Setting this puts us in KRaft mode diff --git a/config/kraft/reconfig-server.properties b/config/kraft/reconfig-server.properties index a0a25133e8254..d4b1fe0bc4dbd 100644 --- a/config/kraft/reconfig-server.properties +++ b/config/kraft/reconfig-server.properties @@ -13,11 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. -# - ############################# Server Basics ############################# # The role of this server. Setting this puts us in KRaft mode @@ -87,9 +82,11 @@ num.partitions=1 num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. offsets.topic.replication.factor=1 +share.coordinator.state.topic.replication.factor=1 +share.coordinator.state.topic.min.isr=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 diff --git a/config/kraft/server.properties b/config/kraft/server.properties index 34bef2f9c909e..311fefbdf86ed 100644 --- a/config/kraft/server.properties +++ b/config/kraft/server.properties @@ -13,11 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. -# - ############################# Server Basics ############################# # The role of this server. Setting this puts us in KRaft mode @@ -87,15 +82,13 @@ num.partitions=1 num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
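The reconfig-server.properties and server.properties hunks in this stretch add the share coordinator's internal state topic ("__share_group_state") alongside the existing offsets and transaction-state topics; the replication-factor lines of the hunk continue right after this sketch. For a single-node development broker those settings could be assembled as below (a hedged sketch: the property keys come from the hunks, everything else is illustrative).

```java
import java.util.Properties;

// Sketch only: internal-topic settings for a single-node dev broker, mirroring the
// properties in the KRaft config files above. Values of 1 are development-only;
// production should use 3 or more, as the file comments recommend.
public class DevBrokerInternalTopicProps {
    public static Properties internalTopicProps() {
        Properties props = new Properties();
        props.put("offsets.topic.replication.factor", "1");
        props.put("transaction.state.log.replication.factor", "1");
        props.put("transaction.state.log.min.isr", "1");
        // New with the share coordinator ("__share_group_state" internal topic):
        props.put("share.coordinator.state.topic.replication.factor", "1");
        props.put("share.coordinator.state.topic.min.isr", "1");
        return props;
    }
}
```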
offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -# Share state topic settings share.coordinator.state.topic.replication.factor=1 share.coordinator.state.topic.min.isr=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 ############################# Log Flush Policy ############################# diff --git a/config/log4j.properties b/config/log4j.properties deleted file mode 100644 index 4dbdd83f83b74..0000000000000 --- a/config/log4j.properties +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Unspecified loggers and loggers with additivity=true output to server.log and stdout -# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise -log4j.rootLogger=INFO, stdout, kafkaAppender - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log -log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log -log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log -log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log -log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log -log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout 
-log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log -log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -# Change the line below to adjust ZK client logging -log4j.logger.org.apache.zookeeper=INFO - -# Change the two lines below to adjust the general broker logging level (output to server.log and stdout) -log4j.logger.kafka=INFO -log4j.logger.org.apache.kafka=INFO - -# Change to DEBUG or TRACE to enable request logging -log4j.logger.kafka.request.logger=WARN, requestAppender -log4j.additivity.kafka.request.logger=false - -# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output -# related to the handling of requests -#log4j.logger.kafka.network.Processor=TRACE, requestAppender -#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender -#log4j.additivity.kafka.server.KafkaApis=false -log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender -log4j.additivity.kafka.network.RequestChannel$=false - -# Change the line below to adjust KRaft mode controller logging -log4j.logger.org.apache.kafka.controller=INFO, controllerAppender -log4j.additivity.org.apache.kafka.controller=false - -# Change the line below to adjust ZK mode controller logging -log4j.logger.kafka.controller=TRACE, controllerAppender -log4j.additivity.kafka.controller=false - -log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender -log4j.additivity.kafka.log.LogCleaner=false - -log4j.logger.state.change.logger=INFO, stateChangeAppender -log4j.additivity.state.change.logger=false - -# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses -log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender -log4j.additivity.kafka.authorizer.logger=false - diff --git a/config/log4j2.yaml b/config/log4j2.yaml new file mode 100644 index 0000000000000..7ee6f001e18ea --- /dev/null +++ b/config/log4j2.yaml @@ -0,0 +1,152 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Unspecified loggers and loggers with additivity=true output to server.log and stdout +# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise +Configuration: + Properties: + Property: + # Fallback if the system property is not set + - name: "kafka.logs.dir" + value: "." 
+ - name: "logPattern" + value: "[%d] %p %m (%c)%n" + + # Appenders configuration + # See: https://logging.apache.org/log4j/2.x/manual/appenders.html + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + RollingFile: + - name: KafkaAppender + fileName: "${sys:kafka.logs.dir}/server.log" + filePattern: "${sys:kafka.logs.dir}/server.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + # State Change appender + - name: StateChangeAppender + fileName: "${sys:kafka.logs.dir}/state-change.log" + filePattern: "${sys:kafka.logs.dir}/stage-change.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + # Request appender + - name: RequestAppender + fileName: "${sys:kafka.logs.dir}/kafka-request.log" + filePattern: "${sys:kafka.logs.dir}/kafka-request.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + # Cleaner appender + - name: CleanerAppender + fileName: "${sys:kafka.logs.dir}/log-cleaner.log" + filePattern: "${sys:kafka.logs.dir}/log-cleaner.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + # Controller appender + - name: ControllerAppender + fileName: "${sys:kafka.logs.dir}/controller.log" + filePattern: "${sys:kafka.logs.dir}/controller.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + # Authorizer appender + - name: AuthorizerAppender + fileName: "${sys:kafka.logs.dir}/kafka-authorizer.log" + filePattern: "${sys:kafka.logs.dir}/kafka-authorizer.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + modulate: true + interval: 1 + + # Loggers configuration + # See: https://logging.apache.org/log4j/2.x/manual/configuration.html#configuring-loggers + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + - ref: KafkaAppender + Logger: + # Kafka logger + - name: kafka + level: INFO + # Kafka org.apache logger + - name: org.apache.kafka + level: INFO + # Kafka request logger + - name: kafka.request.logger + level: WARN + additivity: false + AppenderRef: + ref: RequestAppender + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE + # for additional output related to the handling of requests +# - name: kafka.network.Processor +# level: TRACE +# additivity: false +# AppenderRef: +# ref: RequestAppender +# - name: kafka.server.KafkaApis +# level: TRACE +# additivity: false +# AppenderRef: +# ref: RequestAppender + # Kafka network RequestChannel$ logger + - name: kafka.network.RequestChannel$ + level: WARN + additivity: false + AppenderRef: + ref: RequestAppender + # Controller logger + - name: org.apache.kafka.controller + level: INFO + additivity: false + AppenderRef: + ref: ControllerAppender + # LogCleaner logger + - name: kafka.log.LogCleaner + level: INFO + additivity: false + AppenderRef: + ref: CleanerAppender + # State change logger + - name: state.change.logger + level: INFO + additivity: false + AppenderRef: + ref: StateChangeAppender + # Authorizer logger + - name: kafka.authorizer.logger + level: INFO + additivity: false + AppenderRef: + ref: AuthorizerAppender \ No newline at end of file diff --git a/config/server.properties b/config/server.properties deleted file mode 100644 index 
21ba1c7d9c61b..0000000000000 --- a/config/server.properties +++ /dev/null @@ -1,138 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required. -# See kafka.server.KafkaConfig for additional details and defaults -# - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. If not configured, the host name will be equal to the value of -# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -#listeners=PLAINTEXT://:9092 - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -#advertised.listeners=PLAINTEXT://your.host.name:9092 - -# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details -#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/tmp/kafka-logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. 
-num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=18000 - - -############################# Group Coordinator Settings ############################# - -# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. -# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. -# The default value for this is 3 seconds. 
-# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. -# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. -group.initial.rebalance.delay.ms=0 diff --git a/config/tools-log4j.properties b/config/tools-log4j2.yaml similarity index 71% rename from config/tools-log4j.properties rename to config/tools-log4j2.yaml index b19e343265fc3..bb5d1adf41b65 100644 --- a/config/tools-log4j.properties +++ b/config/tools-log4j2.yaml @@ -12,10 +12,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c)%n" -log4j.rootLogger=WARN, stderr + Appenders: + Console: + name: STDERR + target: SYSTEM_ERR + PatternLayout: + pattern: "${logPattern}" + Loggers: + Root: + level: WARN + AppenderRef: + - ref: STDERR -log4j.appender.stderr=org.apache.log4j.ConsoleAppender -log4j.appender.stderr.layout=org.apache.log4j.PatternLayout -log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.stderr.Target=System.err diff --git a/config/zookeeper.properties b/config/zookeeper.properties deleted file mode 100644 index 90f4332ec31cf..0000000000000 --- a/config/zookeeper.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 -# Disable the adminserver by default to avoid port conflicts. -# Set the port to something non-conflicting if choosing to enable this -admin.enableServer=false -# admin.serverPort=8080 diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java index 8115675f5a532..d8c55573e5c6f 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java @@ -420,7 +420,7 @@ public Schema valueSchema() { public Schema build() { return new ConnectSchema(type, isOptional(), defaultValue, name, version, doc, parameters == null ? null : Collections.unmodifiableMap(parameters), - fields == null ? null : Collections.unmodifiableList(new ArrayList<>(fields.values())), keySchema, valueSchema); + fields == null ? 
null : List.copyOf(fields.values()), keySchema, valueSchema); } /** @@ -441,4 +441,4 @@ private static void checkNotNull(String fieldName, Object val, String fieldToSet if (val == null) throw new SchemaBuilderException("Invalid SchemaBuilder call: " + fieldName + " must be specified to set " + fieldToSet); } -} \ No newline at end of file +} diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java index ee2d6cca43d52..6a9dd56aeb39c 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java @@ -160,8 +160,7 @@ private static Object projectPrimitive(Schema source, Object record, Schema targ assert source.type().isPrimitive(); assert target.type().isPrimitive(); Object result; - if (isPromotable(source.type(), target.type()) && record instanceof Number) { - Number numberRecord = (Number) record; + if (isPromotable(source.type(), target.type()) && record instanceof Number numberRecord) { switch (target.type()) { case INT8: result = numberRecord.byteValue(); diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java index a528271d1ab44..cd332d2a8b3b3 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java @@ -584,8 +584,7 @@ private static java.util.Date convertToTimestamp(Schema toSchema, Schema fromSch SchemaAndValue parsed = parseString(value.toString()); value = parsed.value(); } - if (value instanceof java.util.Date) { - java.util.Date date = (java.util.Date) value; + if (value instanceof java.util.Date date) { if (fromSchema != null) { String fromSchemaName = fromSchema.name(); if (Date.LOGICAL_NAME.equals(fromSchemaName)) { @@ -655,8 +654,7 @@ private static Struct convertToStructInternal(Schema toSchema, Object value) { */ protected static long asLong(Object value, Schema fromSchema, Throwable error) { try { - if (value instanceof Number) { - Number number = (Number) value; + if (value instanceof Number number) { return number.longValue(); } if (value instanceof String) { @@ -695,8 +693,7 @@ protected static long asLong(Object value, Schema fromSchema, Throwable error) { */ protected static double asDouble(Object value, Schema schema, Throwable error) { try { - if (value instanceof Number) { - Number number = (Number) value; + if (value instanceof Number number) { return number.doubleValue(); } if (value instanceof String) { @@ -733,18 +730,15 @@ protected static void append(StringBuilder sb, Object value, boolean embedded) { } else if (value instanceof ByteBuffer) { byte[] bytes = Utils.readBytes((ByteBuffer) value); append(sb, bytes, embedded); - } else if (value instanceof List) { - List list = (List) value; + } else if (value instanceof List list) { sb.append('['); appendIterable(sb, list.iterator()); sb.append(']'); - } else if (value instanceof Map) { - Map map = (Map) value; + } else if (value instanceof Map map) { sb.append('{'); appendIterable(sb, map.entrySet().iterator()); sb.append('}'); - } else if (value instanceof Struct) { - Struct struct = (Struct) value; + } else if (value instanceof Struct struct) { Schema schema = struct.schema(); boolean first = true; sb.append('{'); @@ -759,13 +753,11 @@ protected static void append(StringBuilder sb, Object value, boolean 
embedded) { append(sb, struct.get(field), true); } sb.append('}'); - } else if (value instanceof Map.Entry) { - Map.Entry entry = (Map.Entry) value; + } else if (value instanceof Map.Entry entry) { append(sb, entry.getKey(), true); sb.append(':'); append(sb, entry.getValue(), true); - } else if (value instanceof java.util.Date) { - java.util.Date dateValue = (java.util.Date) value; + } else if (value instanceof java.util.Date dateValue) { String formatted = dateFormatFor(dateValue).format(dateValue); sb.append(formatted); } else { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java index 3b9f3470f6c14..376e95847c44e 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java @@ -81,8 +81,7 @@ public boolean equals(Object obj) { if (obj == this) { return true; } - if (obj instanceof Header) { - Header that = (Header) obj; + if (obj instanceof Header that) { return Objects.equals(this.key, that.key()) && Objects.equals(this.schema(), that.schema()) && Objects.equals(this.value(), that.value()); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java index 5c37ddc5e58b4..63ee8ab6598d1 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java @@ -55,8 +55,7 @@ public ConnectHeaders(Iterable
<Header> original) { if (original == null) { return; } - if (original instanceof ConnectHeaders) { - ConnectHeaders originalHeaders = (ConnectHeaders) original; + if (original instanceof ConnectHeaders originalHeaders) { if (!originalHeaders.isEmpty()) { headers = new LinkedList<>(originalHeaders.headers); } @@ -343,8 +342,7 @@ public boolean equals(Object obj) { if (obj == this) { return true; } - if (obj instanceof Headers) { - Headers that = (Headers) obj; + if (obj instanceof Headers that) { Iterator
<Header> thisIter = this.iterator(); Iterator<Header>
          thatIter = that.iterator(); while (thisIter.hasNext() && thatIter.hasNext()) { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java index 73f87dd04ee05..ca960414dd5b8 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java @@ -51,7 +51,7 @@ public interface ConnectRestExtension extends Configurable, Versioned, Closeable * will invoke this method after registering the default Connect resources. If the implementations attempt * to re-register any of the Connect resources, it will be ignored and will be logged. * - * @param restPluginContext The context provides access to JAX-RS {@link javax.ws.rs.core.Configurable} and {@link + * @param restPluginContext The context provides access to JAX-RS {@link jakarta.ws.rs.core.Configurable} and {@link * ConnectClusterState}.The custom JAX-RS resources can be registered via the {@link * ConnectRestExtensionContext#configurable()} */ diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java index 5e357be8c9142..0bfcee678b1aa 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java @@ -19,19 +19,20 @@ import org.apache.kafka.connect.health.ConnectClusterState; -import javax.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configurable; + /** * The interface provides the ability for {@link ConnectRestExtension} implementations to access the JAX-RS - * {@link javax.ws.rs.core.Configurable} and cluster state {@link ConnectClusterState}. The implementation for the interface is provided + * {@link jakarta.ws.rs.core.Configurable} and cluster state {@link ConnectClusterState}. The implementation for the interface is provided * by the Connect framework. */ public interface ConnectRestExtensionContext { /** - * Provides an implementation of {@link javax.ws.rs.core.Configurable} that can be used to register JAX-RS resources. + * Provides an implementation of {@link jakarta.ws.rs.core.Configurable} that can be used to register JAX-RS resources. * - * @return the JAX-RS {@link javax.ws.rs.core.Configurable}; never {@code null} + * @return the JAX-RS {@link jakarta.ws.rs.core.Configurable}; never {@code null} */ Configurable> configurable(); diff --git a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTask.java b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTask.java index f4e25979f900b..655c89ac670fe 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTask.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTask.java @@ -152,14 +152,6 @@ public Map preCommit(Map partitions) { - this.onPartitionsAssigned(partitions); - } - - /** - * @deprecated Use {@link #open(Collection)} for partition initialization. 
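
(Aside: this SinkTask hunk removes the deprecated onPartitionsAssigned/onPartitionsRevoked aliases, so tasks hook partition assignment through open() and close() directly. A hedged sketch of a task written against the surviving API; the class name and comments are illustrative, not from the patch.)

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

import java.util.Collection;
import java.util.Map;

public class ExampleSinkTask extends SinkTask {
    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
        // allocate writers, clients, etc.
    }

    @Override
    public void open(Collection<TopicPartition> partitions) {
        // per-partition setup; the removed onPartitionsAssigned() hook used to be invoked from here
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        // write records to the external system
    }

    @Override
    public void close(Collection<TopicPartition> partitions) {
        // per-partition cleanup; replaces the removed onPartitionsRevoked() hook
    }

    @Override
    public void stop() {
        // release resources
    }
}
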
- */ - @Deprecated - public void onPartitionsAssigned(Collection partitions) { } /** @@ -175,14 +167,6 @@ public void onPartitionsAssigned(Collection partitions) { * @param partitions The list of partitions that should be closed */ public void close(Collection partitions) { - this.onPartitionsRevoked(partitions); - } - - /** - * @deprecated Use {@link #close(Collection)} instead for partition cleanup. - */ - @Deprecated - public void onPartitionsRevoked(Collection partitions) { } /** diff --git a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTask.java b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTask.java index abe3e36bf1e76..c8dd4db6d37a1 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTask.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTask.java @@ -131,28 +131,6 @@ public void commit() throws InterruptedException { @Override public abstract void stop(); - /** - *
<p>
          - * Commit an individual {@link SourceRecord} when the callback from the producer client is received. This method is - * also called when a record is filtered by a transformation, and thus will never be ACK'd by a broker. - *
<p>
          - * This is an alias for {@link #commitRecord(SourceRecord, RecordMetadata)} for backwards compatibility. The default - * implementation of {@link #commitRecord(SourceRecord, RecordMetadata)} just calls this method. It is not necessary - * to override both methods. - *
<p>
          - * SourceTasks are not required to implement this functionality; Kafka Connect will record offsets - * automatically. This hook is provided for systems that also need to store offsets internally - * in their own system. - * - * @param record {@link SourceRecord} that was successfully sent via the producer or filtered by a transformation - * @throws InterruptedException - * @deprecated Use {@link #commitRecord(SourceRecord, RecordMetadata)} instead. - */ - @Deprecated - public void commitRecord(SourceRecord record) throws InterruptedException { - // This space intentionally left blank. - } - /** *
<p>
          * Commit an individual {@link SourceRecord} when the callback from the producer client is received. This method is @@ -164,8 +142,7 @@ public void commitRecord(SourceRecord record) throws InterruptedException { * automatically. This hook is provided for systems that also need to store offsets internally * in their own system. *
<p>
          - * The default implementation just calls {@link #commitRecord(SourceRecord)}, which is a nop by default. It is - * not necessary to implement both methods. + * The default implementation is a nop. It is not necessary to implement the method. * * @param record {@link SourceRecord} that was successfully sent via the producer, filtered by a transformation, or dropped on producer exception * @param metadata {@link RecordMetadata} record metadata returned from the broker, or null if the record was filtered or if producer exceptions are ignored @@ -173,7 +150,6 @@ public void commitRecord(SourceRecord record) throws InterruptedException { */ public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException { - // by default, just call other method for backwards compatibility - commitRecord(record); + // by default, just do nothing } } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java index c9b6ef047e475..4c62393e3ea04 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java @@ -29,6 +29,14 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -849,7 +857,10 @@ public void shouldParseStringsWithMultipleDelimiters() { @Test public void shouldConvertTimeValues() { - java.util.Date current = new java.util.Date(); + LocalDateTime localTime = LocalDateTime.now(); + LocalTime localTimeTruncated = localTime.toLocalTime().truncatedTo(ChronoUnit.MILLIS); + ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime); + java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000); long currentMillis = current.getTime() % MILLIS_PER_DAY; // java.util.Date - just copy @@ -857,18 +868,21 @@ public void shouldConvertTimeValues() { assertEquals(current, t1); // java.util.Date as a Timestamp - discard the date and keep just day's milliseconds - t1 = Values.convertToTime(Timestamp.SCHEMA, current); - assertEquals(new java.util.Date(currentMillis), t1); + java.util.Date t2 = Values.convertToTime(Timestamp.SCHEMA, current); + assertEquals(new java.util.Date(currentMillis), t2); - // ISO8601 strings - currently broken because tokenization breaks at colon + // ISO8601 strings - accept a string matching pattern "HH:mm:ss.SSS'Z'" + java.util.Date t3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIME_FORMAT_PATTERN))); + LocalTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(t3.getTime()), ZoneId.systemDefault()).toLocalTime(); + assertEquals(localTimeTruncated, time3); // Millis as string - java.util.Date t3 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis)); - assertEquals(currentMillis, t3.getTime()); + java.util.Date t4 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis)); + assertEquals(currentMillis, t4.getTime()); // Millis as long - java.util.Date t4 = Values.convertToTime(Time.SCHEMA, currentMillis); - assertEquals(currentMillis, 
t4.getTime()); + java.util.Date t5 = Values.convertToTime(Time.SCHEMA, currentMillis); + assertEquals(currentMillis, t5.getTime()); } @Test @@ -883,23 +897,28 @@ public void shouldConvertDateValues() { // java.util.Date as a Timestamp - discard the day's milliseconds and keep the date java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis); - d1 = Values.convertToDate(Timestamp.SCHEMA, currentDate); - assertEquals(currentDate, d1); + java.util.Date d2 = Values.convertToDate(Timestamp.SCHEMA, currentDate); + assertEquals(currentDate, d2); - // ISO8601 strings - currently broken because tokenization breaks at colon + // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd" + java.util.Date d3 = Values.convertToDate(Date.SCHEMA, LocalDate.ofEpochDay(days).format(DateTimeFormatter.ISO_LOCAL_DATE)); + assertEquals(currentDate, d3); // Days as string - java.util.Date d3 = Values.convertToDate(Date.SCHEMA, Long.toString(days)); - assertEquals(currentDate, d3); + java.util.Date d4 = Values.convertToDate(Date.SCHEMA, Long.toString(days)); + assertEquals(currentDate, d4); // Days as long - java.util.Date d4 = Values.convertToDate(Date.SCHEMA, days); - assertEquals(currentDate, d4); + java.util.Date d5 = Values.convertToDate(Date.SCHEMA, days); + assertEquals(currentDate, d5); } @Test public void shouldConvertTimestampValues() { - java.util.Date current = new java.util.Date(); + LocalDateTime localTime = LocalDateTime.now(); + LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.MILLIS); + ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime); + java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000); long currentMillis = current.getTime() % MILLIS_PER_DAY; // java.util.Date - just copy @@ -912,18 +931,21 @@ public void shouldConvertTimestampValues() { assertEquals(currentDate, ts1); // java.util.Date as a Time - discard the date and keep the day's milliseconds - ts1 = Values.convertToTimestamp(Time.SCHEMA, currentMillis); - assertEquals(new java.util.Date(currentMillis), ts1); + java.util.Date ts2 = Values.convertToTimestamp(Time.SCHEMA, currentMillis); + assertEquals(new java.util.Date(currentMillis), ts2); - // ISO8601 strings - currently broken because tokenization breaks at colon + // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'" + java.util.Date ts3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN))); + LocalDateTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(ts3.getTime()), ZoneId.systemDefault()); + assertEquals(localTimeTruncated, time3); // Millis as string - java.util.Date ts3 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime())); - assertEquals(current, ts3); + java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime())); + assertEquals(current, ts4); // Millis as long - java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime()); - assertEquals(current, ts4); + java.util.Date ts5 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime()); + assertEquals(current, ts5); } @Test diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java index 
58aac7994aefc..a72f85d068888 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java @@ -51,7 +51,7 @@ * * *
<p>
          This is a reference implementation of the {@link ConnectRestExtension} interface. It registers an implementation of {@link - * javax.ws.rs.container.ContainerRequestFilter} that does JAAS based authentication of incoming Basic Auth credentials. {@link + * jakarta.ws.rs.container.ContainerRequestFilter} that does JAAS based authentication of incoming Basic Auth credentials. {@link * ConnectRestExtension} implementations are loaded via the plugin class loader using {@link java.util.ServiceLoader} mechanism and hence * the packaged jar includes {@code META-INF/services/org.apache.kafka.connect.rest.extension.ConnectRestExtension} with the entry * {@code org.apache.kafka.connect.extension.auth.jaas.BasicAuthSecurityRestExtension} diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java index b090ee21d449d..d404bdc7dc19a 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java @@ -42,12 +42,13 @@ import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.Priorities; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.container.ContainerRequestFilter; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.SecurityContext; + +import jakarta.ws.rs.HttpMethod; +import jakarta.ws.rs.Priorities; +import jakarta.ws.rs.container.ContainerRequestContext; +import jakarta.ws.rs.container.ContainerRequestFilter; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.SecurityContext; @Priority(Priorities.AUTHENTICATION) public class JaasBasicAuthFilter implements ContainerRequestFilter { diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java index 146bd6a2adf63..81f3a7327d576 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java @@ -31,7 +31,8 @@ import java.util.function.Supplier; import javax.security.auth.login.Configuration; -import javax.ws.rs.core.Configurable; + +import jakarta.ws.rs.core.Configurable; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java index 24ecadcc0a09b..bcd6e0ab31995 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java @@ 
-39,11 +39,12 @@ import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.ChoiceCallback; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.SecurityContext; -import javax.ws.rs.core.UriInfo; + +import jakarta.ws.rs.HttpMethod; +import jakarta.ws.rs.container.ContainerRequestContext; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.SecurityContext; +import jakarta.ws.rs.core.UriInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/connect/file/src/test/resources/log4j.properties b/connect/file/src/test/resources/log4j.properties deleted file mode 100644 index 548e8c33cfbe9..0000000000000 --- a/connect/file/src/test/resources/log4j.properties +++ /dev/null @@ -1,28 +0,0 @@ -## -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -# -# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information -# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a -# specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. -# -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n - -log4j.logger.kafka=WARN diff --git a/connect/file/src/test/resources/log4j2.yaml b/connect/file/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..1e9f550fa6d6d --- /dev/null +++ b/connect/file/src/test/resources/log4j2.yaml @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
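
(Aside: the javax.ws.rs to jakarta.ws.rs switch in the hunks above means REST extensions compiled against the old namespace must be rebuilt. A rough sketch of a request filter on the new namespace; the class name and header check are invented for illustration.)

import jakarta.ws.rs.container.ContainerRequestContext;
import jakarta.ws.rs.container.ContainerRequestFilter;
import jakarta.ws.rs.core.Response;

public class ExampleAuthFilter implements ContainerRequestFilter {
    @Override
    public void filter(ContainerRequestContext requestContext) {
        // Reject requests without credentials; JaasBasicAuthFilter above performs the real JAAS check.
        if (requestContext.getHeaderString("Authorization") == null) {
            requestContext.abortWith(Response.status(Response.Status.UNAUTHORIZED).build());
        }
    }
}
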
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %X{connector.context}%m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: WARN diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java index ba4aab5f836cd..30d68971568f2 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java @@ -149,10 +149,9 @@ public class JsonConverter implements Converter, HeaderConverter, Versioned { LOGICAL_CONVERTERS.put(Decimal.LOGICAL_NAME, new LogicalTypeConverter() { @Override public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) { - if (!(value instanceof BigDecimal)) + if (!(value instanceof BigDecimal decimal)) throw new DataException("Invalid type for Decimal, expected BigDecimal but was " + value.getClass()); - final BigDecimal decimal = (BigDecimal) value; switch (config.decimalFormat()) { case NUMERIC: return JSON_NODE_FACTORY.numberNode(decimal); @@ -241,15 +240,15 @@ public JsonConverter() { /** * Creates a JsonConvert initializing serializer and deserializer. * - * @param enableAfterburner permits to enable/disable the registration of Jackson Afterburner module. + * @param enableBlackbird permits to enable/disable the registration of Jackson Blackbird module. *
<p>
          * NOTE: This is visible only for testing */ - public JsonConverter(boolean enableAfterburner) { + public JsonConverter(boolean enableBlackbird) { serializer = new JsonSerializer( Set.of(), JSON_NODE_FACTORY, - enableAfterburner + enableBlackbird ); deserializer = new JsonDeserializer( @@ -259,7 +258,7 @@ public JsonConverter(boolean enableAfterburner) { DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS ), JSON_NODE_FACTORY, - enableAfterburner + enableBlackbird ); } diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java index 40d4631dfb9ee..775768f37d31d 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.module.afterburner.AfterburnerModule; +import com.fasterxml.jackson.module.blackbird.BlackbirdModule; import java.util.Collections; import java.util.Set; @@ -53,13 +53,13 @@ public JsonDeserializer() { JsonDeserializer( final Set deserializationFeatures, final JsonNodeFactory jsonNodeFactory, - final boolean enableAfterburner + final boolean enableBlackbird ) { objectMapper.enable(JsonReadFeature.ALLOW_LEADING_ZEROS_FOR_NUMBERS.mappedFeature()); deserializationFeatures.forEach(objectMapper::enable); objectMapper.setNodeFactory(jsonNodeFactory); - if (enableAfterburner) { - objectMapper.registerModule(new AfterburnerModule()); + if (enableBlackbird) { + objectMapper.registerModule(new BlackbirdModule()); } } diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java index 64c441414885d..e40f530469af6 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.module.afterburner.AfterburnerModule; +import com.fasterxml.jackson.module.blackbird.BlackbirdModule; import java.util.Collections; import java.util.Set; @@ -52,12 +52,12 @@ public JsonSerializer() { JsonSerializer( final Set serializationFeatures, final JsonNodeFactory jsonNodeFactory, - final boolean enableAfterburner + final boolean enableBlackbird ) { serializationFeatures.forEach(objectMapper::enable); objectMapper.setNodeFactory(jsonNodeFactory); - if (enableAfterburner) { - objectMapper.registerModule(new AfterburnerModule()); + if (enableBlackbird) { + objectMapper.registerModule(new BlackbirdModule()); } } diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java index 7733ccf3fd5cd..ae273c36849a7 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java @@ -115,6 +115,6 @@ public boolean isCheckpointsTopic(String topic) { @Override 
public boolean isMM2InternalTopic(String topic) { - return topic.endsWith(internalSuffix()); + return topic.startsWith("mm2") && topic.endsWith(internalSuffix()) || isCheckpointsTopic(topic); } } diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/ReplicationPolicy.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/ReplicationPolicy.java index fbd8725eb64bf..34d3344509719 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/ReplicationPolicy.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/ReplicationPolicy.java @@ -98,7 +98,7 @@ default boolean isCheckpointsTopic(String topic) { * This is used to make sure the topic doesn't need to be replicated. */ default boolean isMM2InternalTopic(String topic) { - return topic.endsWith(".internal"); + return topic.startsWith("mm2") && topic.endsWith(".internal") || isCheckpointsTopic(topic); } /** @@ -106,7 +106,6 @@ default boolean isMM2InternalTopic(String topic) { */ default boolean isInternalTopic(String topic) { boolean isKafkaInternalTopic = topic.startsWith("__") || topic.startsWith("."); - boolean isDefaultConnectTopic = topic.endsWith("-internal") || topic.endsWith(".internal"); - return isMM2InternalTopic(topic) || isKafkaInternalTopic || isDefaultConnectTopic; + return isMM2InternalTopic(topic) || isKafkaInternalTopic; } } diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java index 802d0b606c234..86aaf8ffd0e2b 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java @@ -38,15 +38,17 @@ public void setUp() { @Test public void testInternalTopic() { + Map config = new HashMap<>(); + config.put(MirrorClientConfig.REPLICATION_POLICY_SEPARATOR, "."); + DEFAULT_REPLICATION_POLICY.configure(config); + // starts with '__' assertTrue(DEFAULT_REPLICATION_POLICY.isInternalTopic("__consumer_offsets")); // starts with '.' assertTrue(DEFAULT_REPLICATION_POLICY.isInternalTopic(".hiddentopic")); - // ends with '.internal': default DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG in standalone mode. + // starts with 'mm2' and ends with '.internal': default DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG in standalone mode. assertTrue(DEFAULT_REPLICATION_POLICY.isInternalTopic("mm2-offsets.CLUSTER.internal")); - // ends with '-internal' - assertTrue(DEFAULT_REPLICATION_POLICY.isInternalTopic("mm2-offsets-CLUSTER-internal")); // non-internal topic. 
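
(Aside: with the tightened defaults above, only mm2-prefixed '.internal' topics, checkpoints topics, and Kafka's own '__'/'.'-prefixed topics count as internal. A small sketch of the resulting behaviour, assuming the default '.' separator.)

import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;

public class InternalTopicSketch {
    public static void main(String[] args) {
        DefaultReplicationPolicy policy = new DefaultReplicationPolicy();
        System.out.println(policy.isInternalTopic("mm2-offsets.CLUSTER.internal")); // true: mm2 prefix + .internal suffix
        System.out.println(policy.isInternalTopic("__consumer_offsets"));           // true: Kafka-internal prefix
        System.out.println(policy.isInternalTopic("billing.internal"));             // false: no longer internal by default
    }
}
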
assertFalse(DEFAULT_REPLICATION_POLICY.isInternalTopic("mm2-offsets_CLUSTER_internal")); } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultTopicFilter.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultTopicFilter.java index 35da6132c9fb8..95414685ba74a 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultTopicFilter.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultTopicFilter.java @@ -33,7 +33,7 @@ public class DefaultTopicFilter implements TopicFilter { public static final String TOPICS_EXCLUDE_CONFIG = "topics.exclude"; private static final String TOPICS_EXCLUDE_DOC = "List of topics and/or regexes that should not be replicated."; - public static final String TOPICS_EXCLUDE_DEFAULT = ".*[\\-\\.]internal, .*\\.replica, __.*"; + public static final String TOPICS_EXCLUDE_DEFAULT = "mm2.*\\.internal, .*\\.replica, __.*"; private Pattern includePattern; private Pattern excludePattern; diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java index 0af3b14e3b8e9..218c64e85a478 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java @@ -132,6 +132,12 @@ public Class taskClass() { // divide consumer groups among tasks @Override public List> taskConfigs(int maxTasks) { + // If the replication is disabled or checkpoint emission is disabled by setting 'emit.checkpoints.enabled' to false, + // the interval of checkpoint emission will be negative and no 'MirrorCheckpointTask' will be created. + if (!config.enabled() || config.emitCheckpointsInterval().isNegative()) { + return Collections.emptyList(); + } + if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. @@ -139,13 +145,11 @@ public List> taskConfigs(int maxTasks) { throw new RetriableException("Timeout while loading consumer groups."); } - // if the replication is disabled, known consumer group is empty, or checkpoint emission is - // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission - // will be negative and no 'MirrorCheckpointTask' will be created - if (!config.enabled() || knownConsumerGroups.isEmpty() - || config.emitCheckpointsInterval().isNegative()) { + // If the consumer group is empty, no 'MirrorCheckpointTask' will be created. 
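
(Aside: TOPICS_EXCLUDE_DEFAULT above narrows the excluded internals in the same way. A rough illustration of which names the new default patterns cover; the real filter splits the comma-separated list itself, this sketch simply ORs the three regexes.)

import java.util.regex.Pattern;

public class TopicExcludeSketch {
    public static void main(String[] args) {
        Pattern exclude = Pattern.compile("mm2.*\\.internal|.*\\.replica|__.*");
        System.out.println(exclude.matcher("mm2-offsets.CLUSTER.internal").matches()); // true
        System.out.println(exclude.matcher("__consumer_offsets").matches());           // true
        System.out.println(exclude.matcher("orders.internal").matches());              // false: replicated under the new default
    }
}
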
+ if (knownConsumerGroups.isEmpty()) { return Collections.emptyList(); } + int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java index e659c4aae79a2..254e2bf8ca649 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java @@ -20,9 +20,10 @@ import org.apache.kafka.clients.admin.ConsumerGroupDescription; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.ConsumerGroupState; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.data.Schema; @@ -45,7 +46,6 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.apache.kafka.connect.mirror.MirrorUtils.adminCall; @@ -195,7 +195,7 @@ Map checkpointsForGroup(Map shouldCheckpointTopic(x.getKey().topic())) // Only perform relevant checkpoints filtered by "topic filter" .map(x -> checkpoint(group, x.getKey(), x.getValue())) - .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty)) // do not emit checkpoints for partitions that don't have offset-syncs + .flatMap(o -> o.stream()) // do not emit checkpoints for partitions that don't have offset-syncs .filter(x -> x.downstreamOffset() >= 0) // ignore offsets we cannot translate accurately .filter(this::checkpointIsMoreRecent) // do not emit checkpoints for partitions that have a later checkpoint .collect(Collectors.toMap(Checkpoint::topicPartition, Function.identity())); @@ -297,11 +297,12 @@ private void refreshIdleConsumerGroupOffset() throws ExecutionException, Interru for (String group : consumerGroups) { try { ConsumerGroupDescription consumerGroupDesc = consumerGroupsDesc.get(group).get(); - ConsumerGroupState consumerGroupState = consumerGroupDesc.state(); + GroupState consumerGroupState = consumerGroupDesc.groupState(); // sync offset to the target cluster only if the state of current consumer group is: // (1) idle: because the consumer at target is not actively consuming the mirrored topic // (2) dead: the new consumer that is recently created at source and never existed at target - if (consumerGroupState == ConsumerGroupState.EMPTY) { + // This case will be reported as a GroupIdNotFoundException + if (consumerGroupState == GroupState.EMPTY) { idleConsumerGroupsOffset.put( group, adminCall( @@ -311,8 +312,13 @@ private void refreshIdleConsumerGroupOffset() throws ExecutionException, Interru ); } // new consumer upstream has state "DEAD" and will be identified during the offset sync-up - } catch (InterruptedException | ExecutionException e) { - log.error("Error querying for consumer group {} on cluster {}.", group, targetClusterAlias, e); + } catch (InterruptedException ie) { + log.error("Error querying for consumer group {} on cluster 
{}.", group, targetClusterAlias, ie); + } catch (ExecutionException ee) { + // check for non-existent new consumer upstream which will be identified during the offset sync-up + if (!(ee.getCause() instanceof GroupIdNotFoundException)) { + log.error("Error querying for consumer group {} on cluster {}.", group, targetClusterAlias, ee); + } } } } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java index 3bc7aed02b36e..b0cc368a5fcbe 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java @@ -102,11 +102,7 @@ public class MirrorMaker { private static final long SHUTDOWN_TIMEOUT_SECONDS = 60L; - public static final List> CONNECTOR_CLASSES = Collections.unmodifiableList( - Arrays.asList( - MirrorSourceConnector.class, - MirrorHeartbeatConnector.class, - MirrorCheckpointConnector.class)); + public static final List> CONNECTOR_CLASSES = List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class, MirrorCheckpointConnector.class); private final Map herders = new HashMap<>(); private CountDownLatch startLatch; diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java index f65899dac6e3a..a129390b39785 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java @@ -420,7 +420,7 @@ private Set toTopics(Collection tps) { void syncTopicAcls() throws InterruptedException, ExecutionException { Optional> rawBindings = listTopicAclBindings(); - if (!rawBindings.isPresent()) + if (rawBindings.isEmpty()) return; List filteredBindings = rawBindings.get().stream() .filter(x -> x.pattern().resourceType() == ResourceType.TOPIC) diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java index 75ce230218366..1a5ef6cc4583a 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java @@ -186,8 +186,7 @@ boolean update(long upstreamOffset, long downstreamOffset) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof PartitionState)) return false; - PartitionState that = (PartitionState) o; + if (!(o instanceof PartitionState that)) return false; return previousUpstreamOffset == that.previousUpstreamOffset && previousDownstreamOffset == that.previousDownstreamOffset && lastSyncDownstreamOffset == that.lastSyncDownstreamOffset && diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java index 06480bcf4a5a4..4c02160a194f5 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java @@ -24,11 +24,11 @@ import java.util.Map; -import javax.inject.Inject; -import javax.ws.rs.NotFoundException; -import 
javax.ws.rs.Path; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriInfo; +import jakarta.inject.Inject; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.UriInfo; @Path("/{source}/{target}/connectors") public class InternalMirrorResource extends InternalClusterResource { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java index a410adde94458..21bcc7cbad5e9 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java @@ -40,7 +40,7 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.ExactlyOnceSupport; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; import java.util.ArrayList; @@ -167,10 +167,10 @@ public void testAclFiltering() { new DefaultReplicationPolicy(), x -> true, getConfigPropertyFilter()); assertFalse(connector.shouldReplicateAcl( new AclBinding(new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), - new AccessControlEntry("kafka", "", AclOperation.WRITE, AclPermissionType.ALLOW))), "should not replicate ALLOW WRITE"); + new AccessControlEntry("kafka", "", AclOperation.WRITE, AclPermissionType.ALLOW))), "should not replicate ALLOW WRITE"); assertTrue(connector.shouldReplicateAcl( new AclBinding(new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), - new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.ALLOW))), "should replicate ALLOW ALL"); + new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.ALLOW))), "should replicate ALLOW ALL"); } @Test diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeForwardingAdminWithLocalMetadata.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeForwardingAdminWithLocalMetadata.java index 1f2f56166a23f..406cc280d5971 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeForwardingAdminWithLocalMetadata.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeForwardingAdminWithLocalMetadata.java @@ -17,9 +17,6 @@ package org.apache.kafka.connect.mirror.clients.admin; -import org.apache.kafka.clients.admin.AlterConfigsOptions; -import org.apache.kafka.clients.admin.AlterConfigsResult; -import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.CreateAclsOptions; import org.apache.kafka.clients.admin.CreateAclsResult; import org.apache.kafka.clients.admin.CreatePartitionsOptions; @@ -30,7 +27,6 @@ import org.apache.kafka.clients.admin.NewPartitions; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.acl.AclBinding; -import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.errors.TopicExistsException; import org.slf4j.Logger; @@ -79,23 +75,6 @@ public CreatePartitionsResult createPartitions(Map newPar return createPartitionsResult; } - @Deprecated - @Override - public AlterConfigsResult alterConfigs(Map configs, AlterConfigsOptions options) { - AlterConfigsResult alterConfigsResult = super.alterConfigs(configs, options); - configs.forEach((configResource, newConfigs) 
-> alterConfigsResult.values().get(configResource).whenComplete((ignored, error) -> { - if (error == null) { - if (configResource.type() == ConfigResource.Type.TOPIC) { - FakeLocalMetadataStore.updateTopicConfig(configResource.name(), newConfigs); - } - } else { - log.error("Unable to intercept admin client operation", error); - } - })); - return alterConfigsResult; - } - - @Override public CreateAclsResult createAcls(Collection acls, CreateAclsOptions options) { CreateAclsResult aclsResult = super.createAcls(acls, options); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java index d20484e788525..2ba4438bdba9b 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java @@ -280,7 +280,13 @@ public void testMultiNodeCluster() throws Exception { // Cluster aliases final String a = "A"; // Use a convoluted cluster name to ensure URL encoding/decoding works - final String b = "B- ._~:/?#[]@!$&'()*+;=\"<>%{}|\\^`618"; + // The servlet 6.0 spec no longer allows some characters such as forward slashes, control characters, + // etc. even if they are encoded. Jetty 12 will enforce this and throw a 400 ambiguous error + // so the string of characters for the variable "b" has been updated to only include characters + // that are valid with the new spec. + // See https://jakarta.ee/specifications/servlet/6.0/jakarta-servlet-spec-6.0#uri-path-canonicalization + // and specifically the section: "10. Rejecting Suspicious Sequences." for details. + final String b = "B-_~:?#[]@!$&'()*+=\"<>{}|^`618"; final String ab = a + "->" + b; final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; diff --git a/connect/mirror/src/test/resources/log4j.properties b/connect/mirror/src/test/resources/log4j.properties deleted file mode 100644 index c4ca6a2388fb1..0000000000000 --- a/connect/mirror/src/test/resources/log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -## -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -# -# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information -# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a -# specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. 
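
(Aside: the %X{connector.context} placeholder explained above, and carried over into the new log4j2.yaml files, is filled from the SLF4J MDC. A minimal sketch of how a value reaches that position; the key matches the pattern, everything else is illustrative.)

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class ConnectorContextSketch {
    private static final Logger log = LoggerFactory.getLogger(ConnectorContextSketch.class);

    public static void main(String[] args) {
        MDC.put("connector.context", "[my-connector|task-0] ");
        try {
            // Rendered roughly as: [timestamp] INFO [my-connector|task-0] polling records (...:line)
            log.info("polling records");
        } finally {
            MDC.remove("connector.context");
        }
    }
}
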
-# -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n -# -# The following line includes no MDC context parameters: -#log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n (%t) - -log4j.logger.kafka=WARN -log4j.logger.state.change.logger=OFF -log4j.logger.org.apache.kafka.connect=DEBUG diff --git a/connect/mirror/src/test/resources/log4j2.yaml b/connect/mirror/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..b63606d0ba56f --- /dev/null +++ b/connect/mirror/src/test/resources/log4j2.yaml @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %X{connector.context}%m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: WARN + + - name: state.change.logger + level: "OFF" + + - name: org.apache.kafka.connect + level: DEBUG diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java index 120e03c6f8e3b..43af6b274b6ac 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java @@ -121,9 +121,7 @@ CreateConnectorRequest parseConnectorConfigurationFile(String filePath) throws I File connectorConfigurationFile = Paths.get(filePath).toFile(); try { - Map connectorConfigs = objectMapper.readValue( - connectorConfigurationFile, - new TypeReference>() { }); + Map connectorConfigs = objectMapper.readValue(connectorConfigurationFile, new TypeReference<>() { }); if (!connectorConfigs.containsKey(NAME_CONFIG)) { throw new ConnectException("Connector configuration at '" + filePath + "' is missing the mandatory '" + NAME_CONFIG + "' " @@ -136,8 +134,7 @@ CreateConnectorRequest parseConnectorConfigurationFile(String filePath) throws I try { objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - CreateConnectorRequest createConnectorRequest = objectMapper.readValue(connectorConfigurationFile, - new TypeReference() { }); + CreateConnectorRequest createConnectorRequest = objectMapper.readValue(connectorConfigurationFile, new TypeReference<>() { }); if (createConnectorRequest.config().containsKey(NAME_CONFIG)) { if (!createConnectorRequest.config().get(NAME_CONFIG).equals(createConnectorRequest.name())) { throw new ConnectException("Connector name configuration in 'config' doesn't match the one specified in 'name' at '" + filePath diff --git 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index 3f72aefdb5fdb..5984a32d3cd9f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -34,7 +34,9 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.NotFoundException; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; @@ -63,7 +65,9 @@ import org.apache.kafka.connect.util.Stage; import org.apache.kafka.connect.util.TemporaryStage; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,8 +89,6 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.function.Function; @@ -94,9 +96,15 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_VERSION; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG; + /** * Abstract Herder implementation which handles connector/task lifecycle tracking. 
Extensions @@ -138,7 +146,7 @@ public abstract class AbstractHerder implements Herder, TaskStatus.Listener, Con private final Time time; protected final Loggers loggers; - private final ConcurrentMap tempConnectors = new ConcurrentHashMap<>(); + private final CachedConnectors cachedConnectors; public AbstractHerder(Worker worker, String workerId, @@ -157,6 +165,7 @@ public AbstractHerder(Worker worker, this.connectorExecutor = Executors.newCachedThreadPool(); this.time = time; this.loggers = new Loggers(time); + this.cachedConnectors = new CachedConnectors(worker.getPlugins()); } @Override @@ -327,20 +336,6 @@ public ConnectorInfo connectorInfo(String connector) { ); } - protected Map> buildTasksConfig(String connector) { - final ClusterConfigState configState = configBackingStore.snapshot(); - - if (!configState.contains(connector)) - return Collections.emptyMap(); - - Map> configs = new HashMap<>(); - for (ConnectorTaskId cti : configState.tasks(connector)) { - configs.put(cti, configState.rawTaskConfig(cti)); - } - - return configs; - } - @Override public ConnectorStateInfo connectorStatus(String connName) { ConnectorStatus connector = statusBackingStore.get(connName); @@ -412,6 +407,8 @@ protected Map validateSourceConnectorConfig(SourceConnector * may be null, in which case no validation will be performed under the assumption that the * connector will use inherit the converter settings from the worker. Some errors encountered * during validation may be {@link ConfigValue#addErrorMessage(String) added} to this object + * @param pluginVersionValue the {@link ConfigValue} for the converter version property in the connector config; + * * @param pluginInterface the interface for the plugin type * (e.g., {@code org.apache.kafka.connect.storage.Converter.class}); * may not be null @@ -432,14 +429,18 @@ protected Map validateSourceConnectorConfig(SourceConnector * @param the plugin class to perform validation for */ + @SuppressWarnings("unchecked") private ConfigInfos validateConverterConfig( Map connectorConfig, ConfigValue pluginConfigValue, + ConfigValue pluginVersionValue, Class pluginInterface, Function configDefAccessor, String pluginName, String pluginProperty, + String pluginVersionProperty, Map defaultProperties, + ClassLoader connectorLoader, Function reportStage ) { Objects.requireNonNull(connectorConfig); @@ -447,12 +448,15 @@ private ConfigInfos validateConverterConfig( Objects.requireNonNull(configDefAccessor); Objects.requireNonNull(pluginName); Objects.requireNonNull(pluginProperty); + Objects.requireNonNull(pluginVersionProperty); String pluginClass = connectorConfig.get(pluginProperty); + String pluginVersion = connectorConfig.get(pluginVersionProperty); if (pluginClass == null || pluginConfigValue == null || !pluginConfigValue.errorMessages().isEmpty() + || !pluginVersionValue.errorMessages().isEmpty() ) { // Either no custom converter was specified, or one was specified but there's a problem with it. // No need to proceed any further. 
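For orientation, the converter/connector version strings handled in validateConverterConfig above are parsed into Maven version ranges, which is why VersionRange and InvalidVersionSpecificationException appear in the new imports. Below is a minimal, standalone sketch of that notation using the maven-artifact API directly (assumes org.apache.maven:maven-artifact on the classpath; the version numbers are made up, and PluginUtils.connectorVersionRequirement may apply additional rules not shown in this hunk):

    import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
    import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException;
    import org.apache.maven.artifact.versioning.VersionRange;

    public class VersionRangeSketch {
        public static void main(String[] args) throws InvalidVersionSpecificationException {
            // A bracketed spec is a hard requirement: only versions inside the interval match.
            VersionRange range = VersionRange.createFromVersionSpec("[3.5.0,4.0.0)");
            System.out.println(range.containsVersion(new DefaultArtifactVersion("3.7.1"))); // true
            System.out.println(range.containsVersion(new DefaultArtifactVersion("4.1.0"))); // false

            // A malformed spec fails at parse time with InvalidVersionSpecificationException,
            // which the herder code surfaces as a config validation error rather than a crash.
            try {
                VersionRange.createFromVersionSpec("[3.5.0");
            } catch (InvalidVersionSpecificationException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }

An unbracketed spec such as "3.7.0" is treated by Maven as a soft requirement; how those are resolved is up to PluginUtils.connectorVersionRequirement and is not shown in this hunk.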
@@ -462,11 +466,22 @@ private ConfigInfos validateConverterConfig( T pluginInstance; String stageDescription = "instantiating the connector's " + pluginName + " for validation"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { - pluginInstance = Utils.newInstance(pluginClass, pluginInterface); + VersionRange range = PluginUtils.connectorVersionRequirement(pluginVersion); + pluginInstance = (T) plugins().newPlugin(pluginClass, range, connectorLoader); + } catch (VersionedPluginLoadingException e) { + log.error("Failed to load {} class {} with version {}", pluginName, pluginClass, pluginVersion, e); + pluginConfigValue.addErrorMessage(e.getMessage()); + pluginVersionValue.addErrorMessage(e.getMessage()); + return null; } catch (ClassNotFoundException | RuntimeException e) { log.error("Failed to instantiate {} class {}; this should have been caught by prior validation logic", pluginName, pluginClass, e); pluginConfigValue.addErrorMessage("Failed to load class " + pluginClass + (e.getMessage() != null ? ": " + e.getMessage() : "")); return null; + } catch (InvalidVersionSpecificationException e) { + // this should have been caught by prior validation logic + log.error("Invalid version range for {} class {} with version {}", pluginName, pluginClass, pluginVersion, e); + pluginVersionValue.addErrorMessage(e.getMessage()); + return null; } try { @@ -508,55 +523,55 @@ private ConfigInfos validateConverterConfig( } } - private ConfigInfos validateHeaderConverterConfig( - Map connectorConfig, - ConfigValue headerConverterConfigValue, + private ConfigInfos validateAllConverterConfigs( + Map connectorProps, + Map validatedConnectorConfig, + ClassLoader connectorLoader, Function reportStage ) { - return validateConverterConfig( - connectorConfig, - headerConverterConfigValue, + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + // do custom converter-specific validation + ConfigInfos headerConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(HEADER_CONVERTER_VERSION_CONFIG), HeaderConverter.class, HeaderConverter::config, "header converter", HEADER_CONVERTER_CLASS_CONFIG, + HEADER_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), + connectorLoader, reportStage ); - } - - private ConfigInfos validateKeyConverterConfig( - Map connectorConfig, - ConfigValue keyConverterConfigValue, - Function reportStage - ) { - return validateConverterConfig( - connectorConfig, - keyConverterConfigValue, + ConfigInfos keyConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(KEY_CONVERTER_VERSION_CONFIG), Converter.class, Converter::config, "key converter", KEY_CONVERTER_CLASS_CONFIG, + KEY_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), + connectorLoader, reportStage ); - } - private ConfigInfos validateValueConverterConfig( - Map connectorConfig, - ConfigValue valueConverterConfigValue, - Function reportStage - ) { - return validateConverterConfig( - connectorConfig, - valueConverterConfigValue, + ConfigInfos valueConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(VALUE_CONVERTER_VERSION_CONFIG), Converter.class, Converter::config, "value converter", 
VALUE_CONVERTER_CLASS_CONFIG, + VALUE_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), + connectorLoader, reportStage ); + return mergeConfigInfos(connType, headerConverterConfigInfos, keyConverterConfigInfos, valueConverterConfigInfos); } @Override @@ -648,6 +663,146 @@ protected boolean connectorUsesProducer(org.apache.kafka.connect.health.Connecto || SinkConnectorConfig.hasDlqTopicConfig(connProps); } + private ConfigInfos validateClientOverrides( + Map connectorProps, + org.apache.kafka.connect.health.ConnectorType connectorType, + Class connectorClass, + Function reportStage, + boolean doLog + ) { + if (connectorClass == null || connectorType == null) { + return null; + } + AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); + String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + ConfigInfos producerConfigInfos = null; + ConfigInfos consumerConfigInfos = null; + ConfigInfos adminConfigInfos = null; + String stageDescription = null; + + if (connectorUsesProducer(connectorType, connectorProps)) { + stageDescription = "validating producer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + producerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, + connectorConfig, + ProducerConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.PRODUCER, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesAdmin(connectorType, connectorProps)) { + stageDescription = "validating admin config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + adminConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, + connectorConfig, + AdminClientConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.ADMIN, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesConsumer(connectorType, connectorProps)) { + stageDescription = "validating consumer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + consumerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, + connectorConfig, + ConsumerConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.CONSUMER, + connectorClientConfigOverridePolicy); + } + } + return mergeConfigInfos(connType, + producerConfigInfos, + consumerConfigInfos, + adminConfigInfos + ); + } + + private ConfigInfos validateConnectorPluginSpecifiedConfigs( + Map connectorProps, + Map validatedConnectorConfig, + ConfigDef enrichedConfigDef, + Connector connector, + Function reportStage + ) { + List configValues = new ArrayList<>(validatedConnectorConfig.values()); + Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); + Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); + + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + // do custom connector-specific validation + ConfigDef configDef; + String stageDescription = "retrieving the configuration definition from the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + configDef = connector.config(); + } + if (null == 
configDef) { + throw new BadRequestException( + String.format( + "%s.config() must return a ConfigDef that is not null.", + connector.getClass().getName() + ) + ); + } + + Config config; + stageDescription = "performing multi-property validation for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + config = connector.validate(connectorProps); + } + if (null == config) { + throw new BadRequestException( + String.format( + "%s.validate() must return a Config that is not null.", + connector.getClass().getName() + ) + ); + } + configKeys.putAll(configDef.configKeys()); + allGroups.addAll(configDef.groups()); + configValues.addAll(config.configValues()); + return generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); + } + + private void addNullValuedErrors(Map connectorProps, Map validatedConfig) { + connectorProps.entrySet().stream() + .filter(e -> e.getValue() == null) + .map(Map.Entry::getKey) + .forEach(prop -> + validatedConfig.computeIfAbsent(prop, ConfigValue::new) + .addErrorMessage("Null value can not be supplied as the configuration value.")); + } + + private ConfigInfos invalidVersionedConnectorValidation( + Map connectorProps, + VersionedPluginLoadingException e, + Function reportStage + ) { + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + ConfigDef configDef = ConnectorConfig.enrichedConfigDef(worker.getPlugins(), connType); + Map validatedConfig; + try (TemporaryStage stage = reportStage.apply("validating connector configuration")) { + validatedConfig = configDef.validateAll(connectorProps); + } + validatedConfig.get(CONNECTOR_CLASS_CONFIG).addErrorMessage(e.getMessage()); + validatedConfig.get(CONNECTOR_VERSION).addErrorMessage(e.getMessage()); + validatedConfig.get(CONNECTOR_VERSION).recommendedValues(e.availableVersions().stream().map(v -> (Object) v).collect(Collectors.toList())); + addNullValuedErrors(connectorProps, validatedConfig); + return generateResult(connType, configDef.configKeys(), new ArrayList<>(validatedConfig.values()), new ArrayList<>(configDef.groups())); + } + ConfigInfos validateConnectorConfig( Map connectorProps, Function reportStage, @@ -660,150 +815,60 @@ ConfigInfos validateConnectorConfig( connectorProps = worker.configTransformer().transform(connectorProps); } } - String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - if (connType == null) + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + if (connType == null) { throw new BadRequestException("Connector config " + connectorProps + " contains no connector type"); + } + + VersionRange connVersion; + Connector connector; + ClassLoader connectorLoader; + try { + connVersion = PluginUtils.connectorVersionRequirement(connectorProps.get(CONNECTOR_VERSION)); + connector = cachedConnectors.getConnector(connType, connVersion); + connectorLoader = plugins().pluginLoader(connType, connVersion); + log.info("Validating connector {}, version {}", connType, connector.version()); + } catch (VersionedPluginLoadingException e) { + log.warn("Failed to load connector {} with version {}, skipping additional validations (connector, converters, transformations, client overrides) ", + connType, connectorProps.get(CONNECTOR_VERSION), e); + return invalidVersionedConnectorValidation(connectorProps, e, reportStage); + } catch (Exception e) { + throw new BadRequestException(e.getMessage(), e); + } - Connector connector = getConnector(connType); - ClassLoader connectorLoader = plugins().connectorLoader(connType); 
try (LoaderSwap loaderSwap = plugins().withClassLoader(connectorLoader)) { - org.apache.kafka.connect.health.ConnectorType connectorType; + ConfigDef enrichedConfigDef; Map validatedConnectorConfig; + org.apache.kafka.connect.health.ConnectorType connectorType; if (connector instanceof SourceConnector) { connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE; - enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.configDef(), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); stageDescription = "validating source connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSourceConnectorConfig((SourceConnector) connector, enrichedConfigDef, connectorProps); } } else { connectorType = org.apache.kafka.connect.health.ConnectorType.SINK; - enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.configDef(), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); stageDescription = "validating sink connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSinkConnectorConfig((SinkConnector) connector, enrichedConfigDef, connectorProps); } } - connectorProps.entrySet().stream() - .filter(e -> e.getValue() == null) - .map(Map.Entry::getKey) - .forEach(prop -> - validatedConnectorConfig.computeIfAbsent(prop, ConfigValue::new) - .addErrorMessage("Null value can not be supplied as the configuration value.") - ); + addNullValuedErrors(connectorProps, validatedConnectorConfig); - List configValues = new ArrayList<>(validatedConnectorConfig.values()); - Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); - Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); + // the order of operations here is important, converter validations can add error messages to the connector config + // which are collected and converted to ConfigInfos in validateConnectorPluginSpecifiedConfigs + ConfigInfos converterConfigInfo = validateAllConverterConfigs(connectorProps, validatedConnectorConfig, connectorLoader, reportStage); + ConfigInfos clientOverrideInfo = validateClientOverrides(connectorProps, connectorType, connector.getClass(), reportStage, doLog); + ConfigInfos connectorConfigInfo = validateConnectorPluginSpecifiedConfigs(connectorProps, validatedConnectorConfig, enrichedConfigDef, connector, reportStage); - // do custom connector-specific validation - ConfigDef configDef; - stageDescription = "retrieving the configuration definition from the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - configDef = connector.config(); - } - if (null == configDef) { - throw new BadRequestException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - connector.getClass().getName() - ) - ); - } - - Config config; - stageDescription = "performing multi-property validation for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - config = connector.validate(connectorProps); - } - if (null == config) { - throw new BadRequestException( - String.format( - "%s.validate() must return a Config that is not null.", - 
connector.getClass().getName() - ) - ); - } - configKeys.putAll(configDef.configKeys()); - allGroups.addAll(configDef.groups()); - configValues.addAll(config.configValues()); - - // do custom converter-specific validation - ConfigInfos headerConverterConfigInfos = validateHeaderConverterConfig( - connectorProps, - validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), - reportStage - ); - ConfigInfos keyConverterConfigInfos = validateKeyConverterConfig( - connectorProps, - validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), - reportStage - ); - ConfigInfos valueConverterConfigInfos = validateValueConverterConfig( - connectorProps, - validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), - reportStage - ); - - ConfigInfos configInfos = generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); - AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); - String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); - ConfigInfos producerConfigInfos = null; - ConfigInfos consumerConfigInfos = null; - ConfigInfos adminConfigInfos = null; - - if (connectorUsesProducer(connectorType, connectorProps)) { - stageDescription = "validating producer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - producerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, - connectorConfig, - ProducerConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.PRODUCER, - connectorClientConfigOverridePolicy); - } - } - if (connectorUsesAdmin(connectorType, connectorProps)) { - stageDescription = "validating admin config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - adminConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, - connectorConfig, - AdminClientConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.ADMIN, - connectorClientConfigOverridePolicy); - } - } - if (connectorUsesConsumer(connectorType, connectorProps)) { - stageDescription = "validating consumer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - consumerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, - connectorConfig, - ConsumerConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.CONSUMER, - connectorClientConfigOverridePolicy); - } - } return mergeConfigInfos(connType, - configInfos, - producerConfigInfos, - consumerConfigInfos, - adminConfigInfos, - headerConverterConfigInfos, - keyConverterConfigInfos, - valueConverterConfigInfos + connectorConfigInfo, + clientOverrideInfo, + converterConfigInfo ); } } @@ -950,10 +1015,6 @@ private static ConfigValueInfo convertConfigValue(ConfigValue configValue, Type return new ConfigValueInfo(configValue.name(), value, recommendedValues, configValue.errorMessages(), configValue.visible()); } - protected Connector getConnector(String connType) { - return tempConnectors.computeIfAbsent(connType, k -> plugins().newConnector(k)); - } - /** * Retrieves ConnectorType for the class specified in the connector config * @param connConfig the connector config, may be null @@ -964,13 +1025,14 @@ public ConnectorType connectorType(Map connConfig) { if 
(connConfig == null) { return ConnectorType.UNKNOWN; } - String connClass = connConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + String connClass = connConfig.get(CONNECTOR_CLASS_CONFIG); if (connClass == null) { return ConnectorType.UNKNOWN; } try { - return ConnectorType.from(getConnector(connClass).getClass()); - } catch (ConnectException e) { + VersionRange range = PluginUtils.connectorVersionRequirement(connConfig.get(CONNECTOR_VERSION)); + return ConnectorType.from(cachedConnectors.getConnector(connClass, range).getClass()); + } catch (Exception e) { log.warn("Unable to retrieve connector type", e); return ConnectorType.UNKNOWN; } @@ -1092,25 +1154,33 @@ static Set keysWithVariableValues(Map rawConfig, Pattern @Override public List connectorPluginConfig(String pluginName) { + return connectorPluginConfig(pluginName, null); + } + + @Override + public List connectorPluginConfig(String pluginName, VersionRange range) { + Plugins p = plugins(); Class pluginClass; try { - pluginClass = p.pluginClass(pluginName); + pluginClass = p.pluginClass(pluginName, range); } catch (ClassNotFoundException cnfe) { throw new NotFoundException("Unknown plugin " + pluginName + "."); + } catch (VersionedPluginLoadingException e) { + throw new BadRequestException(e.getMessage(), e); } try (LoaderSwap loaderSwap = p.withClassLoader(pluginClass.getClassLoader())) { - Object plugin = p.newPlugin(pluginName); + Object plugin = p.newPlugin(pluginName, range); // Contains definitions coming from Connect framework ConfigDef baseConfigDefs = null; // Contains definitions specifically declared on the plugin ConfigDef pluginConfigDefs; if (plugin instanceof SinkConnector) { - baseConfigDefs = SinkConnectorConfig.configDef(); + baseConfigDefs = SinkConnectorConfig.enrichedConfigDef(p, pluginName); pluginConfigDefs = ((SinkConnector) plugin).config(); } else if (plugin instanceof SourceConnector) { - baseConfigDefs = SourceConnectorConfig.configDef(); + baseConfigDefs = SourceConnectorConfig.enrichedConfigDef(p, pluginName); pluginConfigDefs = ((SourceConnector) plugin).config(); } else if (plugin instanceof Converter) { pluginConfigDefs = ((Converter) plugin).config(); @@ -1128,8 +1198,9 @@ public List connectorPluginConfig(String pluginName) { // give precedence to the one defined by the plugin class // Preserve the ordering of properties as they're returned from each ConfigDef Map configsMap = new LinkedHashMap<>(pluginConfigDefs.configKeys()); - if (baseConfigDefs != null) + if (baseConfigDefs != null) { baseConfigDefs.configKeys().forEach(configsMap::putIfAbsent); + } List results = new ArrayList<>(); for (ConfigKey configKey : configsMap.values()) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java new file mode 100644 index 0000000000000..ebfa3522f90d2 --- /dev/null +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.connect.runtime; + +import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; + +import org.apache.maven.artifact.versioning.VersionRange; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class CachedConnectors { + + private static final String LATEST_VERSION = "latest"; + + private final Map> connectors; + private final Map invalidConnectors; + private final Map> invalidVersions; + private final Plugins plugins; + + public CachedConnectors(Plugins plugins) { + this.plugins = plugins; + this.connectors = new ConcurrentHashMap<>(); + this.invalidConnectors = new ConcurrentHashMap<>(); + this.invalidVersions = new ConcurrentHashMap<>(); + } + + private void validate(String connectorName, VersionRange range) throws ConnectException, VersionedPluginLoadingException { + if (invalidConnectors.containsKey(connectorName)) { + throw new ConnectException(invalidConnectors.get(connectorName)); + } + + String version = range == null ? LATEST_VERSION : range.toString(); + if (invalidVersions.containsKey(connectorName) && invalidVersions.get(connectorName).containsKey(version)) { + throw new VersionedPluginLoadingException(invalidVersions.get(connectorName).get(version).getMessage()); + } + } + + private Connector lookup(String connectorName, VersionRange range) throws Exception { + String version = range == null ? 
LATEST_VERSION : range.toString(); + if (connectors.containsKey(connectorName) && connectors.get(connectorName).containsKey(version)) { + return connectors.get(connectorName).get(version); + } + + try { + Connector connector = plugins.newConnector(connectorName, range); + connectors.computeIfAbsent(connectorName, k -> new ConcurrentHashMap<>()).put(version, connector); + return connector; + } catch (VersionedPluginLoadingException e) { + invalidVersions.computeIfAbsent(connectorName, k -> new ConcurrentHashMap<>()).put(version, e); + throw e; + } catch (Exception e) { + invalidConnectors.put(connectorName, e); + throw e; + } + } + + public Connector getConnector(String connectorName, VersionRange range) throws Exception { + validate(connectorName, range); + return lookup(connectorName, range); + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java index ff62c25eee585..8df0d40bf09af 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java @@ -81,8 +81,7 @@ public ConnectMetrics(String workerId, WorkerConfig config, Time time, String cl .timeWindow(sampleWindowMs, TimeUnit.MILLISECONDS).recordLevel( Sensor.RecordingLevel.forName(metricsRecordingLevel)); - Map contextLabels = new HashMap<>(); - contextLabels.putAll(config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); + Map contextLabels = new HashMap<>(config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); contextLabels.put(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID, clusterId); Object groupId = config.originals().get(DistributedConfig.GROUP_ID_CONFIG); if (groupId != null) { @@ -223,8 +222,7 @@ public int hashCode() { public boolean equals(Object obj) { if (obj == this) return true; - if (obj instanceof MetricGroupId) { - MetricGroupId that = (MetricGroupId) obj; + if (obj instanceof MetricGroupId that) { return this.groupName.equals(that.groupName) && this.tags.equals(that.tags); } return false; @@ -391,8 +389,7 @@ public Sensor sensor(String name, MetricConfig config, Sensor... parents) { public synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... 
parents) { // We need to make sure that all sensor names are unique across all groups, so use the sensor prefix Sensor result = metrics.sensor(sensorPrefix + name, config, Long.MAX_VALUE, recordingLevel, parents); - if (result != null) - sensorNames.add(result.name()); + sensorNames.add(result.name()); return result; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index cb604ad73eef5..67102f69cc5ad 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -27,7 +27,11 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.runtime.errors.ToleranceType; import org.apache.kafka.connect.runtime.isolation.PluginDesc; +import org.apache.kafka.connect.runtime.isolation.PluginType; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.PluginsRecommenders; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; import org.apache.kafka.connect.transforms.Transformation; @@ -35,6 +39,8 @@ import org.apache.kafka.connect.util.ConcreteSubClassValidator; import org.apache.kafka.connect.util.InstantiableClassValidator; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,6 +88,11 @@ public class ConnectorConfig extends AbstractConfig { " or use \"FileStreamSink\" or \"FileStreamSinkConnector\" to make the configuration a bit shorter"; private static final String CONNECTOR_CLASS_DISPLAY = "Connector class"; + public static final String CONNECTOR_VERSION = "connector." 
+ WorkerConfig.PLUGIN_VERSION_SUFFIX; + private static final String CONNECTOR_VERSION_DOC = "Version of the connector."; + private static final String CONNECTOR_VERSION_DISPLAY = "Connector version"; + private static final ConfigDef.Validator CONNECTOR_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String KEY_CONVERTER_CLASS_CONFIG = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG; public static final String KEY_CONVERTER_CLASS_DOC = WorkerConfig.KEY_CONVERTER_CLASS_DOC; public static final String KEY_CONVERTER_CLASS_DISPLAY = "Key converter class"; @@ -90,6 +101,12 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); + public static final String KEY_CONVERTER_VERSION_CONFIG = WorkerConfig.KEY_CONVERTER_VERSION; + private static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; + private static final String KEY_CONVERTER_VERSION_DISPLAY = "Key converter version"; + private static final ConfigDef.Validator KEY_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + + public static final String VALUE_CONVERTER_CLASS_CONFIG = WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG; public static final String VALUE_CONVERTER_CLASS_DOC = WorkerConfig.VALUE_CONVERTER_CLASS_DOC; public static final String VALUE_CONVERTER_CLASS_DISPLAY = "Value converter class"; @@ -98,17 +115,24 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); + public static final String VALUE_CONVERTER_VERSION_CONFIG = WorkerConfig.VALUE_CONVERTER_VERSION; + private static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; + private static final String VALUE_CONVERTER_VERSION_DISPLAY = "Value converter version"; + private static final ConfigDef.Validator VALUE_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String HEADER_CONVERTER_CLASS_CONFIG = WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG; public static final String HEADER_CONVERTER_CLASS_DOC = WorkerConfig.HEADER_CONVERTER_CLASS_DOC; public static final String HEADER_CONVERTER_CLASS_DISPLAY = "Header converter class"; - // The Connector config should not have a default for the header converter, since the absence of a config property means that - // the worker config settings should be used. Thus, we set the default to null here. 
- public static final String HEADER_CONVERTER_CLASS_DEFAULT = null; private static final ConfigDef.Validator HEADER_CONVERTER_CLASS_VALIDATOR = ConfigDef.CompositeValidator.of( ConcreteSubClassValidator.forSuperClass(HeaderConverter.class), new InstantiableClassValidator() ); + public static final String HEADER_CONVERTER_VERSION_CONFIG = WorkerConfig.HEADER_CONVERTER_VERSION; + private static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; + private static final String HEADER_CONVERTER_VERSION_DISPLAY = "Header converter version"; + private static final ConfigDef.Validator HEADER_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String TASKS_MAX_CONFIG = "tasks.max"; private static final String TASKS_MAX_DOC = "Maximum number of tasks to use for this connector."; public static final int TASKS_MAX_DEFAULT = 1; @@ -187,7 +211,11 @@ public class ConnectorConfig extends AbstractConfig { public static final String CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX = "admin.override."; public static final String PREDICATES_PREFIX = "predicates."; - private final EnrichedConnectorConfig enrichedConfig; + private static final PluginsRecommenders EMPTY_RECOMMENDER = new PluginsRecommenders(); + private static final ConverterDefaults CONVERTER_DEFAULTS = new ConverterDefaults(null, null); + + private final ConnectorConfig.EnrichedConnectorConfig enrichedConfig; + private static class EnrichedConnectorConfig extends AbstractConfig { EnrichedConnectorConfig(ConfigDef configDef, Map props) { super(configDef, props); @@ -199,17 +227,27 @@ public Object get(String key) { } } - public static ConfigDef configDef() { + protected static ConfigDef configDef( + String defaultConnectorVersion, + ConverterDefaults keyConverterDefaults, + ConverterDefaults valueConverterDefaults, + ConverterDefaults headerConverterDefaults, + PluginsRecommenders recommender + ) { int orderInGroup = 0; int orderInErrorGroup = 0; return new ConfigDef() .define(NAME_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, nonEmptyStringWithoutControlChars(), Importance.HIGH, NAME_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, NAME_DISPLAY) .define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.LONG, CONNECTOR_CLASS_DISPLAY) + .define(CONNECTOR_VERSION, Type.STRING, defaultConnectorVersion, CONNECTOR_VERSION_VALIDATOR, Importance.MEDIUM, CONNECTOR_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONNECTOR_VERSION_DISPLAY, recommender.connectorPluginVersionRecommender()) .define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG), Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASK_MAX_DISPLAY) .define(TASKS_MAX_ENFORCE_CONFIG, Type.BOOLEAN, TASKS_MAX_ENFORCE_DEFAULT, Importance.LOW, TASKS_MAX_ENFORCE_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASKS_MAX_ENFORCE_DISPLAY) - .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY) - .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY) - .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, 
Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY) + .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, keyConverterDefaults.type, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) + .define(KEY_CONVERTER_VERSION_CONFIG, Type.STRING, keyConverterDefaults.version, KEY_CONVERTER_VERSION_VALIDATOR, Importance.LOW, KEY_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_VERSION_DISPLAY, recommender.keyConverterPluginVersionRecommender()) + .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, valueConverterDefaults.type, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) + .define(VALUE_CONVERTER_VERSION_CONFIG, Type.STRING, valueConverterDefaults.version, VALUE_CONVERTER_VERSION_VALIDATOR, Importance.LOW, VALUE_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_VERSION_DISPLAY, recommender.valueConverterPluginVersionRecommender()) + .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, headerConverterDefaults.type, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY, recommender.headerConverterPluginRecommender()) + .define(HEADER_CONVERTER_VERSION_CONFIG, Type.STRING, headerConverterDefaults.version, HEADER_CONVERTER_VERSION_VALIDATOR, Importance.LOW, HEADER_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_VERSION_DISPLAY, recommender.headerConverterPluginVersionRecommender()) .define(TRANSFORMS_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) .define(PREDICATES_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) .define(CONFIG_RELOAD_ACTION_CONFIG, Type.STRING, CONFIG_RELOAD_ACTION_RESTART, @@ -226,6 +264,28 @@ public static ConfigDef configDef() { ERRORS_LOG_ENABLE_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_ENABLE_DISPLAY) .define(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, Type.BOOLEAN, ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT, Importance.MEDIUM, ERRORS_LOG_INCLUDE_MESSAGES_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_INCLUDE_MESSAGES_DISPLAY); + + } + + public static ConfigDef configDef() { + return configDef(null, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); + } + + // ConfigDef with additional defaults and recommenders + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + PluginsRecommenders recommender = new PluginsRecommenders(plugins); + ConverterDefaults keyConverterDefaults = converterDefaults(plugins, KEY_CONVERTER_CLASS_CONFIG, + WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION, connProps, workerConfig, PluginType.CONVERTER); + ConverterDefaults valueConverterDefaults = converterDefaults(plugins, VALUE_CONVERTER_CLASS_CONFIG, + WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION, connProps, workerConfig, PluginType.CONVERTER); + ConverterDefaults headerConverterDefaults = converterDefaults(plugins, 
HEADER_CONVERTER_CLASS_CONFIG, + WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION, connProps, workerConfig, PluginType.HEADER_CONVERTER); + return configDef(plugins.latestVersion(connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG), PluginType.SINK, PluginType.SOURCE), + keyConverterDefaults, valueConverterDefaults, headerConverterDefaults, recommender); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return configDef(plugins.latestVersion(connectorClass, PluginType.SINK, PluginType.SOURCE), CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); } private static ConfigDef.CompositeValidator aliasValidator(String kind) { @@ -271,7 +331,7 @@ public long errorMaxDelayInMillis() { public ToleranceType errorToleranceType() { String tolerance = getString(ERRORS_TOLERANCE_CONFIG); - for (ToleranceType type: ToleranceType.values()) { + for (ToleranceType type : ToleranceType.values()) { if (type.name().equalsIgnoreCase(tolerance)) { return type; } @@ -336,10 +396,9 @@ public > List> transformationS *

          * {@code requireFullConfig} specifies whether required config values that are missing should cause an exception to be thrown. */ - @SuppressWarnings({"rawtypes", "unchecked"}) public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map props, boolean requireFullConfig) { ConfigDef newDef = new ConfigDef(baseConfigDef); - new EnrichablePlugin>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP, (Class) Transformation.class, + new EnrichablePlugin>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP, PluginType.TRANSFORMATION, props, requireFullConfig) { @Override @@ -358,19 +417,19 @@ protected ConfigDef initialConfigDef() { } @Override - protected Stream> configDefsForClass(String typeConfig) { - return super.configDefsForClass(typeConfig) - .filter(entry -> { - // The implicit parameters mask any from the transformer with the same name - if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) - || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { - log.warn("Transformer config {} is masked by implicit config of that name", - entry.getKey()); - return false; - } else { - return true; - } - }); + protected Stream> configDefsForClass(String typeConfig, String versionConfig, Plugins plugins) { + return super.configDefsForClass(typeConfig, versionConfig, plugins) + .filter(entry -> { + // The implicit parameters mask any from the transformer with the same name + if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) + || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { + log.warn("Transformer config {} is masked by implicit config of that name", + entry.getKey()); + return false; + } else { + return true; + } + }); } @Override @@ -388,10 +447,16 @@ protected void validateProps(String prefix) { "but there is no config '" + prefixedPredicate + "' defining a predicate to be negated."); } } - }.enrich(newDef); + + @Override + protected ConfigDef.Recommender versionRecommender(String typeConfig) { + return new PluginsRecommenders(plugins).transformationPluginRecommender(typeConfig); + } + + }.enrich(newDef, plugins); new EnrichablePlugin>("Predicate", PREDICATES_CONFIG, PREDICATES_GROUP, - (Class) Predicate.class, props, requireFullConfig) { + PluginType.PREDICATE, props, requireFullConfig) { @Override protected Set>> plugins() { return plugins.predicates(); @@ -401,10 +466,87 @@ protected Set>> plugins() { protected ConfigDef config(Predicate predicate) { return predicate.config(); } - }.enrich(newDef); + + @Override + protected ConfigDef.Recommender versionRecommender(String typeConfig) { + return new PluginsRecommenders(plugins).predicatePluginRecommender(typeConfig); + } + + }.enrich(newDef, plugins); + return newDef; } + private static ConverterDefaults converterDefaults( + Plugins plugins, + String connectorConverterConfig, + String workerConverterConfig, + String workerConverterVersionConfig, + Map connectorProps, + WorkerConfig workerConfig, + PluginType converterType + ) { + /* + if a converter is specified in the connector config it overrides the worker config for the corresponding converter + otherwise the worker config is used, hence if the converter is not provided in the connector config, the default + is the one provided in the worker config + + for converters which version is used depends on a several factors with multi-versioning support + A. If the converter class is provided as part of the connector properties + 1. 
if the version is not provided, + - if the converter is packaged with the connector then, the packaged version is used + - if the converter is not packaged with the connector, the latest version is used + 2. if the version is provided, the provided version is used + B. If the converter class is not provided as part of the connector properties, but provided as part of the worker properties + 1. if the version is not provided, the latest version is used + 2. if the version is provided, the provided version is used + C. If the converter class is not provided as part of the connector properties and not provided as part of the worker properties, + the converter to use is unknown hence no default version can be determined (null) + + Note: Connect when using service loading has an issue outlined in KAFKA-18119. The issue means that the above + logic does not hold currently for clusters using service loading when converters are defined in the connector. + However, the logic to determine the default should ideally follow the one outlined above, and the code here + should still show the correct default version regardless of the bug. + */ + final String connectorConverter = connectorProps.get(connectorConverterConfig); + // since header converter defines a default in the worker config we need to handle it separately + final String workerConverter = workerConverterConfig.equals(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG) ? + workerConfig.getClass(workerConverterConfig).getName() : workerConfig.originalsStrings().get(workerConverterConfig); + final String connectorClass = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + final String connectorVersion = connectorProps.get(ConnectorConfig.CONNECTOR_VERSION); + String type = null; + if (connectorClass == null || (connectorConverter == null && workerConverter == null)) { + return new ConverterDefaults(null, null); + } + // update the default of connector converter based on if the worker converter is provided + type = workerConverter; + + String version = null; + if (connectorConverter != null) { + version = fetchPluginVersion(plugins, connectorClass, connectorVersion, connectorConverter, converterType); + } else { + version = workerConfig.originalsStrings().get(workerConverterVersionConfig); + if (version == null) { + version = plugins.latestVersion(workerConverter, converterType); + } + } + return new ConverterDefaults(type, version); + } + + private static String fetchPluginVersion(Plugins plugins, String connectorClass, String connectorVersion, String pluginName, PluginType pluginType) { + if (pluginName == null || connectorClass == null) { + return null; + } + try { + VersionRange range = PluginUtils.connectorVersionRequirement(connectorVersion); + return plugins.pluginVersion(pluginName, plugins.pluginLoader(connectorClass, range), pluginType); + } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { + // these errors should be captured in other places, so we can ignore them here + log.warn("Failed to determine default plugin version for {}", connectorClass, e); + } + return null; + } + /** * An abstraction over "enrichable plugins" ({@link Transformation}s and {@link Predicate}s) used for computing the * contribution to a Connectors ConfigDef. 
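The A/B/C rules spelled out in the converterDefaults comment above reduce to a small decision table. As a reading aid only (the method and parameter names below are hypothetical, not the actual implementation), the same rules restated in plain Java:

    /** Reading aid: restates the A/B/C default-version rules documented in converterDefaults. */
    public class ConverterDefaultVersionSketch {
        static String defaultVersion(boolean classSetInConnectorConfig, String connectorConverterVersion,
                                     boolean classSetInWorkerConfig, String workerConverterVersion,
                                     String versionPackagedWithConnectorOrLatest, String latestVersion) {
            if (classSetInConnectorConfig) {
                // Case A: an explicit version wins; otherwise prefer the version packaged
                // with the connector, falling back to the latest installed version.
                return connectorConverterVersion != null ? connectorConverterVersion : versionPackagedWithConnectorOrLatest;
            }
            if (classSetInWorkerConfig) {
                // Case B: the worker's pinned version wins; otherwise the latest installed version.
                return workerConverterVersion != null ? workerConverterVersion : latestVersion;
            }
            return null; // Case C: no converter configured anywhere, so no default version can be determined.
        }

        public static void main(String[] args) {
            // Connector sets the converter class without a version; converter is packaged with the connector:
            System.out.println(defaultVersion(true, null, false, null, "3.2.0", "3.9.0")); // 3.2.0
            // Only the worker sets the converter class and pins no version:
            System.out.println(defaultVersion(false, null, true, null, null, "3.9.0"));    // 3.9.0
        }
    }

If the KAFKA-18119 service-loading caveat from the comment applies, runtime behaviour can diverge from these defaults, but the advertised default version should still follow this table.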
@@ -419,24 +561,27 @@ abstract static class EnrichablePlugin { private final String aliasKind; private final String aliasConfig; private final String aliasGroup; + private final PluginType pluginType; private final Class baseClass; private final Map props; private final boolean requireFullConfig; + @SuppressWarnings("unchecked") public EnrichablePlugin( String aliasKind, - String aliasConfig, String aliasGroup, Class baseClass, + String aliasConfig, String aliasGroup, PluginType pluginType, Map props, boolean requireFullConfig) { this.aliasKind = aliasKind; this.aliasConfig = aliasConfig; this.aliasGroup = aliasGroup; - this.baseClass = baseClass; + this.pluginType = pluginType; + this.baseClass = (Class) pluginType.superClass(); this.props = props; this.requireFullConfig = requireFullConfig; } /** Add the configs for this alias to the given {@code ConfigDef}. */ - void enrich(ConfigDef newDef) { + void enrich(ConfigDef newDef, Plugins plugins) { Object aliases = ConfigDef.parseType(aliasConfig, props.get(aliasConfig), Type.LIST); if (!(aliases instanceof List)) { return; @@ -444,49 +589,71 @@ void enrich(ConfigDef newDef) { LinkedHashSet uniqueAliases = new LinkedHashSet<>((List) aliases); for (Object o : uniqueAliases) { - if (!(o instanceof String)) { + if (!(o instanceof String alias)) { throw new ConfigException("Item in " + aliasConfig + " property is not of " + "type String"); } - String alias = (String) o; final String prefix = aliasConfig + "." + alias + "."; final String group = aliasGroup + ": " + alias; int orderInGroup = 0; final String typeConfig = prefix + "type"; + final String versionConfig = prefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; + final String defaultVersion = fetchPluginVersion(plugins, props.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG), + props.get(ConnectorConfig.CONNECTOR_VERSION), props.get(typeConfig), pluginType); + + // Add the class configuration final ConfigDef.Validator typeValidator = ConfigDef.LambdaValidator.with( - (String name, Object value) -> { - validateProps(prefix); - // The value will be null if the class couldn't be found; no point in performing follow-up validation - if (value != null) { - getConfigDefFromConfigProvidingClass(typeConfig, (Class) value); - } - }, - () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); + (String name, Object value) -> { + validateProps(prefix); + // The value will be null if the class couldn't be found; no point in performing follow-up validation + if (value != null) { + getConfigDefFromPlugin(typeConfig, ((Class) value).getName(), props.getOrDefault(versionConfig, defaultVersion), plugins); + } + }, + () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); newDef.define(typeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE, typeValidator, Importance.HIGH, "Class for the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, baseClass.getSimpleName() + " type for " + alias, Collections.emptyList(), new ClassRecommender()); - final ConfigDef configDef = populateConfigDef(typeConfig); + // Add the version configuration + final ConfigDef.Validator versionValidator = (name, value) -> { + if (value != null) { + try { + getConfigDefFromPlugin(typeConfig, props.get(typeConfig), (String) value, plugins); + } catch (VersionedPluginLoadingException e) { + throw e; + } catch (Exception e) { + // ignore any other exception here as they are not related to version validation and + // will be captured in the validation of the 
class configuration + } + } + }; + newDef.define(versionConfig, Type.STRING, defaultVersion, versionValidator, Importance.HIGH, + "Version of the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, + baseClass.getSimpleName() + " version for " + alias, + Collections.emptyList(), versionRecommender(typeConfig)); + + final ConfigDef configDef = populateConfigDef(typeConfig, versionConfig, plugins); if (configDef == null) continue; newDef.embed(prefix, group, orderInGroup, configDef); } } /** Subclasses can add extra validation of the {@link #props}. */ - protected void validateProps(String prefix) { } + protected void validateProps(String prefix) { + } /** * Populates the ConfigDef according to the configs returned from {@code configs()} method of class * named in the {@code ...type} parameter of the {@code props}. */ - protected ConfigDef populateConfigDef(String typeConfig) { + protected ConfigDef populateConfigDef(String typeConfig, String versionConfig, Plugins plugins) { final ConfigDef configDef = initialConfigDef(); try { - configDefsForClass(typeConfig) + configDefsForClass(typeConfig, versionConfig, plugins) .forEach(entry -> configDef.define(entry.getValue())); - } catch (ConfigException e) { if (requireFullConfig) { throw e; @@ -501,9 +668,11 @@ protected ConfigDef populateConfigDef(String typeConfig) { * Return a stream of configs provided by the {@code configs()} method of class * named in the {@code ...type} parameter of the {@code props}. */ - protected Stream> configDefsForClass(String typeConfig) { - final Class cls = (Class) ConfigDef.parseType(typeConfig, props.get(typeConfig), Type.CLASS); - return getConfigDefFromConfigProvidingClass(typeConfig, cls) + protected Stream> configDefsForClass(String typeConfig, String versionConfig, Plugins plugins) { + if (props.get(typeConfig) == null) { + throw new ConfigException(typeConfig, null, "Not a " + baseClass.getSimpleName()); + } + return getConfigDefFromPlugin(typeConfig, props.get(typeConfig), props.get(versionConfig), plugins) .configKeys().entrySet().stream(); } @@ -512,31 +681,47 @@ protected ConfigDef initialConfigDef() { return new ConfigDef(); } - /** - * Return {@link ConfigDef} from {@code cls}, which is expected to be a non-null {@code Class}, - * by instantiating it and invoking {@link #config(T)}. - * @param key - * @param cls The subclass of the baseclass. 
- */ - ConfigDef getConfigDefFromConfigProvidingClass(String key, Class cls) { - if (cls == null) { - throw new ConfigException(key, null, "Not a " + baseClass.getSimpleName()); + @SuppressWarnings("unchecked") + ConfigDef getConfigDefFromPlugin(String key, String pluginClass, String version, Plugins plugins) { + String connectorClass = props.get(CONNECTOR_CLASS_CONFIG); + if (pluginClass == null || connectorClass == null) { + // if transformation class is null or connector class is null, we return empty as these validations are done in respective validators + return new ConfigDef(); + } + VersionRange connectorVersionRange; + try { + connectorVersionRange = PluginUtils.connectorVersionRequirement(props.get(CONNECTOR_VERSION)); + } catch (InvalidVersionSpecificationException e) { + // this should be caught in connector version validation + return new ConfigDef(); } + + VersionRange pluginVersion; + try { + pluginVersion = PluginUtils.connectorVersionRequirement(version); + } catch (InvalidVersionSpecificationException e) { + throw new VersionedPluginLoadingException(e.getMessage()); + } + + // validate that the plugin class is a subclass of the base class + final Class cls = (Class) ConfigDef.parseType(key, props.get(key), Type.CLASS); Utils.ensureConcreteSubclass(baseClass, cls); - T pluginInstance; + T plugin; try { - pluginInstance = Utils.newInstance(cls, baseClass); + plugin = (T) plugins.newPlugin(pluginClass, pluginVersion, plugins.pluginLoader(connectorClass, connectorVersionRange)); + } catch (VersionedPluginLoadingException e) { + throw e; } catch (Exception e) { - throw new ConfigException(key, String.valueOf(cls), "Error getting config definition from " + baseClass.getSimpleName() + ": " + e.getMessage()); + throw new ConfigException(key, pluginClass, "Error getting config definition from " + baseClass.getSimpleName() + ": " + e.getMessage()); } - ConfigDef configDef = config(pluginInstance); + ConfigDef configDef = config(plugin); if (null == configDef) { throw new ConnectException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - cls.getName() - ) + String.format( + "%s.config() must return a ConfigDef that is not null.", + plugin.getClass().getName() + ) ); } return configDef; @@ -555,6 +740,8 @@ ConfigDef getConfigDefFromConfigProvidingClass(String key, Class cls) { */ protected abstract Set> plugins(); + protected abstract ConfigDef.Recommender versionRecommender(String typeConfig); + /** * Recommend bundled transformations or predicates. 
*/ @@ -576,4 +763,34 @@ public boolean visible(String name, Map parsedConfig) { } } + private static class ConverterDefaults { + private final String type; + private final String version; + + public ConverterDefaults(String type, String version) { + this.type = type; + this.version = version; + } + + public String type() { + return type; + } + + public String version() { + return version; + } + } + + public static class PluginVersionValidator implements ConfigDef.Validator { + + @Override + public void ensureValid(String name, Object value) { + + try { + PluginUtils.connectorVersionRequirement((String) value); + } catch (InvalidVersionSpecificationException e) { + throw new VersionedPluginLoadingException(e.getMessage()); + } + } + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java index bcff615c4147e..d837776be3829 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java @@ -321,7 +321,7 @@ private void commitTransaction() { error = flushError.get(); if (error != null) { - recordCommitFailure(time.milliseconds() - started, null); + recordCommitFailure(time.milliseconds() - started); offsetWriter.cancelFlush(); throw maybeWrapProducerSendException( "Failed to flush offsets and/or records for task " + id, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java index fbdfcab09318b..859e3f2728e12 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java @@ -32,6 +32,8 @@ import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; +import org.apache.maven.artifact.versioning.VersionRange; + import java.util.Collection; import java.util.List; import java.util.Map; @@ -102,13 +104,6 @@ public interface Herder { */ void connectorConfig(String connName, Callback> callback); - /** - * Get the configuration for all tasks of a connector. - * @param connName name of the connector - * @param callback callback to invoke with the configuration - */ - void tasksConfig(String connName, Callback>> callback); - /** * Set the configuration for a connector. This supports creation and updating. * @param connName name of the connector @@ -329,6 +324,8 @@ default void validateConnectorConfig(Map connectorConfig, Callba */ List connectorPluginConfig(String pluginName); + List connectorPluginConfig(String pluginName, VersionRange version); + /** * Get the current offsets for a connector. 
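Illustrative sketch (not part of the patch): the PluginVersionValidator above delegates to Maven's version-range parsing, so a plugin version property may be either a bare version or a Maven range spec. A minimal sketch of that behaviour, assuming only org.apache.maven:maven-artifact on the classpath; the class and variable names here are made up for illustration.

import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException;
import org.apache.maven.artifact.versioning.VersionRange;

public class VersionSpecSketch {
    public static void main(String[] args) throws InvalidVersionSpecificationException {
        // A bare version such as "3.8.0" parses as a soft requirement: a recommended version, no restrictions.
        VersionRange soft = VersionRange.createFromVersionSpec("3.8.0");
        System.out.println(soft.hasRestrictions());        // false
        System.out.println(soft.getRecommendedVersion());  // 3.8.0

        // Wrapping the bare version in brackets turns it into a hard requirement, which is what
        // PluginUtils.connectorVersionRequirement (later in this patch) does before loading.
        VersionRange hard = VersionRange.createFromVersionSpec("[3.8.0]");
        System.out.println(hard.containsVersion(new DefaultArtifactVersion("3.8.0")));  // true

        // Explicit ranges are used as-is; an unparseable spec throws InvalidVersionSpecificationException,
        // which the validator surfaces as a VersionedPluginLoadingException.
        VersionRange range = VersionRange.createFromVersionSpec("[3.6,4.0)");
        System.out.println(range.containsVersion(new DefaultArtifactVersion("3.9.1"))); // true
        System.out.println(range.containsVersion(new DefaultArtifactVersion("4.0.0"))); // false
    }
}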
* @param connName the name of the connector whose offsets are to be retrieved diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java index 9e59b13d34adf..1593e3708fdf0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java @@ -19,19 +19,22 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configurator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; -import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.stream.Collectors; /** * Manages logging levels on a single worker. Supports dynamic adjustment and querying @@ -44,12 +47,24 @@ public class Loggers { private static final Logger log = LoggerFactory.getLogger(Loggers.class); + private static final String ROOT_LOGGER_NAME = "root"; + /** * Log4j uses "root" (case-insensitive) as name of the root logger. + * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. + * + * While log4j2 changed the root logger's name to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]), + * for backward-compatibility purposes, we accept both empty string and "root" as valid root logger names. + * This is why we have a dedicated definition that includes both values. */ - private static final String ROOT_LOGGER_NAME = "root"; + private static final List VALID_ROOT_LOGGER_NAMES = List.of(LogManager.ROOT_LOGGER_NAME, ROOT_LOGGER_NAME); private final Time time; + + /** + * Maps logger names to their last modification timestamps. + * Note: The logger name "root" refers to the actual root logger of log4j2. + */ private final Map lastModifiedTimes; public Loggers(Time time) { @@ -66,18 +81,17 @@ public Loggers(Time time) { public synchronized LoggerLevel level(String logger) { Objects.requireNonNull(logger, "Logger may not be null"); - org.apache.log4j.Logger foundLogger = null; - if (ROOT_LOGGER_NAME.equalsIgnoreCase(logger)) { + org.apache.logging.log4j.Logger foundLogger = null; + if (isValidRootLoggerName(logger)) { foundLogger = rootLogger(); } else { - Enumeration en = currentLoggers(); + var currentLoggers = currentLoggers().values(); // search within existing loggers for the given name. // using LogManger.getLogger() will create a logger if it doesn't exist // (potential leak since these don't get cleaned up). 
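Illustrative sketch (not part of the patch): the root-logger handling in Loggers above hinges on log4j2 naming its root logger with the empty string (LogManager.ROOT_LOGGER_NAME), while the Connect REST API historically used "root". A small sketch of the backward-compatible mapping, assuming log4j-api on the classpath; the helper class and method names are hypothetical.

import java.util.List;

import org.apache.logging.log4j.LogManager;

public class RootLoggerNameSketch {
    // Accept both the legacy "root" name and log4j2's empty-string root logger name.
    private static final List<String> VALID_ROOT_NAMES = List.of(LogManager.ROOT_LOGGER_NAME, "root");

    static String normalize(String namespace) {
        return VALID_ROOT_NAMES.stream().anyMatch(n -> n.equalsIgnoreCase(namespace))
                ? LogManager.ROOT_LOGGER_NAME   // "" in log4j2
                : namespace;
    }

    public static void main(String[] args) {
        System.out.println("'" + normalize("root") + "'");                     // ''
        System.out.println("'" + normalize("ROOT") + "'");                     // ''
        System.out.println("'" + normalize("org.apache.kafka.connect") + "'"); // 'org.apache.kafka.connect'
    }
}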
- while (en.hasMoreElements()) { - org.apache.log4j.Logger l = en.nextElement(); - if (logger.equals(l.getName())) { - foundLogger = l; + for (org.apache.logging.log4j.Logger currentLogger : currentLoggers) { + if (logger.equals(currentLogger.getName())) { + foundLogger = currentLogger; break; } } @@ -96,20 +110,16 @@ public synchronized LoggerLevel level(String logger) { * @return the levels of all known loggers; may be empty, but never null */ public synchronized Map allLevels() { - Map result = new TreeMap<>(); - - Enumeration enumeration = currentLoggers(); - Collections.list(enumeration) - .stream() - .filter(logger -> logger.getLevel() != null) - .forEach(logger -> result.put(logger.getName(), loggerLevel(logger))); - - org.apache.log4j.Logger root = rootLogger(); - if (root.getLevel() != null) { - result.put(ROOT_LOGGER_NAME, loggerLevel(root)); - } - - return result; + return currentLoggers() + .values() + .stream() + .filter(logger -> !logger.getLevel().equals(Level.OFF)) + .collect(Collectors.toMap( + this::getLoggerName, + this::loggerLevel, + (existing, replacing) -> replacing, + TreeMap::new) + ); } /** @@ -122,14 +132,25 @@ public synchronized Map allLevels() { public synchronized List setLevel(String namespace, Level level) { Objects.requireNonNull(namespace, "Logging namespace may not be null"); Objects.requireNonNull(level, "Level may not be null"); + String internalNameSpace = isValidRootLoggerName(namespace) ? LogManager.ROOT_LOGGER_NAME : namespace; - log.info("Setting level of namespace {} and children to {}", namespace, level); - List childLoggers = loggers(namespace); + log.info("Setting level of namespace {} and children to {}", internalNameSpace, level); + + var loggers = loggers(internalNameSpace); + var nameToLevel = allLevels(); List result = new ArrayList<>(); - for (org.apache.log4j.Logger logger: childLoggers) { - setLevel(logger, level); - result.add(logger.getName()); + Configurator.setAllLevels(internalNameSpace, level); + for (org.apache.logging.log4j.Logger logger : loggers) { + // We need to track level changes for each logger and record their update timestamps to ensure this method + // correctly returns only the loggers whose levels were actually modified. 
+ String name = getLoggerName(logger); + String newLevel = logger.getLevel().name(); + String oldLevel = nameToLevel.getOrDefault(name, new LoggerLevel("", time.milliseconds())).level(); + if (!newLevel.equalsIgnoreCase(oldLevel)) { + lastModifiedTimes.put(name, time.milliseconds()); + result.add(name); + } } Collections.sort(result); @@ -143,25 +164,24 @@ public synchronized List setLevel(String namespace, Level level) { * @return all loggers that fall under the given namespace; never null, and will always contain * at least one logger (the ancestor logger for the namespace) */ - private synchronized List loggers(String namespace) { + private synchronized Collection loggers(String namespace) { Objects.requireNonNull(namespace, "Logging namespace may not be null"); - if (ROOT_LOGGER_NAME.equalsIgnoreCase(namespace)) { - List result = Collections.list(currentLoggers()); - result.add(rootLogger()); - return result; + if (isValidRootLoggerName(namespace)) { + return currentLoggers().values(); } - List result = new ArrayList<>(); - org.apache.log4j.Logger ancestorLogger = lookupLogger(namespace); - Enumeration en = currentLoggers(); + var result = new ArrayList(); + var nameToLogger = currentLoggers(); + var ancestorLogger = lookupLogger(namespace); + var currentLoggers = nameToLogger.values(); + boolean present = false; - while (en.hasMoreElements()) { - org.apache.log4j.Logger current = en.nextElement(); - if (current.getName().startsWith(namespace)) { - result.add(current); + for (org.apache.logging.log4j.Logger currentLogger : currentLoggers) { + if (currentLogger.getName().startsWith(namespace)) { + result.add(currentLogger); } - if (namespace.equals(current.getName())) { + if (namespace.equals(currentLogger.getName())) { present = true; } } @@ -174,43 +194,42 @@ private synchronized List loggers(String namespace) { } // visible for testing - org.apache.log4j.Logger lookupLogger(String logger) { - return LogManager.getLogger(logger); + org.apache.logging.log4j.Logger lookupLogger(String logger) { + return LogManager.getLogger(isValidRootLoggerName(logger) ? 
LogManager.ROOT_LOGGER_NAME : logger); } - @SuppressWarnings("unchecked") - // visible for testing - Enumeration currentLoggers() { - return LogManager.getCurrentLoggers(); + Map currentLoggers() { + LoggerContext context = (LoggerContext) LogManager.getContext(false); + var results = new HashMap(); + context.getConfiguration().getLoggers().forEach((name, logger) -> results.put(name, LogManager.getLogger(name))); + context.getLoggerRegistry().getLoggers().forEach(logger -> results.put(logger.getName(), logger)); + return results; } // visible for testing - org.apache.log4j.Logger rootLogger() { + org.apache.logging.log4j.Logger rootLogger() { return LogManager.getRootLogger(); } - private void setLevel(org.apache.log4j.Logger logger, Level level) { - Level currentLevel = logger.getLevel(); - if (currentLevel == null) - currentLevel = logger.getEffectiveLevel(); - - if (level.equals(currentLevel)) { - log.debug("Skipping update for logger {} since its level is already {}", logger.getName(), level); - return; - } - - log.debug("Setting level of logger {} (excluding children) to {}", logger.getName(), level); - logger.setLevel(level); - lastModifiedTimes.put(logger.getName(), time.milliseconds()); + private LoggerLevel loggerLevel(org.apache.logging.log4j.Logger logger) { + Long lastModified = lastModifiedTimes.get(getLoggerName(logger)); + return new LoggerLevel(Objects.toString(logger.getLevel()), lastModified); } - private LoggerLevel loggerLevel(org.apache.log4j.Logger logger) { - Level level = logger.getLevel(); - if (level == null) - level = logger.getEffectiveLevel(); - - Long lastModified = lastModifiedTimes.get(logger.getName()); - return new LoggerLevel(Objects.toString(level), lastModified); + private boolean isValidRootLoggerName(String namespace) { + return VALID_ROOT_LOGGER_NAMES.stream() + .anyMatch(rootLoggerNames -> rootLoggerNames.equalsIgnoreCase(namespace)); } + /** + * Converts logger name to ensure backward compatibility between log4j and log4j2. + * If the logger name is empty (log4j2's root logger representation), converts it to "root" (log4j's style). + * Otherwise, returns the original logger name. + * + * @param logger The logger instance to get the name from + * @return The logger name - returns "root" for empty string, otherwise returns the original logger name + */ + private String getLoggerName(org.apache.logging.log4j.Logger logger) { + return logger.getName().equals(LogManager.ROOT_LOGGER_NAME) ? 
ROOT_LOGGER_NAME : logger.getName(); + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java index 2ab7dfa089763..4584255e23132 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java @@ -73,19 +73,29 @@ public class SinkConnectorConfig extends ConnectorConfig { "keys, all error context header keys will start with __connect.errors."; private static final String DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY = "Enable Error Context Headers"; - static final ConfigDef CONFIG = ConnectorConfig.configDef() - .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) - .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) - .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) - .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) - .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); + private static ConfigDef configDef(ConfigDef baseConfigs) { + return baseConfigs + .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) + .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) + .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) + .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) + .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); + } public static ConfigDef configDef() { - return CONFIG; + return configDef(ConnectorConfig.configDef()); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); } public SinkConnectorConfig(Plugins plugins, Map props) { - super(plugins, CONFIG, props); + super(plugins, configDef(), props); } /** @@ -206,6 +216,6 @@ public 
boolean enableErrantRecordReporter() { } public static void main(String[] args) { - System.out.println(CONFIG.toHtml(4, config -> "sinkconnectorconfigs_" + config)); + System.out.println(configDef().toHtml(4, config -> "sinkconnectorconfigs_" + config)); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java index bc797563b10dd..336468f491aa3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java @@ -125,10 +125,10 @@ private static class EnrichedSourceConnectorConfig extends ConnectorConfig { private final EnrichedSourceConnectorConfig enrichedSourceConfig; private final String offsetsTopic; - public static ConfigDef configDef() { + private static ConfigDef configDef(ConfigDef baseConfigDef) { ConfigDef.Validator atLeastZero = ConfigDef.Range.atLeast(0); int orderInGroup = 0; - return new ConfigDef(ConnectorConfig.configDef()) + return new ConfigDef(baseConfigDef) .define( TOPIC_CREATION_GROUPS_CONFIG, ConfigDef.Type.LIST, @@ -203,6 +203,18 @@ public static ConfigDef configDef() { OFFSETS_TOPIC_DISPLAY); } + public static ConfigDef configDef() { + return configDef(ConnectorConfig.configDef()); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); + } + public static ConfigDef embedDefaultGroup(ConfigDef baseConfigDef) { String defaultGroup = "default"; ConfigDef newDefaultDef = new ConfigDef(baseConfigDef); @@ -236,10 +248,9 @@ public static ConfigDef enrich(ConfigDef baseConfigDef, Map prop short defaultGroupReplicationFactor = defaultGroupConfig.getShort(defaultGroupPrefix + REPLICATION_FACTOR_CONFIG); int defaultGroupPartitions = defaultGroupConfig.getInt(defaultGroupPrefix + PARTITIONS_CONFIG); topicCreationGroups.stream().distinct().forEach(group -> { - if (!(group instanceof String)) { + if (!(group instanceof String alias)) { throw new ConfigException("Item in " + TOPIC_CREATION_GROUPS_CONFIG + " property is not of type String"); } - String alias = (String) group; String prefix = TOPIC_CREATION_PREFIX + alias + "."; String configGroup = TOPIC_CREATION_GROUP + ": " + alias; newDef.embed(prefix, configGroup, 0, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/StateTracker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/StateTracker.java index 7c10f42148e00..9dddec09ae340 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/StateTracker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/StateTracker.java @@ -22,7 +22,7 @@ /** * Utility class that tracks the current state and the duration of time spent in each state. - * This class is threadsafe. + * This class is thread-safe. */ public class StateTracker { @@ -60,7 +60,7 @@ public State currentState() { /** * An immutable record of the accumulated times at the most recent state change. This class is required to - * efficiently make {@link StateTracker} threadsafe. + * efficiently make {@link StateTracker} thread-safe. 
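Illustrative sketch (not part of the patch): SinkConnectorConfig and SourceConnectorConfig above replace their static CONFIG constants with a private configDef(ConfigDef base) builder, so the same sink/source options can be layered on top of either the plain or the enriched connector ConfigDef. A rough sketch of that composition pattern, assuming kafka-clients' ConfigDef; the config name and class here are hypothetical.

import org.apache.kafka.common.config.ConfigDef;

public class ComposableConfigDefSketch {
    // Take the base definition as a parameter instead of hard-coding it...
    private static ConfigDef configDef(ConfigDef base) {
        return base.define("example.topics", ConfigDef.Type.LIST, "",
                ConfigDef.Importance.HIGH, "Topics to consume, for illustration only.");
    }

    // ...and expose overloads that choose which base to start from.
    public static ConfigDef configDef() {
        return configDef(new ConfigDef());
    }

    public static void main(String[] args) {
        System.out.println(configDef().names()); // [example.topics]
    }
}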
*/ private static final class StateChange { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java index 16dcd80b43f84..3175f6dcd4023 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java @@ -93,10 +93,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof TopicStatus)) { + if (!(o instanceof TopicStatus that)) { return false; } - TopicStatus that = (TopicStatus) o; return task == that.task && discoverTimestamp == that.discoverTimestamp && topic.equals(that.topic) && diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java index 591e9816a7a50..1f97a907b642e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java @@ -278,6 +278,10 @@ public void stop() { ThreadUtils.shutdownExecutorServiceQuietly(executor, EXECUTOR_SHUTDOWN_TERMINATION_TIMEOUT_MS, TimeUnit.MILLISECONDS); } + public WorkerConfig config() { + return config; + } + /** * Start a connector managed by this worker. * diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java index ca188ffd97af7..a68cdb4ea03d0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java @@ -70,6 +70,8 @@ public class WorkerConfig extends AbstractConfig { public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; public static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC; + public static final String PLUGIN_VERSION_SUFFIX = "plugin.version"; + public static final String KEY_CONVERTER_CLASS_CONFIG = "key.converter"; public static final String KEY_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -77,6 +79,10 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; + public static final String KEY_CONVERTER_VERSION = "key.converter." + PLUGIN_VERSION_SUFFIX; + public static final String KEY_CONVERTER_VERSION_DEFAULT = null; + public static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; + public static final String VALUE_CONVERTER_CLASS_CONFIG = "value.converter"; public static final String VALUE_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -84,6 +90,10 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; + public static final String VALUE_CONVERTER_VERSION = "value.converter." 
+ PLUGIN_VERSION_SUFFIX; + public static final String VALUE_CONVERTER_VERSION_DEFAULT = null; + public static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; + public static final String HEADER_CONVERTER_CLASS_CONFIG = "header.converter"; public static final String HEADER_CONVERTER_CLASS_DOC = "HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -93,6 +103,10 @@ public class WorkerConfig extends AbstractConfig { " header values to strings and deserialize them by inferring the schemas."; public static final String HEADER_CONVERTER_CLASS_DEFAULT = SimpleHeaderConverter.class.getName(); + public static final String HEADER_CONVERTER_VERSION = "header.converter." + PLUGIN_VERSION_SUFFIX; + public static final String HEADER_CONVERTER_VERSION_DEFAULT = null; + public static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; + public static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG = "task.shutdown.graceful.timeout.ms"; private static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC = @@ -200,8 +214,12 @@ protected static ConfigDef baseConfigDef() { CLIENT_DNS_LOOKUP_DOC) .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_CONVERTER_CLASS_DOC) + .define(KEY_CONVERTER_VERSION, Type.STRING, + KEY_CONVERTER_VERSION_DEFAULT, Importance.LOW, KEY_CONVERTER_VERSION_DOC) .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_CONVERTER_CLASS_DOC) + .define(VALUE_CONVERTER_VERSION, Type.STRING, + VALUE_CONVERTER_VERSION_DEFAULT, Importance.LOW, VALUE_CONVERTER_VERSION_DOC) .define(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Type.LONG, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT, Importance.LOW, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC) @@ -237,6 +255,8 @@ protected static ConfigDef baseConfigDef() { .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, Importance.LOW, HEADER_CONVERTER_CLASS_DOC) + .define(HEADER_CONVERTER_VERSION, Type.STRING, + HEADER_CONVERTER_VERSION_DEFAULT, Importance.LOW, HEADER_CONVERTER_VERSION_DOC) .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, Collections.emptyList(), Importance.LOW, CONFIG_PROVIDERS_DOC) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java index 1f4e930ae5a46..424de8f3de5b1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java @@ -228,7 +228,7 @@ protected void iteration() { // Maybe commit if (!committing && (context.isCommitRequested() || now >= nextCommit)) { - commitOffsets(now, false); + commitOffsets(now); nextCommit = now + offsetCommitIntervalMs; context.clearCommitRequest(); } @@ -282,7 +282,7 @@ private void onCommitCompleted(Throwable error, long seqno, Map offsets, boolean cl } } - private void commitOffsets(long now, boolean closing) { - commitOffsets(now, closing, consumer.assignment()); + private void commitOffsets(long now) { + commitOffsets(now, false, consumer.assignment()); } private void commitOffsets(long now, boolean closing, Collection topicPartitions) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java index 0d0eba32d86c4..55cc097083d02 100644 --- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java @@ -262,11 +262,11 @@ public boolean commitOffsets() { shouldFlush = offsetWriter.beginFlush(timeout - time.milliseconds(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.warn("{} Interrupted while waiting for previous offset flush to complete, cancelling", this); - recordCommitFailure(time.milliseconds() - started, e); + recordCommitFailure(time.milliseconds() - started); return false; } catch (TimeoutException e) { log.warn("{} Timed out while waiting for previous offset flush to complete, cancelling", this); - recordCommitFailure(time.milliseconds() - started, e); + recordCommitFailure(time.milliseconds() - started); return false; } if (!shouldFlush) { @@ -292,7 +292,7 @@ public boolean commitOffsets() { // any data if (flushFuture == null) { offsetWriter.cancelFlush(); - recordCommitFailure(time.milliseconds() - started, null); + recordCommitFailure(time.milliseconds() - started); return false; } try { @@ -304,17 +304,17 @@ public boolean commitOffsets() { } catch (InterruptedException e) { log.warn("{} Flush of offsets interrupted, cancelling", this); offsetWriter.cancelFlush(); - recordCommitFailure(time.milliseconds() - started, e); + recordCommitFailure(time.milliseconds() - started); return false; } catch (ExecutionException e) { log.error("{} Flush of offsets threw an unexpected exception: ", this, e); offsetWriter.cancelFlush(); - recordCommitFailure(time.milliseconds() - started, e); + recordCommitFailure(time.milliseconds() - started); return false; } catch (TimeoutException e) { log.error("{} Timed out waiting to flush offsets to storage; will try again on next flush interval with latest offsets", this); offsetWriter.cancelFlush(); - recordCommitFailure(time.milliseconds() - started, null); + recordCommitFailure(time.milliseconds() - started); return false; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java index 98171fe47b6aa..9b70572fe24a7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java @@ -356,17 +356,16 @@ protected void recordActiveTopic(String topic) { * @param duration the length of time in milliseconds for the commit attempt to complete */ protected void recordCommitSuccess(long duration) { - taskMetricsGroup.recordCommit(duration, true, null); + taskMetricsGroup.recordCommit(duration, true); } /** * Record that offsets have been committed. 
* * @param duration the length of time in milliseconds for the commit attempt to complete - * @param error the unexpected error that occurred; may be null in the case of timeouts or interruptions */ - protected void recordCommitFailure(long duration, Throwable error) { - taskMetricsGroup.recordCommit(duration, false, error); + protected void recordCommitFailure(long duration) { + taskMetricsGroup.recordCommit(duration, false); } /** @@ -434,7 +433,7 @@ void close() { metricGroup.close(); } - void recordCommit(long duration, boolean success, Throwable error) { + void recordCommit(long duration, boolean success) { if (success) { commitTime.record(duration); commitAttempts.record(1.0d); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java index 9150000223bdf..16ab0d47a3c72 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java @@ -103,6 +103,10 @@ public final class DistributedConfig extends WorkerConfig { private static final String METADATA_RECOVERY_STRATEGY_DOC = CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC; public static final String DEFAULT_METADATA_RECOVERY_STRATEGY = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY; + public static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG; + private static final String METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC = CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC; + public static final long DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS = CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS; + /** * worker.sync.timeout.ms */ @@ -526,7 +530,14 @@ private static ConfigDef config(Crypto crypto) { ConfigDef.CaseInsensitiveValidString .in(Utils.enumOptions(MetadataRecoveryStrategy.class)), ConfigDef.Importance.LOW, - METADATA_RECOVERY_STRATEGY_DOC); + METADATA_RECOVERY_STRATEGY_DOC) + .define(METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, + ConfigDef.Type.LONG, + DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, + atLeast(0), + ConfigDef.Importance.LOW, + METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); + } private final ExactlyOnceSourceSupport exactlyOnceSourceSupport; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java index 908c2929481b5..23bcbd200c0a0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java @@ -112,8 +112,9 @@ import javax.crypto.KeyGenerator; import javax.crypto.SecretKey; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; + +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; @@ -906,26 +907,6 @@ public void connectorInfo(final String connName, final Callback c ); } - @Override - public void tasksConfig(String connName, final Callback>> callback) { - log.trace("Submitting tasks config 
request {}", connName); - - addRequest( - () -> { - if (checkRebalanceNeeded(callback)) - return null; - - if (!configState.contains(connName)) { - callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null); - } else { - callback.onCompletion(null, buildTasksConfig(connName)); - } - return null; - }, - forwardErrorAndTickThreadStages(callback) - ); - } - @Override protected Map rawConfig(String connName) { return configState.rawConnectorConfig(connName); @@ -1530,7 +1511,7 @@ public void restartConnectorAndTasks(RestartRequest request, Callback plan = buildRestartPlan(request); - if (!plan.isPresent()) { + if (plan.isEmpty()) { callback.onCompletion(new NotFoundException("Status for connector " + connectorName + " not found", null), null); } else { callback.onCompletion(null, plan.get().restartConnectorStateInfo()); @@ -1578,7 +1559,7 @@ void processRestartRequests() { protected synchronized void doRestartConnectorAndTasks(RestartRequest request) { String connectorName = request.connectorName(); Optional maybePlan = buildRestartPlan(request); - if (!maybePlan.isPresent()) { + if (maybePlan.isEmpty()) { log.debug("Skipping restart of connector '{}' since no status is available: {}", connectorName, request); return; } @@ -2212,8 +2193,7 @@ private void reconfigureConnectorTasksWithExponentialBackoffRetries(long initial } boolean isPossibleExpiredKeyException(long initialRequestTime, Throwable error) { - if (error instanceof ConnectRestException) { - ConnectRestException connectError = (ConnectRestException) error; + if (error instanceof ConnectRestException connectError) { return connectError.statusCode() == Response.Status.FORBIDDEN.getStatusCode() && initialRequestTime + TimeUnit.MINUTES.toMillis(1) >= time.milliseconds(); } @@ -2584,9 +2564,8 @@ public int compareTo(DistributedHerderRequest o) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof DistributedHerderRequest)) + if (!(o instanceof DistributedHerderRequest other)) return false; - DistributedHerderRequest other = (DistributedHerderRequest) o; return compareTo(other) == 0; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java index 138bf9fc51473..c8ddfe5b70053 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java @@ -440,8 +440,7 @@ private String ownerUrl(String connector) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof LeaderState)) return false; - LeaderState that = (LeaderState) o; + if (!(o instanceof LeaderState that)) return false; return Objects.equals(allMembers, that.allMembers) && Objects.equals(connectorOwners, that.connectorOwners) && Objects.equals(taskOwners, that.taskOwners); @@ -644,10 +643,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof WorkerLoad)) { + if (!(o instanceof WorkerLoad that)) { return false; } - WorkerLoad that = (WorkerLoad) o; return worker.equals(that.worker) && connectors.equals(that.connectors) && tasks.equals(that.tasks); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java index f4fdcaf801edc..c89eb33082fbe 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java @@ -89,8 +89,7 @@ public WorkerGroupMember(DistributedConfig config, .tags(metricsTags); List reporters = CommonClientConfigs.metricsReporters(clientId, config); - Map contextLabels = new HashMap<>(); - contextLabels.putAll(config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); + Map contextLabels = new HashMap<>(config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); contextLabels.put(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID, config.kafkaClusterId()); contextLabels.put(WorkerConfig.CONNECT_GROUP_ID, config.getString(DistributedConfig.GROUP_ID_CONFIG)); MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, contextLabels); @@ -122,6 +121,7 @@ public WorkerGroupMember(DistributedConfig config, true, new ApiVersions(), logContext, + config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG), MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)) ); this.client = new ConsumerNetworkClient( diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java index 28886b3557c7a..2b9ba9fc5b7b9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java @@ -214,8 +214,7 @@ protected V execAndRetry(ProcessingContext context, Operation operatio errorHandlingMetrics.recordRetry(); } else { log.trace("Can't retry. start={}, attempt={}, deadline={}", startTime, attempt, deadline); - context.error(e); - return null; + throw e; } if (stopping) { log.trace("Shutdown has been scheduled. Marking operation as failed."); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java index fdbadef7b6939..bdeb224cfde60 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java @@ -16,17 +16,23 @@ */ package org.apache.kafka.connect.runtime.isolation; +import org.apache.maven.artifact.versioning.DefaultArtifactVersion; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.net.URL; import java.net.URLClassLoader; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; /** * A custom classloader dedicated to loading Connect plugin classes in classloading isolation. 
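Illustrative sketch (not part of the patch): findPluginLoader below walks the version-sorted loader map and keeps the last entry the requested range accepts, i.e. the newest matching version. A simplified sketch of that selection, assuming maven-artifact on the classpath; the map of version strings to loader labels is a hypothetical stand-in for the real SortedMap of PluginDesc to ClassLoader.

import java.util.Map;
import java.util.TreeMap;

import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException;
import org.apache.maven.artifact.versioning.VersionRange;

public class LatestMatchingVersionSketch {
    public static void main(String[] args) throws InvalidVersionSpecificationException {
        // Stand-in for the per-plugin loader map; DefaultArtifactVersion sorts versions naturally.
        TreeMap<DefaultArtifactVersion, String> loaders = new TreeMap<>();
        loaders.put(new DefaultArtifactVersion("3.6.0"), "loader-3.6.0");
        loaders.put(new DefaultArtifactVersion("3.7.2"), "loader-3.7.2");
        loaders.put(new DefaultArtifactVersion("4.0.0"), "loader-4.0.0");

        VersionRange range = VersionRange.createFromVersionSpec("[3.6,4.0)");

        // Iterate in ascending version order and keep the last match: the newest version inside the range.
        String selected = null;
        for (Map.Entry<DefaultArtifactVersion, String> entry : loaders.entrySet()) {
            if (range.containsVersion(entry.getKey())) {
                selected = entry.getValue();
            }
        }
        System.out.println(selected); // loader-3.7.2
    }
}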
@@ -69,36 +75,117 @@ public DelegatingClassLoader() { /** * Retrieve the PluginClassLoader associated with a plugin class + * * @param name The fully qualified class name of the plugin * @return the PluginClassLoader that should be used to load this, or null if the plugin is not isolated. */ // VisibleForTesting - PluginClassLoader pluginClassLoader(String name) { + PluginClassLoader pluginClassLoader(String name, VersionRange range) { if (!PluginUtils.shouldLoadInIsolation(name)) { return null; } + SortedMap, ClassLoader> inner = pluginLoaders.get(name); if (inner == null) { return null; } - ClassLoader pluginLoader = inner.get(inner.lastKey()); + + + ClassLoader pluginLoader = findPluginLoader(inner, name, range); return pluginLoader instanceof PluginClassLoader - ? (PluginClassLoader) pluginLoader - : null; + ? (PluginClassLoader) pluginLoader + : null; } - ClassLoader connectorLoader(String connectorClassOrAlias) { - String fullName = aliases.getOrDefault(connectorClassOrAlias, connectorClassOrAlias); - ClassLoader classLoader = pluginClassLoader(fullName); - if (classLoader == null) classLoader = this; + PluginClassLoader pluginClassLoader(String name) { + return pluginClassLoader(name, null); + } + + ClassLoader loader(String classOrAlias, VersionRange range) { + String fullName = aliases.getOrDefault(classOrAlias, classOrAlias); + ClassLoader classLoader = pluginClassLoader(fullName, range); + if (classLoader == null) { + classLoader = this; + } log.debug( - "Getting plugin class loader: '{}' for connector: {}", - classLoader, - connectorClassOrAlias + "Got plugin class loader: '{}' for connector: {}", + classLoader, + classOrAlias ); return classLoader; } + ClassLoader loader(String classOrAlias) { + return loader(classOrAlias, null); + } + + ClassLoader connectorLoader(String connectorClassOrAlias) { + return loader(connectorClassOrAlias); + } + + String resolveFullClassName(String classOrAlias) { + return aliases.getOrDefault(classOrAlias, classOrAlias); + } + + PluginDesc pluginDesc(String classOrAlias, String preferredLocation, Set allowedTypes) { + if (classOrAlias == null) { + return null; + } + String fullName = aliases.getOrDefault(classOrAlias, classOrAlias); + SortedMap, ClassLoader> inner = pluginLoaders.get(fullName); + if (inner == null) { + return null; + } + PluginDesc result = null; + for (Map.Entry, ClassLoader> entry : inner.entrySet()) { + if (!allowedTypes.contains(entry.getKey().type())) { + continue; + } + result = entry.getKey(); + if (result.location().equals(preferredLocation)) { + return result; + } + } + return result; + } + + private ClassLoader findPluginLoader( + SortedMap, ClassLoader> loaders, + String pluginName, + VersionRange range + ) { + + if (range != null) { + + if (null != range.getRecommendedVersion()) { + throw new VersionedPluginLoadingException(String.format("A soft version range is not supported for plugin loading, " + + "this is an internal error as connect should automatically convert soft ranges to hard ranges. 
" + + "Provided soft version: %s ", range)); + } + + ClassLoader loader = null; + for (Map.Entry, ClassLoader> entry : loaders.entrySet()) { + // the entries should be in sorted order of versions so this should end up picking the latest version which matches the range + if (range.containsVersion(entry.getKey().encodedVersion())) { + loader = entry.getValue(); + } + } + + if (loader == null) { + List availableVersions = loaders.keySet().stream().map(PluginDesc::version).collect(Collectors.toList()); + throw new VersionedPluginLoadingException(String.format( + "Plugin %s not found that matches the version range %s, available versions: %s", + pluginName, + range, + availableVersions + ), availableVersions); + } + return loader; + } + + return loaders.get(loaders.lastKey()); + } + public void installDiscoveredPlugins(PluginScanResult scanResult) { pluginLoaders.putAll(computePluginLoaders(scanResult)); for (String pluginClassName : pluginLoaders.keySet()) { @@ -112,21 +199,76 @@ public void installDiscoveredPlugins(PluginScanResult scanResult) { @Override protected Class loadClass(String name, boolean resolve) throws ClassNotFoundException { + return loadVersionedPluginClass(name, null, resolve); + } + + protected Class loadVersionedPluginClass( + String name, + VersionRange range, + boolean resolve + ) throws VersionedPluginLoadingException, ClassNotFoundException { + String fullName = aliases.getOrDefault(name, name); - PluginClassLoader pluginLoader = pluginClassLoader(fullName); + PluginClassLoader pluginLoader = pluginClassLoader(fullName, range); + Class plugin; if (pluginLoader != null) { - log.trace("Retrieving loaded class '{}' from '{}'", fullName, pluginLoader); - return pluginLoader.loadClass(fullName, resolve); + log.trace("Retrieving loaded class '{}' from '{}'", name, pluginLoader); + plugin = pluginLoader.loadClass(fullName, resolve); + } else { + plugin = super.loadClass(fullName, resolve); + if (range == null) { + return plugin; + } + verifyClasspathVersionedPlugin(fullName, plugin, range); + } + return plugin; + } + + private void verifyClasspathVersionedPlugin(String fullName, Class plugin, VersionRange range) throws VersionedPluginLoadingException { + String pluginVersion; + SortedMap, ClassLoader> scannedPlugin = pluginLoaders.get(fullName); + + if (scannedPlugin == null) { + throw new VersionedPluginLoadingException(String.format( + "Plugin %s is not part of Connect's plugin loading mechanism (ClassPath or Plugin Path)", + fullName + )); } - return super.loadClass(fullName, resolve); + // if a plugin implements two interfaces (like JsonConverter implements both converter and header converter) + // it will have two entries under classpath, one for each scan. Hence, we count distinct by version. 
+ List classpathPlugins = scannedPlugin.keySet().stream() + .filter(pluginDesc -> pluginDesc.location().equals("classpath")) + .map(PluginDesc::version) + .distinct() + .collect(Collectors.toList()); + + if (classpathPlugins.size() > 1) { + throw new VersionedPluginLoadingException(String.format( + "Plugin %s has multiple versions specified in class path, " + + "only one version is allowed in class path for loading a plugin with version range", + fullName + )); + } else if (classpathPlugins.isEmpty()) { + throw new VersionedPluginLoadingException("Invalid plugin found in classpath"); + } else { + pluginVersion = classpathPlugins.get(0); + if (!range.containsVersion(new DefaultArtifactVersion(pluginVersion))) { + throw new VersionedPluginLoadingException(String.format( + "Plugin %s has version %s which does not match the required version range %s", + fullName, + pluginVersion, + range + ), Collections.singletonList(pluginVersion)); + } + } } private static Map, ClassLoader>> computePluginLoaders(PluginScanResult plugins) { Map, ClassLoader>> pluginLoaders = new HashMap<>(); plugins.forEach(pluginDesc -> - pluginLoaders.computeIfAbsent(pluginDesc.className(), k -> new TreeMap<>()) - .put(pluginDesc, pluginDesc.loader())); + pluginLoaders.computeIfAbsent(pluginDesc.className(), k -> new TreeMap<>()) + .put(pluginDesc, pluginDesc.loader())); return pluginLoaders; } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java index a58aef7ceca74..b480124c6dc5f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.runtime.isolation; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.maven.artifact.versioning.DefaultArtifactVersion; @@ -60,6 +61,11 @@ public String toString() { '}'; } + @JsonIgnore + public DefaultArtifactVersion encodedVersion() { + return encodedVersion; + } + public Class pluginClass() { return klass; } @@ -97,10 +103,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof PluginDesc)) { + if (!(o instanceof PluginDesc that)) { return false; } - PluginDesc that = (PluginDesc) o; return Objects.equals(klass, that.klass) && Objects.equals(version, that.version) && type == that.type; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java index 932e87395f728..ae6d3ba3a1cd4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.connect.runtime.isolation; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -469,7 +471,7 @@ private static Collection forJavaClassPath() { } return distinctUrls(urls); } - + private static Collection forClassLoader(ClassLoader classLoader) { final Collection result = new ArrayList<>(); while (classLoader != null) { @@ -483,7 +485,7 @@ private static 
Collection forClassLoader(ClassLoader classLoader) { } return distinctUrls(result); } - + private static Collection distinctUrls(Collection urls) { Map distinct = new HashMap<>(urls.size()); for (URL url : urls) { @@ -491,4 +493,21 @@ private static Collection distinctUrls(Collection urls) { } return distinct.values(); } + + public static VersionRange connectorVersionRequirement(String version) throws InvalidVersionSpecificationException { + if (version == null || version.equals("latest")) { + return null; + } + version = version.trim(); + + // check first if the given version is valid + VersionRange range = VersionRange.createFromVersionSpec(version); + + if (range.hasRestrictions()) { + return range; + } + // now if the version is not enclosed we consider it as a hard requirement and enclose it in [] + version = "[" + version + "]"; + return VersionRange.createFromVersionSpec(version); + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java index 816f870157e49..8be45e773b3e5 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java @@ -35,6 +35,8 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,11 +45,13 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.function.Function; import java.util.stream.Collectors; public class Plugins { @@ -168,11 +172,20 @@ protected static Class pluginClass( String classOrAlias, Class pluginClass ) throws ClassNotFoundException { - Class klass = loader.loadClass(classOrAlias, false); + return pluginClass(loader, classOrAlias, pluginClass, null); + } + + @SuppressWarnings("unchecked") + protected static Class pluginClass( + DelegatingClassLoader loader, + String classOrAlias, + Class pluginClass, + VersionRange range + ) throws VersionedPluginLoadingException, ClassNotFoundException { + Class klass = loader.loadVersionedPluginClass(classOrAlias, range, false); if (pluginClass.isAssignableFrom(klass)) { return (Class) klass; } - throw new ClassNotFoundException( "Requested class: " + classOrAlias @@ -184,6 +197,10 @@ public Class pluginClass(String classOrAlias) throws ClassNotFoundException { return pluginClass(delegatingLoader, classOrAlias, Object.class); } + public Class pluginClass(String classOrAlias, VersionRange range) throws VersionedPluginLoadingException, ClassNotFoundException { + return pluginClass(delegatingLoader, classOrAlias, Object.class, range); + } + public static ClassLoader compareAndSwapLoaders(ClassLoader loader) { ClassLoader current = Thread.currentThread().getContextClassLoader(); if (!current.equals(loader)) { @@ -240,14 +257,46 @@ public Runnable withClassLoader(ClassLoader classLoader, Runnable operation) { }; } + public Function safeLoaderSwapper() { + return loader -> { + if (!(loader instanceof PluginClassLoader)) { + loader = delegatingLoader; + } + return withClassLoader(loader); + }; 
+ } + + public String latestVersion(String classOrAlias, PluginType... allowedTypes) { + return pluginVersion(classOrAlias, null, allowedTypes); + } + + public String pluginVersion(String classOrAlias, ClassLoader sourceLoader, PluginType... allowedTypes) { + String location = (sourceLoader instanceof PluginClassLoader) ? ((PluginClassLoader) sourceLoader).location() : null; + PluginDesc desc = delegatingLoader.pluginDesc(classOrAlias, location, new HashSet<>(Arrays.asList(allowedTypes))); + if (desc != null) { + return desc.version(); + } + return null; + } + public DelegatingClassLoader delegatingLoader() { return delegatingLoader; } + // kept for compatibility public ClassLoader connectorLoader(String connectorClassOrAlias) { - return delegatingLoader.connectorLoader(connectorClassOrAlias); + return delegatingLoader.loader(connectorClassOrAlias); } + public ClassLoader pluginLoader(String classOrAlias, VersionRange range) { + return delegatingLoader.loader(classOrAlias, range); + } + + public ClassLoader pluginLoader(String classOrAlias) { + return delegatingLoader.loader(classOrAlias); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) public Set> connectors() { Set> connectors = new TreeSet<>((Set) sinkConnectors()); @@ -259,48 +308,96 @@ public Set> sinkConnectors() { return scanResult.sinkConnectors(); } + Set> sinkConnectors(String connectorClassOrAlias) { + return pluginsOfClass(connectorClassOrAlias, scanResult.sinkConnectors()); + } + public Set> sourceConnectors() { return scanResult.sourceConnectors(); } + Set> sourceConnectors(String connectorClassOrAlias) { + return pluginsOfClass(connectorClassOrAlias, scanResult.sourceConnectors()); + } + public Set> converters() { return scanResult.converters(); } + Set> converters(String converterClassOrAlias) { + return pluginsOfClass(converterClassOrAlias, scanResult.converters()); + } + public Set> headerConverters() { return scanResult.headerConverters(); } + Set> headerConverters(String headerConverterClassOrAlias) { + return pluginsOfClass(headerConverterClassOrAlias, scanResult.headerConverters()); + } + public Set>> transformations() { return scanResult.transformations(); } + Set>> transformations(String transformationClassOrAlias) { + return pluginsOfClass(transformationClassOrAlias, scanResult.transformations()); + } + public Set>> predicates() { return scanResult.predicates(); } + Set>> predicates(String predicateClassOrAlias) { + return pluginsOfClass(predicateClassOrAlias, scanResult.predicates()); + } + public Set> connectorClientConfigPolicies() { return scanResult.connectorClientConfigPolicies(); } + private Set> pluginsOfClass(String classNameOrAlias, Set> allPluginsOfType) { + String className = delegatingLoader.resolveFullClassName(classNameOrAlias); + Set> plugins = new TreeSet<>(); + for (PluginDesc desc : allPluginsOfType) { + if (desc.className().equals(className)) { + plugins.add(desc); + } + } + return plugins; + } + public Object newPlugin(String classOrAlias) throws ClassNotFoundException { Class klass = pluginClass(delegatingLoader, classOrAlias, Object.class); return newPlugin(klass); } + public Object newPlugin(String classOrAlias, VersionRange range) throws VersionedPluginLoadingException, ClassNotFoundException { + Class klass = pluginClass(delegatingLoader, classOrAlias, Object.class, range); + return newPlugin(klass); + } + + public Object newPlugin(String classOrAlias, VersionRange range, ClassLoader sourceLoader) throws ClassNotFoundException { + if (range == null && sourceLoader instanceof 
PluginClassLoader) { + return newPlugin(sourceLoader.loadClass(classOrAlias)); + } + return newPlugin(classOrAlias, range); + } + public Connector newConnector(String connectorClassOrAlias) { Class klass = connectorClass(connectorClassOrAlias); return newPlugin(klass); } - public Class connectorClass(String connectorClassOrAlias) { + public Connector newConnector(String connectorClassOrAlias, VersionRange range) throws VersionedPluginLoadingException { + Class klass = connectorClass(connectorClassOrAlias, range); + return newPlugin(klass); + } + + public Class connectorClass(String connectorClassOrAlias, VersionRange range) throws VersionedPluginLoadingException { Class klass; try { - klass = pluginClass( - delegatingLoader, - connectorClassOrAlias, - Connector.class - ); + klass = pluginClass(delegatingLoader, connectorClassOrAlias, Connector.class, range); } catch (ClassNotFoundException e) { List> matches = new ArrayList<>(); Set> connectors = connectors(); @@ -336,6 +433,10 @@ public Class connectorClass(String connectorClassOrAlias) { return klass; } + public Class connectorClass(String connectorClassOrAlias) { + return connectorClass(connectorClassOrAlias, null); + } + public Task newTask(Class taskClass) { return newPlugin(taskClass); } @@ -350,54 +451,49 @@ public Task newTask(Class taskClass) { * @throws ConnectException if the {@link Converter} implementation class could not be found */ public Converter newConverter(AbstractConfig config, String classPropertyName, ClassLoaderUsage classLoaderUsage) { + return newConverter(config, classPropertyName, null, classLoaderUsage); + } + + /** + * Used to get a versioned converter. If the version is specified, it will always use the plugins classloader. + * + * @param config the configuration containing the {@link Converter}'s configuration; may not be null + * @param classPropertyName the name of the property that contains the name of the {@link Converter} class; may not be null + * @param versionPropertyName the name of the property that contains the version of the {@link Converter} class; may not be null + * @return the instantiated and configured {@link Converter}; null if the configuration did not define the specified property + * @throws ConnectException if the {@link Converter} implementation class could not be found, + * @throws VersionedPluginLoadingException if the version requested is not found + */ + public Converter newConverter(AbstractConfig config, String classPropertyName, String versionPropertyName) { + ClassLoaderUsage classLoader = config.getString(versionPropertyName) == null ? ClassLoaderUsage.CURRENT_CLASSLOADER : ClassLoaderUsage.PLUGINS; + return newConverter(config, classPropertyName, versionPropertyName, classLoader); + } + + private Converter newConverter(AbstractConfig config, String classPropertyName, String versionPropertyName, ClassLoaderUsage classLoaderUsage) { if (!config.originals().containsKey(classPropertyName)) { // This configuration does not define the converter via the specified property name return null; } - - Class klass = null; - switch (classLoaderUsage) { - case CURRENT_CLASSLOADER: - // Attempt to load first with the current classloader, and plugins as a fallback. - // Note: we can't use config.getConfiguredInstance because Converter doesn't implement Configurable, and even if it did - // we have to remove the property prefixes before calling config(...) and we still always want to call Converter.config. 
- klass = pluginClassFromConfig(config, classPropertyName, Converter.class, scanResult.converters()); - break; - case PLUGINS: - // Attempt to load with the plugin class loader, which uses the current classloader as a fallback - String converterClassOrAlias = config.getClass(classPropertyName).getName(); - try { - klass = pluginClass(delegatingLoader, converterClassOrAlias, Converter.class); - } catch (ClassNotFoundException e) { - throw new ConnectException( - "Failed to find any class that implements Converter and which name matches " - + converterClassOrAlias + ", available converters are: " - + pluginNames(scanResult.converters()) - ); - } - break; - } - if (klass == null) { - throw new ConnectException("Unable to initialize the Converter specified in '" + classPropertyName + "'"); - } - // Determine whether this is a key or value converter based upon the supplied property name ... final boolean isKeyConverter = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG.equals(classPropertyName); // Configure the Converter using only the old configuration mechanism ... String configPrefix = classPropertyName + "."; Map converterConfig = config.originalsWithPrefix(configPrefix); + log.debug("Configuring the {} converter with configuration keys:{}{}", - isKeyConverter ? "key" : "value", System.lineSeparator(), converterConfig.keySet()); + isKeyConverter ? "key" : "value", System.lineSeparator(), converterConfig.keySet()); - Converter plugin; - try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { - plugin = newPlugin(klass); + Converter plugin = newVersionedPlugin(config, classPropertyName, versionPropertyName, + Converter.class, classLoaderUsage, scanResult.converters()); + try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { plugin.configure(converterConfig, isKeyConverter); } return plugin; } + + /** * Load an internal converter, used by the worker for (de)serializing data in internal topics. * @@ -427,99 +523,124 @@ public Converter newInternalConverter(boolean isKey, String className, Map klass = null; - switch (classLoaderUsage) { - case CURRENT_CLASSLOADER: - if (!config.originals().containsKey(classPropertyName)) { - // This connector configuration does not define the header converter via the specified property name - return null; - } - // Attempt to load first with the current classloader, and plugins as a fallback. - // Note: we can't use config.getConfiguredInstance because we have to remove the property prefixes - // before calling config(...) - klass = pluginClassFromConfig(config, classPropertyName, HeaderConverter.class, scanResult.headerConverters()); - break; - case PLUGINS: - // Attempt to load with the plugin class loader, which uses the current classloader as a fallback. 
- // Note that there will always be at least a default header converter for the worker - String converterClassOrAlias = config.getClass(classPropertyName).getName(); - try { - klass = pluginClass( - delegatingLoader, - converterClassOrAlias, - HeaderConverter.class - ); - } catch (ClassNotFoundException e) { - throw new ConnectException( - "Failed to find any class that implements HeaderConverter and which name matches " - + converterClassOrAlias - + ", available header converters are: " - + pluginNames(scanResult.headerConverters()) - ); - } - } - if (klass == null) { - throw new ConnectException("Unable to initialize the HeaderConverter specified in '" + classPropertyName + "'"); + return newHeaderConverter(config, classPropertyName, null, classLoaderUsage); + } + + /** + * If the given configuration defines a {@link HeaderConverter} using the named configuration property, return a new configured + * instance. If the version is specified, it will always use the plugins classloader. + * + * @param config the configuration containing the {@link HeaderConverter}'s configuration; may not be null + * @param classPropertyName the name of the property that contains the name of the {@link HeaderConverter} class; may not be null + * @param versionPropertyName the config for the version for the header converter + * @return the instantiated and configured {@link HeaderConverter}; null if the configuration did not define the specified property + * @throws ConnectException if the {@link HeaderConverter} implementation class could not be found + */ + public HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, String versionPropertyName) { + ClassLoaderUsage classLoader = config.getString(versionPropertyName) == null ? ClassLoaderUsage.CURRENT_CLASSLOADER : ClassLoaderUsage.PLUGINS; + return newHeaderConverter(config, classPropertyName, versionPropertyName, classLoader); + } + + private HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, String versionPropertyName, ClassLoaderUsage classLoaderUsage) { + if (!config.originals().containsKey(classPropertyName) && classLoaderUsage == ClassLoaderUsage.CURRENT_CLASSLOADER) { + // This configuration does not define the Header Converter via the specified property name + return null; } + HeaderConverter plugin = newVersionedPlugin(config, classPropertyName, versionPropertyName, + HeaderConverter.class, classLoaderUsage, scanResult.headerConverters()); String configPrefix = classPropertyName + "."; Map converterConfig = config.originalsWithPrefix(configPrefix); converterConfig.put(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()); log.debug("Configuring the header converter with configuration keys:{}{}", System.lineSeparator(), converterConfig.keySet()); - HeaderConverter plugin; - try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { - plugin = newPlugin(klass); + try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { plugin.configure(converterConfig); } return plugin; } - public ConfigProvider newConfigProvider(AbstractConfig config, String providerPrefix, ClassLoaderUsage classLoaderUsage) { - String classPropertyName = providerPrefix + ".class"; - Map originalConfig = config.originalsStrings(); - if (!originalConfig.containsKey(classPropertyName)) { - // This configuration does not define the config provider via the specified property name - return null; + @SuppressWarnings({"unchecked", "rawtypes"}) + private U 
newVersionedPlugin( + AbstractConfig config, + String classPropertyName, + String versionPropertyName, + Class basePluginClass, + ClassLoaderUsage classLoaderUsage, + SortedSet> availablePlugins + ) { + + String version = versionPropertyName == null ? null : config.getString(versionPropertyName); + VersionRange range = null; + if (version != null) { + try { + range = PluginUtils.connectorVersionRequirement(version); + } catch (InvalidVersionSpecificationException e) { + throw new ConnectException(String.format("Invalid version range for %s: %s", classPropertyName, version), e); + } } - Class klass = null; + + assert range == null || classLoaderUsage == ClassLoaderUsage.PLUGINS; + + Class klass = null; + String basePluginClassName = basePluginClass.getSimpleName(); switch (classLoaderUsage) { case CURRENT_CLASSLOADER: // Attempt to load first with the current classloader, and plugins as a fallback. - klass = pluginClassFromConfig(config, classPropertyName, ConfigProvider.class, scanResult.configProviders()); + klass = pluginClassFromConfig(config, classPropertyName, basePluginClass, availablePlugins); break; case PLUGINS: // Attempt to load with the plugin class loader, which uses the current classloader as a fallback - String configProviderClassOrAlias = originalConfig.get(classPropertyName); + + // if the config specifies the class name, use it, otherwise use the default which we can get from config.getClass + String classOrAlias = config.originalsStrings().get(classPropertyName); + if (classOrAlias == null) { + classOrAlias = config.getClass(classPropertyName).getName(); + } try { - klass = pluginClass(delegatingLoader, configProviderClassOrAlias, ConfigProvider.class); + klass = pluginClass(delegatingLoader, classOrAlias, basePluginClass, range); } catch (ClassNotFoundException e) { throw new ConnectException( - "Failed to find any class that implements ConfigProvider and which name matches " - + configProviderClassOrAlias + ", available ConfigProviders are: " - + pluginNames(scanResult.configProviders()) + "Failed to find any class that implements " + basePluginClassName + " and which name matches " + + classOrAlias + ", available plugins are: " + + pluginNames(availablePlugins) ); } break; } if (klass == null) { - throw new ConnectException("Unable to initialize the ConfigProvider specified in '" + classPropertyName + "'"); + throw new ConnectException("Unable to initialize the " + basePluginClassName + + " specified in " + classPropertyName); } + U plugin; + try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { + plugin = newPlugin(klass); + } + return plugin; + } + + public ConfigProvider newConfigProvider(AbstractConfig config, String providerPrefix, ClassLoaderUsage classLoaderUsage) { + String classPropertyName = providerPrefix + ".class"; + Map originalConfig = config.originalsStrings(); + if (!originalConfig.containsKey(classPropertyName)) { + // This configuration does not define the config provider via the specified property name + return null; + } + + ConfigProvider plugin = newVersionedPlugin(config, classPropertyName, null, ConfigProvider.class, classLoaderUsage, scanResult.configProviders()); + // Configure the ConfigProvider String configPrefix = providerPrefix + ".param."; Map configProviderConfig = config.originalsWithPrefix(configPrefix); - - ConfigProvider plugin; - try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { - plugin = newPlugin(klass); + try (LoaderSwap loaderSwap = 
safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { plugin.configure(configProviderConfig); } return plugin; @@ -557,8 +678,7 @@ public T newPlugin(String klassName, AbstractConfig config, Class pluginK } try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { plugin = newPlugin(klass); - if (plugin instanceof Versioned) { - Versioned versionedPlugin = (Versioned) plugin; + if (plugin instanceof Versioned versionedPlugin) { if (Utils.isBlank(versionedPlugin.version())) { throw new ConnectException("Version not defined for '" + klassName + "'"); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java new file mode 100644 index 0000000000000..76f28659726df --- /dev/null +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java @@ -0,0 +1,265 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.connect.runtime.isolation; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.runtime.ConnectorConfig; +import org.apache.kafka.connect.transforms.Transformation; +import org.apache.kafka.connect.transforms.predicates.Predicate; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class PluginsRecommenders { + + private final Plugins plugins; + private final ConverterPluginRecommender converterPluginRecommender; + private final ConnectorPluginVersionRecommender connectorPluginVersionRecommender; + private final HeaderConverterPluginRecommender headerConverterPluginRecommender; + private final KeyConverterPluginVersionRecommender keyConverterPluginVersionRecommender; + private final ValueConverterPluginVersionRecommender valueConverterPluginVersionRecommender; + private final HeaderConverterPluginVersionRecommender headerConverterPluginVersionRecommender; + + public PluginsRecommenders() { + this(null); + } + + public PluginsRecommenders(Plugins plugins) { + this.plugins = plugins; + this.converterPluginRecommender = new ConverterPluginRecommender(); + this.connectorPluginVersionRecommender = new ConnectorPluginVersionRecommender(); + this.headerConverterPluginRecommender = new HeaderConverterPluginRecommender(); + this.keyConverterPluginVersionRecommender = new KeyConverterPluginVersionRecommender(); + this.valueConverterPluginVersionRecommender = new ValueConverterPluginVersionRecommender(); + this.headerConverterPluginVersionRecommender = new HeaderConverterPluginVersionRecommender(); + } + + public ConverterPluginRecommender converterPluginRecommender() { + return converterPluginRecommender; + } + + public ConnectorPluginVersionRecommender connectorPluginVersionRecommender() { + return connectorPluginVersionRecommender; + } + + public HeaderConverterPluginRecommender headerConverterPluginRecommender() { + return headerConverterPluginRecommender; + } + + public KeyConverterPluginVersionRecommender keyConverterPluginVersionRecommender() { + return keyConverterPluginVersionRecommender; + } + + public ValueConverterPluginVersionRecommender valueConverterPluginVersionRecommender() { + return valueConverterPluginVersionRecommender; + } + + public HeaderConverterPluginVersionRecommender headerConverterPluginVersionRecommender() { + return headerConverterPluginVersionRecommender; + } + + public TransformationPluginRecommender transformationPluginRecommender(String classOrAlias) { + return new TransformationPluginRecommender(classOrAlias); + } + + public PredicatePluginRecommender predicatePluginRecommender(String classOrAlias) { + return new PredicatePluginRecommender(classOrAlias); + } + + public class ConnectorPluginVersionRecommender implements ConfigDef.Recommender { + + @SuppressWarnings({"unchecked", "rawtypes"}) + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + String connectorClassOrAlias = (String) parsedConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + if (connectorClassOrAlias == null) { + //should never happen + return Collections.emptyList(); + } + List sourceConnectors = plugins.sourceConnectors(connectorClassOrAlias).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + if (!sourceConnectors.isEmpty()) { + return sourceConnectors; + } + return 
plugins.sinkConnectors(connectorClassOrAlias).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return parsedConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG) != null; + } + + } + + public class ConverterPluginRecommender implements ConfigDef.Recommender { + + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + return plugins.converters().stream() + .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return true; + } + } + + public class HeaderConverterPluginRecommender implements ConfigDef.Recommender { + + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + return plugins.headerConverters().stream() + .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return true; + } + } + + public abstract class ConverterPluginVersionRecommender implements ConfigDef.Recommender { + + protected Function> recommendations() { + return converterClass -> plugins.converters(converterClass).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + + protected abstract String converterConfig(); + + @SuppressWarnings({"rawtypes"}) + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + if (parsedConfig.get(converterConfig()) == null) { + return Collections.emptyList(); + } + Class converterClass = (Class) parsedConfig.get(converterConfig()); + return recommendations().apply(converterClass.getName()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return parsedConfig.get(converterConfig()) != null; + } + } + + public class KeyConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; + } + + } + + public class ValueConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; + } + } + + public class HeaderConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; + } + + @Override + protected Function> recommendations() { + return converterClass -> plugins.headerConverters(converterClass).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + } + + // Recommender for transformation and predicate plugins + public abstract class SMTPluginRecommender implements ConfigDef.Recommender { + + protected abstract Function>> plugins(); + + protected final String classOrAliasConfig; + + public SMTPluginRecommender(String classOrAliasConfig) { + this.classOrAliasConfig = classOrAliasConfig; + } + + @Override + @SuppressWarnings({"rawtypes"}) + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + if (parsedConfig.get(classOrAliasConfig) == null) { + return Collections.emptyList(); + } + + Class classOrAlias = (Class) parsedConfig.get(classOrAliasConfig); + return 
plugins().apply(classOrAlias.getName()) + .stream().map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return true; + } + } + + public class TransformationPluginRecommender extends SMTPluginRecommender> { + + public TransformationPluginRecommender(String classOrAliasConfig) { + super(classOrAliasConfig); + } + + @Override + protected Function>>> plugins() { + return plugins::transformations; + } + } + + public class PredicatePluginRecommender extends SMTPluginRecommender> { + + public PredicatePluginRecommender(String classOrAliasConfig) { + super(classOrAliasConfig); + } + + @Override + protected Function>>> plugins() { + return plugins::predicates; + } + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java index 5b91c150c86a5..85f514d5f1ee6 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java @@ -29,7 +29,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.ServiceLoader; import java.util.SortedSet; import java.util.TreeSet; @@ -77,7 +79,7 @@ private static String versionFor(Class pluginKlass) throws Refl @Override protected PluginScanResult scanPlugins(PluginSource source) { ClassGraph classGraphBuilder = new ClassGraph() - .addClassLoader(source.loader()) + .overrideClassLoaders(classLoaderOrder(source)) .enableExternalClasses() .enableClassInfo(); try (ScanResult classGraph = classGraphBuilder.scan()) { @@ -105,6 +107,21 @@ private SortedSet>> getTransformationPluginDesc(Plu return (SortedSet>>) (SortedSet) getPluginDesc(classGraph, PluginType.TRANSFORMATION, source); } + private ClassLoader[] classLoaderOrder(PluginSource source) { + // Classgraph will first scan all the class URLs from the provided classloader chain and use said chain during classloading. + // We compute and provide the classloader chain starting from the isolated PluginClassLoader to ensure that it adheres + // to the child first delegation model used in connect. In addition, classgraph can fail to find URLs from the + // application classloader as it uses an illegal reflections access. Providing the entire chain of classloaders + // which included the application classloader forces classpath URLs to be scanned separately. 
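Editor's note on the classloader-chain comment above: it refers to Connect's child-first delegation model for plugin isolation. As a rough, generic sketch of what parent-last delegation means, the class below tries its own URLs before consulting the parent; it is not Connect's actual PluginClassLoader, which additionally decides which packages are eligible for isolation.

import java.net.URL;
import java.net.URLClassLoader;

public class ChildFirstClassLoader extends URLClassLoader {

    public ChildFirstClassLoader(URL[] urls, ClassLoader parent) {
        super(urls, parent);
    }

    @Override
    protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
        synchronized (getClassLoadingLock(name)) {
            Class<?> loaded = findLoadedClass(name);
            if (loaded == null) {
                try {
                    // Try the plugin's own URLs first ...
                    loaded = findClass(name);
                } catch (ClassNotFoundException e) {
                    // ... and only fall back to the parent (application) classloader afterwards.
                    loaded = super.loadClass(name, false);
                }
            }
            if (resolve) {
                resolveClass(loaded);
            }
            return loaded;
        }
    }
}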
+ List classLoaderOrder = new ArrayList<>(); + ClassLoader cl = source.loader(); + while (cl != null) { + classLoaderOrder.add(cl); + cl = cl.getParent(); + } + return classLoaderOrder.toArray(new ClassLoader[0]); + } + @SuppressWarnings({"unchecked"}) private SortedSet> getPluginDesc( ScanResult classGraph, diff --git a/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderForPartitionException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/VersionedPluginLoadingException.java similarity index 57% rename from clients/src/main/java/org/apache/kafka/common/errors/NotLeaderForPartitionException.java rename to connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/VersionedPluginLoadingException.java index 30efc49dc7584..8fb3042549b5f 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/NotLeaderForPartitionException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/VersionedPluginLoadingException.java @@ -14,31 +14,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.common.errors; -/** - * This server is not the leader for the given partition. - * @deprecated since 2.6. Use {@link NotLeaderOrFollowerException}. - */ -@Deprecated -public class NotLeaderForPartitionException extends InvalidMetadataException { +package org.apache.kafka.connect.runtime.isolation; - private static final long serialVersionUID = 1L; +import org.apache.kafka.common.config.ConfigException; - public NotLeaderForPartitionException() { - super(); - } +import java.util.List; + +public class VersionedPluginLoadingException extends ConfigException { - public NotLeaderForPartitionException(String message) { + private List availableVersions = null; + + public VersionedPluginLoadingException(String message) { super(message); } - public NotLeaderForPartitionException(Throwable cause) { - super(cause); + public VersionedPluginLoadingException(String message, List availableVersions) { + super(message); + this.availableVersions = availableVersions; } - public NotLeaderForPartitionException(String message, Throwable cause) { - super(message, cause); + public List availableVersions() { + return availableVersions; } - } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java index bcd4fa18fc29c..ca2ab18d43b43 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java @@ -24,8 +24,8 @@ import java.util.Map; import java.util.Objects; -import javax.ws.rs.core.Configurable; -import javax.ws.rs.core.Configuration; +import jakarta.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configuration; /** * The implementation delegates to {@link ResourceConfig} so that we can handle duplicate diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java index 8098f8c97cc53..1990ebdf36926 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java +++ 
b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java @@ -20,7 +20,7 @@ import org.apache.kafka.connect.health.ConnectClusterState; import org.apache.kafka.connect.rest.ConnectRestExtensionContext; -import javax.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configurable; public class ConnectRestExtensionContextImpl implements ConnectRestExtensionContext { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java index dd38f769fe8ff..4dedc7289b8f4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java @@ -33,9 +33,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; public class HerderRequestHandler { @@ -113,6 +113,7 @@ public T completeOrForwardRequest(FutureCallback cb, } String forwardUrl = uriBuilder.build().toString(); log.debug("Forwarding request {} {} {}", forwardUrl, method, body); + // TODO, we may need to set the request timeout as Idle timeout on the HttpClient return translator.translate(restClient.httpRequest(forwardUrl, method, headers, body, resultType)); } else { log.error("Request '{} {}' failed because it couldn't find the target Connect worker within two hops (between workers).", diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java index 6fe4134d1c52a..902187a83fc8a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java @@ -20,7 +20,7 @@ import org.apache.kafka.connect.runtime.distributed.Crypto; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.Request; import java.security.InvalidKeyException; import java.security.MessageDigest; @@ -31,7 +31,8 @@ import javax.crypto.Mac; import javax.crypto.SecretKey; -import javax.ws.rs.core.HttpHeaders; + +import jakarta.ws.rs.core.HttpHeaders; public class InternalRequestSignature { @@ -59,8 +60,10 @@ public static void addToRequest(Crypto crypto, SecretKey key, byte[] requestBody throw new ConnectException(e); } byte[] requestSignature = sign(mac, key, requestBody); - request.header(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature)) - .header(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm); + request.headers(field -> { + field.add(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature)); + field.add(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm); + }); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index a6db20ce64e54..511f7f9f2c7a2 100644 
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -26,13 +26,15 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.client.util.StringContentProvider; +import org.eclipse.jetty.client.Request; +import org.eclipse.jetty.client.StringRequestContent; +import org.eclipse.jetty.client.transport.HttpClientTransportDynamic; import org.eclipse.jetty.http.HttpField; import org.eclipse.jetty.http.HttpFields; import org.eclipse.jetty.http.HttpStatus; +import org.eclipse.jetty.io.ClientConnector; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +48,9 @@ import java.util.concurrent.TimeoutException; import javax.crypto.SecretKey; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; + +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.Response; /** * Client for outbound REST requests to other members of a Connect cluster @@ -65,7 +68,15 @@ public RestClient(AbstractConfig config) { // VisibleForTesting HttpClient httpClient(SslContextFactory.Client sslContextFactory) { - return sslContextFactory != null ? new HttpClient(sslContextFactory) : new HttpClient(); + final HttpClient client; + if (sslContextFactory != null) { + ClientConnector clientConnector = new ClientConnector(); + clientConnector.setSslContextFactory(sslContextFactory); + client = new HttpClient(new HttpClientTransportDynamic(clientConnector)); + } else { + client = new HttpClient(); + } + return client; } /** @@ -162,7 +173,7 @@ private HttpResponse httpRequest(HttpClient client, String url, String me addHeadersToRequest(headers, req); if (serializedBody != null) { - req.content(new StringContentProvider(serializedBody, StandardCharsets.UTF_8), "application/json"); + req.body(new StringRequestContent("application/json", serializedBody, StandardCharsets.UTF_8)); } if (sessionKey != null && requestSignatureAlgorithm != null) { @@ -220,7 +231,7 @@ private static void addHeadersToRequest(HttpHeaders headers, Request req) { if (headers != null) { String credentialAuthorization = headers.getHeaderString(HttpHeaders.AUTHORIZATION); if (credentialAuthorization != null) { - req.header(HttpHeaders.AUTHORIZATION, credentialAuthorization); + req.headers(field -> field.add(HttpHeaders.AUTHORIZATION, credentialAuthorization)); } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java index 9468166763cea..b6c7690a51d79 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java @@ -28,8 +28,12 @@ import org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper; import org.apache.kafka.connect.runtime.rest.util.SSLUtils; -import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; +import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider; +import org.eclipse.jetty.ee10.servlet.FilterHolder; +import org.eclipse.jetty.ee10.servlet.ServletContextHandler; +import 
org.eclipse.jetty.ee10.servlet.ServletHolder; +import org.eclipse.jetty.ee10.servlets.HeaderFilter; import org.eclipse.jetty.server.Connector; import org.eclipse.jetty.server.CustomRequestLog; import org.eclipse.jetty.server.Handler; @@ -37,12 +41,8 @@ import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.server.Slf4jRequestLogWriter; import org.eclipse.jetty.server.handler.ContextHandlerCollection; +import org.eclipse.jetty.server.handler.CrossOriginHandler; import org.eclipse.jetty.server.handler.StatisticsHandler; -import org.eclipse.jetty.servlet.FilterHolder; -import org.eclipse.jetty.servlet.ServletContextHandler; -import org.eclipse.jetty.servlet.ServletHolder; -import org.eclipse.jetty.servlets.CrossOriginFilter; -import org.eclipse.jetty.servlets.HeaderFilter; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.glassfish.hk2.utilities.Binder; import org.glassfish.hk2.utilities.binding.AbstractBinder; @@ -60,12 +60,13 @@ import java.util.EnumSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; -import javax.servlet.DispatcherType; -import javax.ws.rs.core.UriBuilder; +import jakarta.servlet.DispatcherType; +import jakarta.ws.rs.core.UriBuilder; /** * Embedded server for the REST API that provides the control plane for Kafka Connect workers. @@ -189,6 +190,9 @@ public final Connector createConnector(String listener, boolean isAdmin) { connector.setPort(port); + // TODO: do we need this? + connector.setIdleTimeout(requestTimeout.timeoutMs()); + return connector; } @@ -263,20 +267,21 @@ protected final void initializeResources() { ServletHolder adminServletHolder = new ServletHolder(new ServletContainer(adminResourceConfig)); adminContext.setContextPath("/"); adminContext.addServlet(adminServletHolder, "/*"); - adminContext.setVirtualHosts(new String[]{"@" + ADMIN_SERVER_CONNECTOR_NAME}); + adminContext.setVirtualHosts(List.of("@" + ADMIN_SERVER_CONNECTOR_NAME)); contextHandlers.add(adminContext); } String allowedOrigins = config.allowedOrigins(); if (!Utils.isBlank(allowedOrigins)) { - FilterHolder filterHolder = new FilterHolder(new CrossOriginFilter()); - filterHolder.setName("cross-origin"); - filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, allowedOrigins); + CrossOriginHandler crossOriginHandler = new CrossOriginHandler(); + crossOriginHandler.setAllowedOriginPatterns(Set.of(allowedOrigins.split(","))); String allowedMethods = config.allowedMethods(); if (!Utils.isBlank(allowedMethods)) { - filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, allowedMethods); + crossOriginHandler.setAllowedMethods(Set.of(allowedMethods.split(","))); } - context.addFilter(filterHolder, "/*", EnumSet.of(DispatcherType.REQUEST)); + // Setting to true matches the previously used CrossOriginFilter + crossOriginHandler.setDeliverPreflightRequests(true); + context.insertHandler(crossOriginHandler); } String headerConfig = config.responseHeaders(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java index fd732cc9f1e2c..96993c37c5ce6 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java @@ -102,9 +102,7 @@ public abstract class 
RestServerConfig extends AbstractConfig { static final String RESPONSE_HTTP_HEADERS_DOC = "Rules for REST API HTTP response headers"; // Visible for testing static final String RESPONSE_HTTP_HEADERS_DEFAULT = ""; - private static final Collection HEADER_ACTIONS = Collections.unmodifiableList( - Arrays.asList("set", "add", "setDate", "addDate") - ); + private static final Collection HEADER_ACTIONS = List.of("set", "add", "setDate", "addDate"); /** @@ -307,11 +305,10 @@ static void validateHeaderConfigAction(String action) { private static class ListenersValidator implements ConfigDef.Validator { @Override public void ensureValid(String name, Object value) { - if (!(value instanceof List)) { + if (!(value instanceof List items)) { throw new ConfigException("Invalid value type for listeners (expected list of URLs , ex: http://localhost:8080,https://localhost:8443)."); } - List items = (List) value; if (items.isEmpty()) { throw new ConfigException("Invalid value for listeners, at least one URL is expected, ex: http://localhost:8080,https://localhost:8443."); } @@ -339,11 +336,10 @@ public void ensureValid(String name, Object value) { return; } - if (!(value instanceof List)) { + if (!(value instanceof List items)) { throw new ConfigException("Invalid value type for admin.listeners (expected list)."); } - List items = (List) value; if (items.isEmpty()) { return; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java index 2813a65c53c41..bf15772f7fb51 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java @@ -64,10 +64,9 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (!(obj instanceof ConnectorOffset)) { + if (!(obj instanceof ConnectorOffset that)) { return false; } - ConnectorOffset that = (ConnectorOffset) obj; return Objects.equals(this.partition, that.partition) && Objects.equals(this.offset, that.offset); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java index d37138a82ceb1..cae87914941dc 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java @@ -77,10 +77,9 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (!(obj instanceof ConnectorOffsets)) { + if (!(obj instanceof ConnectorOffsets that)) { return false; } - ConnectorOffsets that = (ConnectorOffsets) obj; return Objects.equals(this.offsets, that.offsets); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java index 6280473af964d..82d9957b40db1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java @@ -124,9 +124,8 @@ public int compareTo(TaskState that) { public boolean equals(Object o) { if (o == this) return true; - if 
(!(o instanceof TaskState)) + if (!(o instanceof TaskState other)) return false; - TaskState other = (TaskState) o; return compareTo(other) == 0; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java index e4dc8fd0b6f67..f1f47e53c4445 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java @@ -47,10 +47,9 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (!(obj instanceof Message)) { + if (!(obj instanceof Message that)) { return false; } - Message that = (Message) obj; return Objects.equals(this.message, that.message); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java index 33bbb04b3f75c..1e33732dc58db 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.connect.runtime.rest.errors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; public class BadRequestException extends ConnectRestException { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java index 9ce3e9e74d115..65053151b4fad 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java @@ -23,14 +23,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import javax.ws.rs.ext.ExceptionMapper; +import jakarta.ws.rs.WebApplicationException; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriInfo; +import jakarta.ws.rs.ext.ExceptionMapper; /** - * Maps uncaught exceptions thrown while handling REST requests to appropriate {@link javax.ws.rs.core.Response}s + * Maps uncaught exceptions thrown while handling REST requests to appropriate {@link jakarta.ws.rs.core.Response}s */ public class ConnectExceptionMapper implements ExceptionMapper { private static final Logger log = LoggerFactory.getLogger(ConnectExceptionMapper.class); @@ -42,14 +42,13 @@ public class ConnectExceptionMapper implements ExceptionMapper { public Response toResponse(Exception exception) { log.debug("Uncaught exception in REST call to /{}", uriInfo.getPath(), exception); - if (exception instanceof ConnectRestException) { - ConnectRestException restException = (ConnectRestException) exception; + if (exception instanceof ConnectRestException restException) { return Response.status(restException.statusCode()) .entity(new ErrorMessage(restException.errorCode(), restException.getMessage())) .build(); } - if (exception instanceof NotFoundException || exception instanceof javax.ws.rs.NotFoundException) { + if (exception instanceof 
NotFoundException || exception instanceof jakarta.ws.rs.NotFoundException) { return Response.status(Response.Status.NOT_FOUND) .entity(new ErrorMessage(Response.Status.NOT_FOUND.getStatusCode(), exception.getMessage())) .build(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java index f45f72ddd8bd3..0d45ea578be86 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java @@ -18,7 +18,7 @@ import org.apache.kafka.connect.errors.ConnectException; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; public class ConnectRestException extends ConnectException { private final int statusCode; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java index 6de327bf5578b..ea92f09bd4177 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java @@ -20,6 +20,7 @@ import org.apache.kafka.connect.runtime.Herder; import org.apache.kafka.connect.runtime.isolation.PluginDesc; import org.apache.kafka.connect.runtime.isolation.PluginType; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.rest.RestRequestTimeout; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo; @@ -29,8 +30,10 @@ import org.apache.kafka.connect.util.Stage; import org.apache.kafka.connect.util.StagedTimeoutException; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; + import java.time.Instant; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashSet; @@ -41,21 +44,20 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import javax.inject.Inject; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.Consumes; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.ws.rs.BadRequestException; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; @Path("/connector-plugins") @Produces(MediaType.APPLICATION_JSON) @@ -146,7 +148,7 @@ public List listConnectorPlugins( .filter(p -> PluginType.SINK.toString().equals(p.type()) || PluginType.SOURCE.toString().equals(p.type())) .collect(Collectors.toList())); } else { - 
return Collections.unmodifiableList(new ArrayList<>(connectorPlugins)); + return List.copyOf(connectorPlugins); } } } @@ -154,9 +156,18 @@ public List listConnectorPlugins( @GET @Path("/{pluginName}/config") @Operation(summary = "Get the configuration definition for the specified pluginName") - public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName) { + public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName, + final @QueryParam("version") @DefaultValue("latest") String version) { + + VersionRange range = null; + try { + range = PluginUtils.connectorVersionRequirement(version); + } catch (InvalidVersionSpecificationException e) { + throw new BadRequestException("Invalid version specification: " + version, e); + } + synchronized (this) { - return herder.connectorPluginConfig(pluginName); + return herder.connectorPluginConfig(pluginName, range); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java index 5722012b741af..efbf39d790bef 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java @@ -46,29 +46,28 @@ import java.util.List; import java.util.Map; -import javax.inject.Inject; -import javax.servlet.ServletContext; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PATCH; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; - import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.servlet.ServletContext; +import jakarta.ws.rs.BadRequestException; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PATCH; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; +import jakarta.ws.rs.core.UriInfo; import static org.apache.kafka.connect.runtime.rest.HerderRequestHandler.IdentityTranslator; import static org.apache.kafka.connect.runtime.rest.HerderRequestHandler.Translator; @@ -81,7 +80,7 @@ public class ConnectorsResource { private final Herder herder; private final HerderRequestHandler requestHandler; - @javax.ws.rs.core.Context + @jakarta.ws.rs.core.Context private ServletContext context; private final boolean isTopicTrackingDisabled; private final boolean isTopicTrackingResetDisabled; @@ -176,18 +175,6 @@ public Map getConnectorConfig(final @PathParam("connector") Stri return requestHandler.completeRequest(cb); } - @GET - @Path("/{connector}/tasks-config") - 
@Operation(deprecated = true, summary = "Get the configuration of all tasks for the specified connector") - public Map> getTasksConfig( - final @PathParam("connector") String connector) throws Throwable { - log.warn("The 'GET /connectors/{connector}/tasks-config' endpoint is deprecated and will be removed in the next major release. " - + "Please use the 'GET /connectors/{connector}/tasks' endpoint instead."); - FutureCallback>> cb = new FutureCallback<>(); - herder.tasksConfig(connector, cb); - return requestHandler.completeRequest(cb); - } - @GET @Path("/{connector}/status") @Operation(summary = "Get the status for the specified connector") diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java index b9756c381d99a..8ffec431f36de 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java @@ -30,18 +30,17 @@ import java.util.List; import java.util.Map; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.UriInfo; - import io.swagger.v3.oas.annotations.Operation; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.UriInfo; /** * Contains endpoints necessary for intra-cluster communication--that is, requests that @@ -66,7 +65,7 @@ protected InternalClusterResource(RestClient restClient, RestRequestTimeout requ /** * @return a {@link Herder} instance that can be used to satisfy the current request; may not be null - * @throws javax.ws.rs.NotFoundException if no such herder can be provided + * @throws jakarta.ws.rs.NotFoundException if no such herder can be provided */ protected abstract Herder herderForRequest(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java index 228c7cd67baf6..760d36a8fc3c3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java @@ -20,8 +20,8 @@ import org.apache.kafka.connect.runtime.rest.RestClient; import org.apache.kafka.connect.runtime.rest.RestRequestTimeout; -import javax.inject.Inject; -import javax.ws.rs.Path; +import jakarta.inject.Inject; +import jakarta.ws.rs.Path; @Path("/connectors") public class InternalConnectResource extends InternalClusterResource { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java index 44aa617bd4bae..dbbfb46375dfd 100644 --- 
a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java @@ -21,7 +21,7 @@ import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.slf4j.LoggerFactory; import java.util.List; @@ -29,20 +29,19 @@ import java.util.Map; import java.util.Objects; -import javax.inject.Inject; -import javax.ws.rs.Consumes; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; /** * A set of endpoints to adjust the log levels of runtime loggers. diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java index 8cdad7bc800f0..0af2983395ee0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java @@ -28,14 +28,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import javax.inject.Inject; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; +import jakarta.inject.Inject; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; @Path("/") @Produces(MediaType.APPLICATION_JSON) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java index 19b539c3405e4..09a827fba59ed 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java @@ -149,7 +149,7 @@ public int generation() { public synchronized void connectors(Callback> callback) { callback.onCompletion(null, connectors()); } - + @Override public synchronized void connectorInfo(String connName, Callback callback) { ConnectorInfo connectorInfo = connectorInfo(connName); @@ -378,7 +378,7 @@ public synchronized void restartConnectorAndTasks(RestartRequest request, Callba } Optional maybePlan = buildRestartPlan(request); - if (!maybePlan.isPresent()) { + if (maybePlan.isEmpty()) { cb.onCompletion(new NotFoundException("Status for connector " + connectorName + " not found", null), null); return; } @@ -624,9 +624,8 @@ public void 
cancel() { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof StandaloneHerderRequest)) + if (!(o instanceof StandaloneHerderRequest other)) return false; - StandaloneHerderRequest other = (StandaloneHerderRequest) o; return seq == other.seq; } @@ -635,15 +634,4 @@ public int hashCode() { return Objects.hash(seq); } } - - @Override - public void tasksConfig(String connName, Callback>> callback) { - Map> tasksConfig = buildTasksConfig(connName); - if (tasksConfig.isEmpty()) { - callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null); - return; - } - callback.onCompletion(null, tasksConfig); - } - } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetUtils.java index e35a5f12ee7ab..21452a175fbef 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetUtils.java @@ -109,7 +109,7 @@ public static void processPartitionKey(byte[] partitionKey, byte[] offsetValue, return; } - if (!(keyList.get(0) instanceof String)) { + if (!(keyList.get(0) instanceof String connectorName)) { log.warn("Ignoring offset partition key with an unexpected format for the first element in the partition key list. " + "Expected type: {}, actual type: {}", String.class.getName(), className(keyList.get(0))); return; @@ -123,7 +123,6 @@ public static void processPartitionKey(byte[] partitionKey, byte[] offsetValue, return; } - String connectorName = (String) keyList.get(0); Map partition = (Map) keyList.get(1); connectorPartitions.computeIfAbsent(connectorName, ignored -> new HashSet<>()); if (offsetValue == null) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java index ed09d4a37a005..56f559dc245e0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java @@ -20,7 +20,6 @@ import org.apache.kafka.connect.runtime.isolation.Plugins; import org.apache.kafka.connect.transforms.predicates.Predicate; -import java.io.PrintStream; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -53,29 +52,22 @@ private

<P extends Predicate<?>> DocInfo(Class<P> predicateClass, String overvie .sorted(Comparator.comparing(docInfo -> docInfo.predicateName)) .collect(Collectors.toList()); - private static void printPredicateHtml(PrintStream out, DocInfo docInfo) { - out.println("<div id=\"" + docInfo.predicateName + "\">"); - - out.print("<h5>"); - out.print("<a href=\"#" + docInfo.predicateName + "\">" + docInfo.predicateName + "</a>"); - out.println("</h5>"); - - out.println(docInfo.overview); - - out.println("<p/>"); - - out.println(docInfo.configDef.toHtml(6, key -> docInfo.predicateName + "_" + key)); - - out.println("</div>"); - } - - private static void printHtml(PrintStream out) { + private static String toHtml() { + StringBuilder b = new StringBuilder(); for (final DocInfo docInfo : PREDICATES) { - printPredicateHtml(out, docInfo); + b.append("<div id=\"" + docInfo.predicateName + "\">\n"); + b.append("<h5>"); + b.append("<a href=\"#" + docInfo.predicateName + "\">" + docInfo.predicateName + "</a>"); + b.append("</h5>\n"); + b.append(docInfo.overview + "\n"); + b.append("<p/>\n"); + b.append(docInfo.configDef.toHtml(6, key -> docInfo.predicateName + "_" + key) + "\n"); + b.append("</div>
          \n"); } + return b.toString(); } public static void main(String... args) { - printHtml(System.out); + System.out.println(toHtml()); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java index 2c7250eb588c3..100f938bd9b5d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java @@ -34,7 +34,6 @@ import org.apache.kafka.connect.transforms.TimestampRouter; import org.apache.kafka.connect.transforms.ValueToKey; -import java.io.PrintStream; import java.util.Arrays; import java.util.List; @@ -71,30 +70,23 @@ private DocInfo(String transformationName, String overview, ConfigDef configDef) new DocInfo(ValueToKey.class.getName(), ValueToKey.OVERVIEW_DOC, ValueToKey.CONFIG_DEF) ); - private static void printTransformationHtml(PrintStream out, DocInfo docInfo) { - out.println("
          "); - - out.print("
          "); - out.print("" + docInfo.transformationName + ""); - out.println("
          "); - - out.println(docInfo.overview); - - out.println("

          "); - - out.println(docInfo.configDef.toHtml(6, key -> docInfo.transformationName + "_" + key)); - - out.println("

          "); - } - - private static void printHtml(PrintStream out) { + private static String toHtml() { + StringBuilder b = new StringBuilder(); for (final DocInfo docInfo : TRANSFORMATIONS) { - printTransformationHtml(out, docInfo); + b.append("
          \n"); + b.append("
          "); + b.append("" + docInfo.transformationName + ""); + b.append("
          \n"); + b.append(docInfo.overview + "\n"); + b.append("

          \n"); + b.append(docInfo.configDef.toHtml(6, key -> docInfo.transformationName + "_" + key) + "\n"); + b.append("

          \n"); } + return b.toString(); } public static void main(String... args) { - printHtml(System.out); + System.out.println(toHtml()); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java index c09eba62a2377..fd62fc172f4cf 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java @@ -32,7 +32,7 @@ default void recordStage(Stage stage) { } default Callback chainStaging(Callback chained) { - return new Callback() { + return new Callback<>() { @Override public void recordStage(Stage stage) { Callback.this.recordStage(stage); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java index 23f6d8a9c4937..e36df1b7dbc57 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java @@ -111,34 +111,6 @@ public class KafkaBasedLog { // initialized as false for backward compatibility private volatile boolean reportErrorsToCallback = false; - /** - * Create a new KafkaBasedLog object. This does not start reading the log and writing is not permitted until - * {@link #start()} is invoked. - * - * @param topic the topic to treat as a log - * @param producerConfigs configuration options to use when creating the internal producer. At a minimum this must - * contain compatible serializer settings for the generic types used on this class. Some - * setting, such as the number of acks, will be overridden to ensure correct behavior of this - * class. - * @param consumerConfigs configuration options to use when creating the internal consumer. At a minimum this must - * contain compatible serializer settings for the generic types used on this class. Some - * setting, such as the auto offset reset policy, will be overridden to ensure correct - * behavior of this class. - * @param consumedCallback callback to invoke for each {@link ConsumerRecord} consumed when tailing the log - * @param time Time interface - * @param initializer the component that should be run when this log is {@link #start() started}; may be null - * @deprecated Replaced by {@link #KafkaBasedLog(String, Map, Map, Supplier, Callback, Time, java.util.function.Consumer)} - */ - @Deprecated - public KafkaBasedLog(String topic, - Map producerConfigs, - Map consumerConfigs, - Callback> consumedCallback, - Time time, - Runnable initializer) { - this(topic, producerConfigs, consumerConfigs, () -> null, consumedCallback, time, initializer != null ? admin -> initializer.run() : null); - } - /** * Create a new KafkaBasedLog object. This does not start reading the log and writing is not permitted until * {@link #start()} is invoked. 
@@ -159,12 +131,12 @@ public KafkaBasedLog(String topic, * @param initializer the function that should be run when this log is {@link #start() started}; may be null */ public KafkaBasedLog(String topic, - Map producerConfigs, - Map consumerConfigs, - Supplier topicAdminSupplier, - Callback> consumedCallback, - Time time, - java.util.function.Consumer initializer) { + Map producerConfigs, + Map consumerConfigs, + Supplier topicAdminSupplier, + Callback> consumedCallback, + Time time, + java.util.function.Consumer initializer) { this.topic = topic; this.producerConfigs = producerConfigs; this.consumerConfigs = consumerConfigs; @@ -212,7 +184,7 @@ public static KafkaBasedLog withExistingClients(String topic, ) { Objects.requireNonNull(topicAdmin); Objects.requireNonNull(readTopicPartition); - return new KafkaBasedLog(topic, + return new KafkaBasedLog<>(topic, Collections.emptyMap(), Collections.emptyMap(), () -> topicAdmin, @@ -266,8 +238,8 @@ public void start(boolean reportErrorsToCallback) { // Then create the producer and consumer producer = Optional.ofNullable(createProducer()); - if (!producer.isPresent()) - log.trace("Creating read-only KafkaBasedLog for topic " + topic); + if (producer.isEmpty()) + log.trace("Creating read-only KafkaBasedLog for topic {}", topic); consumer = createConsumer(); List partitions = new ArrayList<>(); @@ -308,13 +280,13 @@ public void start(boolean reportErrorsToCallback) { thread = new WorkThread(); thread.start(); - log.info("Finished reading KafkaBasedLog for topic " + topic); + log.info("Finished reading KafkaBasedLog for topic {}", topic); - log.info("Started KafkaBasedLog for topic " + topic); + log.info("Started KafkaBasedLog for topic {}", topic); } public void stop() { - log.info("Stopping KafkaBasedLog for topic " + topic); + log.info("Stopping KafkaBasedLog for topic {}", topic); synchronized (this) { stopRequested = true; @@ -338,7 +310,7 @@ public void stop() { // do not close the admin client, since we don't own it admin = null; - log.info("Stopped KafkaBasedLog for topic " + topic); + log.info("Stopped KafkaBasedLog for topic {}", topic); } /** @@ -466,16 +438,16 @@ protected boolean readPartition(TopicPartition topicPartition) { return true; } - private void poll(long timeoutMs) { + private void poll() { try { - ConsumerRecords records = consumer.poll(Duration.ofMillis(timeoutMs)); + ConsumerRecords records = consumer.poll(Duration.ofMillis(Integer.MAX_VALUE)); for (ConsumerRecord record : records) consumedCallback.onCompletion(null, record); } catch (WakeupException e) { // Expected on get() or stop(). 
The calling code should handle this throw e; } catch (KafkaException e) { - log.error("Error polling: " + e); + log.error("Error polling: ", e); if (reportErrorsToCallback) { consumedCallback.onCompletion(e, null); } @@ -507,7 +479,7 @@ private void readToLogEnd(boolean shouldRetry) { } else { log.trace("Behind end offset {} for {}; last-read offset is {}", endOffset, topicPartition, lastConsumedOffset); - poll(Integer.MAX_VALUE); + poll(); break; } } @@ -609,7 +581,7 @@ public void run() { } try { - poll(Integer.MAX_VALUE); + poll(); } catch (WakeupException e) { // See previous comment, both possible causes of this wakeup are handled by starting this loop again continue; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/RetryUtil.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/RetryUtil.java index a0a68e0e81e76..cb8d51c0a43fd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/RetryUtil.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/RetryUtil.java @@ -59,7 +59,7 @@ public static T retryUntilTimeout(Callable callable, Supplier des // visible for testing static T retryUntilTimeout(Callable callable, Supplier description, Duration timeoutDuration, long retryBackoffMs, Time time) throws Exception { - // if null supplier or string is provided, the message will be default to "callabe" + // if null supplier or string is provided, the message will be default to "callable" final String descriptionStr = Optional.ofNullable(description) .map(Supplier::get) .orElse("callable"); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SafeObjectInputStream.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SafeObjectInputStream.java index 0ad3889b5f0ea..df2da55278005 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SafeObjectInputStream.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SafeObjectInputStream.java @@ -20,29 +20,21 @@ import java.io.InputStream; import java.io.ObjectInputStream; import java.io.ObjectStreamClass; -import java.util.Collections; -import java.util.HashSet; import java.util.Set; public class SafeObjectInputStream extends ObjectInputStream { - protected static final Set DEFAULT_NO_DESERIALIZE_CLASS_NAMES; - - static { - - Set s = new HashSet<>(); - s.add("org.apache.commons.collections.functors.InvokerTransformer"); - s.add("org.apache.commons.collections.functors.InstantiateTransformer"); - s.add("org.apache.commons.collections4.functors.InvokerTransformer"); - s.add("org.apache.commons.collections4.functors.InstantiateTransformer"); - s.add("org.codehaus.groovy.runtime.ConvertedClosure"); - s.add("org.codehaus.groovy.runtime.MethodClosure"); - s.add("org.springframework.beans.factory.ObjectFactory"); - s.add("com.sun.org.apache.xalan.internal.xsltc.trax.TemplatesImpl"); - s.add("org.apache.xalan.xsltc.trax.TemplatesImpl"); - DEFAULT_NO_DESERIALIZE_CLASS_NAMES = Collections.unmodifiableSet(s); - } - + protected static final Set DEFAULT_NO_DESERIALIZE_CLASS_NAMES = Set.of( + "org.apache.commons.collections.functors.InvokerTransformer", + "org.apache.commons.collections.functors.InstantiateTransformer", + "org.apache.commons.collections4.functors.InvokerTransformer", + "org.apache.commons.collections4.functors.InstantiateTransformer", + "org.codehaus.groovy.runtime.ConvertedClosure", + "org.codehaus.groovy.runtime.MethodClosure", + "org.springframework.beans.factory.ObjectFactory", + 
"com.sun.org.apache.xalan.internal.xsltc.trax.TemplatesImpl", + "org.apache.xalan.xsltc.trax.TemplatesImpl" + ); public SafeObjectInputStream(InputStream in) throws IOException { super(in); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java index fb007314c798b..5393fd2a01310 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java @@ -122,10 +122,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof TopicCreationGroup)) { + if (!(o instanceof TopicCreationGroup that)) { return false; } - TopicCreationGroup that = (TopicCreationGroup) o; return Objects.equals(name, that.name) && numPartitions == that.numPartitions && replicationFactor == that.replicationFactor diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java index 90a0e96a78af5..17135a6936613 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java @@ -63,7 +63,7 @@ import java.util.stream.IntStream; import java.util.stream.Stream; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -109,9 +109,7 @@ public class BlockingConnectorTest { private static final String SINK_TASK_FLUSH = "SinkTask::flush"; private static final String SINK_TASK_PRE_COMMIT = "SinkTask::preCommit"; private static final String SINK_TASK_OPEN = "SinkTask::open"; - private static final String SINK_TASK_ON_PARTITIONS_ASSIGNED = "SinkTask::onPartitionsAssigned"; private static final String SINK_TASK_CLOSE = "SinkTask::close"; - private static final String SINK_TASK_ON_PARTITIONS_REVOKED = "SinkTask::onPartitionsRevoked"; private static final String SOURCE_TASK_INITIALIZE = "SourceTask::initialize"; private static final String SOURCE_TASK_POLL = "SourceTask::poll"; private static final String SOURCE_TASK_COMMIT = "SourceTask::commit"; @@ -738,16 +736,13 @@ public void commit() throws InterruptedException { super.commit(); } - @Override - @SuppressWarnings("deprecation") - public void commitRecord(SourceRecord record) throws InterruptedException { - block.maybeBlockOn(SOURCE_TASK_COMMIT_RECORD); - super.commitRecord(record); - } - @Override public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException { - block.maybeBlockOn(SOURCE_TASK_COMMIT_RECORD_WITH_METADATA); + if (metadata == null) { + block.maybeBlockOn(SOURCE_TASK_COMMIT_RECORD); + } else { + block.maybeBlockOn(SOURCE_TASK_COMMIT_RECORD_WITH_METADATA); + } super.commitRecord(record, metadata); } } @@ -869,25 +864,11 @@ public void open(Collection partitions) { super.open(partitions); } - @Override - @SuppressWarnings("deprecation") - public void onPartitionsAssigned(Collection partitions) { - block.maybeBlockOn(SINK_TASK_ON_PARTITIONS_ASSIGNED); - super.onPartitionsAssigned(partitions); - } - @Override public void close(Collection partitions) { block.maybeBlockOn(SINK_TASK_CLOSE); 
super.close(partitions); } - - @Override - @SuppressWarnings("deprecation") - public void onPartitionsRevoked(Collection partitions) { - block.maybeBlockOn(SINK_TASK_ON_PARTITIONS_REVOKED); - super.onPartitionsRevoked(partitions); - } } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java index 50e690057ec94..079887c361d24 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.integration; +import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.provider.FileConfigProvider; @@ -31,7 +32,6 @@ import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets; import org.apache.kafka.connect.runtime.rest.entities.CreateConnectorRequest; import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException; -import org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource; import org.apache.kafka.connect.sink.SinkConnector; import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.sink.SinkTask; @@ -54,7 +54,6 @@ import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.slf4j.event.Level; import java.io.File; import java.io.FileOutputStream; @@ -74,10 +73,11 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; +import static jakarta.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; +import static org.apache.kafka.clients.CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG; import static org.apache.kafka.common.config.AbstractConfig.CONFIG_PROVIDERS_CONFIG; import static org.apache.kafka.common.config.TopicConfig.DELETE_RETENTION_MS_CONFIG; import static org.apache.kafka.common.config.TopicConfig.SEGMENT_MS_CONFIG; @@ -566,35 +566,6 @@ public void testStoppedState() throws Exception { ); } - /** - * The GET /connectors/{connector}/tasks-config endpoint was deprecated in - * KIP-970 - * and is slated for removal in the next major release. This test verifies that the deprecation warning log is emitted on trying to use the - * deprecated endpoint. 
- */ - @Test - public void testTasksConfigDeprecation() throws Exception { - connect = connectBuilder.build(); - // start the clusters - connect.start(); - - connect.configureConnector(CONNECTOR_NAME, defaultSourceConnectorProps(TOPIC_NAME)); - connect.assertions().assertConnectorAndExactlyNumTasksAreRunning( - CONNECTOR_NAME, - NUM_TASKS, - "Connector tasks did not start in time" - ); - - try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(ConnectorsResource.class)) { - connect.requestGet(connect.endpointForResource("connectors/" + CONNECTOR_NAME + "/tasks-config")); - List logEvents = logCaptureAppender.getEvents(); - assertEquals(1, logEvents.size()); - assertEquals(Level.WARN.toString(), logEvents.get(0).getLevel()); - assertTrue(logEvents.get(0).getMessage().contains("deprecated")); - } - - } - @Test public void testCreateConnectorWithPausedInitialState() throws Exception { connect = connectBuilder.build(); @@ -840,6 +811,7 @@ public void testRequestTimeouts() throws Exception { // Workaround for KAFKA-15676, which can cause the scheduled rebalance delay to // be spuriously triggered after the group coordinator for a Connect cluster is bounced workerProps.put(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, "0"); + workerProps.put(METADATA_RECOVERY_STRATEGY_CONFIG, MetadataRecoveryStrategy.NONE.name); useFixedBrokerPort(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java index a127f85b12de7..ef55e0b3258b5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.connect.storage.StringConverter; import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @@ -49,19 +48,15 @@ public class ConnectorClientPolicyIntegrationTest { private static final int NUM_WORKERS = 1; private static final String CONNECTOR_NAME = "simple-conn"; - @AfterEach - public void close() { - } - @Test - public void testCreateWithOverridesForNonePolicy() throws Exception { + public void testCreateWithOverridesForNonePolicy() { Map props = basicConnectorConfig(); props.put(ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + SaslConfigs.SASL_JAAS_CONFIG, "sasl"); assertFailCreateConnector("None", props); } @Test - public void testCreateWithNotAllowedOverridesForPrincipalPolicy() throws Exception { + public void testCreateWithNotAllowedOverridesForPrincipalPolicy() { Map props = basicConnectorConfig(); props.put(ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + SaslConfigs.SASL_JAAS_CONFIG, "sasl"); props.put(ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); @@ -98,7 +93,7 @@ public void testCreateWithAllowedOverridesForDefaultPolicy() throws Exception { assertPassCreateConnector(null, props); } - private EmbeddedConnectCluster connectClusterWithPolicy(String policy) throws InterruptedException { + private EmbeddedConnectCluster connectClusterWithPolicy(String policy) { // setup Connect worker properties Map workerProps = new HashMap<>(); workerProps.put(OFFSET_COMMIT_INTERVAL_MS_CONFIG, 
String.valueOf(5_000)); @@ -125,7 +120,7 @@ private EmbeddedConnectCluster connectClusterWithPolicy(String policy) throws In return connect; } - private void assertFailCreateConnector(String policy, Map props) throws InterruptedException { + private void assertFailCreateConnector(String policy, Map props) { EmbeddedConnectCluster connect = connectClusterWithPolicy(policy); try { connect.configureConnector(CONNECTOR_NAME, props); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java index 1c398a2239673..9b76bf2ce64cb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java @@ -40,7 +40,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; @@ -86,7 +86,7 @@ public void setup(TestInfo testInfo) { connectorHandle = RuntimeHandles.get().connectorHandle(connectorName); } - private void startOrReuseConnectWithNumWorkers(int numWorkers) throws Exception { + private void startOrReuseConnectWithNumWorkers(int numWorkers) { connect = CONNECT_CLUSTERS.computeIfAbsent(numWorkers, n -> { // setup Connect worker properties Map workerProps = new HashMap<>(); @@ -123,7 +123,7 @@ public static void close() { } @Test - public void testRestartUnknownConnectorNoParams() throws Exception { + public void testRestartUnknownConnectorNoParams() { String connectorName = "Unknown"; // build a Connect cluster backed by a Kafka KRaft cluster @@ -137,14 +137,14 @@ public void testRestartUnknownConnectorNoParams() throws Exception { } @Test - public void testRestartUnknownConnector() throws Exception { + public void testRestartUnknownConnector() { restartUnknownConnector(false, false); restartUnknownConnector(false, true); restartUnknownConnector(true, false); restartUnknownConnector(true, true); } - private void restartUnknownConnector(boolean onlyFailed, boolean includeTasks) throws Exception { + private void restartUnknownConnector(boolean onlyFailed, boolean includeTasks) { String connectorName = "Unknown"; // build a Connect cluster backed by a Kafka KRaft cluster diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java index 256629f4b11bd..6f386267e21fc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java @@ -74,7 +74,6 @@ public class ErrorHandlingIntegrationTest { private static final Logger log = LoggerFactory.getLogger(ErrorHandlingIntegrationTest.class); - private static final int NUM_WORKERS = 1; private static final String DLQ_TOPIC = "my-connector-errors"; private static final String CONNECTOR_NAME = "error-conn"; private static final String TASK_ID = "error-conn-0"; diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java index a625dc983e8a8..d85ac9a440cb4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java @@ -36,7 +36,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java index 78c9a61406559..e0f395f442508 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java @@ -55,9 +55,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; +import static jakarta.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java index 8ccc31baa86c9..1af52dba59f89 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java @@ -36,11 +36,11 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; +import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java index 7969471918e1d..86473ffe613b4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java @@ -31,8 +31,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; -import static 
javax.ws.rs.core.Response.Status.FORBIDDEN; +import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST; +import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java index 69a65ba7bfbde..7a48660629518 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java @@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.integration.BlockingConnectorTest.Block.BLOCK_CONFIG; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_START; @@ -184,8 +184,7 @@ private Map testSetLoggingLevel( newLevels, e -> hasNamespace(e, namespace) && (!level(e).equals(level) - || !isModified(e) - || lastModified(e) < requestTime + || (isModified(e) && lastModified(e) < requestTime) ) ); assertEquals( diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopCounter.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopCounter.java index 2591189643084..42b660c018297 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopCounter.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopCounter.java @@ -115,19 +115,6 @@ public StartAndStopLatch expectedRestarts(int expectedRestarts) { return expectedRestarts(expectedRestarts, expectedRestarts); } - /** - * Obtain a {@link StartAndStopLatch} that can be used to wait until the expected number of restarts - * has been completed. - * - * @param expectedRestarts the expected number of restarts - * @param dependents any dependent latches that must also complete in order for the - * resulting latch to complete - * @return the latch; never null - */ - public StartAndStopLatch expectedRestarts(int expectedRestarts, List dependents) { - return expectedRestarts(expectedRestarts, expectedRestarts, dependents); - } - /** * Obtain a {@link StartAndStopLatch} that can be used to wait until the expected number of starts * has been completed. 
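Reviewer note: the StandaloneWorkerIntegrationTest change above tightens the wait condition used after setting a log level over REST. Previously an entry with no recorded modification time was always treated as stale; after the change an entry only counts as stale if its level is wrong, or if it does carry a modification time and that time predates the request. A rough restatement of the fixed check, with parameter names of my own choosing rather than the test's helpers:

    // Illustrative helper, not part of the patch: an entry is still "stale" only if its level is
    // wrong, or it was modified before the request that set the new level was issued.
    static boolean stale(String currentLevel, String expectedLevel, Long lastModified, long requestTime) {
        return !currentLevel.equals(expectedLevel)
                || (lastModified != null && lastModified < requestTime);
    }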
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java index 105d238d56f27..50c7d829a4a9e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java @@ -28,8 +28,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.Map; @@ -58,8 +56,6 @@ @Tag("integration") public class TransformationIntegrationTest { - private static final Logger log = LoggerFactory.getLogger(TransformationIntegrationTest.class); - private static final int NUM_RECORDS_PRODUCED = 2000; private static final int NUM_TOPIC_PARTITIONS = 3; private static final long RECORD_TRANSFER_DURATION_MS = TimeUnit.SECONDS.toMillis(30); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java index 420cf5e745ced..6a5556fcf50ac 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java @@ -50,6 +50,7 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConfigBackingStore; +import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; @@ -61,6 +62,7 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; @@ -177,6 +179,7 @@ public class AbstractHerderTest { private final ConnectorClientConfigOverridePolicy noneConnectorClientConfigOverridePolicy = new NoneConnectorClientConfigOverridePolicy(); @Mock private Worker worker; + @Mock private WorkerConfig workerConfig; @Mock private WorkerConfigTransformer transformer; @Mock private ConfigBackingStore configStore; @Mock private StatusBackingStore statusStore; @@ -206,10 +209,10 @@ public void testConnectorClientConfigOverridePolicyClose() { public void testConnectorStatus() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); + when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); - when(herder.plugins()).thenReturn(plugins); + AbstractHerder herder = testHerder(); when(herder.rawConfig(connectorName)).thenReturn(Collections.singletonMap( ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName() @@ -240,10 +243,10 @@ public void testConnectorStatus() { public void testConnectorStatusMissingPlugin() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - AbstractHerder herder = testHerder(); 
+ when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("Unable to find class")); + when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("Unable to find class")); - when(herder.plugins()).thenReturn(plugins); + AbstractHerder herder = testHerder(); when(herder.rawConfig(connectorName)) .thenReturn(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); @@ -271,10 +274,11 @@ public void testConnectorStatusMissingPlugin() { @Test public void testConnectorInfo() { - AbstractHerder herder = testHerder(); - when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); - when(herder.plugins()).thenReturn(plugins); + when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); + when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); @@ -310,10 +314,11 @@ public void testResumeConnector() { @Test public void testConnectorInfoMissingPlugin() { - AbstractHerder herder = testHerder(); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); - when(herder.plugins()).thenReturn(plugins); + when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); + when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); @@ -481,7 +486,7 @@ public void testConfigValidationMissingName() { Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); // Base connector config has 15 fields, connector's configs add 7 - assertEquals(22, infos.size()); + assertEquals(26, infos.size()); // Missing name should generate an error assertEquals(ConnectorConfig.NAME_CONFIG, infos.get(ConnectorConfig.NAME_CONFIG).configValue().name()); @@ -555,12 +560,14 @@ public void testConfigValidationTopicsRegexWithDlq() { } @Test - public void testConfigValidationTransformsExtendResults() { + @SuppressWarnings("rawtypes") + public void testConfigValidationTransformsExtendResults() throws ClassNotFoundException { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); // 2 transform aliases defined -> 2 plugin lookups - when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); + Mockito.lenient().when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); + Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); // Define 2 transformations. One has a class defined and so can get embedded configs, the other is missing // class info that should generate an error. @@ -571,6 +578,7 @@ public void testConfigValidationTransformsExtendResults() { config.put(ConnectorConfig.TRANSFORMS_CONFIG + ".xformA.type", SampleTransformation.class.getName()); config.put("required", "value"); // connector required config ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); + assertEquals(herder.connectorType(config), ConnectorType.SOURCE); // We expect there to be errors due to the missing name and .... 
Note that these assertions depend heavily on @@ -592,7 +600,7 @@ public void testConfigValidationTransformsExtendResults() { assertEquals(1, result.errorCount()); Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(27, infos.size()); + assertEquals(33, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); @@ -607,12 +615,15 @@ public void testConfigValidationTransformsExtendResults() { } @Test - public void testConfigValidationPredicatesExtendResults() { + @SuppressWarnings("rawtypes") + public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundException { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); - when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); - when(plugins.predicates()).thenReturn(Collections.singleton(predicatePluginDesc())); + Mockito.lenient().when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); + Mockito.lenient().when(plugins.predicates()).thenReturn(Collections.singleton(predicatePluginDesc())); + Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); + Mockito.lenient().when(plugins.newPlugin(SamplePredicate.class.getName(), null, classLoader)).thenReturn(new SamplePredicate()); // Define 2 predicates. One has a class defined and so can get embedded configs, the other is missing // class info that should generate an error. 
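Reviewer note: several AbstractHerderTest stubs above are wrapped in Mockito.lenient(), presumably because the class runs with strict stubbing, where a when(...) stub that a given code path never invokes fails the test with UnnecessaryStubbingException. A self-contained illustration of that interaction (not taken from the patch):

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.ExtendWith;
    import org.mockito.junit.jupiter.MockitoExtension;
    import org.mockito.junit.jupiter.MockitoSettings;
    import org.mockito.quality.Strictness;

    import java.util.List;

    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.mockito.Mockito.lenient;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    @ExtendWith(MockitoExtension.class)
    @MockitoSettings(strictness = Strictness.STRICT_STUBS)
    class LenientStubbingExampleTest {

        @Test
        @SuppressWarnings("unchecked")
        void unusedStubMustBeLenient() {
            List<String> names = mock(List.class);
            // Never exercised below: without lenient() strict stubbing reports it as unnecessary.
            lenient().when(names.size()).thenReturn(3);
            when(names.isEmpty()).thenReturn(false);
            assertFalse(names.isEmpty());
        }
    }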
@@ -649,7 +660,7 @@ public void testConfigValidationPredicatesExtendResults() { assertEquals(1, result.errorCount()); Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(29, infos.size()); + assertEquals(36, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); assertTrue(infos.get("transforms.xformA.type").configValue().errors().isEmpty()); @@ -710,8 +721,8 @@ public void testConfigValidationPrincipalOnlyOverride() { ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - // Base connector config has 15 fields, connector's configs add 7, and 2 producer overrides - assertEquals(24, result.values().size()); + // Base connector config has 19 fields, connector's configs add 7, and 2 producer overrides + assertEquals(28, result.values().size()); assertTrue(result.values().stream().anyMatch( configInfo -> ackConfigKey.equals(configInfo.configValue().name()) && !configInfo.configValue().errors().isEmpty())); assertTrue(result.values().stream().anyMatch( @@ -1040,8 +1051,8 @@ private void testConnectorPluginConfig( ) throws ClassNotFoundException { AbstractHerder herder = testHerder(); - when(plugins.pluginClass(pluginName)).then(invocation -> newPluginInstance.get().getClass()); - when(plugins.newPlugin(anyString())).then(invocation -> newPluginInstance.get()); + when(plugins.pluginClass(pluginName, null)).then(invocation -> newPluginInstance.get().getClass()); + when(plugins.newPlugin(anyString(), any())).then(invocation -> newPluginInstance.get()); when(herder.plugins()).thenReturn(plugins); List configs = herder.connectorPluginConfig(pluginName); @@ -1060,7 +1071,7 @@ public void testGetConnectorConfigDefWithBadName() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString())).thenThrow(new ClassNotFoundException()); + when(plugins.pluginClass(anyString(), any())).thenThrow(new ClassNotFoundException()); assertThrows(NotFoundException.class, () -> herder.connectorPluginConfig(connName)); } @@ -1070,17 +1081,17 @@ public void testGetConnectorConfigDefWithInvalidPluginType() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString())).thenReturn((Class) Object.class); - when(plugins.newPlugin(anyString())).thenReturn(new DirectoryConfigProvider()); + when(plugins.pluginClass(anyString(), any())).thenReturn((Class) Object.class); + when(plugins.newPlugin(anyString(), any())).thenReturn(new DirectoryConfigProvider()); assertThrows(BadRequestException.class, () -> herder.connectorPluginConfig(connName)); } @Test public void testGetConnectorTypeWithMissingPlugin() { String connName = "AnotherPlugin"; - AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); + when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); + AbstractHerder herder = testHerder(); assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); } @@ -1196,7 +1207,7 @@ protected void 
addConfigKey(Map keys, String name, keys.putAll(configDef.configKeys()); } - protected void addValue(List values, String name, String value, String...errors) { + protected void addValue(List values, String name, String value, String... errors) { values.add(new ConfigValue(name, value, new ArrayList<>(), Arrays.asList(errors))); } @@ -1211,7 +1222,7 @@ protected void assertNoInfoKey(ConfigInfos infos, String name) { assertNull(info.configKey()); } - protected void assertInfoValue(ConfigInfos infos, String name, String value, String...errors) { + protected void assertInfoValue(ConfigInfos infos, String name, String value, String... errors) { ConfigValueInfo info = findInfo(infos, name).configValue(); assertEquals(name, info.name()); assertEquals(value, info.value()); @@ -1232,7 +1243,7 @@ private void testConfigProviderRegex(String rawConnConfig) { private void testConfigProviderRegex(String rawConnConfig, boolean expected) { Set keys = keysWithVariableValues(Collections.singletonMap("key", rawConnConfig), ConfigTransformer.DEFAULT_PATTERN); - boolean actual = keys != null && !keys.isEmpty() && keys.contains("key"); + boolean actual = !keys.isEmpty() && keys.contains("key"); assertEquals(expected, actual, String.format("%s should have matched regex", rawConnConfig)); } @@ -1244,15 +1255,14 @@ private AbstractHerder createConfigValidationHerder(Class c private AbstractHerder createConfigValidationHerder(Class connectorClass, ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy, int countOfCallingNewConnector) { - - AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); - // Call to validateConnectorConfig when(worker.configTransformer()).thenReturn(transformer); @SuppressWarnings("unchecked") final ArgumentCaptor> mapArgumentCaptor = ArgumentCaptor.forClass(Map.class); when(transformer.transform(mapArgumentCaptor.capture())).thenAnswer(invocation -> mapArgumentCaptor.getValue()); when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); final Connector connector; try { connector = connectorClass.getConstructor().newInstance(); @@ -1275,14 +1285,17 @@ private AbstractHerder testHerder(ConnectorClientConfigOverridePolicy connectorC .defaultAnswer(CALLS_REAL_METHODS)); } + @SuppressWarnings({"unchecked", "rawtypes"}) private void mockValidationIsolation(String connectorClass, Connector connector) { - when(plugins.newConnector(connectorClass)).thenReturn(connector); - when(plugins.connectorLoader(connectorClass)).thenReturn(classLoader); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(worker.config()).thenReturn(workerConfig); + when(plugins.newConnector(anyString(), any())).thenReturn(connector); + when(plugins.pluginLoader(connectorClass, null)).thenReturn(classLoader); when(plugins.withClassLoader(classLoader)).thenReturn(loaderSwap); } private void verifyValidationIsolation() { - verify(plugins).newConnector(anyString()); + verify(plugins).newConnector(anyString(), any()); verify(plugins).withClassLoader(classLoader); verify(loaderSwap).close(); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java index 0cb4db7064726..f33e9bc514b6c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java @@ -80,6 +80,7 @@ import java.util.concurrent.TimeoutException; import java.util.function.Supplier; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; @@ -264,7 +265,7 @@ public void testSendRecordsConvertsData() { assertArrayEquals(SERIALIZED_KEY, sent.getValue().key()); assertArrayEquals(SERIALIZED_RECORD, sent.getValue().value()); - + verifyTaskGetTopic(); verifyTopicCreation(); } @@ -362,8 +363,8 @@ public void testHeadersWithCustomConverter() throws Exception { StringConverter stringConverter = new StringConverter(); SampleConverterWithHeaders testConverter = new SampleConverterWithHeaders(); - createWorkerTask(stringConverter, testConverter, stringConverter, RetryWithToleranceOperatorTest.noopOperator(), - Collections::emptyList); + createWorkerTask(stringConverter, testConverter, stringConverter, RetryWithToleranceOperatorTest.noneOperator(), + Collections::emptyList, transformationChain); expectSendRecord(null); expectApplyTransformationChain(); @@ -706,6 +707,118 @@ public void testSendRecordsRetriableException() { verify(transformationChain, times(2)).apply(any(), eq(record3)); } + @Test + public void testSendRecordsFailedTransformationErrorToleranceNone() { + SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.noneOperator(); + TransformationChain transformationChainRetriableException = + WorkerTestUtils.getTransformationChain(retryWithToleranceOperator, List.of(new RetriableException("Test"), record1)); + createWorkerTask(transformationChainRetriableException, retryWithToleranceOperator); + + expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); + + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + + workerTask.toSend = Arrays.asList(record1); + + // The transformation errored out so the error should be re-raised by sendRecords with error tolerance None + Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); + assertTrue(exception.getMessage().contains("Tolerance exceeded")); + + // Ensure the transformation was called + verify(transformationChainRetriableException, times(1)).apply(any(), eq(record1)); + + // The second transform call will succeed, batch should succeed at sending the one record (none were skipped) + assertTrue(workerTask.sendRecords()); + verifySendRecord(1); + } + + @Test + public void testSendRecordsFailedTransformationErrorToleranceAll() { + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.allOperator(); + TransformationChain transformationChainRetriableException = WorkerTestUtils.getTransformationChain( + retryWithToleranceOperator, + List.of(new RetriableException("Test"))); + + createWorkerTask(transformationChainRetriableException, retryWithToleranceOperator); + + SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); 
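Reviewer note: the new tests added to AbstractWorkerSourceTaskTest pin down errors.tolerance behaviour for failures in the transformation and conversion steps: with tolerance "none" the batch aborts with a "Tolerance exceeded" ConnectException, while with tolerance "all" the failing record is skipped and the batch still reports success. As a deliberately simplified, non-Kafka sketch of that contract (the real logic, including retries and error metrics, lives in RetryWithToleranceOperator):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    enum Tolerance { NONE, ALL }

    final class ToleranceSketch {
        // Apply a transform to each record; NONE re-raises the first failure, ALL drops the record.
        static <R> List<R> apply(List<R> batch, Function<R, R> transform, Tolerance tolerance) {
            List<R> out = new ArrayList<>();
            for (R record : batch) {
                try {
                    R transformed = transform.apply(record);
                    if (transformed != null) {
                        out.add(transformed);
                    }
                } catch (RuntimeException e) {
                    if (tolerance == Tolerance.NONE) {
                        throw new RuntimeException("Tolerance exceeded in error handler", e);
                    }
                    // Tolerance.ALL: skip the failed record and continue with the rest of the batch.
                }
            }
            return out;
        }
    }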
+ + expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); + + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + + workerTask.toSend = Arrays.asList(record1); + + // The transformation errored out so the error should be ignored & the record skipped with error tolerance all + assertTrue(workerTask.sendRecords()); + + // Ensure the transformation was called + verify(transformationChainRetriableException, times(1)).apply(any(), eq(record1)); + } + + @Test + public void testSendRecordsConversionExceptionErrorToleranceNone() { + SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, TOPIC, 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.noneOperator(); + List results = Stream.of(record1, record2, record3) + .collect(Collectors.toList()); + TransformationChain chain = WorkerTestUtils.getTransformationChain( + retryWithToleranceOperator, + results); + createWorkerTask(chain, retryWithToleranceOperator); + + // When we try to convert the key/value of each record, throw an exception + throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); + + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); + + workerTask.toSend = Arrays.asList(record1, record2, record3); + + // Send records should fail when errors.tolerance is none and the conversion call fails + Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); + assertTrue(exception.getMessage().contains("Tolerance exceeded")); + assertThrows(ConnectException.class, workerTask::sendRecords); + assertThrows(ConnectException.class, workerTask::sendRecords); + + // Set the conversion call to succeed, batch should succeed at sending all three records (none were skipped) + expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); + assertTrue(workerTask.sendRecords()); + verifySendRecord(3); + } + + @Test + public void testSendRecordsConversionExceptionErrorToleranceAll() { + SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, TOPIC, 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); + + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.allOperator(); + List results = Stream.of(record1, record2, record3) + .collect(Collectors.toList()); + TransformationChain chain = WorkerTestUtils.getTransformationChain( + retryWithToleranceOperator, + results); + createWorkerTask(chain, retryWithToleranceOperator); + + // When we try to convert the key/value of each record, throw an exception + throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); + + workerTask.toSend = Arrays.asList(record1, record2, record3); + + // With 
errors.tolerance to all, the faiiled conversion should simply skip the record, and record successful batch + assertTrue(workerTask.sendRecords()); + } + private void expectSendRecord(Headers headers) { if (headers != null) expectConvertHeadersAndKeyValue(headers, TOPIC); @@ -806,6 +919,20 @@ private void expectConvertHeadersAndKeyValue(Headers headers, String topic) { assertEquals(valueConverter.fromConnectData(topic, headers, RECORD_SCHEMA, RECORD), SERIALIZED_RECORD); } + private void throwExceptionWhenConvertKey(Headers headers, String topic) { + if (headers.iterator().hasNext()) { + when(headerConverter.fromConnectHeader(anyString(), anyString(), eq(Schema.STRING_SCHEMA), + anyString())) + .thenAnswer((Answer) invocation -> { + String headerValue = invocation.getArgument(3, String.class); + return headerValue.getBytes(StandardCharsets.UTF_8); + }); + } + + when(keyConverter.fromConnectData(eq(topic), any(Headers.class), eq(KEY_SCHEMA), eq(KEY))) + .thenThrow(new RetriableException("Failed to convert key")); + } + private void expectApplyTransformationChain() { when(transformationChain.apply(any(), any(SourceRecord.class))) .thenAnswer(AdditionalAnswers.returnsSecondArg()); @@ -817,12 +944,19 @@ private RecordHeaders emptyHeaders() { return new RecordHeaders(); } + private void createWorkerTask(TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { + createWorkerTask(keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, + transformationChain); + } + private void createWorkerTask() { - createWorkerTask(keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noopOperator(), Collections::emptyList); + createWorkerTask( + keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); } private void createWorkerTask(Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, - RetryWithToleranceOperator retryWithToleranceOperator, Supplier>> errorReportersSupplier) { + RetryWithToleranceOperator retryWithToleranceOperator, Supplier>> errorReportersSupplier, + TransformationChain transformationChain) { workerTask = new AbstractWorkerSourceTask( taskId, sourceTask, statusListener, TargetState.STARTED, keyConverter, valueConverter, headerConverter, transformationChain, sourceTaskContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java index 6092f8ca7bdc7..a5af1d134692c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java @@ -41,6 +41,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ConnectorConfigTest> { @@ -455,13 +457,19 @@ public static class Value> extends AbstractKeyValueTr } @Test - public void testEnrichedConfigDef() { + @SuppressWarnings("rawtypes") + public void testEnrichedConfigDef() throws ClassNotFoundException { String alias = "hdt"; String prefix = ConnectorConfig.TRANSFORMS_CONFIG + 
"." + alias + "."; Map props = new HashMap<>(); props.put(ConnectorConfig.TRANSFORMS_CONFIG, alias); + props.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, TestConnector.class.getName()); props.put(prefix + "type", HasDuplicateConfigTransformation.class.getName()); - ConfigDef def = ConnectorConfig.enrich(MOCK_PLUGINS, new ConfigDef(), props, false); + Plugins mockPlugins = mock(Plugins.class); + when(mockPlugins.newPlugin(HasDuplicateConfigTransformation.class.getName(), + null, (ClassLoader) null)).thenReturn(new HasDuplicateConfigTransformation()); + when(mockPlugins.transformations()).thenReturn(Collections.emptySet()); + ConfigDef def = ConnectorConfig.enrich(mockPlugins, new ConfigDef(), props, false); assertEnrichedConfigDef(def, prefix, HasDuplicateConfigTransformation.MUST_EXIST_KEY, ConfigDef.Type.BOOLEAN); assertEnrichedConfigDef(def, prefix, TransformationStage.PREDICATE_CONFIG, ConfigDef.Type.STRING); assertEnrichedConfigDef(def, prefix, TransformationStage.NEGATE_CONFIG, ConfigDef.Type.BOOLEAN); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java index 0974f35d16c71..f4374d18500ea 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java @@ -74,8 +74,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.time.Duration; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -408,15 +406,6 @@ private void assertErrorHandlingMetricValue(String name, double expected) { assertEquals(expected, measured, 0.001d); } - private void verifyCloseSource() throws IOException { - verify(producer).close(any(Duration.class)); - verify(admin).close(any(Duration.class)); - verify(offsetReader).close(); - verify(offsetStore).stop(); - // headerConverter.close() can throw IOException - verify(headerConverter).close(); - } - private void expectTopicCreation(String topic) { if (enableTopicCreation) { when(admin.describeTopics(topic)).thenReturn(Collections.emptyMap()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java index be3dc2401ad69..4ee0f61572cdd 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java @@ -278,7 +278,7 @@ private void createWorkerTask(TargetState initialState) { private void createWorkerTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, - config, clusterConfigState, metrics, errorHandlingMetrics, plugins.delegatingLoader(), time, RetryWithToleranceOperatorTest.noopOperator(), statusBackingStore, + config, clusterConfigState, metrics, errorHandlingMetrics, plugins.delegatingLoader(), time, RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, sourceConfig, 
Runnable::run, preProducerCheck, postProducerCheck, Collections::emptyList); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java index 3dbe688a076a2..a965a061f82b5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java @@ -20,231 +20,165 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; -import org.apache.log4j.Hierarchy; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; + import java.util.List; import java.util.Map; -import java.util.Vector; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import static org.apache.logging.log4j.Level.DEBUG; +import static org.apache.logging.log4j.Level.ERROR; +import static org.apache.logging.log4j.Level.INFO; +import static org.apache.logging.log4j.Level.WARN; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) public class LoggersTest { - private static final long INITIAL_TIME = 1696951712135L; + private final LoggerContext context = (LoggerContext) LogManager.getContext(false); + private Loggers loggers; private Time time; @BeforeEach public void setup() { time = new MockTime(0, INITIAL_TIME, 0); + loggers = new Loggers(time); } @Test - public void testGetLoggersIgnoresNullLevels() { - Logger root = logger("root"); - - Logger a = logger("a"); - a.setLevel(null); - Logger b = logger("b"); - b.setLevel(Level.INFO); + public void testLevelWithNullLoggerName() { + NullPointerException exception = assertThrows(NullPointerException.class, () -> loggers.level(null)); + assertEquals("Logger may not be null", exception.getMessage()); + } - Loggers loggers = new TestLoggers(root, a, b); + @Test + public void testLevelWithValidRootLoggerNames() { + assertEquals( + loggers.level(""), + loggers.level("root"), + "Root logger level should be the same whether accessed via empty string or 'root' name" + ); + } - Map expectedLevels = Collections.singletonMap( - "b", - new LoggerLevel(Level.INFO.toString(), null) + @Test + public void testLevelWithExistLoggerName() { + loggers.setLevel("foo", DEBUG); + assertEquals(new LoggerLevel(DEBUG.name(), INITIAL_TIME), + loggers.level("foo") ); - Map actualLevels = loggers.allLevels(); - assertEquals(expectedLevels, actualLevels); } @Test - public void testGetLoggerFallsBackToEffectiveLogLevel() { - Logger root = logger("root"); - root.setLevel(Level.ERROR); + public void 
testLevelWithNonExistLoggerName() { + assertNull(loggers.level("another-dummy"), "Unconfigured logger should return null"); + } - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); - a.setLevel(null); - Logger b = hierarchy.getLogger("b"); - b.setLevel(Level.INFO); + @Test + public void testLevelWithNewlyCreatedLogger() { + loggers.setLevel("dummy", ERROR); + assertEquals( + new LoggerLevel(ERROR.name(), time.milliseconds()), + loggers.level("dummy"), + "Newly created logger should have the level we just set" + ); + } - Loggers loggers = new TestLoggers(root, a, b); + @Test + public void testAllLevelsAfterCreatingNewLogger() { + loggers.setLevel("foo", WARN); + loggers.setLevel("bar", ERROR); + Map loggerToLevel = loggers.allLevels(); + Map expectedLevels = Map.of( + "foo", new LoggerLevel(WARN.name(), INITIAL_TIME), + "bar", new LoggerLevel(ERROR.name(), INITIAL_TIME) + ); - LoggerLevel expectedLevel = new LoggerLevel(Level.ERROR.toString(), null); - LoggerLevel actualLevel = loggers.level("a"); - assertEquals(expectedLevel, actualLevel); + assertTrue(loggerToLevel.entrySet().containsAll(expectedLevels.entrySet())); } @Test - public void testGetUnknownLogger() { - Logger root = logger("root"); - root.setLevel(Level.ERROR); - - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); - a.setLevel(null); - Logger b = hierarchy.getLogger("b"); - b.setLevel(Level.INFO); + public void testSetLevelWithNullNameSpaceOrNullLevel() { + NullPointerException exception = assertThrows(NullPointerException.class, () -> loggers.setLevel(null, null)); + assertEquals("Logging namespace may not be null", exception.getMessage()); - Loggers loggers = new TestLoggers(root, a, b); + exception = assertThrows(NullPointerException.class, () -> loggers.setLevel("dummy", null)); + assertEquals("Level may not be null", exception.getMessage()); + } - LoggerLevel level = loggers.level("c"); - assertNull(level); + @Test + public void testSetLevelWithValidRootLoggerNames() { + loggers.setLevel("", ERROR); + List setLevelResultWithRoot = loggers.setLevel("root", ERROR); + assertTrue(setLevelResultWithRoot.isEmpty(), + "Setting level with empty string ('') and 'root' should affect the same set of loggers - " + + "when setting the same level twice, second call should return empty list indicating no loggers were affected"); } @Test public void testSetLevel() { - Logger root = logger("root"); - root.setLevel(Level.ERROR); - - Logger x = logger("a.b.c.p.X"); - Logger y = logger("a.b.c.p.Y"); - Logger z = logger("a.b.c.p.Z"); - Logger w = logger("a.b.c.s.W"); - x.setLevel(Level.INFO); - y.setLevel(Level.INFO); - z.setLevel(Level.INFO); - w.setLevel(Level.INFO); - - // We don't explicitly register a logger for a.b.c.p, so it won't appear in the list of current loggers; - // one should be created by the Loggers instance when we set the level - TestLoggers loggers = new TestLoggers(root, x, y, z, w); - - List modified = loggers.setLevel("a.b.c.p", Level.DEBUG); - assertEquals(Arrays.asList("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); - assertEquals(Level.DEBUG.toString(), loggers.level("a.b.c.p").level()); - assertEquals(Level.DEBUG, x.getLevel()); - assertEquals(Level.DEBUG, y.getLevel()); - assertEquals(Level.DEBUG, z.getLevel()); - - LoggerLevel expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME); - LoggerLevel actualLevel = loggers.level("a.b.c.p"); - assertEquals(expectedLevel, actualLevel); - - // Sleep a little and adjust the level of 
a leaf logger - time.sleep(10); - loggers.setLevel("a.b.c.p.X", Level.ERROR); - expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); - actualLevel = loggers.level("a.b.c.p.X"); - assertEquals(expectedLevel, actualLevel); - - // Make sure that the direct parent logger and a sibling logger remain unaffected - expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME); - actualLevel = loggers.level("a.b.c.p"); - assertEquals(expectedLevel, actualLevel); - - expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME); - actualLevel = loggers.level("a.b.c.p.Y"); - assertEquals(expectedLevel, actualLevel); - - // Set the same level again, and verify that the last modified time hasn't been altered - time.sleep(10); - loggers.setLevel("a.b.c.p.X", Level.ERROR); - expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); - actualLevel = loggers.level("a.b.c.p.X"); - assertEquals(expectedLevel, actualLevel); + loggers.setLevel("a.b.c", DEBUG); + loggers.setLevel("a.b", ERROR); + loggers.setLevel("a", WARN); + Map expected = Map.of( + "a", new LoggerLevel(WARN.name(), INITIAL_TIME), + "a.b", new LoggerLevel(WARN.name(), INITIAL_TIME), + "a.b.c", new LoggerLevel(WARN.name(), INITIAL_TIME) + ); + assertTrue(loggers.allLevels().entrySet().containsAll(expected.entrySet())); } @Test - public void testSetRootLevel() { - Logger root = logger("root"); - root.setLevel(Level.ERROR); - - Logger p = logger("a.b.c.p"); - Logger x = logger("a.b.c.p.X"); - Logger y = logger("a.b.c.p.Y"); - Logger z = logger("a.b.c.p.Z"); - Logger w = logger("a.b.c.s.W"); - x.setLevel(Level.INFO); - y.setLevel(Level.INFO); - z.setLevel(Level.INFO); - w.setLevel(Level.INFO); - - Loggers loggers = new TestLoggers(root, x, y, z, w); - - List modified = loggers.setLevel("root", Level.DEBUG); - assertEquals(Arrays.asList("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", "root"), modified); - - assertNull(p.getLevel()); - - assertEquals(root.getLevel(), Level.DEBUG); - - assertEquals(w.getLevel(), Level.DEBUG); - assertEquals(x.getLevel(), Level.DEBUG); - assertEquals(y.getLevel(), Level.DEBUG); - assertEquals(z.getLevel(), Level.DEBUG); - - Map expectedLevels = new HashMap<>(); - expectedLevels.put("root", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME)); - expectedLevels.put("a.b.c.p.X", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME)); - expectedLevels.put("a.b.c.p.Y", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME)); - expectedLevels.put("a.b.c.p.Z", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME)); - expectedLevels.put("a.b.c.s.W", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME)); - - Map actualLevels = loggers.allLevels(); - assertEquals(expectedLevels, actualLevels); + public void testLookupLoggerAfterCreatingNewLogger() { + loggers.setLevel("dummy", INFO); + Logger logger = loggers.lookupLogger("dummy"); + assertNotNull(logger); + assertEquals(INFO, logger.getLevel()); } @Test - public void testSetLevelNullArguments() { - Logger root = logger("root"); - Loggers loggers = new TestLoggers(root); - assertThrows(NullPointerException.class, () -> loggers.setLevel(null, Level.INFO)); - assertThrows(NullPointerException.class, () -> loggers.setLevel("root", null)); + public void testSetLevelWithSameLevel() { + String loggerName = "dummy"; + loggers.setLevel(loggerName, DEBUG); + time.sleep(100); + loggers.setLevel(loggerName, DEBUG); + assertEquals( + new LoggerLevel(DEBUG.name(), INITIAL_TIME), + loggers.allLevels().get(loggerName), + "Setting 
same log level should not update the lastModified timestamp" + ); } - private class TestLoggers extends Loggers { - - private final Logger rootLogger; - private final Map currentLoggers; - - public TestLoggers(Logger rootLogger, Logger... knownLoggers) { - super(time); - this.rootLogger = rootLogger; - this.currentLoggers = new HashMap<>(Stream.of(knownLoggers) - .collect(Collectors.toMap( - Logger::getName, - Function.identity() - ))); - } - - @Override - Logger lookupLogger(String logger) { - return currentLoggers.computeIfAbsent(logger, l -> new Logger(logger) { }); - } - - @Override - Enumeration currentLoggers() { - return new Vector<>(currentLoggers.values()).elements(); - } - - @Override - Logger rootLogger() { - return rootLogger; - } + @Test + public void testSetLevelWithDifferentLevels() { + String loggerName = "dummy"; + loggers.setLevel(loggerName, DEBUG); + time.sleep(100); + loggers.setLevel(loggerName, WARN); + assertEquals( + new LoggerLevel(WARN.name(), INITIAL_TIME + 100), + loggers.allLevels().get(loggerName), + "Setting different log level should update the lastModified timestamp" + ); } - private Logger logger(String name) { - return new Logger(name) { }; - } + @Test + public void testLookupLoggerWithValidRootLoggerNames() { + assertEquals( + loggers.lookupLogger("root"), + loggers.lookupLogger(""), + "Both 'root' and empty string should retrieve the root logger" + ); + assertEquals( + loggers.lookupLogger(""), + loggers.rootLogger(), + "Empty string lookup should match direct root logger access" + ); + } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java new file mode 100644 index 0000000000000..3df5028461190 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.runtime; + +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.STRICT_STUBS) +public class MockLoggersTest { + private static final long INITIAL_TIME = 1696951712135L; + private final LoggerContext context = (LoggerContext) LogManager.getContext(false); + private Time time; + + @BeforeEach + public void setup() { + time = new MockTime(0, INITIAL_TIME, 0); + } + + @Test + public void testGetLoggersIgnoresNullLevels() { + LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Logger root = loggerContext.getRootLogger(); + Configurator.setLevel(root, Level.OFF); + + Logger a = loggerContext.getLogger("a"); + Configurator.setLevel(a, null); + + Logger b = loggerContext.getLogger("b"); + Configurator.setLevel(b, Level.INFO); + + Loggers loggers = new TestLoggers(root, a, b); + + Map expectedLevels = Collections.singletonMap( + "b", + new LoggerLevel(Level.INFO.toString(), null) + ); + Map actualLevels = loggers.allLevels(); + assertEquals(expectedLevels, actualLevels); + } + + @Test + public void testGetLoggerFallsBackToEffectiveLogLevel() { + LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Logger root = loggerContext.getRootLogger(); + Configurator.setLevel(root, Level.ERROR); + + Logger a = loggerContext.getLogger("a"); + Configurator.setLevel(a, null); + + Logger b = loggerContext.getLogger("b"); + Configurator.setLevel(b, Level.INFO); + + Loggers loggers = new TestLoggers(root, a, b); + + LoggerLevel expectedLevel = new LoggerLevel(Level.ERROR.toString(), null); + LoggerLevel actualLevel = loggers.level("a"); + assertEquals(expectedLevel, actualLevel); + } + + @Test + public void testGetUnknownLogger() { + LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Logger root = loggerContext.getRootLogger(); + Configurator.setLevel(root, Level.ERROR); + + Logger a = loggerContext.getLogger("a"); + Configurator.setLevel(a, null); + + Logger b = loggerContext.getLogger("b"); + Configurator.setLevel(b, Level.INFO); + + Loggers loggers = new TestLoggers(root, a, b); + + LoggerLevel level = loggers.level("c"); + assertNull(level); + } + + @Test + public void testSetLevel() { + LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Logger 
root = loggerContext.getRootLogger(); + Configurator.setLevel(root, Level.ERROR); + + Logger x = loggerContext.getLogger("a.b.c.p.X"); + Logger y = loggerContext.getLogger("a.b.c.p.Y"); + Logger z = loggerContext.getLogger("a.b.c.p.Z"); + Logger w = loggerContext.getLogger("a.b.c.s.W"); + Configurator.setLevel(x, Level.INFO); + Configurator.setLevel(y, Level.INFO); + Configurator.setLevel(z, Level.INFO); + Configurator.setLevel(w, Level.INFO); + + // We don't explicitly register a logger for a.b.c.p, so it won't appear in the list of current loggers; + // one should be created by the Loggers instance when we set the level + TestLoggers loggers = new TestLoggers(root, x, y, z, w); + + List modified = loggers.setLevel("a.b.c.p", Level.WARN); + assertEquals(Arrays.asList("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); + assertEquals(Level.WARN.toString(), loggers.level("a.b.c.p").level()); + assertEquals(Level.WARN, x.getLevel()); + assertEquals(Level.WARN, y.getLevel()); + assertEquals(Level.WARN, z.getLevel()); + + LoggerLevel expectedLevel = new LoggerLevel(Level.WARN.toString(), INITIAL_TIME); + LoggerLevel actualLevel = loggers.level("a.b.c.p"); + assertEquals(expectedLevel, actualLevel); + + // Sleep a little and adjust the level of a leaf logger + time.sleep(10); + loggers.setLevel("a.b.c.p.X", Level.ERROR); + expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); + actualLevel = loggers.level("a.b.c.p.X"); + assertEquals(expectedLevel, actualLevel); + + // Make sure that the direct parent logger and a sibling logger remain unaffected + expectedLevel = new LoggerLevel(Level.WARN.toString(), INITIAL_TIME); + actualLevel = loggers.level("a.b.c.p"); + assertEquals(expectedLevel, actualLevel); + + expectedLevel = new LoggerLevel(Level.WARN.toString(), INITIAL_TIME); + actualLevel = loggers.level("a.b.c.p.Y"); + assertEquals(expectedLevel, actualLevel); + + // Set the same level again, and verify that the last modified time hasn't been altered + time.sleep(10); + loggers.setLevel("a.b.c.p.X", Level.ERROR); + expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); + actualLevel = loggers.level("a.b.c.p.X"); + assertEquals(expectedLevel, actualLevel); + } + + @Test + public void testSetRootLevel() { + // In this test case, we focus on setting the level for the root logger. + // Ideally, we want to start with a "clean" configuration to conduct this test case. + // By programmatically creating a new configuration at the beginning, we can ensure + // that this test case is not affected by existing Log4j configurations. 
+ LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Configuration config = loggerContext.getConfiguration(); + String rootLoggerName = "root"; + LoggerConfig rootConfig = new LoggerConfig(rootLoggerName, Level.ERROR, false); + config.addLogger(rootLoggerName, rootConfig); + loggerContext.updateLoggers(); + + Logger root = LogManager.getLogger(rootLoggerName); + Configurator.setLevel(root, Level.ERROR); + + Logger p = loggerContext.getLogger("a.b.c.p"); + Logger x = loggerContext.getLogger("a.b.c.p.X"); + Logger y = loggerContext.getLogger("a.b.c.p.Y"); + Logger z = loggerContext.getLogger("a.b.c.p.Z"); + Logger w = loggerContext.getLogger("a.b.c.s.W"); + Configurator.setLevel(p, Level.INFO); + Configurator.setLevel(x, Level.INFO); + Configurator.setLevel(y, Level.INFO); + Configurator.setLevel(z, Level.INFO); + Configurator.setLevel(w, Level.INFO); + + Loggers loggers = new TestLoggers(root, x, y, z, w); + + List modified = loggers.setLevel(rootLoggerName, Level.DEBUG); + assertEquals(Arrays.asList("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", rootLoggerName), modified); + + assertEquals(Level.DEBUG, p.getLevel()); + + assertEquals(Level.DEBUG, root.getLevel()); + + assertEquals(Level.DEBUG, w.getLevel()); + assertEquals(Level.DEBUG, x.getLevel()); + assertEquals(Level.DEBUG, y.getLevel()); + assertEquals(Level.DEBUG, z.getLevel()); + + Map expectedLevels = Map.of( + "a.b.c.p.X", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME), + "a.b.c.p.Y", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME), + "a.b.c.p.Z", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME), + "a.b.c.s.W", new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME), + rootLoggerName, new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME) + ); + + Map actualLevels = loggers.allLevels(); + assertEquals(expectedLevels, actualLevels); + } + + @Test + public void testSetLevelNullArguments() { + LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + Logger root = loggerContext.getRootLogger(); + Loggers loggers = new TestLoggers(root); + assertThrows(NullPointerException.class, () -> loggers.setLevel(null, Level.INFO)); + assertThrows(NullPointerException.class, () -> loggers.setLevel("root", null)); + } + + private class TestLoggers extends Loggers { + + private final Logger rootLogger; + private final Map currentLoggers; + + public TestLoggers(Logger rootLogger, Logger... 
knownLoggers) { + super(time); + this.rootLogger = rootLogger; + this.currentLoggers = new HashMap<>(Stream.of(knownLoggers) + .collect(Collectors.toMap( + Logger::getName, + Function.identity() + ))); + this.currentLoggers.put(rootLogger.getName(), rootLogger); + } + + @Override + Logger lookupLogger(String logger) { + return currentLoggers.computeIfAbsent(logger, LogManager::getLogger); + } + + @Override + Map currentLoggers() { + return currentLoggers; + } + + @Override + Logger rootLogger() { + return rootLogger; + } + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java index cb91530439f3d..1324b9a22638c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java @@ -27,7 +27,7 @@ public class SampleSourceConnector extends SourceConnector { - public static final String VERSION = "an entirely different version"; + public static final String VERSION = "some great version"; @Override public String version() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java index e8e8fbcbdd89f..cb39ac42ebc49 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceConnectorConfigTest.java @@ -73,7 +73,7 @@ public void shouldNotFailWithExplicitlySpecifiedDefaultTopicCreationGroup() { TOPIC_CREATION_GROUP_1, TOPIC_CREATION_GROUP_2)); props.put(DEFAULT_TOPIC_CREATION_PREFIX + REPLICATION_FACTOR_CONFIG, "1"); props.put(DEFAULT_TOPIC_CREATION_PREFIX + PARTITIONS_CONFIG, "1"); - SourceConnectorConfig config = new SourceConnectorConfig(MOCK_PLUGINS, props, true); + new SourceConnectorConfig(MOCK_PLUGINS, props, true); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java index bd70ed357c6e8..106659d0f8f46 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java @@ -22,7 +22,7 @@ import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.util.ConnectorTaskId; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java index 8feeee0588a0f..6b8368e002c43 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java @@ -342,7 +342,7 @@ private void assertNoRemainingDeques() { } @SafeVarargs - private final void assertRemovedDeques(Map... partitions) { + private void assertRemovedDeques(Map... 
partitions) { for (Map partition : partitions) { assertFalse(submittedRecords.records.containsKey(partition), "Deque for partition " + partition + " should have been cleaned up from internal records map"); } @@ -365,7 +365,7 @@ private void assertMetadataNoPending(CommittableOffsets committableOffsets, int @SafeVarargs @SuppressWarnings("varargs") - private final void assertMetadata( + private void assertMetadata( CommittableOffsets committableOffsets, int committableMessages, int uncommittableMessages, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java index 2a6c0ed2b9d1d..ef7f17e1d09a2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java @@ -29,12 +29,16 @@ import org.apache.kafka.connect.transforms.SetSchemaMetadata; import org.apache.kafka.connect.transforms.TimestampConverter; import org.apache.kafka.connect.transforms.TimestampRouter; +import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.ValueToKey; import org.junit.jupiter.api.Test; import java.util.HashMap; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * Tests that transformations' configs can be composed with ConnectorConfig during its construction, ensuring no * conflicting fields or other issues. @@ -42,8 +46,19 @@ * This test appears here simply because it requires both connect-runtime and connect-transforms and connect-runtime * already depends on connect-transforms. */ +@SuppressWarnings("rawtypes") public class TransformationConfigTest { + private Plugins setupMockPlugins(Transformation transformation) { + Plugins plugins = mock(Plugins.class); + try { + when(plugins.newPlugin(transformation.getClass().getName(), null, (ClassLoader) null)).thenReturn(transformation); + } catch (ClassNotFoundException e) { + // Shouldn't happen since we're mocking the plugins + } + return plugins; + } + @Test public void testEmbeddedConfigCast() { // Validate that we can construct a Connector config containing the extended config for the transform @@ -54,7 +69,7 @@ public void testEmbeddedConfigCast() { connProps.put("transforms.example.type", Cast.Value.class.getName()); connProps.put("transforms.example.spec", "int8"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new Cast.Value()); new ConnectorConfig(plugins, connProps); } @@ -68,7 +83,7 @@ public void testEmbeddedConfigExtractField() { connProps.put("transforms.example.type", ExtractField.Value.class.getName()); connProps.put("transforms.example.field", "field"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new ExtractField.Value()); new ConnectorConfig(plugins, connProps); } @@ -81,7 +96,7 @@ public void testEmbeddedConfigFlatten() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", Flatten.Value.class.getName()); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new Flatten.Value()); new ConnectorConfig(plugins, connProps); } @@ -95,7 +110,7 @@ public void testEmbeddedConfigHoistField() { connProps.put("transforms.example.type", HoistField.Value.class.getName()); 
connProps.put("transforms.example.field", "field"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new HoistField.Value()); new ConnectorConfig(plugins, connProps); } @@ -108,7 +123,7 @@ public void testEmbeddedConfigInsertField() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", InsertField.Value.class.getName()); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new InsertField.Value()); new ConnectorConfig(plugins, connProps); } @@ -123,7 +138,7 @@ public void testEmbeddedConfigMaskField() { connProps.put("transforms.example.fields", "field"); connProps.put("transforms.example.replacement", "nothing"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new MaskField.Value()); new ConnectorConfig(plugins, connProps); } @@ -138,7 +153,7 @@ public void testEmbeddedConfigRegexRouter() { connProps.put("transforms.example.regex", "(.*)"); connProps.put("transforms.example.replacement", "prefix-$1"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new RegexRouter()); new ConnectorConfig(plugins, connProps); } @@ -151,7 +166,7 @@ public void testEmbeddedConfigReplaceField() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", ReplaceField.Value.class.getName()); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new ReplaceField.Value()); new ConnectorConfig(plugins, connProps); } @@ -164,7 +179,7 @@ public void testEmbeddedConfigSetSchemaMetadata() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", SetSchemaMetadata.Value.class.getName()); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new SetSchemaMetadata.Value()); new ConnectorConfig(plugins, connProps); } @@ -178,7 +193,7 @@ public void testEmbeddedConfigTimestampConverter() { connProps.put("transforms.example.type", TimestampConverter.Value.class.getName()); connProps.put("transforms.example.target.type", "unix"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new TimestampConverter.Value()); new ConnectorConfig(plugins, connProps); } @@ -191,7 +206,7 @@ public void testEmbeddedConfigTimestampRouter() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", TimestampRouter.class.getName()); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new TimestampRouter()); new ConnectorConfig(plugins, connProps); } @@ -205,7 +220,7 @@ public void testEmbeddedConfigValueToKey() { connProps.put("transforms.example.type", ValueToKey.class.getName()); connProps.put("transforms.example.fields", "field"); - Plugins plugins = null; // Safe when we're only constructing the config + Plugins plugins = setupMockPlugins(new ValueToKey()); new ConnectorConfig(plugins, connProps); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java index b3d197acc066d..4e91183fd3125 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java @@ -23,7 +23,7 @@ import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetCommitCallback; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.WakeupException; @@ -90,6 +90,7 @@ import static java.util.Arrays.asList; import static java.util.Collections.singleton; +import static org.apache.kafka.connect.runtime.WorkerTestUtils.getTransformationChain; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -192,13 +193,18 @@ private void createTask(TargetState initialState) { createTask(initialState, keyConverter, valueConverter, headerConverter); } + private void createTask(TargetState initialState, TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { + createTask(initialState, keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, transformationChain); + } + private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { - createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noopOperator(), Collections::emptyList); + createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); } private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, RetryWithToleranceOperator> retryWithToleranceOperator, - Supplier>>> errorReportersSupplier) { + Supplier>>> errorReportersSupplier, + TransformationChain transformationChain) { workerTask = new WorkerSinkTask( taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, @@ -854,6 +860,103 @@ public void testWakeupNotThrownDuringShutdown() { verify(sinkTask).close(any(Collection.class)); } + @Test + public void testRaisesFailedRetriableExceptionFromConvert() { + createTask(initialState); + + workerTask.initialize(TASK_CONFIG); + workerTask.initializeAndStart(); + verifyInitializeTask(); + + expectPollInitialAssignment() + .thenAnswer(expectConsumerPoll(1)) + .thenAnswer(invocation -> { + // stop the task during its second iteration + workerTask.stop(); + return new ConsumerRecords<>(Map.of(), Map.of()); + }); + throwExceptionOnConversion(null, new RecordHeaders()); + + workerTask.iteration(); + + assertThrows(ConnectException.class, workerTask::execute); + } + + @Test + public void testSkipsFailedRetriableExceptionFromConvert() { + createTask(initialState, keyConverter, valueConverter, headerConverter, + RetryWithToleranceOperatorTest.allOperator(), Collections::emptyList, transformationChain); + + workerTask.initialize(TASK_CONFIG); + workerTask.initializeAndStart(); + verifyInitializeTask(); + + expectPollInitialAssignment() + .thenAnswer(expectConsumerPoll(1)) + .thenAnswer(invocation -> { + // stop the task during its second iteration + workerTask.stop(); 
+ return new ConsumerRecords<>(Map.of(), Map.of()); + }); + throwExceptionOnConversion(null, new RecordHeaders()); + + workerTask.iteration(); + workerTask.execute(); + + verify(sinkTask, times(3)).put(Collections.emptyList()); + } + + @Test + public void testRaisesFailedRetriableExceptionFromTransform() { + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.noneOperator(); + TransformationChain transformationChainRetriableException = getTransformationChain( + retryWithToleranceOperator, List.of(new RetriableException("Test"))); + createTask(initialState, transformationChainRetriableException, retryWithToleranceOperator); + + workerTask.initialize(TASK_CONFIG); + workerTask.initializeAndStart(); + verifyInitializeTask(); + + expectPollInitialAssignment() + .thenAnswer(expectConsumerPoll(1)) + .thenAnswer(invocation -> { + // stop the task during its second iteration + workerTask.stop(); + return new ConsumerRecords<>(Map.of(), Map.of()); + }); + expectConversion(null, new RecordHeaders()); + + workerTask.iteration(); + + assertThrows(ConnectException.class, workerTask::execute); + } + + @Test + public void testSkipsFailedRetriableExceptionFromTransform() { + RetryWithToleranceOperator retryWithToleranceOperator = RetryWithToleranceOperatorTest.allOperator(); + TransformationChain transformationChainRetriableException = getTransformationChain( + retryWithToleranceOperator, List.of(new RetriableException("Test"))); + createTask(initialState, transformationChainRetriableException, retryWithToleranceOperator); + + workerTask.initialize(TASK_CONFIG); + workerTask.initializeAndStart(); + verifyInitializeTask(); + + expectPollInitialAssignment() + .thenAnswer(expectConsumerPoll(1)) + .thenAnswer(invocation -> { + // stop the task during its second iteration + workerTask.stop(); + return new ConsumerRecords<>(Map.of(), Map.of()); + }); + expectConversion(null, new RecordHeaders()); + + workerTask.iteration(); + workerTask.execute(); + + verify(sinkTask, times(3)).put(Collections.emptyList()); + } + @Test public void testRequestCommit() { createTask(initialState); @@ -1752,15 +1855,15 @@ public void testOriginalTopicWithTopicMutatingTransformations() { @Test public void testPartitionCountInCaseOfPartitionRevocation() { - MockConsumer mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + MockConsumer mockConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); // Setting up Worker Sink Task to check metrics workerTask = new WorkerSinkTask( taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, mockConsumer, pluginLoader, time, - RetryWithToleranceOperatorTest.noopOperator(), null, statusBackingStore, Collections::emptyList); + RetryWithToleranceOperatorTest.noneOperator(), null, statusBackingStore, Collections::emptyList); mockConsumer.updateBeginningOffsets( - new HashMap() {{ + new HashMap<>() {{ put(TOPIC_PARTITION, 0L); put(TOPIC_PARTITION2, 0L); }} @@ -1852,6 +1955,19 @@ private void expectConversionAndTransformation(final String topicPrefix, final H expectTransformation(topicPrefix); } + private void expectConversion(final String topicPrefix, final Headers headers) { + when(keyConverter.toConnectData(TOPIC, headers, RAW_KEY)).thenReturn(new SchemaAndValue(KEY_SCHEMA, KEY)); + when(valueConverter.toConnectData(TOPIC, headers, RAW_VALUE)).thenReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)); + + 
for (Header header : headers) { + when(headerConverter.toConnectHeader(TOPIC, header.key(), header.value())).thenReturn(new SchemaAndValue(VALUE_SCHEMA, new String(header.value()))); + } + } + + private void throwExceptionOnConversion(final String topicPrefix, final Headers headers) { + when(keyConverter.toConnectData(TOPIC, headers, RAW_KEY)).thenThrow(new RetriableException("Failed to convert")); + } + @SuppressWarnings("unchecked") private void expectTransformation(final String topicPrefix) { when(transformationChain.apply(any(ProcessingContext.class), any(SinkRecord.class))).thenAnswer((Answer) diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java index a41ce37c356d9..2ed01a747a726 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java @@ -177,7 +177,7 @@ public void setup() { workerTask = new WorkerSinkTask( taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, - consumer, pluginLoader, time, RetryWithToleranceOperatorTest.noopOperator(), null, statusBackingStore, + consumer, pluginLoader, time, RetryWithToleranceOperatorTest.noneOperator(), null, statusBackingStore, Collections::emptyList); recordsReturned = 0; } @@ -641,7 +641,7 @@ private void expectRebalanceDuringPoll(long startOffset) { } private void expectPreCommit(ExpectOffsetCommitCommand... commands) { - doAnswer(new Answer() { + doAnswer(new Answer<>() { int index = 0; @Override @@ -662,7 +662,7 @@ public Object answer(InvocationOnMock invocation) { } private void expectOffsetCommit(ExpectOffsetCommitCommand... 
commands) { - doAnswer(new Answer() { + doAnswer(new Answer<>() { int index = 0; @Override @@ -718,7 +718,6 @@ private RecordHeaders emptyHeaders() { private abstract static class TestSinkTask extends SinkTask { } - @SuppressWarnings("NewClassNamingConvention") private static class ExpectOffsetCommitCommand { final long expectedMessages; final RuntimeException error; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java index 77d56a207d764..a04b3bc7caa56 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java @@ -54,7 +54,7 @@ import org.apache.kafka.connect.util.TopicAdmin; import org.apache.kafka.connect.util.TopicCreationGroup; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; @@ -231,7 +231,7 @@ public void tearDown() { } private void createWorkerTask() { - createWorkerTask(TargetState.STARTED, RetryWithToleranceOperatorTest.noopOperator()); + createWorkerTask(TargetState.STARTED, RetryWithToleranceOperatorTest.noneOperator()); } private void createWorkerTaskWithErrorToleration() { @@ -239,7 +239,7 @@ private void createWorkerTaskWithErrorToleration() { } private void createWorkerTask(TargetState initialState) { - createWorkerTask(initialState, RetryWithToleranceOperatorTest.noopOperator()); + createWorkerTask(initialState, RetryWithToleranceOperatorTest.noneOperator()); } private void createWorkerTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java index 14e29cded9ca6..b4ad23b37a968 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java @@ -1777,8 +1777,7 @@ public void testWorkerMetrics(boolean enableTopicCreation) throws Exception { List list = worker.metrics().metrics().reporters(); for (MetricsReporter reporter : list) { - if (reporter instanceof MockMetricsReporter) { - MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) reporter; + if (reporter instanceof MockMetricsReporter mockMetricsReporter) { //verify connect cluster is set in MetricsContext assertEquals(CLUSTER_ID, mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID)); } @@ -2149,7 +2148,7 @@ public void testAlterOffsetsSourceConnector(boolean enableTopicCreation) throws @ParameterizedTest @ValueSource(booleans = {true, false}) @SuppressWarnings("unchecked") - public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) throws Exception { + public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); mockInternalConverters(); @@ -2188,7 +2187,7 @@ public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) th @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testNormalizeSourceConnectorOffsets(boolean enableTopicCreation) throws Exception { + public void 
testNormalizeSourceConnectorOffsets(boolean enableTopicCreation) { setup(enableTopicCreation); Map, Map> offsets = Collections.singletonMap( Collections.singletonMap("filename", "/path/to/filename"), @@ -2334,7 +2333,7 @@ private void alterOffsetsSinkConnector(Map, Map> parti @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testAlterOffsetsSinkConnectorAlterOffsetsError(boolean enableTopicCreation) throws Exception { + public void testAlterOffsetsSinkConnectorAlterOffsetsError(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); String connectorClass = SampleSinkConnector.class.getName(); @@ -2375,7 +2374,7 @@ public void testAlterOffsetsSinkConnectorAlterOffsetsError(boolean enableTopicCr @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testAlterOffsetsSinkConnectorDeleteOffsetsError(boolean enableTopicCreation) throws Exception { + public void testAlterOffsetsSinkConnectorDeleteOffsetsError(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); String connectorClass = SampleSinkConnector.class.getName(); @@ -2426,7 +2425,7 @@ public void testAlterOffsetsSinkConnectorDeleteOffsetsError(boolean enableTopicC @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testAlterOffsetsSinkConnectorSynchronousError(boolean enableTopicCreation) throws Exception { + public void testAlterOffsetsSinkConnectorSynchronousError(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); String connectorClass = SampleSinkConnector.class.getName(); @@ -2557,7 +2556,7 @@ public void testResetOffsetsSinkConnector(boolean enableTopicCreation) throws Ex @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testResetOffsetsSinkConnectorDeleteConsumerGroupError(boolean enableTopicCreation) throws Exception { + public void testResetOffsetsSinkConnectorDeleteConsumerGroupError(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); String connectorClass = SampleSinkConnector.class.getName(); @@ -2594,7 +2593,7 @@ public void testResetOffsetsSinkConnectorDeleteConsumerGroupError(boolean enable @ParameterizedTest @ValueSource(booleans = {true, false}) @SuppressWarnings("unchecked") - public void testModifySourceConnectorOffsetsTimeout(boolean enableTopicCreation) throws Exception { + public void testModifySourceConnectorOffsetsTimeout(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); Time time = new MockTime(); @@ -2630,7 +2629,7 @@ public void testModifySourceConnectorOffsetsTimeout(boolean enableTopicCreation) @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testModifyOffsetsSinkConnectorTimeout(boolean enableTopicCreation) throws Exception { + public void testModifyOffsetsSinkConnectorTimeout(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); String connectorClass = SampleSinkConnector.class.getName(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java index 462d02f3e6d8e..06c3a42b64f8d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java @@ -16,12 +16,18 @@ */ package org.apache.kafka.connect.runtime; +import org.apache.kafka.connect.connector.ConnectRecord; import 
org.apache.kafka.connect.runtime.distributed.ExtendedAssignment; -import org.apache.kafka.connect.runtime.distributed.ExtendedWorkerState; +import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; +import org.apache.kafka.connect.transforms.Transformation; +import org.apache.kafka.connect.transforms.predicates.Predicate; import org.apache.kafka.connect.util.ConnectorTaskId; +import org.mockito.Mockito; +import org.mockito.stubbing.OngoingStubbing; + import java.util.AbstractMap.SimpleEntry; import java.util.Collections; import java.util.HashMap; @@ -30,35 +36,14 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.kafka.connect.runtime.distributed.WorkerCoordinator.WorkerLoad; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class WorkerTestUtils { - public static WorkerLoad emptyWorkerLoad(String worker) { - return new WorkerLoad.Builder(worker).build(); - } - - public WorkerLoad workerLoad(String worker, int connectorStart, int connectorNum, - int taskStart, int taskNum) { - return new WorkerLoad.Builder(worker).with( - newConnectors(connectorStart, connectorStart + connectorNum), - newTasks(taskStart, taskStart + taskNum)).build(); - } - - public static List newConnectors(int start, int end) { - return IntStream.range(start, end) - .mapToObj(i -> "connector" + i) - .collect(Collectors.toList()); - } - - public static List newTasks(int start, int end) { - return IntStream.range(start, end) - .mapToObj(i -> new ConnectorTaskId("task", i)) - .collect(Collectors.toList()); - } - public static ClusterConfigState clusterConfigState(long offset, int connectorNum, int taskNum) { @@ -82,24 +67,6 @@ public static ClusterConfigState clusterConfigState(long offset, Collections.emptySet()); } - public static Map memberConfigs(String givenLeader, - long givenOffset, - Map givenAssignments) { - return givenAssignments.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - e -> new ExtendedWorkerState(expectedLeaderUrl(givenLeader), givenOffset, e.getValue()))); - } - - public static Map memberConfigs(String givenLeader, - long givenOffset, - int start, - int connectorNum) { - return IntStream.range(start, connectorNum + 1) - .mapToObj(i -> new SimpleEntry<>("worker" + i, new ExtendedWorkerState(expectedLeaderUrl(givenLeader), givenOffset, null))) - .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue)); - } - public static Map connectorTaskCounts(int start, int connectorNum, int taskCounts) { @@ -198,4 +165,33 @@ public static void assertAssignment(boolean expectFailed, assertEquals(expectedDelay, assignment.delay(), "Wrong rebalance delay in " + assignment); } + + public static > TransformationChain getTransformationChain( + RetryWithToleranceOperator toleranceOperator, + List results) { + Transformation transformation = mock(Transformation.class); + OngoingStubbing stub = when(transformation.apply(any())); + for (Object result: results) { + if (result instanceof Exception) { + stub = stub.thenThrow((Exception) result); + } else { + stub = stub.thenReturn((R) result); + } + } + return buildTransformationChain(transformation, toleranceOperator); + } + + public static > 
TransformationChain buildTransformationChain( + Transformation transformation, + RetryWithToleranceOperator toleranceOperator) { + Predicate predicate = mock(Predicate.class); + when(predicate.test(any())).thenReturn(true); + TransformationStage stage = new TransformationStage( + predicate, + false, + transformation); + TransformationChain realTransformationChainRetriableException = new TransformationChain(List.of(stage), toleranceOperator); + TransformationChain transformationChainRetriableException = Mockito.spy(realTransformationChainRetriableException); + return transformationChainRetriableException; + } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java index bbc074e97308f..74da1703482af 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java @@ -114,9 +114,9 @@ import javax.crypto.SecretKey; +import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; +import static jakarta.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; import static java.util.Collections.singletonList; -import static javax.ws.rs.core.Response.Status.FORBIDDEN; -import static javax.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; import static org.apache.kafka.connect.runtime.AbstractStatus.State.FAILED; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -320,7 +320,7 @@ public void setUp() throws Exception { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[]{uponShutdown})); - + verify(worker).getPlugins(); configUpdateListener = herder.new ConfigUpdateListener(); rebalanceListener = herder.new RebalanceListener(time); conn1SinkConfig = new SinkConnectorConfig(plugins, CONN1_CONFIG); @@ -2333,8 +2333,6 @@ public void testAccessors() throws Exception { herder.connectorConfig(CONN1, connectorConfigCb); FutureCallback> taskConfigsCb = new FutureCallback<>(); herder.taskConfigs(CONN1, taskConfigsCb); - FutureCallback>> tasksConfigCb = new FutureCallback<>(); - herder.tasksConfig(CONN1, tasksConfigCb); herder.tick(); assertTrue(listConnectorsCb.isDone()); @@ -2351,11 +2349,6 @@ public void testAccessors() throws Exception { new TaskInfo(TASK1, TASK_CONFIG), new TaskInfo(TASK2, TASK_CONFIG)), taskConfigsCb.get()); - Map> tasksConfig = new HashMap<>(); - tasksConfig.put(TASK0, TASK_CONFIG); - tasksConfig.put(TASK1, TASK_CONFIG); - tasksConfig.put(TASK2, TASK_CONFIG); - assertEquals(tasksConfig, tasksConfigCb.get()); // Config transformation should not occur when requesting connector or task info verify(configTransformer, never()).transform(eq(CONN1), any()); @@ -3557,7 +3550,7 @@ public void testTaskReconfigurationRetriesWithLeaderRequestForwardingException() herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, 
configBackingStore, member, MEMBER_URL, restClient, metrics, time, noneConnectorClientConfigOverridePolicy, Collections.emptyList(), new MockSynchronousExecutor(), new AutoCloseable[]{})); - + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); when(member.memberId()).thenReturn("member"); @@ -3999,6 +3992,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceDisabled() throws Excepti public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exception { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment when(member.memberId()).thenReturn("leader"); @@ -4064,6 +4058,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio public void testModifyOffsetsSourceConnectorExactlyOnceEnabledZombieFencingFailure() { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java index b59c1863a9179..86bc897fafe23 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java @@ -923,8 +923,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - Set expectedWorkers = new HashSet<>(); - expectedWorkers.addAll(Arrays.asList(newWorker, flakyWorker)); + Set expectedWorkers = new HashSet<>(Arrays.asList(newWorker, flakyWorker)); assertEquals(expectedWorkers, assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java index d85c2246dff85..fad39d84129dc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java @@ -78,9 +78,8 @@ public void testMetrics() throws Exception { boolean foundJmxReporter = false; assertEquals(2, member.metrics().reporters().size()); for (MetricsReporter reporter : member.metrics().reporters()) { - if (reporter instanceof MockConnectMetrics.MockMetricsReporter) { + if (reporter instanceof MockConnectMetrics.MockMetricsReporter mockMetricsReporter) { foundMockReporter = true; - MockConnectMetrics.MockMetricsReporter mockMetricsReporter = (MockConnectMetrics.MockMetricsReporter) reporter; assertEquals("cluster-1", mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID)); assertEquals("group-1", mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_GROUP_ID)); } diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java index 68931a8a993f5..23c4bc25553c6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java @@ -87,7 +87,7 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class RetryWithToleranceOperatorTest { - private static final Map PROPERTIES = new HashMap() {{ + private static final Map PROPERTIES = new HashMap<>() {{ put(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG, Objects.toString(2)); put(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG, Objects.toString(3000)); put(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.INFO.toString()); @@ -97,10 +97,10 @@ public class RetryWithToleranceOperatorTest { put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); }}; - public static RetryWithToleranceOperator noopOperator() { + public static RetryWithToleranceOperator noneOperator() { return genericOperator(ERRORS_RETRY_TIMEOUT_DEFAULT, NONE, new ErrorHandlingMetrics( - new ConnectorTaskId("noop-connector", -1), - new ConnectMetrics("noop-worker", new TestableWorkerConfig(PROPERTIES), + new ConnectorTaskId("errors-none-tolerate-connector", -1), + new ConnectMetrics("errors-none-tolerate-worker", new TestableWorkerConfig(PROPERTIES), Time.SYSTEM, "test-cluster"))); } @@ -147,56 +147,77 @@ public void testExecuteFailedNoTolerance() { @Test public void testHandleExceptionInTransformations() { - testHandleExceptionInStage(Stage.TRANSFORMATION, new Exception()); + testHandleExceptionInStage(Stage.TRANSFORMATION, new Exception(), ALL); } + @Test + public void testHandleRetriableExceptionInTransformationsToleranceNone() { + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.TRANSFORMATION, new RetriableException("Test"), NONE)); + } + + @Test public void testHandleExceptionInHeaderConverter() { - testHandleExceptionInStage(Stage.HEADER_CONVERTER, new Exception()); + testHandleExceptionInStage(Stage.HEADER_CONVERTER, new Exception(), ALL); + } + + @Test + public void testHandleRetriableExceptionInHeaderConverterToleranceNone() { + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.HEADER_CONVERTER, new RetriableException("Test"), NONE)); } @Test public void testHandleExceptionInValueConverter() { - testHandleExceptionInStage(Stage.VALUE_CONVERTER, new Exception()); + testHandleExceptionInStage(Stage.VALUE_CONVERTER, new Exception(), ALL); + } + + @Test + public void testHandleRetriableExceptionInValueConverterToleranceNone() { + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.VALUE_CONVERTER, new RetriableException("Test"), NONE)); } @Test public void testHandleExceptionInKeyConverter() { - testHandleExceptionInStage(Stage.KEY_CONVERTER, new Exception()); + testHandleExceptionInStage(Stage.KEY_CONVERTER, new Exception(), ALL); + } + + @Test + public void testHandleRetriableExceptionInKeyConverterToleranceNone() { + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.KEY_CONVERTER, new RetriableException("Test"), NONE)); } @Test public void testHandleExceptionInTaskPut() { - testHandleExceptionInStage(Stage.TASK_PUT, new 
org.apache.kafka.connect.errors.RetriableException("Test")); + testHandleExceptionInStage(Stage.TASK_PUT, new org.apache.kafka.connect.errors.RetriableException("Test"), ALL); } @Test public void testHandleExceptionInTaskPoll() { - testHandleExceptionInStage(Stage.TASK_POLL, new org.apache.kafka.connect.errors.RetriableException("Test")); + testHandleExceptionInStage(Stage.TASK_POLL, new org.apache.kafka.connect.errors.RetriableException("Test"), ALL); } @Test public void testThrowExceptionInTaskPut() { - assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.TASK_PUT, new Exception())); + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.TASK_PUT, new Exception(), ALL)); } @Test public void testThrowExceptionInTaskPoll() { - assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.TASK_POLL, new Exception())); + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.TASK_POLL, new Exception(), ALL)); } @Test public void testThrowExceptionInKafkaConsume() { - assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.KAFKA_CONSUME, new Exception())); + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.KAFKA_CONSUME, new Exception(), ALL)); } @Test public void testThrowExceptionInKafkaProduce() { - assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.KAFKA_PRODUCE, new Exception())); + assertThrows(ConnectException.class, () -> testHandleExceptionInStage(Stage.KAFKA_PRODUCE, new Exception(), ALL)); } - private void testHandleExceptionInStage(Stage type, Exception ex) { - RetryWithToleranceOperator> retryWithToleranceOperator = setupExecutor(); + private void testHandleExceptionInStage(Stage type, Exception ex, ToleranceType toleranceType) { + RetryWithToleranceOperator> retryWithToleranceOperator = setupExecutor(toleranceType); ProcessingContext> context = new ProcessingContext<>(consumerRecord); Operation exceptionThrower = () -> { throw ex; @@ -205,8 +226,8 @@ private void testHandleExceptionInStage(Stage type, Exception ex) { assertTrue(context.failed()); } - private RetryWithToleranceOperator setupExecutor() { - return genericOperator(0, ALL, errorHandlingMetrics); + private RetryWithToleranceOperator setupExecutor(ToleranceType toleranceType) { + return genericOperator(0, toleranceType, errorHandlingMetrics); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java index d083f980349a1..ca099976444e9 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java @@ -17,6 +17,8 @@ package org.apache.kafka.connect.runtime.isolation; +import org.apache.kafka.connect.storage.Converter; + import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -26,11 +28,13 @@ import java.nio.file.Path; import java.util.Collections; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static 
org.junit.jupiter.api.Assertions.assertTrue; @@ -137,6 +141,18 @@ public void testVersionedPluginsHasVersion(PluginScanner scanner) { versionedPluginResult.forEach(pluginDesc -> assertEquals("1.0.0", pluginDesc.version())); } + @ParameterizedTest + @MethodSource("parameters") + public void testClasspathPluginIsAlsoLoadedInIsolation(PluginScanner scanner) { + Set isolatedClassPathPlugin = TestPlugins.pluginPath(TestPlugins.TestPlugin.CLASSPATH_CONVERTER); + PluginScanResult result = scan(scanner, isolatedClassPathPlugin); + Optional> pluginDesc = result.converters().stream() + .filter(desc -> desc.className().equals(TestPlugins.TestPlugin.CLASSPATH_CONVERTER.className())) + .findFirst(); + assertTrue(pluginDesc.isPresent()); + assertInstanceOf(PluginClassLoader.class, pluginDesc.get().loader()); + } + private PluginScanResult scan(PluginScanner scanner, Set pluginLocations) { ClassLoaderFactory factory = new ClassLoaderFactory(); Set pluginSources = PluginUtils.pluginSources(pluginLocations, PluginScannerTest.class.getClassLoader(), factory); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java index 30babb1bb651e..23041f9c31937 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java @@ -518,7 +518,6 @@ public void testNonCollidingAliases() { Collections.emptySortedSet(), Collections.emptySortedSet() ); - Map aliases = PluginUtils.computeAliases(result); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); expectedAliases.put("MockSinkConnector", MockSinkConnector.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java index d1c723852bc5b..55a3445a3318c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java @@ -186,8 +186,7 @@ public void shouldInstantiateAndConfigureConnectRestExtension() { assertNotNull(connectRestExtensions); assertEquals(1, connectRestExtensions.size(), "One Rest Extension expected"); assertNotNull(connectRestExtensions.get(0)); - assertTrue(connectRestExtensions.get(0) instanceof TestConnectRestExtension, - "Should be instance of TestConnectRestExtension"); + assertInstanceOf(TestConnectRestExtension.class, connectRestExtensions.get(0), "Should be instance of TestConnectRestExtension"); assertNotNull(((TestConnectRestExtension) connectRestExtensions.get(0)).configs); assertEquals(config.originals(), ((TestConnectRestExtension) connectRestExtensions.get(0)).configs); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java index 70b875b21b8c0..d0559123b7251 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java @@ -27,6 +27,7 @@ import org.apache.kafka.connect.runtime.WorkerConfig; import 
org.apache.kafka.connect.storage.Converter; +import org.apache.maven.artifact.versioning.VersionRange; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -190,10 +191,10 @@ public SynchronizedDelegatingClassLoader(ClassLoader parent, Breakpoint } @Override - public PluginClassLoader pluginClassLoader(String name) { + public PluginClassLoader pluginClassLoader(String name, VersionRange range) { dclBreakpoint.await(name); dclBreakpoint.await(name); - return super.pluginClassLoader(name); + return super.pluginClassLoader(name, range); } } @@ -462,9 +463,7 @@ private static ThreadFactory threadFactoryWithNamedThreads(String threadPrefix) return r -> { // This is essentially Executors.defaultThreadFactory except with // custom thread names so in order to filter by thread names when debugging - SecurityManager s = System.getSecurityManager(); - Thread t = new Thread((s != null) ? s.getThreadGroup() : - Thread.currentThread().getThreadGroup(), r, + Thread t = new Thread(Thread.currentThread().getThreadGroup(), r, threadPrefix + threadNumber.getAndIncrement(), 0); if (t.isDaemon()) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java index c9d8892da91ce..adb2c2418d5fb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java @@ -85,7 +85,8 @@ public enum TestPackage { SAMPLING_CONVERTER("sampling-converter"), SAMPLING_HEADER_CONVERTER("sampling-header-converter"), SERVICE_LOADER("service-loader"), - SUBCLASS_OF_CLASSPATH("subclass-of-classpath"); + SUBCLASS_OF_CLASSPATH("subclass-of-classpath"), + CLASSPATH_CONVERTER("classpath-converter"); private final String resourceDir; private final Predicate removeRuntimeClasses; @@ -251,7 +252,11 @@ public enum TestPlugin { /** * A ServiceLoader discovered plugin which subclasses another plugin which is present on the classpath */ - SUBCLASS_OF_CLASSPATH_OVERRIDE_POLICY(TestPackage.SUBCLASS_OF_CLASSPATH, "test.plugins.SubclassOfClasspathOverridePolicy"); + SUBCLASS_OF_CLASSPATH_OVERRIDE_POLICY(TestPackage.SUBCLASS_OF_CLASSPATH, "test.plugins.SubclassOfClasspathOverridePolicy"), + /** + * A plugin which is part of the classpath by default. This packages it as a separate jar which is used to test plugin isolation from the classpath plugin. 
+ */ + CLASSPATH_CONVERTER(TestPackage.CLASSPATH_CONVERTER, "org.apache.kafka.connect.converters.ByteArrayConverter", false); private final TestPackage testPackage; private final String className; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java index 8b69b1c37b234..13727c5b438bb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java @@ -58,7 +58,7 @@ import java.util.HashMap; import java.util.Map; -import javax.ws.rs.core.MediaType; +import jakarta.ws.rs.core.MediaType; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -184,7 +184,7 @@ public void testOptionsDoesNotIncludeWadlOutput() throws IOException { response.getEntity().writeTo(baos); assertArrayEquals( request.getAllowedMethods(response).toArray(), - new String(baos.toByteArray(), StandardCharsets.UTF_8).split(", ") + baos.toString(StandardCharsets.UTF_8).split(", ") ); } @@ -281,7 +281,7 @@ public void testLoggerEndpointWithDefaults() throws IOException { expectedLogger.put("level", loggingLevel); expectedLogger.put("last_modified", lastModified); Map> expectedLoggers = Collections.singletonMap(logger, expectedLogger); - Map> actualLoggers = mapper.readValue(responseStr, new TypeReference>>() { }); + Map> actualLoggers = mapper.readValue(responseStr, new TypeReference<>() { }); assertEquals(expectedLoggers, actualLoggers); } @@ -353,7 +353,6 @@ public void testRequestLogs() throws IOException { server.stop(); Collection logMessages = restServerAppender.getMessages(); - LogCaptureAppender.unregister(restServerAppender); restServerAppender.close(); String expectedlogContent = "\"GET / HTTP/1.1\" " + response.getStatusLine().getStatusCode(); assertTrue(logMessages.stream().anyMatch(logMessage -> logMessage.contains(expectedlogContent))); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java index 4d37b7e67b76d..394031e0df105 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java @@ -21,21 +21,23 @@ import org.apache.kafka.connect.runtime.distributed.Crypto; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.HttpClient; +import org.eclipse.jetty.client.Request; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.net.URI; import java.security.NoSuchAlgorithmException; import java.util.Base64; import javax.crypto.Mac; import javax.crypto.SecretKey; import javax.crypto.spec.SecretKeySpec; -import javax.ws.rs.core.HttpHeaders; + +import jakarta.ws.rs.core.HttpHeaders; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -113,25 +115,16 @@ 
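The import swap above and the hunks that follow track the Jetty 12 client API, where org.eclipse.jetty.client.api.Request becomes org.eclipse.jetty.client.Request and the per-header header(name, value) setter is replaced by a mutator-based headers(...) method; the tests therefore build a real request and read values back through getHeaders(). A minimal sketch of that usage, assuming the Jetty 12 HttpClient is on the classpath (URL and header name are placeholders, not taken from the patch):

    import org.eclipse.jetty.client.HttpClient;
    import org.eclipse.jetty.client.Request;

    public class JettyHeaderSketch {
        public static void main(String[] args) throws Exception {
            HttpClient client = new HttpClient();
            client.start();
            Request request = client.newRequest("http://localhost:8083/");
            // headers(...) takes a consumer of the mutable header collection,
            // replacing the old fluent header(name, value) call.
            request.headers(h -> h.add("X-Example-Header", "example"));
            System.out.println(request.getHeaders().get("X-Example-Header"));
            client.stop();
        }
    }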
public void addToRequestShouldThrowExceptionOnInvalidSignatureAlgorithm() throws @Test public void addToRequestShouldAddHeadersOnValidSignatureAlgorithm() { - Request request = mock(Request.class); - ArgumentCaptor signatureCapture = ArgumentCaptor.forClass(String.class); - ArgumentCaptor signatureAlgorithmCapture = ArgumentCaptor.forClass(String.class); - when(request.header( - eq(InternalRequestSignature.SIGNATURE_HEADER), - signatureCapture.capture() - )).thenReturn(request); - when(request.header( - eq(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER), - signatureAlgorithmCapture.capture() - )).thenReturn(request); + HttpClient httpClient = new HttpClient(); + Request request = httpClient.newRequest(URI.create("http://localhost")); InternalRequestSignature.addToRequest(crypto, KEY, REQUEST_BODY, SIGNATURE_ALGORITHM, request); assertEquals(ENCODED_SIGNATURE, - signatureCapture.getValue(), + request.getHeaders().get(InternalRequestSignature.SIGNATURE_HEADER), "Request should have valid base 64-encoded signature added as header"); assertEquals(SIGNATURE_ALGORITHM, - signatureAlgorithmCapture.getValue(), + request.getHeaders().get(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER), "Request should have provided signature algorithm added as header"); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java index 5250fc28f0d24..b5449daa81202 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java @@ -25,9 +25,9 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.Request; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; @@ -45,7 +45,8 @@ import java.util.stream.Stream; import javax.crypto.SecretKey; -import javax.ws.rs.core.Response; + +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -69,7 +70,7 @@ public class RestClientTest { private static final String MOCK_URL = "http://localhost:1234/api/endpoint"; private static final String TEST_METHOD = "GET"; private static final TestDTO TEST_DTO = new TestDTO("requestBodyData"); - private static final TypeReference TEST_TYPE = new TypeReference() { }; + private static final TypeReference TEST_TYPE = new TypeReference<>() { }; private static final SecretKey MOCK_SECRET_KEY = getMockSecretKey(); private static final String TEST_SIGNATURE_ALGORITHM = "HmacSHA1"; @@ -118,7 +119,7 @@ private static Stream requestExceptions() { private static Request buildThrowingMockRequest(Throwable t) throws ExecutionException, InterruptedException, TimeoutException { Request req = mock(Request.class); - when(req.header(anyString(), anyString())).thenReturn(req); + when(req.headers(any())).thenReturn(req); when(req.send()).thenThrow(t); return req; } @@ -138,17 +139,15 @@ public void testFailureDuringRequestCausesInternalServerError(Throwable requestE @Test public void testNullUrl() { RestClient client = spy(new 
RestClient(null)); - assertThrows(NullPointerException.class, () -> { - client.httpRequest( - null, - TEST_METHOD, - null, - TEST_DTO, - TEST_TYPE, - MOCK_SECRET_KEY, - TEST_SIGNATURE_ALGORITHM - ); - }); + assertThrows(NullPointerException.class, () -> client.httpRequest( + null, + TEST_METHOD, + null, + TEST_DTO, + TEST_TYPE, + MOCK_SECRET_KEY, + TEST_SIGNATURE_ALGORITHM + )); } @Test @@ -233,7 +232,7 @@ public void testNonEmptyResponseWithVoidResponseType() throws Exception { when(resp.getContentAsString()).thenReturn(toJsonString(TEST_DTO)); setupHttpClient(statusCode, req, resp); - TypeReference voidResponse = new TypeReference() { }; + TypeReference voidResponse = new TypeReference<>() { }; RestClient.HttpResponse httpResp = httpRequest( httpClient, MOCK_URL, TEST_METHOD, voidResponse, TEST_SIGNATURE_ALGORITHM ); @@ -312,7 +311,7 @@ public void testUseSslConfigsOnlyWhenNecessary() throws Exception { public void testHttpRequestInterrupted() throws ExecutionException, InterruptedException, TimeoutException { Request req = mock(Request.class); doThrow(new InterruptedException()).when(req).send(); - doReturn(req).when(req).header(anyString(), anyString()); + doReturn(req).when(req).headers(any()); doReturn(req).when(httpClient).newRequest(anyString()); ConnectRestException e = assertThrows(ConnectRestException.class, () -> httpRequest( httpClient, MOCK_URL, TEST_METHOD, TEST_TYPE, TEST_SIGNATURE_ALGORITHM @@ -325,7 +324,7 @@ public void testHttpRequestInterrupted() throws ExecutionException, InterruptedE private void setupHttpClient(int responseCode, Request req, ContentResponse resp) throws Exception { when(resp.getStatus()).thenReturn(responseCode); when(req.send()).thenReturn(resp); - when(req.header(anyString(), anyString())).thenReturn(req); + when(req.headers(any())).thenReturn(req); when(httpClient.newRequest(anyString())).thenReturn(req); } @@ -358,4 +357,4 @@ public int hashCode() { return Objects.hash(content); } } -} \ No newline at end of file +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java index f76f0585f1aff..e58444ccd4d77 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java @@ -75,8 +75,6 @@ public void testListenersConfigAllowedValues() { props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812"); config = RestServerConfig.forPublic(null, props); assertEquals(Arrays.asList("http://a.b:9999", "https://a.b:7812"), config.listeners()); - - config = RestServerConfig.forPublic(null, props); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java index 7773618ce2587..1b8376db635d4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java @@ -84,7 +84,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.ws.rs.BadRequestException; +import jakarta.ws.rs.BadRequestException; import static java.util.Arrays.asList; import static 
org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; @@ -159,22 +159,18 @@ public class ConnectorPluginsResourceTest { PREDICATE_PLUGINS.add(new PluginDesc<>(HasHeaderKey.class, appVersion, PluginType.PREDICATE, classLoader)); PREDICATE_PLUGINS.add(new PluginDesc<>(RecordIsTombstone.class, appVersion, PluginType.PREDICATE, classLoader)); } catch (Exception e) { - e.printStackTrace(); fail("Failed setting up plugins"); } } static { - List configs = new LinkedList<>(); - List partialConfigs = new LinkedList<>(); - ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List connectorConfigValues = connectorConfigDef.validate(PROPS); List partialConnectorConfigValues = connectorConfigDef.validate(PARTIAL_PROPS); ConfigInfos result = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), connectorConfigValues, Collections.emptyList()); ConfigInfos partialResult = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), partialConnectorConfigValues, Collections.emptyList()); - configs.addAll(result.values()); - partialConfigs.addAll(partialResult.values()); + List configs = new LinkedList<>(result.values()); + List partialConfigs = new LinkedList<>(partialResult.values()); ConfigKeyInfo configKeyInfo = new ConfigKeyInfo("test.string.config", "STRING", true, null, "HIGH", "Test configuration for string type.", null, -1, "NONE", "test.string.config", Collections.emptyList()); ConfigValueInfo configValueInfo = new ConfigValueInfo("test.string.config", "testString", Collections.emptyList(), Collections.emptyList(), true); @@ -432,14 +428,14 @@ public void testListAllPlugins() { @Test public void testGetConnectorConfigDef() { String connName = ConnectorPluginsResourceTestConnector.class.getName(); - when(herder.connectorPluginConfig(eq(connName))).thenAnswer(answer -> { + when(herder.connectorPluginConfig(eq(connName), eq(null))).thenAnswer(answer -> { List results = new ArrayList<>(); for (ConfigDef.ConfigKey configKey : ConnectorPluginsResourceTestConnector.CONFIG_DEF.configKeys().values()) { results.add(AbstractHerder.convertConfigKey(configKey)); } return results; }); - List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName); + List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName, null); assertEquals(ConnectorPluginsResourceTestConnector.CONFIG_DEF.names().size(), connectorConfigDef.size()); for (String config : ConnectorPluginsResourceTestConnector.CONFIG_DEF.names()) { Optional cki = connectorConfigDef.stream().filter(c -> c.name().equals(config)).findFirst(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java index 6357f731fa453..9dfead77220f6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java @@ -64,12 +64,12 @@ import java.util.Map; import java.util.Set; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; +import jakarta.ws.rs.BadRequestException; 
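These import swaps reflect the move from the javax.ws.rs namespace to jakarta.ws.rs (Jakarta EE 9+); only the package prefix changes, the JAX-RS types behave as before. A tiny illustration using the relocated Response.Status enum that DistributedHerderTest now imports, assuming only the jakarta.ws.rs-api artifact on the classpath:

    import jakarta.ws.rs.core.Response;

    public class JakartaRsSketch {
        public static void main(String[] args) {
            // Same enum values as under javax.ws.rs, just in the jakarta package.
            Response.Status status = Response.Status.FORBIDDEN;
            System.out.println(status.getStatusCode() + " " + status.getReasonPhrase());
        }
    }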
+import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MultivaluedHashMap; +import jakarta.ws.rs.core.MultivaluedMap; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriInfo; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; @@ -486,7 +486,7 @@ public void testGetConnectorConfigConnectorNotFound() { } @Test - public void testGetTasksConfig() throws Throwable { + public void testGetTaskConfigs() throws Throwable { final ConnectorTaskId connectorTask0 = new ConnectorTaskId(CONNECTOR_NAME, 0); final Map connectorTask0Configs = new HashMap<>(); connectorTask0Configs.put("connector-task0-config0", "123"); @@ -498,31 +498,22 @@ public void testGetTasksConfig() throws Throwable { final ConnectorTaskId connector2Task0 = new ConnectorTaskId(CONNECTOR2_NAME, 0); final Map connector2Task0Configs = Collections.singletonMap("connector2-task0-config0", "789"); - final Map> expectedTasksConnector = new HashMap<>(); - expectedTasksConnector.put(connectorTask0, connectorTask0Configs); - expectedTasksConnector.put(connectorTask1, connectorTask1Configs); - final Map> expectedTasksConnector2 = new HashMap<>(); - expectedTasksConnector2.put(connector2Task0, connector2Task0Configs); + final List expectedTasksConnector = new ArrayList<>(); + expectedTasksConnector.add(new TaskInfo(connectorTask0, connectorTask0Configs)); + expectedTasksConnector.add(new TaskInfo(connectorTask1, connectorTask1Configs)); - final ArgumentCaptor>>> cb1 = ArgumentCaptor.forClass(Callback.class); - expectAndCallbackResult(cb1, expectedTasksConnector).when(herder).tasksConfig(eq(CONNECTOR_NAME), cb1.capture()); - final ArgumentCaptor>>> cb2 = ArgumentCaptor.forClass(Callback.class); - expectAndCallbackResult(cb2, expectedTasksConnector2).when(herder).tasksConfig(eq(CONNECTOR2_NAME), cb2.capture()); + final List expectedTasksConnector2 = new ArrayList<>(); + expectedTasksConnector2.add(new TaskInfo(connector2Task0, connector2Task0Configs)); - Map> tasksConfig = connectorsResource.getTasksConfig(CONNECTOR_NAME); - assertEquals(expectedTasksConnector, tasksConfig); - Map> tasksConfig2 = connectorsResource.getTasksConfig(CONNECTOR2_NAME); - assertEquals(expectedTasksConnector2, tasksConfig2); - } - - @Test - public void testGetTasksConfigConnectorNotFound() { - final ArgumentCaptor>>> cb = ArgumentCaptor.forClass(Callback.class); - expectAndCallbackException(cb, new NotFoundException("not found")) - .when(herder).tasksConfig(eq(CONNECTOR_NAME), cb.capture()); + final ArgumentCaptor>> cb1 = ArgumentCaptor.forClass(Callback.class); + expectAndCallbackResult(cb1, expectedTasksConnector).when(herder).taskConfigs(eq(CONNECTOR_NAME), cb1.capture()); + final ArgumentCaptor>> cb2 = ArgumentCaptor.forClass(Callback.class); + expectAndCallbackResult(cb2, expectedTasksConnector2).when(herder).taskConfigs(eq(CONNECTOR2_NAME), cb2.capture()); - assertThrows(NotFoundException.class, () -> - connectorsResource.getTasksConfig(CONNECTOR_NAME)); + List taskConfigs = connectorsResource.getTaskConfigs(CONNECTOR_NAME); + assertEquals(expectedTasksConnector, taskConfigs); + List taskConfigs2 = connectorsResource.getTaskConfigs(CONNECTOR2_NAME); + assertEquals(expectedTasksConnector2, taskConfigs2); } @Test @@ -629,7 +620,7 @@ public void testPatchConnectorConfigLeaderRedirect() throws Throwable { } @Test - public void testPatchConnectorConfigNotFound() throws Throwable { + public void 
testPatchConnectorConfigNotFound() { final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new NotFoundException("Connector " + CONNECTOR_NAME + " not found")) .when(herder).patchConnectorConfig(eq(CONNECTOR_NAME), eq(CONNECTOR_CONFIG_PATCH), cb.capture()); @@ -981,9 +972,4 @@ private Stubber expectAndCallbackNotLeaderException(final ArgumentCaptor { - T run() throws Throwable; - } - } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java index 0e24f86695169..aee85a86c2ab2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java @@ -44,8 +44,9 @@ import java.util.Map; import javax.crypto.Mac; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.UriInfo; + +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.UriInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java index 916de425bd984..c73bba8c84368 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java @@ -35,7 +35,7 @@ import java.util.Collections; import java.util.List; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -167,4 +167,4 @@ public void testSetLevelClusterScope() { verify(herder).setClusterLoggerLevel(logger, level); } -} \ No newline at end of file +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java index dfdf081227cf3..459bc58201392 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java @@ -37,7 +37,7 @@ import java.util.concurrent.TimeoutException; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java index a3dc0efef99d4..408f4cb886b29 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java @@ -17,9 +17,12 @@ package org.apache.kafka.connect.runtime.rest.util; import org.apache.kafka.common.config.SslConfigs; +import 
org.apache.kafka.common.config.types.Password; +import org.apache.kafka.common.network.CertStores; import org.apache.kafka.connect.runtime.rest.RestServerConfig; import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Arrays; @@ -33,6 +36,22 @@ public class SSLUtilsTest { + private Map sslConfig; + private String keystorePath; + private String truststorePath; + private Password keystorePassword; + private Password truststorePassword; + + @BeforeEach + public void before() throws Exception { + CertStores serverCertStores = new CertStores(true, "localhost"); + sslConfig = serverCertStores.getUntrustingConfig(); + keystorePath = sslConfig.get("ssl.keystore.location").toString(); + truststorePath = sslConfig.get("ssl.truststore.location").toString(); + keystorePassword = (Password) sslConfig.get("ssl.keystore.password"); + truststorePassword = (Password) sslConfig.get("ssl.keystore.password"); + } + @Test public void testGetOrDefault() { String existingKey = "exists"; @@ -47,13 +66,13 @@ public void testGetOrDefault() { } @Test - public void testCreateServerSideSslContextFactory() { + public void testCreateServerSideSslContextFactory() throws Exception { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -69,8 +88,8 @@ public void testCreateServerSideSslContextFactory() { RestServerConfig config = RestServerConfig.forPublic(null, configMap); SslContextFactory.Server ssl = SSLUtils.createServerSideSslContextFactory(config); - assertEquals("file:///path/to/keystore", ssl.getKeyStorePath()); - assertEquals("file:///path/to/truststore", ssl.getTrustStorePath()); + assertEquals("file://" + keystorePath, ssl.getKeyStorePath()); + assertEquals("file://" + truststorePath, ssl.getTrustStorePath()); assertEquals("SunJSSE", ssl.getProvider()); assertArrayEquals(new String[] {"SSL_RSA_WITH_RC4_128_SHA", "SSL_RSA_WITH_RC4_128_MD5"}, ssl.getIncludeCipherSuites()); assertEquals("SHA1PRNG", ssl.getSecureRandomAlgorithm()); @@ -87,11 +106,11 @@ public void testCreateServerSideSslContextFactory() { @Test public void testCreateClientSideSslContextFactory() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", 
truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -107,8 +126,8 @@ public void testCreateClientSideSslContextFactory() { RestServerConfig config = RestServerConfig.forPublic(null, configMap); SslContextFactory.Client ssl = SSLUtils.createClientSideSslContextFactory(config); - assertEquals("file:///path/to/keystore", ssl.getKeyStorePath()); - assertEquals("file:///path/to/truststore", ssl.getTrustStorePath()); + assertEquals("file://" + keystorePath, ssl.getKeyStorePath()); + assertEquals("file://" + truststorePath, ssl.getTrustStorePath()); assertEquals("SunJSSE", ssl.getProvider()); assertArrayEquals(new String[] {"SSL_RSA_WITH_RC4_128_SHA", "SSL_RSA_WITH_RC4_128_MD5"}, ssl.getIncludeCipherSuites()); assertEquals("SHA1PRNG", ssl.getSecureRandomAlgorithm()); @@ -123,11 +142,11 @@ public void testCreateClientSideSslContextFactory() { @Test public void testCreateServerSideSslContextFactoryDefaultValues() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -148,11 +167,11 @@ public void testCreateServerSideSslContextFactoryDefaultValues() { @Test public void testCreateClientSideSslContextFactoryDefaultValues() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java index 1630fbf19faaf..4d8c25932fe42 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java @@ -34,7 +34,7 @@ public class StandaloneConfigTest { private static final String HTTPS_LISTENER_PREFIX = "listeners.https."; private Map sslProps() { - return new 
HashMap() { + return new HashMap<>() { { put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("ssl_key_password")); put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "ssl_keystore"); @@ -46,7 +46,7 @@ private Map sslProps() { } private Map baseWorkerProps() { - return new HashMap() { + return new HashMap<>() { { put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java index 857bbb77b6388..17a7d7c391a5e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java @@ -38,6 +38,7 @@ import org.apache.kafka.connect.runtime.TaskConfig; import org.apache.kafka.connect.runtime.TaskStatus; import org.apache.kafka.connect.runtime.Worker; +import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.WorkerConfigTransformer; import org.apache.kafka.connect.runtime.distributed.SampleConnectorClientConfigOverridePolicy; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; @@ -56,6 +57,7 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.MemoryConfigBackingStore; +import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; @@ -130,6 +132,8 @@ private enum SourceSink { @Mock protected Worker worker; @Mock + protected WorkerConfig workerConfig; + @Mock protected WorkerConfigTransformer transformer; @Mock private Plugins plugins; @@ -144,9 +148,11 @@ private enum SourceSink { noneConnectorClientConfigOverridePolicy = new SampleConnectorClientConfigOverridePolicy(); public void initialize(boolean mockTransform) { + when(worker.getPlugins()).thenReturn(plugins); herder = mock(StandaloneHerder.class, withSettings() .useConstructor(worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy, new MockTime()) .defaultAnswer(CALLS_REAL_METHODS)); + verify(worker).getPlugins(); createCallback = new FutureCallback<>(); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); if (mockTransform) @@ -173,6 +179,7 @@ public void testCreateSourceConnector() throws Exception { } @Test + @SuppressWarnings("rawtypes") public void testCreateConnectorFailedValidation() { initialize(false); // Basic validation should be performed and return an error, but should still evaluate the connector's config @@ -185,12 +192,13 @@ public void testCreateConnectorFailedValidation() { final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(worker.config()).thenReturn(workerConfig); + 
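The new HashMap<>() {{ ... }} changes in StandaloneConfigTest and RetryWithToleranceOperatorTest rely on the Java 9+ allowance for the diamond operator on anonymous classes, so the double-brace initializers no longer need explicit type arguments. A minimal illustration (key and value are placeholders):

    import java.util.HashMap;
    import java.util.Map;

    public class DiamondAnonymousSketch {
        public static void main(String[] args) {
            // Before Java 9 this required new HashMap<String, String>() {{ ... }}.
            Map<String, String> props = new HashMap<>() {{
                put("example.key", "example.value");
            }};
            System.out.println(props);
        }
    }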
when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); + when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); when(connectorMock.config()).thenReturn(new ConfigDef()); - ConfigValue validatedValue = new ConfigValue("foo.bar"); when(connectorMock.validate(config)).thenReturn(new Config(singletonList(validatedValue))); @@ -677,16 +685,13 @@ public void testAccessors() throws Exception { Callback connectorInfoCb = mock(Callback.class); Callback> connectorConfigCb = mock(Callback.class); Callback> taskConfigsCb = mock(Callback.class); - Callback>> tasksConfigCb = mock(Callback.class); // Check accessors with empty worker doNothing().when(listConnectorsCb).onCompletion(null, Collections.EMPTY_SET); doNothing().when(connectorInfoCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(taskConfigsCb).onCompletion(any(NotFoundException.class), isNull()); - doNothing().when(tasksConfigCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(connectorConfigCb).onCompletion(any(NotFoundException.class), isNull()); - expectAdd(SourceSink.SOURCE); expectConfigValidation(SourceSink.SOURCE, connConfig); @@ -699,16 +704,11 @@ public void testAccessors() throws Exception { TaskInfo taskInfo = new TaskInfo(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)); doNothing().when(taskConfigsCb).onCompletion(null, singletonList(taskInfo)); - Map> tasksConfig = Collections.singletonMap(new ConnectorTaskId(CONNECTOR_NAME, 0), - taskConfig(SourceSink.SOURCE)); - doNothing().when(tasksConfigCb).onCompletion(null, tasksConfig); - // All operations are synchronous for StandaloneHerder, so we don't need to actually wait after making each call herder.connectors(listConnectorsCb); herder.connectorInfo(CONNECTOR_NAME, connectorInfoCb); herder.connectorConfig(CONNECTOR_NAME, connectorConfigCb); herder.taskConfigs(CONNECTOR_NAME, taskConfigsCb); - herder.tasksConfig(CONNECTOR_NAME, tasksConfigCb); herder.putConnectorConfig(CONNECTOR_NAME, connConfig, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); @@ -719,7 +719,6 @@ public void testAccessors() throws Exception { herder.connectorInfo(CONNECTOR_NAME, connectorInfoCb); herder.connectorConfig(CONNECTOR_NAME, connectorConfigCb); herder.taskConfigs(CONNECTOR_NAME, taskConfigsCb); - herder.tasksConfig(CONNECTOR_NAME, tasksConfigCb); // Config transformation should not occur when requesting connector or task info verify(transformer, never()).transform(eq(CONNECTOR_NAME), any()); } @@ -859,6 +858,7 @@ public void testPutTaskConfigs() { } @Test + @SuppressWarnings("rawtypes") public void testCorruptConfig() { initialize(false); Map config = new HashMap<>(); @@ -879,10 +879,12 @@ public void testCorruptConfig() { when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); + when(worker.config()).thenReturn(workerConfig); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + 
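The (Class) SimpleHeaderConverter.class casts added in StandaloneHerderTest, together with @SuppressWarnings("rawtypes"), work around wildcard capture: WorkerConfig.getClass(String) returns Class<?>, so Mockito's thenReturn will not accept a concrete Class<SimpleHeaderConverter> without going through the raw type. A sketch of the pattern, using only classes this test already imports:

    import org.apache.kafka.connect.runtime.WorkerConfig;
    import org.apache.kafka.connect.storage.SimpleHeaderConverter;

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class RawClassStubSketch {
        @SuppressWarnings("rawtypes")
        static WorkerConfig headerConverterStub() {
            WorkerConfig config = mock(WorkerConfig.class);
            // thenReturn(SimpleHeaderConverter.class) alone does not compile against
            // the captured wildcard in Class<?>, hence the raw-type cast.
            when(config.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG))
                    .thenReturn((Class) SimpleHeaderConverter.class);
            return config;
        }
    }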
when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(configDef); herder.putConnectorConfig(CONNECTOR_NAME, config, true, createCallback); @@ -1221,6 +1223,7 @@ private static Map taskConfig(SourceSink sourceSink) { return generatedTaskProps; } + @SuppressWarnings("rawtypes") private void expectConfigValidation( SourceSink sourceSink, Map... configs @@ -1230,13 +1233,13 @@ private void expectConfigValidation( when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); - when(worker.getPlugins()).thenReturn(plugins); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(worker.config()).thenReturn(workerConfig); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); - // Assume the connector should always be created when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(new ConfigDef()); // Set up validation for each config diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java index 4f849f7b3ba79..f78ab54950f4a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java @@ -18,7 +18,7 @@ import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.common.Cluster; @@ -437,7 +437,7 @@ protected Consumer createConsumer() { } private MockConsumer createMockConsumer(String topic) { - MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.LATEST); + MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); Node noNode = Node.noNode(); Node[] nodes = new Node[]{noNode}; consumer.updatePartitions(topic, Collections.singletonList(new PartitionInfo(topic, 0, noNode, nodes, nodes))); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java index 945b88cce6c50..d455976423dac 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java @@ -69,6 +69,7 @@ import java.util.List; import java.util.Map; import 
java.util.Optional; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -164,7 +165,6 @@ public class KafkaConfigBackingStoreTest { new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) ); - private static final Struct TARGET_STATE_STARTED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V0).put("state", "STARTED"); private static final Struct TARGET_STATE_PAUSED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "PAUSED"); @@ -175,7 +175,8 @@ public class KafkaConfigBackingStoreTest { .put("state.v2", "STOPPED"); private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 6), - new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9) + new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9), + new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 2) ); // The exact format doesn't matter here since both conversions are mocked @@ -310,7 +311,7 @@ public void testPutConnectorConfig() throws Exception { doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) // Config deletion - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(configKey, null); put(targetStateKey, null); }}) @@ -363,7 +364,7 @@ public void testPutConnectorConfig() throws Exception { } @Test - public void testPutConnectorConfigWithTargetState() throws Exception { + public void testPutConnectorConfigWithTargetState() { when(configLog.partitionCount()).thenReturn(1); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); @@ -376,7 +377,7 @@ public void testPutConnectorConfigWithTargetState() throws Exception { assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); assertNull(configState.targetState(CONNECTOR_IDS.get(0))); - doAnswer(expectReadToEnd(new LinkedHashMap() {{ + doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(TARGET_STATE_KEYS.get(0), TARGET_STATES_SERIALIZED.get(2)); put(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); }}) @@ -819,6 +820,56 @@ public void testRestoreZeroTasks() { verify(configLog).stop(); } + @Test + public void testRestoreCompactedDeletedConnector() { + // When a connector is deleted, we emit a tombstone record for its config (with key + // "connector-") and its target state (with key "target-state-"), but not + // for its task configs + // As a result, we need to carefully handle the case where task configs are present in + // the config topic for a connector, but there is no accompanying config for the + // connector itself + + int offset = 0; + List> existingRecords = List.of( + new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, + TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, + TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, + 
COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), + new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, + CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); + LinkedHashMap deserialized = new LinkedHashMap<>(); + deserialized.put(CONFIGS_SERIALIZED.get(0), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); + deserialized.put(CONFIGS_SERIALIZED.get(2), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); + deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(2)); + logOffset = offset; + expectStart(existingRecords, deserialized); + when(configLog.partitionCount()).thenReturn(1); + + configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); + verifyConfigure(); + configStorage.start(); + + // Should see no connectors and no task configs + ClusterConfigState configState = configStorage.snapshot(); + assertEquals(Set.of(), configState.connectors()); + assertEquals(0, configState.taskCount(CONNECTOR_1_NAME)); + assertNull(configState.rawTaskConfig(TASK_IDS.get(0))); + assertNull(configState.rawTaskConfig(TASK_IDS.get(1))); + + // Probe internal collections just to be sure + assertEquals(Map.of(), configState.connectorConfigs); + assertEquals(Map.of(), configState.taskConfigs); + assertEquals(Map.of(), configState.connectorTaskCounts); + + // Exception: we still include task count records, for the unlikely-but-possible case + // where there are still zombie instances of the tasks for this long-deleted connector + // running somewhere on the cluster + assertEquals(2, (int) configState.taskCountRecord(CONNECTOR_1_NAME)); + } + @Test public void testRecordToRestartRequest() { ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), @@ -1028,7 +1079,7 @@ public void testBackgroundConnectorDeletion() throws Exception { } @Test - public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exception { + public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // Test a case where a failure and compaction has left us in an inconsistent state when reading the log. // We start out by loading an initial configuration where we started to write a task update, and then // compaction cleaned up the earlier record. 
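
Both compaction scenarios exercised here rest on the config-topic invariant spelled out in the comment above: deleting a connector writes tombstones for its config ("connector-<name>") and target state ("target-state-<name>") but never for its task configs, so a restore has to discard orphaned task configs itself while still keeping task count records around in case zombie task instances are still running. A minimal sketch of that filtering step, with hypothetical names standing in for the real KafkaConfigBackingStore fields:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public final class CompactedConnectorFilter {

    /**
     * Drops task configs whose connector config has been tombstoned away, while leaving
     * task count records untouched. Task ids are modeled here as "connectorName-taskNumber"
     * strings purely for illustration; the real store tracks ConnectorTaskId objects.
     */
    public static Map<String, Map<String, String>> dropOrphanedTaskConfigs(
            Set<String> connectorsWithConfig,
            Map<String, Map<String, String>> taskConfigs) {
        Map<String, Map<String, String>> retained = new HashMap<>();
        taskConfigs.forEach((taskId, config) -> {
            String connectorName = taskId.substring(0, taskId.lastIndexOf('-'));
            if (connectorsWithConfig.contains(connectorName))
                retained.put(taskId, config);
        });
        return retained;
    }
}
```
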
@@ -1105,18 +1156,18 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() throws Exceptio } @Test - public void testPutRestartRequestOnlyFailed() throws Exception { + public void testPutRestartRequestOnlyFailed() { RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, false); testPutRestartRequest(restartRequest); } @Test - public void testPutRestartRequestOnlyFailedIncludingTasks() throws Exception { + public void testPutRestartRequestOnlyFailedIncludingTasks() { RestartRequest restartRequest = new RestartRequest(CONNECTOR_IDS.get(0), true, true); testPutRestartRequest(restartRequest); } - private void testPutRestartRequest(RestartRequest restartRequest) throws Exception { + private void testPutRestartRequest(RestartRequest restartRequest) { expectStart(Collections.emptyList(), Collections.emptyMap()); when(configLog.partitionCount()).thenReturn(1); @@ -1191,7 +1242,7 @@ public void testRestoreRestartRequestInconsistentState() { } @Test - public void testPutTaskConfigsZeroTasks() throws Exception { + public void testPutTaskConfigsZeroTasks() { configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); verifyConfigure(); configStorage.start(); @@ -1290,7 +1341,7 @@ public void testBackgroundUpdateTargetState() throws Exception { } @Test - public void testSameTargetState() throws Exception { + public void testSameTargetState() { // verify that we handle target state changes correctly when they come up through the log List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), @@ -1391,7 +1442,7 @@ public void testPutLogLevel() throws Exception { } @Test - public void testTaskCountRecordsAndGenerations() throws Exception { + public void testTaskCountRecordsAndGenerations() { configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); verifyConfigure(); configStorage.start(); @@ -1408,7 +1459,7 @@ public void testTaskCountRecordsAndGenerations() throws Exception { doAnswer(expectReadToEnd(new LinkedHashMap<>())) .doAnswer(expectReadToEnd(new LinkedHashMap<>())) .doAnswer(expectReadToEnd(serializedConfigs)) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(3)); }}) ) @@ -1467,7 +1518,7 @@ public void testTaskCountRecordsAndGenerations() throws Exception { } @Test - public void testPutTaskConfigs() throws Exception { + public void testPutTaskConfigs() { configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); verifyConfigure(); configStorage.start(); @@ -1475,7 +1526,7 @@ public void testPutTaskConfigs() throws Exception { doAnswer(expectReadToEnd(new LinkedHashMap<>())) .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); @@ -1525,7 +1576,7 @@ public void testPutTaskConfigs() throws Exception { } @Test - public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { + public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); verifyConfigure(); configStorage.start(); @@ -1533,7 +1584,7 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { doAnswer(expectReadToEnd(new 
LinkedHashMap<>())) .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); @@ -1541,7 +1592,7 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() throws Exception { ) .doAnswer(expectReadToEnd(new LinkedHashMap<>())) .doAnswer(expectReadToEnd(new LinkedHashMap<>())) - .doAnswer(expectReadToEnd(new LinkedHashMap() {{ + .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(TASK_CONFIG_KEYS.get(2), CONFIGS_SERIALIZED.get(3)); put(COMMIT_TASKS_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(4)); }}) @@ -1628,7 +1679,7 @@ private void expectStart(final List> preexistingR // from the log. Validate the data that is captured when the conversion is performed matches the specified data // (by checking a single field's value) private void expectConvertWriteRead2(final String configKey, final Schema valueSchema, final byte[] serialized, - final Struct value) throws Exception { + final Struct value) { doReturn(serialized).when(converter).fromConnectData(eq(TOPIC), eq(valueSchema), eq(value)); doReturn(producerFuture).when(configLog).sendWithReceipt(eq(configKey), eq(serialized)); doReturn(new SchemaAndValue(null, structToMap(value))).when(converter).toConnectData(eq(TOPIC), eq(serialized)); @@ -1638,7 +1689,7 @@ private void expectConvertWriteRead2(final String configKey, final Schema valueS // from the log. Validate the data that is captured when the conversion is performed matches the specified data // (by checking a single field's value) private void expectConvertWriteRead(final String configKey, final Schema valueSchema, final byte[] serialized, - final String dataFieldName, final Object dataFieldValue) throws Exception { + final String dataFieldName, final Object dataFieldValue) { final ArgumentCaptor capturedRecord = ArgumentCaptor.forClass(Struct.class); when(converter.fromConnectData(eq(TOPIC), eq(valueSchema), capturedRecord.capture())).thenReturn(serialized); when(configLog.sendWithReceipt(configKey, serialized)).thenReturn(producerFuture); @@ -1658,12 +1709,6 @@ private void expectRead(LinkedHashMap serializedValues, } } - private void expectRead(final String key, final byte[] serializedValue, Struct deserializedValue) { - LinkedHashMap serializedData = new LinkedHashMap<>(); - serializedData.put(key, serializedValue); - expectRead(serializedData, Collections.singletonMap(key, deserializedValue)); - } - // This map needs to maintain ordering private Answer> expectReadToEnd(final Map serializedConfigs) { return invocation -> { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java index f29d3bd29f239..aabf894e1ea90 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java @@ -20,7 +20,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.producer.KafkaProducer; import 
org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; @@ -141,7 +141,7 @@ public class KafkaBasedLogTest { @BeforeEach public void setUp() { - store = new KafkaBasedLog(TOPIC, PRODUCER_PROPS, CONSUMER_PROPS, topicAdminSupplier, consumedCallback, time, initializer) { + store = new KafkaBasedLog<>(TOPIC, PRODUCER_PROPS, CONSUMER_PROPS, topicAdminSupplier, consumedCallback, time, initializer) { @Override protected KafkaProducer createProducer() { return producer; @@ -152,7 +152,7 @@ protected MockConsumer createConsumer() { return consumer; } }; - consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); consumer.updatePartitions(TOPIC, Arrays.asList(TPINFO0, TPINFO1)); Map beginningOffsets = new HashMap<>(); beginningOffsets.put(TP0, 0L); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java index 29fd1689d40df..2fb788a1f495a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java @@ -104,7 +104,7 @@ public void shouldFailToGetTopicAdminAfterClose() { // When closed sharedAdmin.close(); // Then using the admin should fail - assertThrows(ConnectException.class, () -> sharedAdmin.topicAdmin()); + assertThrows(ConnectException.class, sharedAdmin::topicAdmin); } private void verifyTopicAdminCreatesAndCloses(int count) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java index 9130d8badc9ba..8410c0023b1e6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestFuture.java @@ -28,17 +28,9 @@ public class TestFuture implements Future { private Throwable exception; private final CountDownLatch getCalledLatch; - private volatile boolean resolveOnGet; - private T resolveOnGetResult; - private Throwable resolveOnGetException; - public TestFuture() { resolved = false; getCalledLatch = new CountDownLatch(1); - - resolveOnGet = false; - resolveOnGetResult = null; - resolveOnGetException = null; } public void resolve(T val) { @@ -88,13 +80,6 @@ public T get() throws InterruptedException, ExecutionException { public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { getCalledLatch.countDown(); - if (resolveOnGet) { - if (resolveOnGetException != null) - resolve(resolveOnGetException); - else - resolve(resolveOnGetResult); - } - synchronized (this) { while (!resolved) { this.wait(TimeUnit.MILLISECONDS.convert(timeout, unit)); @@ -111,50 +96,4 @@ else if (exception instanceof InterruptedException) } return result; } - - /** - * Set a flag to resolve the future as soon as one of the get() methods has been called. Returns immediately. - * @param val the value to return from the future - */ - public void resolveOnGet(T val) { - resolveOnGet = true; - resolveOnGetResult = val; - } - - /** - * Set a flag to resolve the future as soon as one of the get() methods has been called. Returns immediately. 
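
The MockConsumer hunks in these two test classes drop the removed OffsetResetStrategy enum in favour of the string-taking constructor, with AutoOffsetResetStrategy supplying the canonical reset names. A small self-contained sketch of the new construction pattern; the topic name and seeded offsets are illustrative:

```java
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.List;

public class MockConsumerSetup {

    public static MockConsumer<byte[], byte[]> earliestConsumer(String topic) {
        // The reset policy now travels as a plain string ("earliest"/"latest");
        // the enum-taking constructor previously used by these tests is gone.
        MockConsumer<byte[], byte[]> consumer =
            new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());

        Node node = Node.noNode();
        Node[] nodes = new Node[]{node};
        consumer.updatePartitions(topic,
            List.of(new PartitionInfo(topic, 0, node, nodes, nodes)));
        consumer.updateBeginningOffsets(
            Collections.singletonMap(new TopicPartition(topic, 0), 0L));
        return consumer;
    }
}
```

The same setUp also switches the anonymous KafkaBasedLog subclass to the diamond operator, which Java has allowed on anonymous classes since Java 9, so no behavioural change is involved there.
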
- * @param t the exception to return from the future - */ - public void resolveOnGet(Throwable t) { - resolveOnGet = true; - resolveOnGetException = t; - } - - /** - * Block, waiting for another thread to call one of the get() methods, and then immediately resolve the future with - * the specified value. - * @param val the value to return from the future - */ - public void waitForGetAndResolve(T val) { - waitForGet(); - resolve(val); - } - - /** - * Block, waiting for another thread to call one of the get() methods, and then immediately resolve the future with - * the specified value. - * @param t the exception to use to resolve the future - */ - public void waitForGetAndResolve(Throwable t) { - waitForGet(); - resolve(t); - } - - private void waitForGet() { - try { - getCalledLatch.await(); - } catch (InterruptedException e) { - throw new RuntimeException("Unexpected interruption: ", e); - } - } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java index 08686b87f1300..1f25dd15f514c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java @@ -525,8 +525,6 @@ public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException( Cluster cluster = createCluster(1, "myTopic", 1); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { - Map offsetMap = new HashMap<>(); - offsetMap.put(tp1, offset); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); // This error should be treated as non-retriable and cause TopicAdmin::retryEndOffsets to fail @@ -543,10 +541,7 @@ public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException( Throwable cause = exception.getCause(); assertNotNull(cause, "cause of failure should be preserved"); - assertTrue( - cause instanceof TopicAuthorizationException, - "cause of failure should be accurately reported; expected topic authorization error, but was " + cause - ); + assertInstanceOf(TopicAuthorizationException.class, cause, "cause of failure should be accurately reported; expected topic authorization error, but was " + cause); } } @@ -559,8 +554,6 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { Cluster cluster = createCluster(1, "myTopic", 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { - Map offsetMap = new HashMap<>(); - offsetMap.put(tp1, offset); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION)); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java index c30c78ad7160a..8dc22edb86309 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java @@ -37,7 +37,7 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.test.TestUtils.waitForCondition; diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java index 018c9a40b05de..b576cda56a75d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java @@ -37,14 +37,15 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.client.util.StringContentProvider; +import org.eclipse.jetty.client.Request; +import org.eclipse.jetty.client.StringRequestContent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -55,7 +56,9 @@ import java.util.Set; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; + +import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; abstract class EmbeddedConnect { @@ -81,6 +84,10 @@ protected EmbeddedConnect( this.kafkaCluster = new EmbeddedKafkaCluster(numBrokers, brokerProps, clientProps); this.maskExitProcedures = maskExitProcedures; this.httpClient = new HttpClient(); + // Necessary to prevent the rest request from timing out too early + // Before this change,ConnectWorkerIntegrationTest#testPollTimeoutExpiry() was failing + // because the request was being stopped by jetty before the framework responded + this.httpClient.setIdleTimeout(DEFAULT_REST_REQUEST_TIMEOUT_MS); this.assertions = new ConnectAssertions(this); // we should keep the original class loader and set it back after connector stopped since the connector will change the class loader, // and then, the Mockito will use the unexpected class loader to generate the wrong proxy instance, which makes mock failed @@ -618,7 +625,7 @@ public List taskConfigs(String connectorName) { if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) { // We use String instead of ConnectorTaskId as the key here since the latter can't be automatically // deserialized by Jackson when used as a JSON object key (i.e., when it's serialized as a JSON string) - return mapper.readValue(responseToString(response), new TypeReference>() { }); + return mapper.readValue(responseToString(response), new TypeReference<>() { }); } } catch (IOException e) { log.error("Could not read task configs from response: {}", @@ -914,19 +921,6 @@ public EmbeddedKafkaCluster kafka() { return kafkaCluster; } - /** - * Execute a GET request on the given URL. - * - * @param url the HTTP endpoint - * @return the response to the GET request - * @throws ConnectException if execution of the GET request fails - * @deprecated Use {@link #requestGet(String)} instead. - */ - @Deprecated - public String executeGet(String url) { - return responseToString(requestGet(url)); - } - /** * Execute a GET request on the given URL. * @@ -938,20 +932,6 @@ public Response requestGet(String url) { return requestHttpMethod(url, null, Collections.emptyMap(), "GET"); } - /** - * Execute a PUT request on the given URL. 
- * - * @param url the HTTP endpoint - * @param body the payload of the PUT request - * @return the response to the PUT request - * @throws ConnectException if execution of the PUT request fails - * @deprecated Use {@link #requestPut(String, String)} instead. - */ - @Deprecated - public int executePut(String url, String body) { - return requestPut(url, body).getStatus(); - } - /** * Execute a PUT request on the given URL. * @@ -964,21 +944,6 @@ public Response requestPut(String url, String body) { return requestHttpMethod(url, body, Collections.emptyMap(), "PUT"); } - /** - * Execute a POST request on the given URL. - * - * @param url the HTTP endpoint - * @param body the payload of the POST request - * @param headers a map that stores the POST request headers - * @return the response to the POST request - * @throws ConnectException if execution of the POST request fails - * @deprecated Use {@link #requestPost(String, String, java.util.Map)} instead. - */ - @Deprecated - public int executePost(String url, String body, Map headers) { - return requestPost(url, body, headers).getStatus(); - } - /** * Execute a POST request on the given URL. * @@ -1004,19 +969,6 @@ public Response requestPatch(String url, String body) { return requestHttpMethod(url, body, Collections.emptyMap(), "PATCH"); } - /** - * Execute a DELETE request on the given URL. - * - * @param url the HTTP endpoint - * @return the response to the DELETE request - * @throws ConnectException if execution of the DELETE request fails - * @deprecated Use {@link #requestDelete(String)} instead. - */ - @Deprecated - public int executeDelete(String url) { - return requestDelete(url).getStatus(); - } - /** * Execute a DELETE request on the given URL. * @@ -1047,8 +999,8 @@ protected Response requestHttpMethod(String url, String body, Map headers.forEach(mutable::add)); + req.body(new StringRequestContent("application/json", body, StandardCharsets.UTF_8)); } ContentResponse res = req.send(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java index ceb3a6c6af6d2..0a5554c393751 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java @@ -202,14 +202,6 @@ public Builder numWorkers(int numWorkers) { return this; } - /** - * @deprecated Use {@link #clientProps(Map)} instead. 
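
EmbeddedConnect's HTTP helper now compiles against the relocated Jetty 12 client types (Request and ContentResponse left the .api package, StringContentProvider became StringRequestContent) and pins the client's idle timeout to the REST request timeout so long-running calls are not aborted before the worker responds. A condensed sketch of the resulting request pattern; the URL, body, and hard-coded timeout below are placeholders rather than values from this change:

```java
import org.eclipse.jetty.client.ContentResponse;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.Request;
import org.eclipse.jetty.client.StringRequestContent;

import java.nio.charset.StandardCharsets;

public class JettyPutExample {

    public static String putJson(String url, String body) throws Exception {
        HttpClient httpClient = new HttpClient();
        // Keep idle connections open at least as long as the server-side request timeout,
        // otherwise Jetty may abort a slow REST call before the framework has answered.
        httpClient.setIdleTimeout(90_000L);
        httpClient.start();
        try {
            Request req = httpClient.newRequest(url).method("PUT");
            req.body(new StringRequestContent("application/json", body, StandardCharsets.UTF_8));
            ContentResponse res = req.send();
            return res.getContentAsString();
        } finally {
            httpClient.stop();
        }
    }
}
```
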
- */ - @Deprecated - public Builder clientConfigs(Map clientProps) { - return clientProps(clientProps); - } - @Override protected EmbeddedConnectCluster build( int numBrokers, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java index 66ce78d0d1bab..5678b97bb1314 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java @@ -37,7 +37,7 @@ import java.util.Properties; import java.util.Set; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java index 51a4bbb337abf..a3f67cd06bbd8 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java @@ -40,7 +40,7 @@ protected WorkerHandle(String workerName, Connect worker) { /** * Track the worker status during startup. - * @return {@link Connect#herderTask} to track or null + * @return {@link Connect#herderTask()} to track or null */ public Future herderTask() { return worker.herderTask(); @@ -114,10 +114,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof WorkerHandle)) { + if (!(o instanceof WorkerHandle that)) { return false; } - WorkerHandle that = (WorkerHandle) o; return Objects.equals(workerName, that.workerName) && Objects.equals(worker, that.worker); } diff --git a/connect/runtime/src/test/resources/log4j.properties b/connect/runtime/src/test/resources/log4j.properties deleted file mode 100644 index de7180c282a32..0000000000000 --- a/connect/runtime/src/test/resources/log4j.properties +++ /dev/null @@ -1,37 +0,0 @@ -## -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -# -# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information -# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a -# specific connector. 
Simply add this parameter to the log layout configuration below to include the contextual information. -# -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n -# -# The following line includes no MDC context parameters: -#log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n (%t) - -log4j.logger.kafka=WARN -log4j.logger.state.change.logger=OFF -log4j.logger.org.apache.kafka.connect=DEBUG - -# Troubleshooting KAFKA-17493. -log4j.logger.org.apache.kafka.consumer=DEBUG -log4j.logger.org.apache.kafka.coordinator.group=DEBUG \ No newline at end of file diff --git a/connect/runtime/src/test/resources/log4j2.yaml b/connect/runtime/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..45faa635378a9 --- /dev/null +++ b/connect/runtime/src/test/resources/log4j2.yaml @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %X{connector.context}%m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: WARN + + - name: state.change.logger + level: "OFF" + + - name: org.apache.kafka.connect + level: DEBUG + + # Troubleshooting KAFKA-17493. + - name: org.apache.kafka.consumer + level: DEBUG + + - name: org.apache.kafka.coordinator.group + level: DEBUG diff --git a/connect/runtime/src/test/resources/test-plugins/classpath-converter/META-INF/services/org.apache.kafka.connect.storage.Converter b/connect/runtime/src/test/resources/test-plugins/classpath-converter/META-INF/services/org.apache.kafka.connect.storage.Converter new file mode 100644 index 0000000000000..ae9c2a5820304 --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/classpath-converter/META-INF/services/org.apache.kafka.connect.storage.Converter @@ -0,0 +1,17 @@ + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
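
The classpath-converter test resource registers a copy of ByteArrayConverter through the META-INF/services manifest whose provider entry follows just below, which is what lets plugin scanning find the converter on the classpath via the standard ServiceLoader mechanism. A bare-bones illustration of that discovery step, outside the real plugin-isolation machinery:

```java
import org.apache.kafka.connect.storage.Converter;

import java.util.ServiceLoader;

public class ConverterDiscovery {

    public static void main(String[] args) {
        // Each provider listed in META-INF/services/org.apache.kafka.connect.storage.Converter
        // on the classpath is instantiated here, including the test ByteArrayConverter copy.
        ServiceLoader<Converter> loader = ServiceLoader.load(Converter.class);
        for (Converter converter : loader) {
            System.out.println("Discovered converter: " + converter.getClass().getName());
        }
    }
}
```
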
+ +org.apache.kafka.connect.converters.ByteArrayConverter + diff --git a/connect/runtime/src/test/resources/test-plugins/classpath-converter/org/apache/kafka/connect/converters/ByteArrayConverter.java b/connect/runtime/src/test/resources/test-plugins/classpath-converter/org/apache/kafka/connect/converters/ByteArrayConverter.java new file mode 100644 index 0000000000000..699d71635a042 --- /dev/null +++ b/connect/runtime/src/test/resources/test-plugins/classpath-converter/org/apache/kafka/connect/converters/ByteArrayConverter.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.connect.converters; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.connect.components.Versioned; +import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaAndValue; +import org.apache.kafka.connect.errors.DataException; +import org.apache.kafka.connect.sink.SinkConnector; +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.connect.storage.ConverterConfig; +import org.apache.kafka.connect.storage.HeaderConverter; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +public class ByteArrayConverter implements Converter, HeaderConverter, Versioned { + + private static final ConfigDef CONFIG_DEF = ConverterConfig.newConfigDef(); + @Override + public String version() { + return AppInfoParser.getVersion(); + } + @Override + public ConfigDef config() { + return CONFIG_DEF; + } + + @Override + public void configure(Map configs) { + } + + @Override + public void configure(Map configs, boolean isKey) { + } + + @Override + public byte[] fromConnectData(String topic, Schema schema, Object value) { + if (schema != null && schema.type() != Schema.Type.BYTES) + throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString()); + + if (value != null && !(value instanceof byte[]) && !(value instanceof ByteBuffer)) + throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass()); + + return value instanceof ByteBuffer ? 
getBytesFromByteBuffer((ByteBuffer) value) : (byte[]) value; + } + + @Override + public SchemaAndValue toConnectData(String topic, byte[] value) { + return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, value); + } + + @Override + public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) { + return fromConnectData(topic, schema, value); + } + + @Override + public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) { + return toConnectData(topic, value); + } + + @Override + public void close() { + // do nothing + } + + private byte[] getBytesFromByteBuffer(ByteBuffer byteBuffer) { + if (byteBuffer == null) { + return null; + } + + byteBuffer.rewind(); + byte[] bytes = new byte[byteBuffer.remaining()]; + byteBuffer.get(bytes); + return bytes; + } +} diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java index ffc0c8b8b715e..099e1a64882de 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java @@ -386,14 +386,11 @@ else if (value instanceof String) } private static String castToString(Object value) { - if (value instanceof java.util.Date) { - java.util.Date dateValue = (java.util.Date) value; + if (value instanceof java.util.Date dateValue) { return Values.dateFormatFor(dateValue).format(dateValue); - } else if (value instanceof ByteBuffer) { - ByteBuffer byteBuffer = (ByteBuffer) value; + } else if (value instanceof ByteBuffer byteBuffer) { return Base64.getEncoder().encodeToString(Utils.readBytes(byteBuffer)); - } else if (value instanceof byte[]) { - byte[] rawBytes = (byte[]) value; + } else if (value instanceof byte[] rawBytes) { return Base64.getEncoder().encodeToString(rawBytes); } else { return value.toString(); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java index 86ec11f5fd485..4a94dd1cddc5f 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java @@ -179,8 +179,7 @@ protected R newRecord(R record, Schema updatedSchema) { * a copy of the key or value object with updated references to the new schema. */ protected Object updateSchemaIn(Object keyOrValue, Schema updatedSchema) { - if (keyOrValue instanceof Struct) { - Struct origStruct = (Struct) keyOrValue; + if (keyOrValue instanceof Struct origStruct) { Struct newStruct = new Struct(updatedSchema); for (Field field : updatedSchema.fields()) { // assume both schemas have exact same fields with same names and schemas ... 
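
The Cast and SetSchemaMetadata hunks above (and the TimestampConverter one that follows) are mechanical moves to Java 16+ pattern matching for instanceof: the binding variable is declared by the test itself, so the separate cast line disappears and behaviour is unchanged. The shape of the refactor, shown on a neutral example rather than the Connect classes themselves:

```java
import java.nio.ByteBuffer;
import java.util.Base64;

public class PatternMatchingExample {

    // Before: test, then cast on a separate line.
    static String describeOld(Object value) {
        if (value instanceof ByteBuffer) {
            ByteBuffer buffer = (ByteBuffer) value;
            return "buffer with " + buffer.remaining() + " bytes";
        }
        return String.valueOf(value);
    }

    // After: the instanceof introduces the binding variable directly.
    static String describeNew(Object value) {
        if (value instanceof ByteBuffer buffer) {
            return "buffer with " + buffer.remaining() + " bytes";
        } else if (value instanceof byte[] bytes) {
            return Base64.getEncoder().encodeToString(bytes);
        }
        return String.valueOf(value);
    }
}
```
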
diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java index 957cc3e1fe316..aeec9ea41892c 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java @@ -164,9 +164,8 @@ public String toType(Config config, Date orig) { TRANSLATORS.put(TYPE_UNIX, new TimestampTranslator() { @Override public Date toRaw(Config config, Object orig) { - if (!(orig instanceof Long)) + if (!(orig instanceof Long unixTime)) throw new DataException("Expected Unix timestamp to be a Long, but found " + orig.getClass()); - Long unixTime = (Long) orig; switch (config.unixPrecision) { case UNIX_PRECISION_SECONDS: return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.SECONDS.toMillis(unixTime)); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKey.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKey.java index fe0a99c4abe72..566bdfab23832 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKey.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKey.java @@ -35,9 +35,8 @@ public class HasHeaderKey> implements Predicate, V private static final String NAME_CONFIG = "name"; public static final String OVERVIEW_DOC = "A predicate which is true for records with at least one header with the configured name."; public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(NAME_CONFIG, ConfigDef.Type.STRING, ConfigDef.NO_DEFAULT_VALUE, - new ConfigDef.NonEmptyString(), ConfigDef.Importance.MEDIUM, - "The header name."); + .define(NAME_CONFIG, ConfigDef.Type.STRING, ConfigDef.NO_DEFAULT_VALUE, + new ConfigDef.NonEmptyString(), ConfigDef.Importance.MEDIUM, "The header name."); private String name; @Override diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutor.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutor.java new file mode 100644 index 0000000000000..2bf7e7670d5ee --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutor.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.KafkaException; + +/** + * An interface to schedule and cancel asynchronous tasks. 
The TaskRunnable + * interface defines the tasks to be executed in the executor and the + * TaskOperation defines the operation scheduled to the runtime to + * process the output of the executed task. + * + * @param The record type. + */ +public interface CoordinatorExecutor { + /** + * The task's runnable. + * + * @param The return type. + */ + interface TaskRunnable { + R run() throws Throwable; + } + + /** + * The task's write operation to handle the output + * of the task. + * + * @param The record type. + * @param The return type of the task. + */ + interface TaskOperation { + CoordinatorResult onComplete( + R result, + Throwable exception + ) throws KafkaException; + } + + /** + * Schedule an asynchronous task. Note that only one task for a given key can + * be executed at the time. + * + * @param key The key to identify the task. + * @param task The task itself. + * @param operation The runtime operation to handle the output of the task. + * @return True if the task was scheduled; False otherwise. + * + * @param The return type of the task. + */ + boolean schedule( + String key, + TaskRunnable task, + TaskOperation operation + ); + + /** + * Return true if the key is associated to a task; false otherwise. + * + * @param key The key to identify the task. + * @return A boolean indicating whether the task is scheduled or not. + */ + boolean isScheduled(String key); + + /** + * Cancel the given task + * + * @param key The key to identify the task. + */ + void cancel(String key); +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java new file mode 100644 index 0000000000000..f9a417b0e8607 --- /dev/null +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
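
The CoordinatorExecutor contract is deliberately small: schedule ties an asynchronous TaskRunnable to a string key, at most one task per key may be in flight, and the TaskOperation turns the task's outcome into a normal coordinator write. A hedged usage sketch from a shard's point of view; the "regex-refresh" key, the String record type, and the task body are invented for illustration:

```java
import org.apache.kafka.coordinator.common.runtime.CoordinatorExecutor;
import org.apache.kafka.coordinator.common.runtime.CoordinatorResult;

import java.util.List;

public class ExecutorUsageSketch {

    static void scheduleRefresh(CoordinatorExecutor<String> executor) {
        boolean scheduled = executor.schedule(
            "regex-refresh",
            // Runs on the executor's thread pool, off the coordinator event loop.
            () -> expensiveComputation(),
            (result, exception) -> {
                if (exception != null) {
                    // No state change if the background task failed.
                    return new CoordinatorResult<>(List.of(), null);
                }
                // Persist the task's output through the normal write path.
                return new CoordinatorResult<>(List.of("refreshed:" + result), null);
            }
        );
        if (!scheduled) {
            System.out.println("A task with the same key is already in flight; this one was rejected.");
        }
    }

    static Long expensiveComputation() {
        return 42L;
    }
}
```

When schedule() returns false, another task with the same key is still pending; the implementation that follows enforces this with ConcurrentHashMap.putIfAbsent and later relies on the conditional remove(key, task) to detect that a task was replaced or cancelled before its write operation ran.
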
+ */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.CoordinatorLoadInProgressException; +import org.apache.kafka.common.errors.NotCoordinatorException; +import org.apache.kafka.common.utils.LogContext; + +import org.slf4j.Logger; + +import java.time.Duration; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; + +public class CoordinatorExecutorImpl, U> implements CoordinatorExecutor { + private static class TaskResult { + final R result; + final Throwable exception; + + TaskResult( + R result, + Throwable exception + ) { + this.result = result; + this.exception = exception; + } + } + + private final Logger log; + private final TopicPartition shard; + private final CoordinatorRuntime runtime; + private final ExecutorService executor; + private final Duration writeTimeout; + private final Map> tasks = new ConcurrentHashMap<>(); + + public CoordinatorExecutorImpl( + LogContext logContext, + TopicPartition shard, + CoordinatorRuntime runtime, + ExecutorService executor, + Duration writeTimeout + ) { + this.log = logContext.logger(CoordinatorExecutorImpl.class); + this.shard = shard; + this.runtime = runtime; + this.executor = executor; + this.writeTimeout = writeTimeout; + } + + private TaskResult executeTask(TaskRunnable task) { + try { + return new TaskResult<>(task.run(), null); + } catch (Throwable ex) { + return new TaskResult<>(null, ex); + } + } + + @Override + public boolean schedule( + String key, + TaskRunnable task, + TaskOperation operation + ) { + // Put the task if the key is free. Otherwise, reject it. + if (tasks.putIfAbsent(key, task) != null) return false; + + // Submit the task. + executor.submit(() -> { + // If the task associated with the key is not us, it means + // that the task was either replaced or cancelled. We stop. + if (tasks.get(key) != task) return; + + // Execute the task. + final TaskResult result = executeTask(task); + + // Schedule the operation. + runtime.scheduleWriteOperation( + key, + shard, + writeTimeout, + coordinator -> { + // If the task associated with the key is not us, it means + // that the task was either replaced or cancelled. We stop. + if (!tasks.remove(key, task)) { + throw new RejectedExecutionException(String.format("Task %s was overridden or cancelled", key)); + } + + // Call the underlying write operation with the result of the task. + return operation.onComplete(result.result, result.exception); + } + ).exceptionally(exception -> { + // Remove the task after a failure. + tasks.remove(key, task); + + if (exception instanceof RejectedExecutionException) { + log.debug("The write event for the task {} was not executed because it was " + + "cancelled or overridden.", key); + } else if (exception instanceof NotCoordinatorException || exception instanceof CoordinatorLoadInProgressException) { + log.debug("The write event for the task {} failed due to {}. Ignoring it because " + + "the coordinator is not active.", key, exception.getMessage()); + } else { + log.error("The write event for the task {} failed due to {}. Ignoring it. 
", + key, exception.getMessage()); + } + + return null; + }); + }); + + return true; + } + + @Override + public boolean isScheduled(String key) { + return tasks.containsKey(key); + } + + @Override + public void cancel(String key) { + tasks.remove(key); + } + + public void cancelAll() { + Iterator iterator = tasks.keySet().iterator(); + while (iterator.hasNext()) { + iterator.remove(); + } + } +} diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java index 56f9a6cae1313..9e5afc3b89f2a 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java @@ -43,7 +43,7 @@ public abstract class CoordinatorRecordSerde implements Serializer, U> { private Serializer serializer; private Compression compression; private int appendLingerMs; + private ExecutorService executorService; public Builder withLogPrefix(String logPrefix) { this.logPrefix = logPrefix; @@ -189,6 +192,11 @@ public Builder withAppendLingerMs(int appendLingerMs) { return this; } + public Builder withExecutorService(ExecutorService executorService) { + this.executorService = executorService; + return this; + } + public CoordinatorRuntime build() { if (logPrefix == null) logPrefix = ""; @@ -216,6 +224,8 @@ public CoordinatorRuntime build() { compression = Compression.NONE; if (appendLingerMs < 0) throw new IllegalArgumentException("AppendLinger must be >= 0"); + if (executorService == null) + throw new IllegalArgumentException("ExecutorService must be set."); return new CoordinatorRuntime<>( logPrefix, @@ -231,7 +241,8 @@ public CoordinatorRuntime build() { coordinatorMetrics, serializer, compression, - appendLingerMs + appendLingerMs, + executorService ); } } @@ -551,6 +562,11 @@ class CoordinatorContext { */ final EventBasedCoordinatorTimer timer; + /** + * The coordinator executor. + */ + final CoordinatorExecutorImpl executor; + /** * The current state. */ @@ -603,6 +619,13 @@ private CoordinatorContext( this.epoch = -1; this.deferredEventQueue = new DeferredEventQueue(logContext); this.timer = new EventBasedCoordinatorTimer(tp, logContext); + this.executor = new CoordinatorExecutorImpl<>( + logContext, + tp, + CoordinatorRuntime.this, + executorService, + defaultWriteTimeout + ); this.bufferSupplier = new BufferSupplier.GrowableBufferSupplier(); } @@ -633,6 +656,7 @@ private void transitionTo( .withSnapshotRegistry(snapshotRegistry) .withTime(time) .withTimer(timer) + .withExecutor(executor) .withCoordinatorMetrics(coordinatorMetrics) .withTopicPartition(tp) .build(), @@ -714,6 +738,7 @@ private void unload() { highWatermarklistener = null; } timer.cancelAll(); + executor.cancelAll(); deferredEventQueue.failAll(Errors.NOT_COORDINATOR.exception()); failCurrentBatch(Errors.NOT_COORDINATOR.exception()); if (coordinator != null) { @@ -743,6 +768,17 @@ private void freeCurrentBatch() { private void flushCurrentBatch() { if (currentBatch != null) { try { + if (currentBatch.builder.numRecords() == 0) { + // The only way we can get here is if append() has failed in an unexpected + // way and left an empty batch. Try to clean it up. + log.debug("Tried to flush an empty batch for {}.", tp); + // There should not be any deferred events attached to the batch. We fail + // the batch just in case. 
As a side effect, coordinator state is also + // reverted, but there should be no changes since the batch was empty. + failCurrentBatch(new IllegalStateException("Record batch was empty")); + return; + } + long flushStartMs = time.milliseconds(); // Write the records to the log and update the last written offset. long offset = partitionWriter.append( @@ -821,14 +857,13 @@ private void maybeAllocateNewBatch( ) { if (currentBatch == null) { LogConfig logConfig = partitionWriter.config(tp); - byte magic = logConfig.recordVersion().value; int maxBatchSize = logConfig.maxMessageSize(); long prevLastWrittenOffset = coordinator.lastWrittenOffset(); ByteBuffer buffer = bufferSupplier.get(maxBatchSize); MemoryRecordsBuilder builder = new MemoryRecordsBuilder( buffer, - magic, + RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, 0L, @@ -901,7 +936,7 @@ private void append( // If the records are empty, it was a read operation after all. In this case, // the response can be returned directly iff there are no pending write operations; // otherwise, the read needs to wait on the last write operation to be completed. - if (currentBatch != null) { + if (currentBatch != null && currentBatch.builder.numRecords() > 0) { currentBatch.deferredEvents.add(event); } else { if (coordinator.lastCommittedOffset() < coordinator.lastWrittenOffset()) { @@ -1336,6 +1371,10 @@ public void run() { */ @Override public void complete(Throwable exception) { + if (future.isDone()) { + return; + } + final long purgatoryTimeMs = time.milliseconds() - deferredEventQueuedTimestamp; CompletableFuture appendFuture = result != null ? result.appendFuture() : null; @@ -1629,6 +1668,10 @@ public void run() { */ @Override public void complete(Throwable exception) { + if (future.isDone()) { + return; + } + final long purgatoryTimeMs = time.milliseconds() - deferredEventQueuedTimestamp; if (exception == null) { future.complete(null); @@ -1899,6 +1942,12 @@ public void onHighWatermarkUpdated( */ private final int appendLingerMs; + /** + * The executor service used by the coordinator runtime to schedule + * asynchronous tasks. + */ + private final ExecutorService executorService; + /** * Atomic boolean indicating whether the runtime is running. */ @@ -1926,6 +1975,7 @@ public void onHighWatermarkUpdated( * @param serializer The serializer. * @param compression The compression codec. * @param appendLingerMs The append linger time in ms. + * @param executorService The executor service. 
*/ @SuppressWarnings("checkstyle:ParameterNumber") private CoordinatorRuntime( @@ -1942,7 +1992,8 @@ private CoordinatorRuntime( CoordinatorMetrics coordinatorMetrics, Serializer serializer, Compression compression, - int appendLingerMs + int appendLingerMs, + ExecutorService executorService ) { this.logPrefix = logPrefix; this.logContext = logContext; @@ -1960,6 +2011,7 @@ private CoordinatorRuntime( this.serializer = serializer; this.compression = compression; this.appendLingerMs = appendLingerMs; + this.executorService = executorService; } /** @@ -2333,7 +2385,7 @@ public void scheduleUnloadOperation( if (context != null) { context.lock.lock(); try { - if (!partitionEpoch.isPresent() || context.epoch < partitionEpoch.getAsInt()) { + if (partitionEpoch.isEmpty() || context.epoch < partitionEpoch.getAsInt()) { log.info("Started unloading metadata for {} with epoch {}.", tp, partitionEpoch); context.transitionTo(CoordinatorState.CLOSED); coordinators.remove(tp, context); @@ -2423,7 +2475,27 @@ public void close() throws Exception { } }); coordinators.clear(); + executorService.shutdown(); Utils.closeQuietly(runtimeMetrics, "runtime metrics"); log.info("Coordinator runtime closed."); } + + /** + * Util method which returns all the topic partitions for which + * the state machine is in active state. + *
+     * <p>
          + * This could be useful if the caller does not have a specific + * target internal topic partition. + * @return List of {@link TopicPartition} whose coordinators are active + */ + public List activeTopicPartitions() { + if (coordinators == null || coordinators.isEmpty()) { + return Collections.emptyList(); + } + + return coordinators.entrySet().stream() + .filter(entry -> entry.getValue().state.equals(CoordinatorState.ACTIVE)) + .map(Map.Entry::getKey) + .toList(); + } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java index 591b37e2fb450..391813250c147 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java @@ -31,6 +31,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; + public class CoordinatorRuntimeMetricsImpl implements CoordinatorRuntimeMetrics { /** @@ -291,7 +293,7 @@ public void recordEventProcessingTime(long durationMs) { @Override public void recordEventPurgatoryTime(long purgatoryTimeMs) { - eventPurgatoryTimeSensor.record(purgatoryTimeMs); + eventPurgatoryTimeSensor.record(Math.min(MAX_LATENCY_MS, purgatoryTimeMs)); } @Override diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShardBuilder.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShardBuilder.java index 7315f271a8f90..62092999c39a2 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShardBuilder.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShardBuilder.java @@ -75,6 +75,17 @@ CoordinatorShardBuilder withTimer( CoordinatorTimer timer ); + /** + * Sets the coordinator executor. + * + * @param executor The coordinator executor. + * + * @return The builder. + */ + CoordinatorShardBuilder withExecutor( + CoordinatorExecutor executor + ); + /** * Sets the coordinator metrics. * diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java index 47df8bcae3499..cb8bec3f71c94 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java @@ -107,4 +107,15 @@ CompletableFuture maybeStartTransactionVerification( short producerEpoch, short apiVersion ) throws KafkaException; + + /** + * Delete records from a topic partition until specified offset + * @param tp The partition to delete records from + * @param deleteBeforeOffset Offset to delete until, starting from the beginning + * @throws KafkaException Any KafkaException caught during the operation. 
+ */ + CompletableFuture deleteRecords( + TopicPartition tp, + long deleteBeforeOffset + ) throws KafkaException; } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java new file mode 100644 index 0000000000000..d5ac1be7820d6 --- /dev/null +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.server.util.FutureUtils; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.util.Collections; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +// Creating mocks of classes using generics creates unsafe assignment. 
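
PartitionWriter's new deleteRecords(tp, deleteBeforeOffset) hook, declared a few hunks above, returns a future that completes once records below the given offset have been removed. Purely as an illustration of the contract, and not how the runtime's writer actually does it (the real implementation sits on the broker side rather than on a client), an Admin-backed sketch could look like this:

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;
import java.util.concurrent.CompletableFuture;

public class AdminBackedRecordDeleter {

    private final Admin admin;

    public AdminBackedRecordDeleter(Admin admin) {
        this.admin = admin;
    }

    /** Deletes every record on {@code tp} with an offset lower than {@code deleteBeforeOffset}. */
    public CompletableFuture<Void> deleteRecords(TopicPartition tp, long deleteBeforeOffset) {
        return admin.deleteRecords(Map.of(tp, RecordsToDelete.beforeOffset(deleteBeforeOffset)))
            .all()
            .toCompletionStage()
            .toCompletableFuture();
    }
}
```
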
+@SuppressWarnings("unchecked") +public class CoordinatorExecutorImplTest { + private static final LogContext LOG_CONTEXT = new LogContext(); + private static final TopicPartition SHARD_PARTITION = new TopicPartition("__consumer_offsets", 0); + private static final Duration WRITE_TIMEOUT = Duration.ofMillis(1000); + private static final String TASK_KEY = "task"; + + @Test + public void testTaskSuccessfulLifecycle() { + CoordinatorShard coordinatorShard = mock(CoordinatorShard.class); + CoordinatorRuntime, String> runtime = mock(CoordinatorRuntime.class); + ExecutorService executorService = mock(ExecutorService.class); + CoordinatorExecutorImpl, String> executor = new CoordinatorExecutorImpl<>( + LOG_CONTEXT, + SHARD_PARTITION, + runtime, + executorService, + WRITE_TIMEOUT + ); + + when(runtime.scheduleWriteOperation( + eq(TASK_KEY), + eq(SHARD_PARTITION), + eq(WRITE_TIMEOUT), + any() + )).thenAnswer(args -> { + assertTrue(executor.isScheduled(TASK_KEY)); + CoordinatorRuntime.CoordinatorWriteOperation, Void, String> op = + args.getArgument(3); + assertEquals( + new CoordinatorResult<>(Collections.singletonList("record"), null), + op.generateRecordsAndResult(coordinatorShard) + ); + return CompletableFuture.completedFuture(null); + }); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + assertTrue(executor.isScheduled(TASK_KEY)); + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + AtomicBoolean taskCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskRunnable taskRunnable = () -> { + taskCalled.set(true); + return "Hello!"; + }; + + AtomicBoolean operationCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { + operationCalled.set(true); + assertEquals("Hello!", result); + assertNull(exception); + return new CoordinatorResult<>(Collections.singletonList("record"), null); + }; + + executor.schedule( + TASK_KEY, + taskRunnable, + taskOperation + ); + + assertTrue(taskCalled.get()); + assertTrue(operationCalled.get()); + } + + @Test + public void testTaskFailedLifecycle() { + CoordinatorShard coordinatorShard = mock(CoordinatorShard.class); + CoordinatorRuntime, String> runtime = mock(CoordinatorRuntime.class); + ExecutorService executorService = mock(ExecutorService.class); + CoordinatorExecutorImpl, String> executor = new CoordinatorExecutorImpl<>( + LOG_CONTEXT, + SHARD_PARTITION, + runtime, + executorService, + WRITE_TIMEOUT + ); + + when(runtime.scheduleWriteOperation( + eq(TASK_KEY), + eq(SHARD_PARTITION), + eq(WRITE_TIMEOUT), + any() + )).thenAnswer(args -> { + CoordinatorRuntime.CoordinatorWriteOperation, Void, String> op = + args.getArgument(3); + assertEquals( + new CoordinatorResult<>(Collections.emptyList(), null), + op.generateRecordsAndResult(coordinatorShard) + ); + return CompletableFuture.completedFuture(null); + }); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + AtomicBoolean taskCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskRunnable taskRunnable = () -> { + taskCalled.set(true); + throw new Exception("Oh no!"); + }; + + AtomicBoolean operationCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { + operationCalled.set(true); + assertNull(result); + assertNotNull(exception); + assertEquals("Oh no!", exception.getMessage()); + 
return new CoordinatorResult<>(Collections.emptyList(), null); + }; + + executor.schedule( + TASK_KEY, + taskRunnable, + taskOperation + ); + + assertTrue(taskCalled.get()); + assertTrue(operationCalled.get()); + } + + @Test + public void testTaskCancelledBeforeBeingExecuted() { + CoordinatorRuntime, String> runtime = mock(CoordinatorRuntime.class); + ExecutorService executorService = mock(ExecutorService.class); + CoordinatorExecutorImpl, String> executor = new CoordinatorExecutorImpl<>( + LOG_CONTEXT, + SHARD_PARTITION, + runtime, + executorService, + WRITE_TIMEOUT + ); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + // Cancel the task before running it. + executor.cancel(TASK_KEY); + + // Running the task. + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + AtomicBoolean taskCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskRunnable taskRunnable = () -> { + taskCalled.set(true); + return null; + }; + + AtomicBoolean operationCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { + operationCalled.set(true); + return null; + }; + + executor.schedule( + TASK_KEY, + taskRunnable, + taskOperation + ); + + assertFalse(taskCalled.get()); + assertFalse(operationCalled.get()); + } + + @Test + public void testTaskCancelledAfterBeingExecutedButBeforeWriteOperationIsExecuted() { + CoordinatorShard coordinatorShard = mock(CoordinatorShard.class); + CoordinatorRuntime, String> runtime = mock(CoordinatorRuntime.class); + ExecutorService executorService = mock(ExecutorService.class); + CoordinatorExecutorImpl, String> executor = new CoordinatorExecutorImpl<>( + LOG_CONTEXT, + SHARD_PARTITION, + runtime, + executorService, + WRITE_TIMEOUT + ); + + when(runtime.scheduleWriteOperation( + eq(TASK_KEY), + eq(SHARD_PARTITION), + eq(WRITE_TIMEOUT), + any() + )).thenAnswer(args -> { + // Cancel the task before running the write operation. 
+ executor.cancel(TASK_KEY); + + CoordinatorRuntime.CoordinatorWriteOperation, Void, String> op = + args.getArgument(3); + Throwable ex = assertThrows(RejectedExecutionException.class, () -> op.generateRecordsAndResult(coordinatorShard)); + return FutureUtils.failedFuture(ex); + }); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + AtomicBoolean taskCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskRunnable taskRunnable = () -> { + taskCalled.set(true); + return "Hello!"; + }; + + AtomicBoolean operationCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { + operationCalled.set(true); + return null; + }; + + executor.schedule( + TASK_KEY, + taskRunnable, + taskOperation + ); + + assertTrue(taskCalled.get()); + assertFalse(operationCalled.get()); + } + + @Test + public void testTaskSchedulingWriteOperationFailed() { + CoordinatorRuntime, String> runtime = mock(CoordinatorRuntime.class); + ExecutorService executorService = mock(ExecutorService.class); + CoordinatorExecutorImpl, String> executor = new CoordinatorExecutorImpl<>( + LOG_CONTEXT, + SHARD_PARTITION, + runtime, + executorService, + WRITE_TIMEOUT + ); + + when(runtime.scheduleWriteOperation( + eq(TASK_KEY), + eq(SHARD_PARTITION), + eq(WRITE_TIMEOUT), + any() + )).thenReturn(FutureUtils.failedFuture(new Throwable("Oh no!"))); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + AtomicBoolean taskCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskRunnable taskRunnable = () -> { + taskCalled.set(true); + return "Hello!"; + }; + + AtomicBoolean operationCalled = new AtomicBoolean(false); + CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { + operationCalled.set(true); + return new CoordinatorResult<>(Collections.emptyList(), null); + }; + + executor.schedule( + TASK_KEY, + taskRunnable, + taskOperation + ); + + assertTrue(taskCalled.get()); + assertFalse(operationCalled.get()); + assertFalse(executor.isScheduled(TASK_KEY)); + } +} diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java index bb637d94b27a0..7285b58ffabf4 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java @@ -36,6 +36,7 @@ import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.EVENT_PURGATORY_TIME_METRIC_NAME; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.EVENT_QUEUE_TIME_METRIC_NAME; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.NUM_PARTITIONS_METRIC_NAME; +import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -204,6 +205,23 @@ public void testHistogramMetrics(String metricNamePrefix) { 
assertEquals(999.0, metric.metricValue()); } + @Test + public void testRecordEventPurgatoryTimeLimit() { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + + IntStream.range(1, 1001).forEach(__ -> runtimeMetrics.recordEventPurgatoryTime(MAX_LATENCY_MS + 1000L)); + + MetricName metricName = kafkaMetricName(metrics, EVENT_PURGATORY_TIME_METRIC_NAME + "-max"); + KafkaMetric metric = metrics.metrics().get(metricName); + long value = ((Double) metric.metricValue()).longValue(); + + // 3 sigfigs in HdrHistogram is not precise enough. + assertTrue(value >= MAX_LATENCY_MS && value < MAX_LATENCY_MS + 1000L); + } + private static void assertMetricGauge(Metrics metrics, org.apache.kafka.common.MetricName metricName, long count) { assertEquals(count, (long) metrics.metric(metricName).metricValue()); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java index 7172fb2e8996d..a2f25b24a4c93 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java @@ -54,6 +54,7 @@ import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentMatcher; +import java.nio.BufferOverflowException; import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.time.Duration; @@ -69,6 +70,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -100,7 +102,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -@SuppressWarnings("checkstyle:JavaNCSS") +@SuppressWarnings({"checkstyle:JavaNCSS", "checkstyle:ClassDataAbstractionCoupling"}) public class CoordinatorRuntimeTest { private static final TopicPartition TP = new TopicPartition("__consumer_offsets", 0); private static final Duration DEFAULT_WRITE_TIMEOUT = Duration.ofMillis(5); @@ -119,6 +121,34 @@ public byte[] serializeValue(String record) { } } + private static class ThrowingSerializer implements Serializer { + private final Serializer serializer; + private boolean throwOnNextOperation; + + public ThrowingSerializer(Serializer serializer) { + this.serializer = serializer; + this.throwOnNextOperation = false; + } + + public void throwOnNextOperation() { + throwOnNextOperation = true; + } + + @Override + public byte[] serializeKey(T record) { + return serializer.serializeKey(record); + } + + @Override + public byte[] serializeValue(T record) { + if (throwOnNextOperation) { + throwOnNextOperation = false; + throw new BufferOverflowException(); + } + return serializer.serializeValue(record); + } + } + /** * A CoordinatorEventProcessor that directly executes the operations. This is * useful in unit tests where execution in threads is not required. 
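Aside on the recordEventPurgatoryTime() change and the testRecordEventPurgatoryTimeLimit() test above: an HdrHistogram-backed sensor can only record values up to its configured maximum, which is why samples are clamped with Math.min before being recorded. The standalone sketch below illustrates the clamping idea; the bound value and class name are assumptions (the real bound is KafkaMetricHistogram.MAX_LATENCY_MS), and the patch itself records through a Kafka Sensor rather than a raw histogram.

// Illustrative sketch only (not part of this patch): clamp a latency sample to the
// histogram's highest trackable value before recording, mirroring the
// Math.min(MAX_LATENCY_MS, purgatoryTimeMs) guard added in CoordinatorRuntimeMetricsImpl.
import org.HdrHistogram.Histogram;

public class ClampedLatencySketch {
    // Assumed bound, for illustration only.
    private static final long MAX_LATENCY_MS = 10 * 60 * 1000L;

    // 3 significant digits, matching the precision noted in the test comment above.
    private final Histogram histogram = new Histogram(MAX_LATENCY_MS, 3);

    public void record(long latencyMs) {
        // Recording a value above the highest trackable value would throw, so clamp it first.
        histogram.recordValue(Math.min(MAX_LATENCY_MS, latencyMs));
    }
}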
@@ -143,7 +173,7 @@ public void enqueueFirst(CoordinatorEvent event) throws RejectedExecutionExcepti } @Override - public void close() throws Exception {} + public void close() {} } /** @@ -181,7 +211,7 @@ public int size() { } @Override - public void close() throws Exception { + public void close() { } } @@ -219,7 +249,7 @@ public CompletableFuture load( } @Override - public void close() throws Exception { } + public void close() { } } /** @@ -269,6 +299,10 @@ public long append( if (batch.sizeInBytes() > config(tp).maxMessageSize()) throw new RecordTooLargeException("Batch is larger than the max message size"); + // We don't want the coordinator to write empty batches. + if (batch.validBytes() <= 0) + throw new KafkaException("Coordinator tried to write an empty batch"); + if (writeCount.incrementAndGet() > maxWrites) throw new KafkaException("Maximum number of writes reached"); @@ -342,7 +376,7 @@ public String toString() { "offset=" + offset + ", producerId=" + producerId + ", producerEpoch=" + producerEpoch + - ", record='" + record.substring(0, 10) + '\'' + + ", record='" + record.substring(0, Math.min(10, record.length())) + '\'' + ')'; } } @@ -351,15 +385,25 @@ public String toString() { private final TimelineHashSet records; private final TimelineHashMap> pendingRecords; private final CoordinatorTimer timer; + private final CoordinatorExecutor executor; MockCoordinatorShard( SnapshotRegistry snapshotRegistry, CoordinatorTimer timer + ) { + this(snapshotRegistry, timer, null); + } + + MockCoordinatorShard( + SnapshotRegistry snapshotRegistry, + CoordinatorTimer timer, + CoordinatorExecutor executor ) { this.snapshotRegistry = snapshotRegistry; this.records = new TimelineHashSet<>(snapshotRegistry, 0); this.pendingRecords = new TimelineHashMap<>(snapshotRegistry, 0); this.timer = timer; + this.executor = executor; } @Override @@ -403,28 +447,18 @@ public void replayEndTransactionMarker( Set pendingRecords(long producerId) { TimelineHashSet pending = pendingRecords.get(producerId); if (pending == null) return Collections.emptySet(); - return Collections.unmodifiableSet( - pending.stream().map(record -> record.record).collect(Collectors.toSet()) - ); + return pending.stream().map(record -> record.record).collect(Collectors.toUnmodifiableSet()); } Set records() { - return Collections.unmodifiableSet( - records.stream().map(record -> record.record).collect(Collectors.toSet()) - ); + return records.stream().map(record -> record.record).collect(Collectors.toUnmodifiableSet()); } List fullRecords() { - return Collections.unmodifiableList( - records - .stream() - .sorted(Comparator.comparingLong(record -> record.offset)) - .collect(Collectors.toList()) - ); - } - - CoordinatorTimer timer() { - return timer; + return records + .stream() + .sorted(Comparator.comparingLong(record -> record.offset)) + .collect(Collectors.toList()); } } @@ -434,6 +468,7 @@ CoordinatorTimer timer() { private static class MockCoordinatorShardBuilder implements CoordinatorShardBuilder { private SnapshotRegistry snapshotRegistry; private CoordinatorTimer timer; + private CoordinatorExecutor executor; @Override public CoordinatorShardBuilder withSnapshotRegistry( @@ -457,6 +492,14 @@ public CoordinatorShardBuilder withTime( return this; } + @Override + public CoordinatorShardBuilder withExecutor( + CoordinatorExecutor executor + ) { + this.executor = executor; + return this; + } + @Override public CoordinatorShardBuilder withTimer( CoordinatorTimer timer @@ -481,7 +524,8 @@ public CoordinatorShardBuilder 
withTopicPartition( public MockCoordinatorShard build() { return new MockCoordinatorShard( Objects.requireNonNull(this.snapshotRegistry), - Objects.requireNonNull(this.timer) + Objects.requireNonNull(this.timer), + Objects.requireNonNull(this.executor) ); } } @@ -634,6 +678,7 @@ public void testScheduleLoading() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -642,6 +687,7 @@ public void testScheduleLoading() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); CompletableFuture future = new CompletableFuture<>(); @@ -704,6 +750,7 @@ public void testScheduleLoadingWithFailure() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -712,6 +759,7 @@ public void testScheduleLoadingWithFailure() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); CompletableFuture future = new CompletableFuture<>(); @@ -754,6 +802,7 @@ public void testScheduleLoadingWithStalePartitionEpoch() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -762,6 +811,7 @@ public void testScheduleLoadingWithStalePartitionEpoch() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); CompletableFuture future = new CompletableFuture<>(); @@ -808,6 +858,7 @@ public void testScheduleLoadingAfterLoadingFailure() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -816,6 +867,7 @@ public void testScheduleLoadingAfterLoadingFailure() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); CompletableFuture future = new CompletableFuture<>(); @@ -879,6 +931,7 @@ public void testScheduleUnloading() { 
.withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -887,6 +940,7 @@ public void testScheduleUnloading() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -933,6 +987,7 @@ public void testScheduleUnloadingWithEmptyEpoch() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -941,6 +996,7 @@ public void testScheduleUnloadingWithEmptyEpoch() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -987,6 +1043,7 @@ public void testScheduleUnloadingWhenContextDoesntExist() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -994,6 +1051,7 @@ public void testScheduleUnloadingWhenContextDoesntExist() { when(builder.withTime(any())).thenReturn(builder); when(builder.withTimer(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -1029,6 +1087,7 @@ public void testScheduleUnloadingWithStalePartitionEpoch() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -1039,6 +1098,8 @@ public void testScheduleUnloadingWithStalePartitionEpoch() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -1072,6 +1133,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. 
@@ -1081,11 +1143,11 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1") + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1") ); // Verify that the write is not committed yet. @@ -1096,17 +1158,17 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // The last committed offset does not change. assertEquals(0L, ctx.coordinator.lastCommittedOffset()); // A new snapshot is created. - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); // Records have been replayed to the coordinator. assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records()); // Records have been written to the log. - assertEquals(Collections.singletonList( + assertEquals(List.of( records(timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); // Write #2. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record3"), "response2")); + state -> new CoordinatorResult<>(List.of("record3"), "response2")); // Verify that the write is not committed yet. assertFalse(write2.isDone()); @@ -1116,11 +1178,11 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // The last committed offset does not change. assertEquals(0L, ctx.coordinator.lastCommittedOffset()); // A new snapshot is created. - assertEquals(Arrays.asList(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); // Records have been replayed to the coordinator. assertEquals(Set.of("record1", "record2", "record3"), ctx.coordinator.coordinator().records()); // Records have been written to the log. - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record1", "record2"), records(timer.time().milliseconds(), "record3") ), writer.entries(TP)); @@ -1135,9 +1197,9 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // The state does not change. assertEquals(3L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2", "record3"), ctx.coordinator.coordinator().records()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record1", "record2"), records(timer.time().milliseconds(), "record3") ), writer.entries(TP)); @@ -1152,7 +1214,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // The last committed offset is updated. 
assertEquals(2L, ctx.coordinator.lastCommittedOffset()); // The snapshot is cleaned up. - assertEquals(Arrays.asList(2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); // Commit write #2. writer.commit(TP, 3); @@ -1166,7 +1228,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // The last committed offset is updated. assertEquals(3L, ctx.coordinator.lastCommittedOffset()); // The snapshot is cleaned up. - assertEquals(Collections.singletonList(3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(3L), ctx.coordinator.snapshotRegistry().epochsList()); // Write #4 but without records. CompletableFuture write4 = runtime.scheduleWriteOperation("write#4", TP, DEFAULT_WRITE_TIMEOUT, @@ -1175,7 +1237,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // It is completed immediately because the state is fully committed. assertTrue(write4.isDone()); assertEquals("response4", write4.get(5, TimeUnit.SECONDS)); - assertEquals(Collections.singletonList(3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(3L), ctx.coordinator.snapshotRegistry().epochsList()); } @Test @@ -1193,6 +1255,7 @@ public void testScheduleWriteOpWhenInactive() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Scheduling a write fails with a NotCoordinatorException because the coordinator @@ -1217,6 +1280,7 @@ public void testScheduleWriteOpWhenOpFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1245,6 +1309,7 @@ public void testScheduleWriteOpWhenReplayFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1254,7 +1319,7 @@ public void testScheduleWriteOpWhenReplayFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Override the coordinator with a coordinator that throws // an exception when replay is called. @@ -1278,13 +1343,13 @@ public void replay( // Write. It should fail. CompletableFuture write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); assertFutureThrows(write, IllegalArgumentException.class); // Verify that the state has not changed. 
assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); } @Test @@ -1305,6 +1370,7 @@ public void testScheduleWriteOpWhenWriteFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1314,28 +1380,28 @@ public void testScheduleWriteOpWhenWriteFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0, ctx.coordinator.lastWrittenOffset()); assertEquals(0, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Write #1. It should succeed and be applied to the coordinator. runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); // Verify that the state has been updated. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records()); // Write #2. It should fail because the writer is configured to only // accept 1 write. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record3", "record4", "record5"), "response2")); + state -> new CoordinatorResult<>(List.of("record3", "record4", "record5"), "response2")); assertFutureThrows(write2, KafkaException.class); // Verify that the state has not changed. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().records()); } @@ -1357,6 +1423,7 @@ public void testScheduleWriteOpWhenWriteTimesOut() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1366,11 +1433,11 @@ public void testScheduleWriteOpWhenWriteTimesOut() throws InterruptedException { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0, ctx.coordinator.lastWrittenOffset()); assertEquals(0, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Write #1. We should get a TimeoutException because the HWM will not advance. 
CompletableFuture timedOutWrite = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(3), - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); timer.advanceClock(4); @@ -1394,6 +1461,7 @@ public void testScheduleWriteAllOperation() throws ExecutionException, Interrupt .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); TopicPartition coordinator0 = new TopicPartition("__consumer_offsets", 0); @@ -1410,8 +1478,8 @@ public void testScheduleWriteAllOperation() throws ExecutionException, Interrupt List>> writes = runtime.scheduleWriteAllOperation("write", DEFAULT_WRITE_TIMEOUT, state -> { int counter = cnt.getAndIncrement(); return new CoordinatorResult<>( - Collections.singletonList("record#" + counter), - Collections.singletonList("response#" + counter) + List.of("record#" + counter), + List.of("response#" + counter) ); }); @@ -1419,9 +1487,9 @@ public void testScheduleWriteAllOperation() throws ExecutionException, Interrupt assertEquals(1L, runtime.contextOrThrow(coordinator1).coordinator.lastWrittenOffset()); assertEquals(1L, runtime.contextOrThrow(coordinator2).coordinator.lastWrittenOffset()); - assertEquals(Collections.singletonList(records(timer.time().milliseconds(), "record#0")), writer.entries(coordinator0)); - assertEquals(Collections.singletonList(records(timer.time().milliseconds(), "record#1")), writer.entries(coordinator1)); - assertEquals(Collections.singletonList(records(timer.time().milliseconds(), "record#2")), writer.entries(coordinator2)); + assertEquals(List.of(records(timer.time().milliseconds(), "record#0")), writer.entries(coordinator0)); + assertEquals(List.of(records(timer.time().milliseconds(), "record#1")), writer.entries(coordinator1)); + assertEquals(List.of(records(timer.time().milliseconds(), "record#2")), writer.entries(coordinator2)); // Commit. writer.commit(coordinator0); @@ -1430,7 +1498,7 @@ public void testScheduleWriteAllOperation() throws ExecutionException, Interrupt // Verify. assertEquals( - Arrays.asList("response#0", "response#1", "response#2"), + List.of("response#0", "response#1", "response#2"), FutureUtils.combineFutures(writes, ArrayList::new, List::addAll).get(5, TimeUnit.SECONDS) ); } @@ -1465,6 +1533,7 @@ public CoordinatorShardBuilder get() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -1494,7 +1563,7 @@ public CoordinatorShardBuilder get() { 100L, (short) 50, Duration.ofMillis(5000), - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response"), + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response"), TXN_OFFSET_COMMIT_LATEST_VERSION ); @@ -1558,6 +1627,7 @@ public CoordinatorShardBuilder get() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. 
@@ -1583,7 +1653,7 @@ public CoordinatorShardBuilder get() { 100L, (short) 50, Duration.ofMillis(5000), - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response"), + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response"), TXN_OFFSET_COMMIT_LATEST_VERSION ); @@ -1616,6 +1686,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -1625,7 +1696,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Transactional write #1. CompletableFuture write1 = runtime.scheduleTransactionalWriteOperation( @@ -1635,7 +1706,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E 100L, (short) 5, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1"), + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"), TXN_OFFSET_COMMIT_LATEST_VERSION ); @@ -1647,14 +1718,14 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E // The last committed offset does not change. assertEquals(0L, ctx.coordinator.lastCommittedOffset()); // A new snapshot is created. - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); // Records have been replayed to the coordinator. They are stored in // the pending set for now. assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords( 100L )); // Records have been written to the log. - assertEquals(Collections.singletonList( + assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); @@ -1677,7 +1748,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E // The last committed offset does not change. assertEquals(0L, ctx.coordinator.lastCommittedOffset()); // A new snapshot is created. - assertEquals(Arrays.asList(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); // Records have been replayed to the coordinator. ControlRecordType expectedType; if (result == TransactionResult.COMMIT) { @@ -1691,7 +1762,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E } // Records have been written to the log. 
- assertEquals(Arrays.asList( + assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2"), endTransactionMarker(100L, (short) 5, timer.time().milliseconds(), 10, expectedType) ), writer.entries(TP)); @@ -1728,6 +1799,7 @@ public void testScheduleTransactionCompletionWhenWriteTimesOut() throws Interrup .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1737,7 +1809,7 @@ public void testScheduleTransactionCompletionWhenWriteTimesOut() throws Interrup CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0, ctx.coordinator.lastWrittenOffset()); assertEquals(0, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Complete #1. We should get a TimeoutException because the HWM will not advance. CompletableFuture timedOutCompletion = runtime.scheduleTransactionCompletion( @@ -1753,7 +1825,7 @@ public void testScheduleTransactionCompletionWhenWriteTimesOut() throws Interrup // Verify that the state has been updated. assertEquals(1L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList()); // Advance clock to timeout Complete #1. timer.advanceClock(4); @@ -1764,7 +1836,7 @@ public void testScheduleTransactionCompletionWhenWriteTimesOut() throws Interrup // operation timeouts because the record has been written to the log. assertEquals(1L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 1L), ctx.coordinator.snapshotRegistry().epochsList()); } @Test @@ -1785,6 +1857,7 @@ public void testScheduleTransactionCompletionWhenWriteFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1794,7 +1867,7 @@ public void testScheduleTransactionCompletionWhenWriteFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0, ctx.coordinator.lastWrittenOffset()); assertEquals(0, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Write #1. It should succeed and be applied to the coordinator. runtime.scheduleTransactionalWriteOperation( @@ -1804,14 +1877,14 @@ public void testScheduleTransactionCompletionWhenWriteFails() { 100L, (short) 5, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1"), + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"), TXN_OFFSET_COMMIT_LATEST_VERSION ); // Verify that the state has been updated. 
assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); @@ -1830,7 +1903,7 @@ public void testScheduleTransactionCompletionWhenWriteFails() { // Verify that the state has not changed. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); } @@ -1851,6 +1924,7 @@ public void testScheduleTransactionCompletionWhenReplayFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1860,7 +1934,7 @@ public void testScheduleTransactionCompletionWhenReplayFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); // Override the coordinator with a coordinator that throws // an exception when replayEndTransactionMarker is called. @@ -1889,17 +1963,17 @@ public void replayEndTransactionMarker( 100L, (short) 5, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1"), + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1"), TXN_OFFSET_COMMIT_LATEST_VERSION ); // Verify that the state has been updated. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); - assertEquals(Collections.singletonList( + assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); @@ -1918,10 +1992,10 @@ public void replayEndTransactionMarker( // Verify that the state has not changed. 
assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); - assertEquals(Collections.singletonList( + assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); } @@ -1943,6 +2017,7 @@ public void testScheduleReadOp() throws ExecutionException, InterruptedException .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -1955,11 +2030,11 @@ public void testScheduleReadOp() throws ExecutionException, InterruptedException // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); // Write #2. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record3", "record4"), "response2")); + state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2")); // Commit write #1. writer.commit(TP, 2); @@ -2001,6 +2076,7 @@ public void testScheduleReadOpWhenPartitionInactive() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule a read. It fails because the coordinator does not exist. @@ -2026,6 +2102,7 @@ public void testScheduleReadOpWhenOpsFails() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2038,11 +2115,11 @@ public void testScheduleReadOpWhenOpsFails() { // Write #1. runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); // Write #2. runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record3", "record4"), "response2")); + state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2")); // Commit write #1. 
writer.commit(TP, 2); @@ -2072,6 +2149,7 @@ public void testScheduleReadAllOp() throws ExecutionException, InterruptedExcept .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); TopicPartition coordinator0 = new TopicPartition("__consumer_offsets", 0); @@ -2085,11 +2163,11 @@ public void testScheduleReadAllOp() throws ExecutionException, InterruptedExcept // Writes runtime.scheduleWriteOperation("write#0", coordinator0, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record0"), "response0")); + state -> new CoordinatorResult<>(List.of("record0"), "response0")); runtime.scheduleWriteOperation("write#1", coordinator1, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record1"), "response1")); + state -> new CoordinatorResult<>(List.of("record1"), "response1")); runtime.scheduleWriteOperation("write#2", coordinator2, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record2"), "response2")); + state -> new CoordinatorResult<>(List.of("record2"), "response2")); // Commit writes. writer.commit(coordinator0); @@ -2103,7 +2181,7 @@ public void testScheduleReadAllOp() throws ExecutionException, InterruptedExcept ); assertEquals( - Arrays.asList("record0", "record1", "record2"), + List.of("record0", "record1", "record2"), FutureUtils.combineFutures(responses, ArrayList::new, List::addAll).get(5, TimeUnit.SECONDS) ); } @@ -2112,6 +2190,7 @@ public void testScheduleReadAllOp() throws ExecutionException, InterruptedExcept public void testClose() throws Exception { MockCoordinatorLoader loader = spy(new MockCoordinatorLoader()); MockTimer timer = new MockTimer(); + ExecutorService executorService = mock(ExecutorService.class); CoordinatorRuntime runtime = new CoordinatorRuntime.Builder() .withTime(timer.time()) @@ -2124,6 +2203,7 @@ public void testClose() throws Exception { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(executorService) .build(); // Loads the coordinator. @@ -2136,11 +2216,11 @@ public void testClose() throws Exception { // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1")); + state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); // Write #2. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Arrays.asList("record3", "record4"), "response2")); + state -> new CoordinatorResult<>(List.of("record3", "record4"), "response2")); // Writes are inflight. assertFalse(write1.isDone()); @@ -2151,7 +2231,7 @@ public void testClose() throws Exception { // Timer #1. This is never executed. ctx.timer.schedule("timer-1", 10, TimeUnit.SECONDS, true, - () -> new CoordinatorResult<>(Arrays.asList("record5", "record6"), null)); + () -> new CoordinatorResult<>(List.of("record5", "record6"), null)); // The coordinator timer should have one pending task. assertEquals(1, ctx.timer.size()); @@ -2168,6 +2248,9 @@ public void testClose() throws Exception { // The coordinator timer should be empty. 
assertEquals(0, ctx.timer.size()); + + // Verify that the executor service was shutdown. + verify(executorService).shutdown(); } @Test @@ -2193,6 +2276,7 @@ public void testOnNewMetadataImage() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); MockCoordinatorShard coordinator0 = mock(MockCoordinatorShard.class); @@ -2205,6 +2289,7 @@ public void testOnNewMetadataImage() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.withTime(any())).thenReturn(builder); when(builder.build()) .thenReturn(coordinator0) @@ -2256,6 +2341,7 @@ public void testScheduleTimer() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2271,11 +2357,11 @@ public void testScheduleTimer() throws InterruptedException { // Timer #1. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), null)); + () -> new CoordinatorResult<>(List.of("record1", "record2"), null)); // Timer #2. ctx.timer.schedule("timer-2", 20, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Arrays.asList("record3", "record4"), null)); + () -> new CoordinatorResult<>(List.of("record3", "record4"), null)); // The coordinator timer should have two pending tasks. assertEquals(2, ctx.timer.size()); @@ -2311,6 +2397,7 @@ public void testRescheduleTimer() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2329,7 +2416,7 @@ public void testRescheduleTimer() throws InterruptedException { // Timer #1. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Collections.singletonList("record1"), null)); + () -> new CoordinatorResult<>(List.of("record1"), null)); // The coordinator timer should have one pending task. assertEquals(1, ctx.timer.size()); @@ -2342,14 +2429,14 @@ public void testRescheduleTimer() throws InterruptedException { // Schedule a second timer with the same key. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Collections.singletonList("record2"), null)); + () -> new CoordinatorResult<>(List.of("record2"), null)); // The coordinator timer should still have one pending task. assertEquals(1, ctx.timer.size()); // Schedule a third timer with the same key. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Collections.singletonList("record3"), null)); + () -> new CoordinatorResult<>(List.of("record3"), null)); // The coordinator timer should still have one pending task. 
assertEquals(1, ctx.timer.size()); @@ -2386,6 +2473,7 @@ public void testCancelTimer() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2404,7 +2492,7 @@ public void testCancelTimer() throws InterruptedException { // Timer #1. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Collections.singletonList("record1"), null)); + () -> new CoordinatorResult<>(List.of("record1"), null)); // The coordinator timer should have one pending task. assertEquals(1, ctx.timer.size()); @@ -2417,7 +2505,7 @@ public void testCancelTimer() throws InterruptedException { // Schedule a second timer with the same key. ctx.timer.schedule("timer-1", 10, TimeUnit.MILLISECONDS, true, - () -> new CoordinatorResult<>(Collections.singletonList("record2"), null)); + () -> new CoordinatorResult<>(List.of("record2"), null)); // The coordinator timer should still have one pending task. assertEquals(1, ctx.timer.size()); @@ -2458,6 +2546,7 @@ public void testRetryableTimer() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2518,6 +2607,7 @@ public void testRetryableTimerWithCustomBackoff() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2592,6 +2682,7 @@ public void testNonRetryableTimer() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. @@ -2635,6 +2726,7 @@ public void testTimerScheduleIfAbsent() throws InterruptedException { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. 
@@ -2693,6 +2785,7 @@ public void testStateChanges() throws Exception { .withCoordinatorRuntimeMetrics(runtimeMetrics) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -2701,6 +2794,7 @@ public void testStateChanges() throws Exception { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); CompletableFuture future = new CompletableFuture<>(); @@ -2771,6 +2865,7 @@ public void testPartitionLoadSensor() { .withCoordinatorRuntimeMetrics(runtimeMetrics) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -2779,6 +2874,7 @@ public void testPartitionLoadSensor() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -2819,14 +2915,15 @@ public void testPartitionLoadGeneratesSnapshotAtHighWatermark() { 1500, 30, 3000), - Arrays.asList(5L, 15L, 27L), - Arrays.asList(5L, 15L))) + List.of(5L, 15L, 27L), + List.of(5L, 15L))) .withEventProcessor(new DirectEventProcessor()) .withPartitionWriter(writer) .withCoordinatorShardBuilderSupplier(supplier) .withCoordinatorRuntimeMetrics(runtimeMetrics) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -2835,6 +2932,7 @@ public void testPartitionLoadGeneratesSnapshotAtHighWatermark() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -2884,6 +2982,7 @@ public void testPartitionLoadGeneratesSnapshotAtHighWatermarkNoRecordsLoaded() { .withCoordinatorRuntimeMetrics(runtimeMetrics) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); when(builder.withSnapshotRegistry(any())).thenReturn(builder); @@ -2892,6 +2991,7 @@ public void testPartitionLoadGeneratesSnapshotAtHighWatermarkNoRecordsLoaded() { when(builder.withTimer(any())).thenReturn(builder); when(builder.withCoordinatorMetrics(any())).thenReturn(builder); when(builder.withTopicPartition(any())).thenReturn(builder); + when(builder.withExecutor(any())).thenReturn(builder); when(builder.build()).thenReturn(coordinator); when(supplier.get()).thenReturn(builder); @@ -2927,6 +3027,7 @@ public void testHighWatermarkUpdate() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + 
.withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. Poll once to execute the load operation and once @@ -2937,18 +3038,18 @@ public void testHighWatermarkUpdate() { // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record1"), "response1") + state -> new CoordinatorResult<>(List.of("record1"), "response1") ); processor.poll(); // Write #2. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record2"), "response2") + state -> new CoordinatorResult<>(List.of("record2"), "response2") ); processor.poll(); // Records have been written to the log. - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record1"), records(timer.time().milliseconds(), "record2") ), writer.entries(TP)); @@ -2997,6 +3098,7 @@ public void testWriteEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated( .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. Poll once to execute the load operation and once @@ -3007,18 +3109,18 @@ public void testWriteEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated( // Write#1. CompletableFuture write1 = runtime.scheduleWriteOperation("Write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record1"), "response1") + state -> new CoordinatorResult<>(List.of("record1"), "response1") ); processor.poll(); // Write#2. CompletableFuture write2 = runtime.scheduleWriteOperation("Write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(Collections.singletonList("record2"), "response2") + state -> new CoordinatorResult<>(List.of("record2"), "response2") ); processor.poll(); // Records have been written to the log. - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record1"), records(timer.time().milliseconds(), "record2") ), writer.entries(TP)); @@ -3071,6 +3173,7 @@ public void testCoordinatorCompleteTransactionEventWriteTimeoutTaskIsCancelledWh .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. Poll once to execute the load operation and once @@ -3092,7 +3195,7 @@ public void testCoordinatorCompleteTransactionEventWriteTimeoutTaskIsCancelledWh processor.poll(); // Records have been written to the log. - assertEquals(Collections.singletonList( + assertEquals(List.of( endTransactionMarker(100, (short) 50, timer.time().milliseconds(), 1, ControlRecordType.COMMIT) ), writer.entries(TP)); @@ -3140,6 +3243,7 @@ public void testAppendRecordBatchSize() { .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(serializer) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. 
@@ -3149,7 +3253,7 @@ public void testAppendRecordBatchSize() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); int maxBatchSize = writer.config(TP).maxMessageSize(); assertTrue(maxBatchSize > MIN_BUFFER_SIZE); @@ -3191,6 +3295,7 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3200,7 +3305,7 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -3229,8 +3334,8 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, // Verify the state. Records are replayed but no batch written. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)) ), ctx.coordinator.coordinator().fullRecords()); @@ -3247,8 +3352,8 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, // Verify the state. Records are replayed but no batch written. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) @@ -3268,14 +3373,14 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, // got flushed with all the records but the new one from #3. 
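Editorial note: the batching assertions in these tests follow from the append-linger behaviour configured with withAppendLingerMs(10): replayed records accumulate in ctx.currentBatch and are only written to the log once the linger time elapses (or the batch fills up). A rough, self-contained sketch of that flush rule, assuming flushing is driven by a simple clock check rather than the runtime's real flush timer:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of linger-based flushing: records accumulate until appendLingerMs has elapsed
    // since the batch was opened, then the whole batch is flushed as a single log entry.
    public class LingerBuffer {
        private final long appendLingerMs;
        private List<String> currentBatch;
        private long batchDeadlineMs;

        public LingerBuffer(long appendLingerMs) {
            this.appendLingerMs = appendLingerMs;
        }

        public void append(String record, long nowMs) {
            if (currentBatch == null) {
                currentBatch = new ArrayList<>();
                batchDeadlineMs = nowMs + appendLingerMs; // deadline is set when the batch is opened
            }
            currentBatch.add(record);
        }

        // Returns the flushed batch once the linger time has passed, otherwise an empty list.
        public List<String> maybeFlush(long nowMs) {
            if (currentBatch == null || nowMs < batchDeadlineMs) return List.of();
            List<String> flushed = currentBatch;
            currentBatch = null;
            return flushed;
        }
    }

This is why the tests advance the mock timer past the 10 ms linger before asserting on writer.entries(TP).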
assertEquals(3L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Collections.singletonList( + assertEquals(List.of( records(timer.time().milliseconds(), records.subList(0, 3)) ), writer.entries(TP)); @@ -3285,14 +3390,14 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, // Verify the state. The pending batch is flushed. assertEquals(4L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds() - 11, records.subList(0, 3)), records(timer.time().milliseconds() - 11, records.subList(3, 4)) ), writer.entries(TP)); @@ -3325,6 +3430,7 @@ public void testScheduleWriteOperationWithBatchingWhenRecordsTooLarge() { .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3334,7 +3440,7 @@ public void testScheduleWriteOperationWithBatchingWhenRecordsTooLarge() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -3376,6 +3482,7 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3385,7 +3492,7 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -3415,8 +3522,8 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { // Verify the state. 
assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) @@ -3438,7 +3545,7 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { // Verify the state. The state should be reverted to the initial state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); assertEquals(Collections.emptyList(), writer.entries(TP)); } @@ -3461,6 +3568,7 @@ public void testScheduleWriteOperationWithBatchingWhenReplayFails() { .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3470,7 +3578,7 @@ public void testScheduleWriteOperationWithBatchingWhenReplayFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Override the coordinator with a coordinator that throws @@ -3518,8 +3626,8 @@ public void replay( // Verify the state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Collections.singletonList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)) ), ctx.coordinator.coordinator().fullRecords()); assertEquals(Collections.emptyList(), writer.entries(TP)); @@ -3535,7 +3643,7 @@ public void replay( // Verify the state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); assertEquals(Collections.emptyList(), writer.entries(TP)); } @@ -3558,6 +3666,7 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. 
@@ -3567,12 +3676,12 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Write #1 with one record. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(Collections.singletonList("record#1"), "response1") + state -> new CoordinatorResult<>(List.of("record#1"), "response1") ); // Verify that the write is not committed yet. @@ -3581,7 +3690,7 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi // Verify the state. Records are replayed but no batch written. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1"), ctx.coordinator.coordinator().records()); assertEquals(Collections.emptyList(), writer.entries(TP)); @@ -3594,7 +3703,7 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi 100L, (short) 50, Duration.ofMillis(20), - state -> new CoordinatorResult<>(Collections.singletonList("record#2"), "response2"), + state -> new CoordinatorResult<>(List.of("record#2"), "response2"), TXN_OFFSET_COMMIT_LATEST_VERSION ); @@ -3605,17 +3714,17 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi // written to the log. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record#2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1"), ctx.coordinator.coordinator().records()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record#1"), transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2") ), writer.entries(TP)); // Write #3 with one record. CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(Collections.singletonList("record#3"), "response3") + state -> new CoordinatorResult<>(List.of("record#3"), "response3") ); // Verify that the write is not committed yet. @@ -3624,10 +3733,10 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi // Verify the state. 
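Editorial note: the transactional assertions in this test hinge on the shard keeping transactional records in a per-producer pending set until the end marker for that producer is committed, at which point they move into the regular record set. A simplified model of that bookkeeping, mirroring what pendingRecords() and records() return on MockCoordinatorShard (an illustration only, not the production replay path):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Simplified transactional bookkeeping behind the pendingRecords()/records() assertions.
    public class TransactionalState {
        private final Map<Long, Set<String>> pendingByProducer = new HashMap<>();
        private final Set<String> committed = new HashSet<>();

        public void replayTransactional(long producerId, String record) {
            pendingByProducer.computeIfAbsent(producerId, id -> new HashSet<>()).add(record);
        }

        public void replay(String record) {
            committed.add(record); // non-transactional records are visible immediately
        }

        public void replayEndTransactionMarker(long producerId, boolean commit) {
            Set<String> pending = pendingByProducer.remove(producerId);
            if (commit && pending != null) committed.addAll(pending); // an abort simply drops the pending set
        }

        public Set<String> pendingRecords(long producerId) {
            return pendingByProducer.getOrDefault(producerId, Set.of());
        }

        public Set<String> records() {
            return committed;
        }
    }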
assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 1L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record#2"), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1", "record#3"), ctx.coordinator.coordinator().records()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record#1"), transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2") ), writer.entries(TP)); @@ -3649,10 +3758,10 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi // Verify the state. assertEquals(4L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 1L, 2L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L, 1L, 2L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1", "record#2", "record#3"), ctx.coordinator.coordinator().records()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds(), "record#1"), transactionalRecords(100L, (short) 50, timer.time().milliseconds(), "record#2"), records(timer.time().milliseconds(), "record#3"), @@ -3700,6 +3809,7 @@ public long append( .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3710,7 +3820,7 @@ public long append( assertEquals(ACTIVE, ctx.state); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Keep a reference to the current coordinator. @@ -3814,6 +3924,7 @@ public void close() {} .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3823,7 +3934,7 @@ public void close() {} CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(1L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(2L), ctx.coordinator.snapshotRegistry().epochsList()); // Schedule a write operation that does not generate any records. CompletableFuture write = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), @@ -3838,7 +3949,7 @@ public void close() {} // Verify the state. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(2L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(2L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(2L), ctx.coordinator.snapshotRegistry().epochsList()); // The write operation should be completed. 
assertEquals("response1", write.get(5, TimeUnit.SECONDS)); @@ -3862,6 +3973,7 @@ public void testScheduleNonAtomicWriteOperation() throws ExecutionException, Int .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -3871,7 +3983,7 @@ public void testScheduleNonAtomicWriteOperation() throws ExecutionException, Int CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -3908,14 +4020,14 @@ public void testScheduleNonAtomicWriteOperation() throws ExecutionException, Int // the first three records. The 4th one is pending. assertEquals(3L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Collections.singletonList( + assertEquals(List.of( records(timer.time().milliseconds(), records.subList(0, 3)) ), writer.entries(TP)); @@ -3932,14 +4044,14 @@ public void testScheduleNonAtomicWriteOperation() throws ExecutionException, Int assertNull(ctx.currentBatch); assertEquals(4L, ctx.coordinator.lastWrittenOffset()); assertEquals(3L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(timer.time().milliseconds() - 11, records.subList(0, 3)), records(timer.time().milliseconds() - 11, records.subList(3, 4)) ), writer.entries(TP)); @@ -3970,6 +4082,7 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. 
@@ -3979,7 +4092,7 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -4007,8 +4120,8 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter // Verify the state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) @@ -4020,7 +4133,7 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter // Note that the batch will fail only when the batch is written because the // MemoryBatchBuilder always accept one record. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(Collections.singletonList(record), "write#2", null, true, false) + state -> new CoordinatorResult<>(List.of(record), "write#2", null, true, false) ); // Advance past the linger time to flush the pending batch. @@ -4035,13 +4148,13 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter // Verify the state. assertEquals(3L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L, 3L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Collections.singletonList( + assertEquals(List.of( records(timer.time().milliseconds() - 11, records.subList(0, 3)) ), writer.entries(TP)); } @@ -4065,6 +4178,7 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -4074,7 +4188,7 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertNull(ctx.currentBatch); // Get the max batch size. @@ -4104,8 +4218,8 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { // Verify the state. 
assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Arrays.asList( + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) @@ -4127,11 +4241,78 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { // Verify the state. The state should be reverted to the initial state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Collections.singletonList(0L), ctx.coordinator.snapshotRegistry().epochsList()); + assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); assertEquals(Collections.emptyList(), writer.entries(TP)); } + @Test + public void testEmptyBatch() throws Exception { + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ThrowingSerializer serializer = new ThrowingSerializer(new StringSerializer()); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(Duration.ofMillis(20)) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(new DirectEventProcessor()) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(serializer) + .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Schedule the loading. + runtime.scheduleLoadOperation(TP, 10); + + // Verify the initial state. + CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + assertNull(ctx.currentBatch); + + // Write #1, which fails. + serializer.throwOnNextOperation(); + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), + state -> new CoordinatorResult<>(List.of("1"), "response1")); + + // Write #1 should fail and leave an empty batch. + assertFutureThrows(write1, BufferOverflowException.class); + assertNotNull(ctx.currentBatch); + + // Write #2, with no records. + CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), + state -> new CoordinatorResult<>(Collections.emptyList(), "response2")); + + // Write #2 should not be attached to the empty batch. + assertTrue(write2.isDone()); + assertEquals("response2", write2.get(5, TimeUnit.SECONDS)); + + // Complete transaction #1. It will flush the current empty batch. + // The coordinator must not try to write an empty batch, otherwise the mock partition writer + // will throw an exception. + CompletableFuture complete1 = runtime.scheduleTransactionCompletion( + "complete#1", + TP, + 100L, + (short) 50, + 10, + TransactionResult.COMMIT, + DEFAULT_WRITE_TIMEOUT + ); + + // Verify that the completion is not committed yet. + assertFalse(complete1.isDone()); + + // Commit and verify that writes are completed. 
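Editorial note: testEmptyBatch drives the failure through a ThrowingSerializer helper that is referenced here but defined elsewhere in the test class. Assuming it behaves the way the test relies on (fail exactly one serialization after being armed, so the opened batch stays empty), it is essentially a one-shot throwing decorator along these lines; the name throwOnNextOperation and the BufferOverflowException come from the test, the rest is a sketch and not the real Serializer interface:

    import java.nio.BufferOverflowException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Function;

    // One-shot throwing decorator: behaves like the delegate until throwOnNextOperation()
    // is armed, then fails exactly once with BufferOverflowException.
    public class ThrowingOnce<T, R> implements Function<T, R> {
        private final Function<T, R> delegate;
        private final AtomicBoolean throwNext = new AtomicBoolean(false);

        public ThrowingOnce(Function<T, R> delegate) {
            this.delegate = delegate;
        }

        public void throwOnNextOperation() {
            throwNext.set(true);
        }

        @Override
        public R apply(T input) {
            if (throwNext.compareAndSet(true, false)) {
                throw new BufferOverflowException();
            }
            return delegate.apply(input);
        }
    }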
+ writer.commit(TP); + assertNull(complete1.get(5, TimeUnit.SECONDS)); + } + @Test public void testRecordFlushTime() throws Exception { MockTimer timer = new MockTimer(); @@ -4153,6 +4334,7 @@ public void testRecordFlushTime() throws Exception { .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .withAppendLingerMs(10) + .withExecutorService(mock(ExecutorService.class)) .build(); // Schedule the loading. @@ -4203,13 +4385,13 @@ public void testRecordFlushTime() throws Exception { // got flushed with all the records but the new one from #3. assertEquals(3L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList( + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Collections.singletonList( + assertEquals(List.of( records(firstBatchTimestamp, records.subList(0, 3)) ), writer.entries(TP)); verify(runtimeMetrics, times(1)).recordFlushTime(10); @@ -4220,13 +4402,13 @@ public void testRecordFlushTime() throws Exception { // Verify the state. The pending batch is flushed. assertEquals(4L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(Arrays.asList( + assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)), new MockCoordinatorShard.RecordAndMetadata(3, records.get(3)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(Arrays.asList( + assertEquals(List.of( records(secondBatchTimestamp, records.subList(0, 3)), records(secondBatchTimestamp, records.subList(3, 4)) ), writer.entries(TP)); @@ -4263,6 +4445,7 @@ public void testRecordEventPurgatoryTime() throws Exception { .withCoordinatorRuntimeMetrics(runtimeMetrics) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) .build(); // Loads the coordinator. Poll once to execute the load operation and once @@ -4273,11 +4456,11 @@ public void testRecordEventPurgatoryTime() throws Exception { // write#1 will be committed and update the high watermark. Record time spent in purgatory. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout, - state -> new CoordinatorResult<>(Collections.singletonList("record1"), "response1") + state -> new CoordinatorResult<>(List.of("record1"), "response1") ); // write#2 will time out sitting in the purgatory. Record time spent in purgatory. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, writeTimeout, - state -> new CoordinatorResult<>(Collections.singletonList("record2"), "response2") + state -> new CoordinatorResult<>(List.of("record2"), "response2") ); // write#3 will error while appending. Does not spend time in purgatory. CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, writeTimeout, @@ -4295,7 +4478,7 @@ public void testRecordEventPurgatoryTime() throws Exception { // Records have been written to the log. 
long writeTimestamp = timer.time().milliseconds(); - assertEquals(Arrays.asList( + assertEquals(List.of( records(writeTimestamp, "record1"), records(writeTimestamp, "record2") ), writer.entries(TP)); @@ -4327,6 +4510,231 @@ public void testRecordEventPurgatoryTime() throws Exception { verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); } + @Test + public void testWriteEventCompletesOnlyOnce() throws Exception { + // Completes once via timeout, then again with HWM update. + Duration writeTimeout = Duration.ofMillis(1000L); + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ManualEventProcessor processor = new ManualEventProcessor(); + CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(writeTimeout) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(processor) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(runtimeMetrics) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Loads the coordinator. Poll once to execute the load operation and once + // to complete the load. + runtime.scheduleLoadOperation(TP, 10); + processor.poll(); + processor.poll(); + + // write#1 will be committed and update the high watermark. Record time spent in purgatory. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout, + state -> new CoordinatorResult<>(List.of("record1"), "response1") + ); + + processor.poll(); + + // Records have been written to the log. + long writeTimestamp = timer.time().milliseconds(); + assertEquals(Collections.singletonList( + records(writeTimestamp, "record1") + ), writer.entries(TP)); + + // There is no pending high watermark. + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Advance the clock to time out the write event. Confirm write#1 is completed with a timeout. + timer.advanceClock(writeTimeout.toMillis() + 1L); + processor.poll(); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); + assertTrue(write1.isCompletedExceptionally()); + + // HWM update + writer.commit(TP, 1); + assertEquals(1, processor.size()); + assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Poll once to process the high watermark update and complete write#1. It has already + // been completed and this is a noop. + processor.poll(); + + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); + assertTrue(write1.isCompletedExceptionally()); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L); + } + + @Test + public void testCompleteTransactionEventCompletesOnlyOnce() throws Exception { + // Completes once via timeout, then again with HWM update. 
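Editorial note: both "...CompletesOnlyOnce" tests lean on the fact that a CompletableFuture can only ever be completed once. After the purgatory timeout completes the write future exceptionally, the later high-watermark update is a no-op on that future, which is why the final assertions still see isCompletedExceptionally() and a single recordEventPurgatoryTime call. That is standard CompletableFuture behaviour:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeoutException;

    public class CompleteOnceDemo {
        public static void main(String[] args) {
            CompletableFuture<String> write = new CompletableFuture<>();

            // First completion wins: the purgatory timeout fails the future...
            boolean timedOut = write.completeExceptionally(new TimeoutException("write timed out"));
            System.out.println(timedOut); // true

            // ...so the later high-watermark update cannot complete it again.
            boolean completedByHwm = write.complete("response1");
            System.out.println(completedByHwm); // false
            System.out.println(write.isCompletedExceptionally()); // true, the timeout sticks
        }
    }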
+ Duration writeTimeout = Duration.ofMillis(1000L); + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ManualEventProcessor processor = new ManualEventProcessor(); + CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(writeTimeout) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(processor) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(runtimeMetrics) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Loads the coordinator. Poll once to execute the load operation and once + // to complete the load. + runtime.scheduleLoadOperation(TP, 10); + processor.poll(); + processor.poll(); + + // transaction completion. + CompletableFuture write1 = runtime.scheduleTransactionCompletion( + "transactional-write", + TP, + 100L, + (short) 50, + 1, + TransactionResult.COMMIT, + writeTimeout + ); + processor.poll(); + + // Records have been written to the log. + assertEquals(List.of( + endTransactionMarker(100, (short) 50, timer.time().milliseconds(), 1, ControlRecordType.COMMIT) + ), writer.entries(TP)); + + // The write timeout tasks exist. + assertEquals(1, timer.size()); + assertFalse(write1.isDone()); + + // Advance the clock to time out the write event. Confirm write#1 is completed with a timeout. + timer.advanceClock(writeTimeout.toMillis() + 1L); + processor.poll(); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); + assertTrue(write1.isCompletedExceptionally()); + + // HWM update + writer.commit(TP, 1); + assertEquals(1, processor.size()); + assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Poll once to process the high watermark update and complete write#1. It has already + // been completed and this is a noop. 
+ processor.poll(); + + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); + assertTrue(write1.isCompletedExceptionally()); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L); + } + @Test + public void testCoordinatorExecutor() { + Duration writeTimeout = Duration.ofMillis(1000); + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ManualEventProcessor processor = new ManualEventProcessor(); + CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class); + ExecutorService executorService = mock(ExecutorService.class); + + when(executorService.submit(any(Runnable.class))).thenAnswer(args -> { + Runnable op = args.getArgument(0); + op.run(); + return CompletableFuture.completedFuture(null); + }); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(writeTimeout) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(processor) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(runtimeMetrics) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(new StringSerializer()) + .withExecutorService(executorService) + .build(); + + // Loads the coordinator. Poll once to execute the load operation and once + // to complete the load. + runtime.scheduleLoadOperation(TP, 10); + processor.poll(); + processor.poll(); + + // Schedule a write which schedules an async tasks. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout, + state -> { + state.executor.schedule( + "write#1#task", + () -> "task result", + (result, exception) -> { + assertEquals("task result", result); + assertNull(exception); + return new CoordinatorResult<>(Collections.singletonList("record2"), null); + } + ); + return new CoordinatorResult<>(Collections.singletonList("record1"), "response1"); + } + ); + + // Execute the write. + processor.poll(); + + // We should have a new write event in the queue as a result of the + // task being executed immediately. + assertEquals(1, processor.queue.size()); + + // Verify the state. + CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + assertEquals(1L, ctx.coordinator.lastWrittenOffset()); + assertEquals(0L, ctx.coordinator.lastCommittedOffset()); + assertEquals(List.of( + new MockCoordinatorShard.RecordAndMetadata(0, "record1") + ), ctx.coordinator.coordinator().fullRecords()); + + // Execute the pending write. + processor.poll(); + + // The processor must be empty now. + assertEquals(0, processor.queue.size()); + + // Verify the state. + assertEquals(2L, ctx.coordinator.lastWrittenOffset()); + assertEquals(0L, ctx.coordinator.lastCommittedOffset()); + assertEquals(List.of( + new MockCoordinatorShard.RecordAndMetadata(0, "record1"), + new MockCoordinatorShard.RecordAndMetadata(1, "record2") + ), ctx.coordinator.coordinator().fullRecords()); + + // Commit. 
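Editorial note: testCoordinatorExecutor checks that a task scheduled through state.executor runs on the executor service and then feeds its result back into the runtime as a new write event, which is why a second event appears in the processor queue after the first poll. A stripped-down model of that hand-off, assuming a plain Deque stands in for the event processor and a Supplier/Function pair stands in for the TaskRunnable/TaskOperation contract:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Function;
    import java.util.function.Supplier;

    // Sketch of the executor hand-off: the task runs off the event loop, and its result
    // is turned into a follow-up "write" that is queued back onto the event processor.
    public class ExecutorHandOff {
        private final Deque<Runnable> eventQueue = new ArrayDeque<>(); // stands in for the event processor
        private final ExecutorService executor = Executors.newSingleThreadExecutor();

        public void schedule(Supplier<String> task, Function<String, Runnable> onComplete) {
            executor.submit(() -> {
                String result = task.get(); // e.g. "task result"
                synchronized (eventQueue) {
                    eventQueue.add(onComplete.apply(result)); // e.g. a write producing "record2"
                }
            });
        }

        public void poll() {
            Runnable next;
            synchronized (eventQueue) {
                next = eventQueue.poll();
            }
            if (next != null) next.run();
        }
    }

In the test the executor is mocked to run submitted tasks inline, so the follow-up write is already queued by the time the first processor.poll() returns.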
+ writer.commit(TP); + processor.poll(); + assertTrue(write1.isDone()); + } + private static , U> ArgumentMatcher> coordinatorMatcher( CoordinatorRuntime runtime, TopicPartition tp diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/EventAccumulatorTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/EventAccumulatorTest.java index 1f357de34e379..d35a0776db075 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/EventAccumulatorTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/EventAccumulatorTest.java @@ -37,7 +37,7 @@ public class EventAccumulatorTest { - private class MockEvent implements EventAccumulator.Event { + private static class MockEvent implements EventAccumulator.Event { int key; int value; @@ -153,7 +153,7 @@ public void testKeyConcurrentAndOrderingGuarantees() { accumulator.addLast(event2); assertEquals(3, accumulator.size()); - MockEvent event = null; + MockEvent event; // Poll event0. event = accumulator.poll(); diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java index cd756a018af33..a8551f0734bbd 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java @@ -37,10 +37,10 @@ */ public class InMemoryPartitionWriter implements PartitionWriter { - private class PartitionState { - private ReentrantLock lock = new ReentrantLock(); - private List listeners = new ArrayList<>(); - private List entries = new ArrayList<>(); + private static class PartitionState { + private final ReentrantLock lock = new ReentrantLock(); + private final List listeners = new ArrayList<>(); + private final List entries = new ArrayList<>(); private long endOffset = 0L; private long committedOffset = 0L; } @@ -115,6 +115,14 @@ public long append( } } + @Override + public CompletableFuture deleteRecords( + TopicPartition tp, + long deleteBeforeOffset + ) throws KafkaException { + throw new RuntimeException("method not implemented"); + } + @Override public CompletableFuture maybeStartTransactionVerification( TopicPartition tp, @@ -134,9 +142,8 @@ public void commit( state.lock.lock(); try { state.committedOffset = offset; - state.listeners.forEach(listener -> { - listener.onHighWatermarkUpdated(tp, state.committedOffset); - }); + state.listeners.forEach(listener -> + listener.onHighWatermarkUpdated(tp, state.committedOffset)); } finally { state.lock.unlock(); } @@ -149,9 +156,8 @@ public void commit( state.lock.lock(); try { state.committedOffset = state.endOffset; - state.listeners.forEach(listener -> { - listener.onHighWatermarkUpdated(tp, state.committedOffset); - }); + state.listeners.forEach(listener -> + listener.onHighWatermarkUpdated(tp, state.committedOffset)); } finally { state.lock.unlock(); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java new file mode 100644 index 0000000000000..40b946bbefd9d --- /dev/null +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java @@ -0,0 
+1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.common.runtime; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Queue; + +public class MockCoordinatorExecutor implements CoordinatorExecutor { + private class ExecutorTask { + public final String key; + public final TaskRunnable task; + public final TaskOperation operation; + + ExecutorTask( + String key, + TaskRunnable task, + TaskOperation operation + ) { + this.key = Objects.requireNonNull(key); + this.task = Objects.requireNonNull(task); + this.operation = Objects.requireNonNull(operation); + } + + CoordinatorResult execute() { + try { + return operation.onComplete(task.run(), null); + } catch (Throwable ex) { + return operation.onComplete(null, ex); + } + } + } + + public static class ExecutorResult { + public final String key; + public final CoordinatorResult result; + + public ExecutorResult( + String key, + CoordinatorResult result + ) { + this.key = Objects.requireNonNull(key); + this.result = Objects.requireNonNull(result); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ExecutorResult that = (ExecutorResult) o; + + if (!Objects.equals(key, that.key)) return false; + return Objects.equals(result, that.result); + } + + @Override + public int hashCode() { + int result = key.hashCode(); + result = 31 * result + this.result.hashCode(); + return result; + } + + @Override + public String toString() { + return "ExecutorResult(" + + "key='" + key + '\'' + + ", result=" + result + + ')'; + } + } + + private final Map> tasks = new HashMap<>(); + private final Queue> queue = new ArrayDeque<>(); + + @Override + public boolean schedule( + String key, + TaskRunnable task, + TaskOperation operation + ) { + if (tasks.putIfAbsent(key, task) != null) return false; + return queue.add(new ExecutorTask<>(key, task, operation)); + } + + @Override + public boolean isScheduled(String key) { + return tasks.containsKey(key); + } + + public int size() { + return queue.size(); + } + + @Override + public void cancel(String key) { + tasks.remove(key); + } + + public List> poll() { + List> results = new ArrayList<>(); + for (ExecutorTask task : queue) { + tasks.remove(task.key, task.task); + results.add(new ExecutorResult<>(task.key, task.execute())); + } + queue.clear(); + return results; + } +} diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java index 9b321ef9a1eef..3acd3599e2d35 100644 --- 
a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java @@ -46,4 +46,25 @@ public static RequestContext requestContext( false ); } + + public static RequestContext requestContext( + ApiKeys apiKey, + Short version + ) { + return new RequestContext( + new RequestHeader( + apiKey, + version, + "client", + 0 + ), + "1", + InetAddress.getLoopbackAddress(), + KafkaPrincipal.ANONYMOUS, + ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), + SecurityProtocol.PLAINTEXT, + ClientInformation.EMPTY, + false + ); + } } diff --git a/core/src/main/java/kafka/log/remote/RemoteLogManager.java b/core/src/main/java/kafka/log/remote/RemoteLogManager.java index 9cd0515ad35f1..b5f9e408c9442 100644 --- a/core/src/main/java/kafka/log/remote/RemoteLogManager.java +++ b/core/src/main/java/kafka/log/remote/RemoteLogManager.java @@ -17,7 +17,6 @@ package kafka.log.remote; import kafka.cluster.Partition; -import kafka.log.AsyncOffsetReadFutureHolder; import kafka.log.UnifiedLog; import kafka.server.DelayedRemoteListOffsets; @@ -42,8 +41,8 @@ import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ChildFirstClassLoader; import org.apache.kafka.common.utils.CloseableIterator; -import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.ThreadUtils; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.common.CheckpointFile; @@ -55,6 +54,7 @@ import org.apache.kafka.server.log.remote.quota.RLMQuotaManagerConfig; import org.apache.kafka.server.log.remote.quota.RLMQuotaMetrics; import org.apache.kafka.server.log.remote.storage.ClassLoaderAwareRemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.CustomMetadataSizeLimitExceededException; import org.apache.kafka.server.log.remote.storage.LogSegmentData; import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig; import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager; @@ -73,12 +73,14 @@ import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; import org.apache.kafka.storage.internals.log.AbortedTxn; +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder; import org.apache.kafka.storage.internals.log.EpochEntry; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogSegment; import org.apache.kafka.storage.internals.log.OffsetIndex; import org.apache.kafka.storage.internals.log.OffsetPosition; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.apache.kafka.storage.internals.log.RemoteIndexCache; import org.apache.kafka.storage.internals.log.RemoteLogReadResult; import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; @@ -129,7 +131,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiConsumer; @@ -142,7 +143,6 @@ import scala.Option; import scala.jdk.javaapi.CollectionConverters; 
-import scala.util.Either; import static org.apache.kafka.server.config.ServerLogConfigs.LOG_DIR_CONFIG; import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX; @@ -161,7 +161,7 @@ public class RemoteLogManager implements Closeable { private static final Logger LOGGER = LoggerFactory.getLogger(RemoteLogManager.class); - private static final String REMOTE_LOG_READER_THREAD_NAME_PREFIX = "remote-log-reader"; + private static final String REMOTE_LOG_READER_THREAD_NAME_PATTERN = "remote-log-reader-%d"; private final RemoteLogManagerConfig rlmConfig; private final int brokerId; private final String logDir; @@ -254,18 +254,18 @@ public RemoteLogManager(RemoteLogManagerConfig rlmConfig, indexCache = new RemoteIndexCache(rlmConfig.remoteLogIndexFileCacheTotalSizeBytes(), remoteLogStorageManager, logDir); delayInMs = rlmConfig.remoteLogManagerTaskIntervalMs(); rlmCopyThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerCopierThreadPoolSize(), - "RLMCopyThreadPool", "kafka-rlm-copy-thread-pool-"); + "RLMCopyThreadPool", "kafka-rlm-copy-thread-pool-%d"); rlmExpirationThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerExpirationThreadPoolSize(), - "RLMExpirationThreadPool", "kafka-rlm-expiration-thread-pool-"); + "RLMExpirationThreadPool", "kafka-rlm-expiration-thread-pool-%d"); followerThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerThreadPoolSize(), - "RLMFollowerScheduledThreadPool", "kafka-rlm-follower-thread-pool-"); + "RLMFollowerScheduledThreadPool", "kafka-rlm-follower-thread-pool-%d"); metricsGroup.newGauge(REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC, rlmCopyThreadPool::getIdlePercent); remoteReadTimer = metricsGroup.newTimer(REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC, TimeUnit.MILLISECONDS, TimeUnit.SECONDS); remoteStorageReaderThreadPool = new RemoteStorageThreadPool( - REMOTE_LOG_READER_THREAD_NAME_PREFIX, + REMOTE_LOG_READER_THREAD_NAME_PATTERN, rlmConfig.remoteLogReaderThreads(), rlmConfig.remoteLogReaderMaxPendingTasks() ); @@ -289,6 +289,24 @@ public void updateFetchQuota(long quota) { rlmFetchQuotaManager.updateQuota(new Quota(quota, true)); } + public void resizeCopierThreadPool(int newSize) { + int currentSize = rlmCopyThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote copy thread pool size from {} to {}", currentSize, newSize); + rlmCopyThreadPool.setCorePoolSize(newSize); + } + + public void resizeExpirationThreadPool(int newSize) { + int currentSize = rlmExpirationThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote expiration thread pool size from {} to {}", currentSize, newSize); + rlmExpirationThreadPool.setCorePoolSize(newSize); + } + + public void resizeReaderThreadPool(int newSize) { + int currentSize = remoteStorageReaderThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote reader thread pool size from {} to {}", currentSize, newSize); + remoteStorageReaderThreadPool.setCorePoolSize(newSize); + } + private void removeMetrics() { metricsGroup.removeMetric(REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC); metricsGroup.removeMetric(REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC); @@ -474,9 +492,9 @@ public void stopLeaderCopyRLMTasks(Set partitions) { if (topicIdByPartitionMap.containsKey(tp)) { TopicIdPartition tpId = new TopicIdPartition(topicIdByPartitionMap.get(tp), tp); leaderCopyRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { - LOGGER.info("Cancelling the copy RLM task for tpId: {}", 
tpId); + LOGGER.info("Cancelling the copy RLM task for partition: {}", tpId); task.cancel(); - LOGGER.info("Resetting remote copy lag metrics for tpId: {}", tpId); + LOGGER.info("Resetting remote copy lag metrics for partition: {}", tpId); ((RLMCopyTask) task.rlmTask).resetLagStats(); return null; }); @@ -501,17 +519,17 @@ public void stopPartitions(Set stopPartitions, if (topicIdByPartitionMap.containsKey(tp)) { TopicIdPartition tpId = new TopicIdPartition(topicIdByPartitionMap.get(tp), tp); leaderCopyRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { - LOGGER.info("Cancelling the copy RLM task for tpId: {}", tpId); + LOGGER.info("Cancelling the copy RLM task for partition: {}", tpId); task.cancel(); return null; }); leaderExpirationRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { - LOGGER.info("Cancelling the expiration RLM task for tpId: {}", tpId); + LOGGER.info("Cancelling the expiration RLM task for partition: {}", tpId); task.cancel(); return null; }); followerRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { - LOGGER.info("Cancelling the follower RLM task for tpId: {}", tpId); + LOGGER.info("Cancelling the follower RLM task for partition: {}", tpId); task.cancel(); return null; }); @@ -644,13 +662,13 @@ private Optional maybeLeaderEpoch(int leaderEpoch) { return leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH ? Optional.empty() : Optional.of(leaderEpoch); } - public AsyncOffsetReadFutureHolder>> asyncOffsetRead( + public AsyncOffsetReadFutureHolder asyncOffsetRead( TopicPartition topicPartition, Long timestamp, Long startingOffset, LeaderEpochFileCache leaderEpochCache, Supplier> searchLocalLog) { - CompletableFuture>> taskFuture = new CompletableFuture<>(); + CompletableFuture taskFuture = new CompletableFuture<>(); Future jobFuture = remoteStorageReaderThreadPool.submit( new RemoteLogOffsetReader(this, topicPartition, timestamp, startingOffset, leaderEpochCache, searchLocalLog, result -> { TopicPartitionOperationKey key = new TopicPartitionOperationKey(topicPartition.topic(), topicPartition.partition()); @@ -759,11 +777,7 @@ public boolean isCancelled() { * @return the leader epoch entries */ List getLeaderEpochEntries(UnifiedLog log, long startOffset, long endOffset) { - if (log.leaderEpochCache().isDefined()) { - return log.leaderEpochCache().get().epochEntriesInRange(startOffset, endOffset); - } else { - return Collections.emptyList(); - } + return log.leaderEpochCache().epochEntriesInRange(startOffset, endOffset); } // VisibleForTesting @@ -790,8 +804,14 @@ protected LogContext getLogContext() { } public void run() { - if (isCancelled()) + if (isCancelled()) { + logger.debug("Skipping the current run for partition {} as it is cancelled", topicIdPartition); return; + } + if (!remoteLogMetadataManager.isReady(topicIdPartition)) { + logger.debug("Skipping the current run for partition {} as the remote-log metadata is not ready", topicIdPartition); + return; + } try { Optional unifiedLogOptional = fetchLog.apply(topicIdPartition.topicPartition()); @@ -803,13 +823,13 @@ public void run() { execute(unifiedLogOptional.get()); } catch (InterruptedException ex) { if (!isCancelled()) { - logger.warn("Current thread for topic-partition-id {} is interrupted", topicIdPartition, ex); + logger.warn("Current thread for partition {} is interrupted", topicIdPartition, ex); } } catch (RetriableException ex) { - logger.debug("Encountered a retryable error while executing current task for topic-partition {}", topicIdPartition, ex); + 
logger.debug("Encountered a retryable error while executing current task for partition {}", topicIdPartition, ex); } catch (Exception ex) { if (!isCancelled()) { - logger.warn("Current task for topic-partition {} received error but it will be scheduled", topicIdPartition, ex); + logger.warn("Current task for partition {} received error but it will be scheduled", topicIdPartition, ex); } } } @@ -1225,11 +1245,6 @@ void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, ExecutionE } final UnifiedLog log = logOptional.get(); - final Option leaderEpochCacheOption = log.leaderEpochCache(); - if (leaderEpochCacheOption.isEmpty()) { - logger.debug("No leader epoch cache available for partition: {}", topicIdPartition); - return; - } // Cleanup remote log segments and update the log start offset if applicable. final Iterator segmentMetadataIter = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition); @@ -1257,7 +1272,7 @@ void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, ExecutionE final List remoteLeaderEpochs = new ArrayList<>(epochsSet); Collections.sort(remoteLeaderEpochs); - LeaderEpochFileCache leaderEpochCache = leaderEpochCacheOption.get(); + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); // Build the leader epoch map by filtering the epochs that do not have any records. NavigableMap epochWithOffsets = buildFilteredLeaderEpochMap(leaderEpochCache.epochWithOffsets()); @@ -1393,8 +1408,9 @@ void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, ExecutionE } private Optional buildRetentionTimeData(long retentionMs) { - return retentionMs > -1 - ? Optional.of(new RetentionTimeData(retentionMs, time.milliseconds() - retentionMs)) + long cleanupUntilMs = time.milliseconds() - retentionMs; + return retentionMs > -1 && cleanupUntilMs >= 0 + ? Optional.of(new RetentionTimeData(retentionMs, cleanupUntilMs)) : Optional.empty(); } @@ -1655,10 +1671,8 @@ public FetchDataInfo read(RemoteStorageFetchInfo remoteStorageFetchInfo) throws OptionalInt epoch = OptionalInt.empty(); if (logOptional.isPresent()) { - Option leaderEpochCache = logOptional.get().leaderEpochCache(); - if (leaderEpochCache != null && leaderEpochCache.isDefined()) { - epoch = leaderEpochCache.get().epochForOffset(offset); - } + LeaderEpochFileCache leaderEpochCache = logOptional.get().leaderEpochCache(); + epoch = leaderEpochCache.epochForOffset(offset); } Optional rlsMetadataOptional = epoch.isPresent() @@ -1794,7 +1808,7 @@ private void collectAbortedTransactions(long startOffset, UnifiedLog log) throws RemoteStorageException { TopicPartition tp = segmentMetadata.topicIdPartition().topicPartition(); boolean isSearchComplete = false; - LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache().getOrElse(null); + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); Optional currentMetadataOpt = Optional.of(segmentMetadata); while (!isSearchComplete && currentMetadataOpt.isPresent()) { RemoteLogSegmentMetadata currentMetadata = currentMetadataOpt.get(); @@ -1841,13 +1855,9 @@ private void collectAbortedTransactionInLocalSegments(long startOffset, // visible for testing. 
Optional findNextSegmentMetadata(RemoteLogSegmentMetadata segmentMetadata, - Option leaderEpochFileCacheOption) throws RemoteStorageException { - if (leaderEpochFileCacheOption.isEmpty()) { - return Optional.empty(); - } - + LeaderEpochFileCache leaderEpochFileCacheOption) throws RemoteStorageException { long nextSegmentBaseOffset = segmentMetadata.endOffset() + 1; - OptionalInt epoch = leaderEpochFileCacheOption.get().epochForOffset(nextSegmentBaseOffset); + OptionalInt epoch = leaderEpochFileCacheOption.epochForOffset(nextSegmentBaseOffset); return epoch.isPresent() ? fetchRemoteLogSegmentMetadata(segmentMetadata.topicIdPartition().topicPartition(), epoch.getAsInt(), nextSegmentBaseOffset) : Optional.empty(); @@ -1862,7 +1872,7 @@ Optional findNextSegmentMetadata(RemoteLogSegmentMetad * Visible for testing * @param tp The topic partition. * @param offset The offset to start the search. - * @param leaderEpochCache The leader epoch file cache, this could be null. + * @param leaderEpochCache The leader epoch file cache. * @return The next segment metadata that contains the transaction index. The transaction index may or may not exist * in that segment metadata which depends on the RLMM plugin implementation. The caller of this method should handle * for both the cases. @@ -1871,9 +1881,6 @@ Optional findNextSegmentMetadata(RemoteLogSegmentMetad Optional findNextSegmentWithTxnIndex(TopicPartition tp, long offset, LeaderEpochFileCache leaderEpochCache) throws RemoteStorageException { - if (leaderEpochCache == null) { - return Optional.empty(); - } OptionalInt initialEpochOpt = leaderEpochCache.epochForOffset(offset); if (initialEpochOpt.isEmpty()) { return Optional.empty(); @@ -1908,30 +1915,27 @@ EnrichedRecordBatch findFirstBatch(RemoteLogInputStream remoteLogInputStream, lo OffsetAndEpoch findHighestRemoteOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { OffsetAndEpoch offsetAndEpoch = null; - Option leaderEpochCacheOpt = log.leaderEpochCache(); - if (leaderEpochCacheOpt.isDefined()) { - LeaderEpochFileCache cache = leaderEpochCacheOpt.get(); - Optional maybeEpochEntry = cache.latestEntry(); - while (offsetAndEpoch == null && maybeEpochEntry.isPresent()) { - int epoch = maybeEpochEntry.get().epoch; - Optional highestRemoteOffsetOpt = - remoteLogMetadataManager.highestOffsetForEpoch(topicIdPartition, epoch); - if (highestRemoteOffsetOpt.isPresent()) { - Map.Entry entry = cache.endOffsetFor(epoch, log.logEndOffset()); - int requestedEpoch = entry.getKey(); - long endOffset = entry.getValue(); - long highestRemoteOffset = highestRemoteOffsetOpt.get(); - if (endOffset <= highestRemoteOffset) { - LOGGER.info("The end-offset for epoch {}: ({}, {}) is less than or equal to the " + - "highest-remote-offset: {} for partition: {}", epoch, requestedEpoch, endOffset, - highestRemoteOffset, topicIdPartition); - offsetAndEpoch = new OffsetAndEpoch(endOffset - 1, requestedEpoch); - } else { - offsetAndEpoch = new OffsetAndEpoch(highestRemoteOffset, epoch); - } + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + Optional maybeEpochEntry = leaderEpochCache.latestEntry(); + while (offsetAndEpoch == null && maybeEpochEntry.isPresent()) { + int epoch = maybeEpochEntry.get().epoch; + Optional highestRemoteOffsetOpt = + remoteLogMetadataManager.highestOffsetForEpoch(topicIdPartition, epoch); + if (highestRemoteOffsetOpt.isPresent()) { + Map.Entry entry = leaderEpochCache.endOffsetFor(epoch, log.logEndOffset()); + int requestedEpoch = entry.getKey(); + long 
endOffset = entry.getValue(); + long highestRemoteOffset = highestRemoteOffsetOpt.get(); + if (endOffset <= highestRemoteOffset) { + LOGGER.info("The end-offset for epoch {}: ({}, {}) is less than or equal to the " + + "highest-remote-offset: {} for partition: {}", epoch, requestedEpoch, endOffset, + highestRemoteOffset, topicIdPartition); + offsetAndEpoch = new OffsetAndEpoch(endOffset - 1, requestedEpoch); + } else { + offsetAndEpoch = new OffsetAndEpoch(highestRemoteOffset, epoch); } - maybeEpochEntry = cache.previousEntry(epoch); } + maybeEpochEntry = leaderEpochCache.previousEntry(epoch); } if (offsetAndEpoch == null) { offsetAndEpoch = new OffsetAndEpoch(-1L, RecordBatch.NO_PARTITION_LEADER_EPOCH); @@ -1941,20 +1945,17 @@ OffsetAndEpoch findHighestRemoteOffset(TopicIdPartition topicIdPartition, Unifie long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { Optional logStartOffset = Optional.empty(); - Option maybeLeaderEpochFileCache = log.leaderEpochCache(); - if (maybeLeaderEpochFileCache.isDefined()) { - LeaderEpochFileCache cache = maybeLeaderEpochFileCache.get(); - OptionalInt earliestEpochOpt = cache.earliestEntry() - .map(epochEntry -> OptionalInt.of(epochEntry.epoch)) - .orElseGet(OptionalInt::empty); - while (logStartOffset.isEmpty() && earliestEpochOpt.isPresent()) { - Iterator iterator = - remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, earliestEpochOpt.getAsInt()); - if (iterator.hasNext()) { - logStartOffset = Optional.of(iterator.next().startOffset()); - } - earliestEpochOpt = cache.nextEpoch(earliestEpochOpt.getAsInt()); + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + OptionalInt earliestEpochOpt = leaderEpochCache.earliestEntry() + .map(epochEntry -> OptionalInt.of(epochEntry.epoch)) + .orElseGet(OptionalInt::empty); + while (logStartOffset.isEmpty() && earliestEpochOpt.isPresent()) { + Iterator iterator = + remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, earliestEpochOpt.getAsInt()); + if (iterator.hasNext()) { + logStartOffset = Optional.of(iterator.next().startOffset()); } + earliestEpochOpt = leaderEpochCache.nextEpoch(earliestEpochOpt.getAsInt()); } return logStartOffset.orElseGet(log::localLogStartOffset); } @@ -2069,28 +2070,10 @@ public void close() { } } - private static void shutdownAndAwaitTermination(ExecutorService pool, String poolName, long timeout, TimeUnit timeUnit) { - // This pattern of shutting down thread pool is adopted from here: https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ExecutorService.html - LOGGER.info("Shutting down of thread pool {} is started", poolName); - pool.shutdown(); // Disable new tasks from being submitted - try { - // Wait a while for existing tasks to terminate - if (!pool.awaitTermination(timeout, timeUnit)) { - LOGGER.info("Shutting down of thread pool {} could not be completed. It will retry cancelling the tasks using shutdownNow.", poolName); - pool.shutdownNow(); // Cancel currently executing tasks - // Wait a while for tasks to respond to being cancelled - if (!pool.awaitTermination(timeout, timeUnit)) - LOGGER.warn("Shutting down of thread pool {} could not be completed even after retrying cancellation of the tasks using shutdownNow.", poolName); - } - } catch (InterruptedException ex) { - // (Re-)Cancel if current thread also interrupted - LOGGER.warn("Encountered InterruptedException while shutting down thread pool {}. 
It will retry cancelling the tasks using shutdownNow.", poolName); - pool.shutdownNow(); - // Preserve interrupt status - Thread.currentThread().interrupt(); - } - - LOGGER.info("Shutting down of thread pool {} is completed", poolName); + private static void shutdownAndAwaitTermination(ExecutorService executor, String poolName, long timeout, TimeUnit timeUnit) { + LOGGER.info("Shutting down {} executor", poolName); + ThreadUtils.shutdownExecutorServiceQuietly(executor, timeout, timeUnit); + LOGGER.info("{} executor shutdown completed", poolName); } //Visible for testing @@ -2144,31 +2127,32 @@ RLMTaskWithFuture followerTask(TopicIdPartition partition) { static class RLMScheduledThreadPool { private static final Logger LOGGER = LoggerFactory.getLogger(RLMScheduledThreadPool.class); - private final int poolSize; private final String threadPoolName; - private final String threadNamePrefix; + private final String threadNamePattern; private final ScheduledThreadPoolExecutor scheduledThreadPool; - public RLMScheduledThreadPool(int poolSize, String threadPoolName, String threadNamePrefix) { - this.poolSize = poolSize; + public RLMScheduledThreadPool(int poolSize, String threadPoolName, String threadNamePattern) { this.threadPoolName = threadPoolName; - this.threadNamePrefix = threadNamePrefix; - scheduledThreadPool = createPool(); + this.threadNamePattern = threadNamePattern; + scheduledThreadPool = createPool(poolSize); + } + + public void setCorePoolSize(int newSize) { + scheduledThreadPool.setCorePoolSize(newSize); + } + + public int getCorePoolSize() { + return scheduledThreadPool.getCorePoolSize(); } - private ScheduledThreadPoolExecutor createPool() { + private ScheduledThreadPoolExecutor createPool(int poolSize) { + ThreadFactory threadFactory = ThreadUtils.createThreadFactory(threadNamePattern, true, + (t, e) -> LOGGER.error("Uncaught exception in thread '{}':", t.getName(), e)); ScheduledThreadPoolExecutor threadPool = new ScheduledThreadPoolExecutor(poolSize); threadPool.setRemoveOnCancelPolicy(true); threadPool.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); threadPool.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); - threadPool.setThreadFactory(new ThreadFactory() { - private final AtomicInteger sequence = new AtomicInteger(); - - public Thread newThread(Runnable r) { - return KafkaThread.daemon(threadNamePrefix + sequence.incrementAndGet(), r); - } - }); - + threadPool.setThreadFactory(threadFactory); return threadPool; } diff --git a/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java b/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java index 09b2c6dccfaad..493139248e642 100644 --- a/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java +++ b/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java @@ -19,19 +19,18 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Optional; import java.util.concurrent.Callable; import java.util.function.Consumer; import java.util.function.Supplier; import scala.Option; import scala.jdk.javaapi.OptionConverters; -import scala.util.Either; -import scala.util.Left; -import scala.util.Right; public class RemoteLogOffsetReader implements Callable { private static final Logger LOGGER = LoggerFactory.getLogger(RemoteLogOffsetReader.class); 
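The RLMScheduledThreadPool changes above swap the anonymous KafkaThread factory for a "%d"-style thread-name pattern, expose setCorePoolSize for runtime resizing (used by the new resize*ThreadPool methods), and replace the hand-rolled shutdown loop with ThreadUtils.shutdownExecutorServiceQuietly. The following is a JDK-only sketch of that pattern; the helper below is illustrative and is not Kafka's ThreadUtils API.

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class NamedDaemonPoolSketch {

    // Each new thread gets the pattern formatted with an increasing sequence number,
    // e.g. "kafka-rlm-copy-thread-pool-0", "kafka-rlm-copy-thread-pool-1", ...
    static ThreadFactory namedDaemonThreadFactory(String namePattern) {
        AtomicLong sequence = new AtomicLong();
        return runnable -> {
            Thread thread = new Thread(runnable, String.format(namePattern, sequence.getAndIncrement()));
            thread.setDaemon(true);
            thread.setUncaughtExceptionHandler((t, e) ->
                    System.err.printf("Uncaught exception in thread '%s': %s%n", t.getName(), e));
            return thread;
        };
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledThreadPoolExecutor pool = new ScheduledThreadPoolExecutor(2);
        pool.setThreadFactory(namedDaemonThreadFactory("kafka-rlm-copy-thread-pool-%d"));
        pool.setRemoveOnCancelPolicy(true);

        pool.execute(() -> System.out.println(Thread.currentThread().getName()));

        // Runtime resize: ScheduledThreadPoolExecutor honours setCorePoolSize after creation,
        // which is what the new resize methods in RemoteLogManager delegate to.
        pool.setCorePoolSize(4);

        // Quiet shutdown: orderly shutdown first, then force-cancel whatever is left.
        pool.shutdown();
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
}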
@@ -40,8 +39,8 @@ public class RemoteLogOffsetReader implements Callable { private final long timestamp; private final long startingOffset; private final LeaderEpochFileCache leaderEpochCache; - private final Supplier> searchInLocalLog; - private final Consumer>> callback; + private final Supplier> searchInLocalLog; + private final Consumer callback; public RemoteLogOffsetReader(RemoteLogManager rlm, TopicPartition tp, @@ -49,29 +48,28 @@ public RemoteLogOffsetReader(RemoteLogManager rlm, long startingOffset, LeaderEpochFileCache leaderEpochCache, Supplier> searchInLocalLog, - Consumer>> callback) { + Consumer callback) { this.rlm = rlm; this.tp = tp; this.timestamp = timestamp; this.startingOffset = startingOffset; this.leaderEpochCache = leaderEpochCache; - this.searchInLocalLog = searchInLocalLog; + this.searchInLocalLog = () -> OptionConverters.toJava(searchInLocalLog.get()); this.callback = callback; } @Override public Void call() throws Exception { - Either> result; + OffsetResultHolder.FileRecordsOrError result; try { // If it is not found in remote storage, then search in the local storage starting with local log start offset. - Option timestampAndOffsetOpt = - OptionConverters.toScala(rlm.findOffsetByTimestamp(tp, timestamp, startingOffset, leaderEpochCache)) - .orElse(searchInLocalLog::get); - result = Right.apply(timestampAndOffsetOpt); + Optional timestampAndOffsetOpt = + rlm.findOffsetByTimestamp(tp, timestamp, startingOffset, leaderEpochCache).or(searchInLocalLog); + result = new OffsetResultHolder.FileRecordsOrError(Optional.empty(), timestampAndOffsetOpt); } catch (Exception e) { // NOTE: All the exceptions from the secondary storage are catched instead of only the KafkaException. LOGGER.error("Error occurred while reading the remote log offset for {}", tp, e); - result = Left.apply(e); + result = new OffsetResultHolder.FileRecordsOrError(Optional.of(e), Optional.empty()); } callback.accept(result); return null; diff --git a/core/src/main/java/kafka/server/NetworkUtils.java b/core/src/main/java/kafka/server/NetworkUtils.java index 83093c19e1092..10236c0ce74a5 100644 --- a/core/src/main/java/kafka/server/NetworkUtils.java +++ b/core/src/main/java/kafka/server/NetworkUtils.java @@ -47,7 +47,6 @@ public static NetworkClient buildNetworkClient(String prefix, config.interBrokerListenerName(), config.saslMechanismInterBrokerProtocol(), time, - config.saslInterBrokerHandshakeRequestEnable(), logContext ); @@ -89,4 +88,4 @@ public static NetworkClient buildNetworkClient(String prefix, MetadataRecoveryStrategy.NONE ); } -} \ No newline at end of file +} diff --git a/core/src/main/java/kafka/server/TierStateMachine.java b/core/src/main/java/kafka/server/TierStateMachine.java index f5f83cb240bae..d316e70da2e3e 100644 --- a/core/src/main/java/kafka/server/TierStateMachine.java +++ b/core/src/main/java/kafka/server/TierStateMachine.java @@ -229,12 +229,8 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, } RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlm.fetchRemoteLogSegmentMetadata(topicPartition, targetEpoch, previousOffsetToLeaderLocalLogStartOffset) - .orElseThrow(() -> new RemoteStorageException("Couldn't build the state from remote store for partition: " + topicPartition + - ", currentLeaderEpoch: " + currentLeaderEpoch + - ", leaderLocalLogStartOffset: " + leaderLocalLogStartOffset + - ", leaderLogStartOffset: " + leaderLogStartOffset + - ", epoch: " + targetEpoch + - "as the previous remote log segment metadata was not found")); + .orElseThrow(() -> 
buildRemoteStorageException(topicPartition, targetEpoch, currentLeaderEpoch, + leaderLocalLogStartOffset, leaderLogStartOffset)); // Build leader epoch cache, producer snapshots until remoteLogSegmentMetadata.endOffset() and start @@ -251,9 +247,7 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, // Build leader epoch cache. List epochs = readLeaderEpochCheckpoint(rlm, remoteLogSegmentMetadata); - if (unifiedLog.leaderEpochCache().isDefined()) { - unifiedLog.leaderEpochCache().get().assign(epochs); - } + unifiedLog.leaderEpochCache().assign(epochs); log.info("Updated the epoch cache from remote tier till offset: {} with size: {} for {}", leaderLocalLogStartOffset, epochs.size(), partition); @@ -265,4 +259,17 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, return nextOffset; } + + private RemoteStorageException buildRemoteStorageException(TopicPartition topicPartition, + int targetEpoch, + int currentLeaderEpoch, + long leaderLocalLogStartOffset, + long leaderLogStartOffset) { + String message = String.format( + "Couldn't build the state from remote store for partition: %s, currentLeaderEpoch: %d, " + + "leaderLocalLogStartOffset: %d, leaderLogStartOffset: %d, epoch: %d as the previous remote log segment metadata was not found", + topicPartition, currentLeaderEpoch, leaderLocalLogStartOffset, leaderLogStartOffset, targetEpoch + ); + return new RemoteStorageException(message); + } } diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index 75dc0d7dc9b85..b4764f8d284d8 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -23,10 +23,10 @@ import kafka.server.AutoTopicCreationManager; import kafka.server.DelegationTokenManager; import kafka.server.FetchManager; +import kafka.server.ForwardingManager; import kafka.server.KafkaApis; import kafka.server.KafkaConfig; import kafka.server.MetadataCache; -import kafka.server.MetadataSupport; import kafka.server.QuotaFactory.QuotaManagers; import kafka.server.ReplicaManager; import kafka.server.metadata.ConfigRepository; @@ -45,11 +45,9 @@ import scala.jdk.javaapi.OptionConverters; - - public class KafkaApisBuilder { private RequestChannel requestChannel = null; - private MetadataSupport metadataSupport = null; + private ForwardingManager forwardingManager = null; private ReplicaManager replicaManager = null; private GroupCoordinator groupCoordinator = null; private TransactionCoordinator txnCoordinator = null; @@ -62,13 +60,13 @@ public class KafkaApisBuilder { private Optional authorizer = Optional.empty(); private QuotaManagers quotas = null; private FetchManager fetchManager = null; - private Optional sharePartitionManager = Optional.empty(); + private SharePartitionManager sharePartitionManager = null; private BrokerTopicStats brokerTopicStats = null; private String clusterId = "clusterId"; private Time time = Time.SYSTEM; private DelegationTokenManager tokenManager = null; private ApiVersionManager apiVersionManager = null; - private Optional clientMetricsManager = Optional.empty(); + private ClientMetricsManager clientMetricsManager = null; private Optional shareCoordinator = Optional.empty(); public KafkaApisBuilder setRequestChannel(RequestChannel requestChannel) { @@ -76,8 +74,8 @@ public KafkaApisBuilder setRequestChannel(RequestChannel requestChannel) { return this; } - public KafkaApisBuilder 
setMetadataSupport(MetadataSupport metadataSupport) { - this.metadataSupport = metadataSupport; + public KafkaApisBuilder setForwardingManager(ForwardingManager forwardingManager) { + this.forwardingManager = forwardingManager; return this; } @@ -146,7 +144,7 @@ public KafkaApisBuilder setFetchManager(FetchManager fetchManager) { return this; } - public KafkaApisBuilder setSharePartitionManager(Optional sharePartitionManager) { + public KafkaApisBuilder setSharePartitionManager(SharePartitionManager sharePartitionManager) { this.sharePartitionManager = sharePartitionManager; return this; } @@ -176,14 +174,15 @@ public KafkaApisBuilder setApiVersionManager(ApiVersionManager apiVersionManager return this; } - public KafkaApisBuilder setClientMetricsManager(Optional clientMetricsManager) { + public KafkaApisBuilder setClientMetricsManager(ClientMetricsManager clientMetricsManager) { this.clientMetricsManager = clientMetricsManager; return this; } + @SuppressWarnings({"CyclomaticComplexity"}) public KafkaApis build() { if (requestChannel == null) throw new RuntimeException("you must set requestChannel"); - if (metadataSupport == null) throw new RuntimeException("you must set metadataSupport"); + if (forwardingManager == null) throw new RuntimeException("you must set forwardingManager"); if (replicaManager == null) throw new RuntimeException("You must set replicaManager"); if (groupCoordinator == null) throw new RuntimeException("You must set groupCoordinator"); if (txnCoordinator == null) throw new RuntimeException("You must set txnCoordinator"); @@ -195,11 +194,13 @@ public KafkaApis build() { if (metrics == null) throw new RuntimeException("You must set metrics"); if (quotas == null) throw new RuntimeException("You must set quotas"); if (fetchManager == null) throw new RuntimeException("You must set fetchManager"); + if (sharePartitionManager == null) throw new RuntimeException("You must set sharePartitionManager"); + if (clientMetricsManager == null) throw new RuntimeException("You must set clientMetricsManager"); if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled()); if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager"); return new KafkaApis(requestChannel, - metadataSupport, + forwardingManager, replicaManager, groupCoordinator, txnCoordinator, @@ -213,12 +214,12 @@ public KafkaApis build() { OptionConverters.toScala(authorizer), quotas, fetchManager, - OptionConverters.toScala(sharePartitionManager), + sharePartitionManager, brokerTopicStats, clusterId, time, tokenManager, apiVersionManager, - OptionConverters.toScala(clientMetricsManager)); + clientMetricsManager); } } diff --git a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java index 447117604217e..53f6c0dd305ed 100644 --- a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java @@ -56,7 +56,6 @@ public class LogManagerBuilder { private BrokerTopicStats brokerTopicStats = null; private LogDirFailureChannel logDirFailureChannel = null; private Time time = Time.SYSTEM; - private boolean keepPartitionMetadataFile = true; private boolean remoteStorageSystemEnable = false; private long initialTaskDelayMs = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT; @@ -145,11 +144,6 @@ public LogManagerBuilder setTime(Time time) { return this; } - public LogManagerBuilder 
setKeepPartitionMetadataFile(boolean keepPartitionMetadataFile) { - this.keepPartitionMetadataFile = keepPartitionMetadataFile; - return this; - } - public LogManagerBuilder setRemoteStorageSystemEnable(boolean remoteStorageSystemEnable) { this.remoteStorageSystemEnable = remoteStorageSystemEnable; return this; @@ -186,7 +180,6 @@ public LogManager build() { brokerTopicStats, logDirFailureChannel, time, - keepPartitionMetadataFile, remoteStorageSystemEnable, initialTaskDelayMs); } diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index f64dc96e6e519..626b53c12c4a4 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -32,7 +32,6 @@ import kafka.server.QuotaFactory.QuotaManagers; import kafka.server.ReplicaManager; import kafka.server.share.DelayedShareFetch; -import kafka.zk.KafkaZkClient; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Time; @@ -64,7 +63,6 @@ public class ReplicaManagerBuilder { private BrokerTopicStats brokerTopicStats = null; private AtomicBoolean isShuttingDown = new AtomicBoolean(false); private Optional remoteLogManager = Optional.empty(); - private Optional zkClient = Optional.empty(); private Optional> delayedProducePurgatory = Optional.empty(); private Optional> delayedFetchPurgatory = Optional.empty(); private Optional> delayedDeleteRecordsPurgatory = Optional.empty(); @@ -137,11 +135,6 @@ public ReplicaManagerBuilder setIsShuttingDown(AtomicBoolean isShuttingDown) { return this; } - public ReplicaManagerBuilder setZkClient(KafkaZkClient zkClient) { - this.zkClient = Optional.of(zkClient); - return this; - } - public ReplicaManagerBuilder setDelayedProducePurgatory(DelayedOperationPurgatory delayedProducePurgatory) { this.delayedProducePurgatory = Optional.of(delayedProducePurgatory); return this; @@ -210,7 +203,6 @@ public ReplicaManager build() { alterPartitionManager, brokerTopicStats, isShuttingDown, - OptionConverters.toScala(zkClient), OptionConverters.toScala(delayedProducePurgatory), OptionConverters.toScala(delayedFetchPurgatory), OptionConverters.toScala(delayedDeleteRecordsPurgatory), diff --git a/core/src/main/java/kafka/server/share/DelayedShareFetch.java b/core/src/main/java/kafka/server/share/DelayedShareFetch.java index 4cb9ce0cf4241..1422e08524a77 100644 --- a/core/src/main/java/kafka/server/share/DelayedShareFetch.java +++ b/core/src/main/java/kafka/server/share/DelayedShareFetch.java @@ -25,8 +25,10 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.server.purgatory.DelayedOperation; +import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; @@ -36,11 +38,12 @@ import org.slf4j.LoggerFactory; import java.util.Collections; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.locks.Lock; +import 
java.util.function.BiConsumer; import java.util.stream.Collectors; import scala.Tuple2; @@ -55,28 +58,38 @@ public class DelayedShareFetch extends DelayedOperation { private static final Logger log = LoggerFactory.getLogger(DelayedShareFetch.class); - private final ShareFetchData shareFetchData; + private final ShareFetch shareFetch; private final ReplicaManager replicaManager; - - private Map partitionsAcquired; - private Map partitionsAlreadyFetched; - private final SharePartitionManager sharePartitionManager; + private final BiConsumer exceptionHandler; + private final PartitionMaxBytesStrategy partitionMaxBytesStrategy; // The topic partitions that need to be completed for the share fetch request are given by sharePartitions. // sharePartitions is a subset of shareFetchData. The order of insertion/deletion of entries in sharePartitions is important. private final LinkedHashMap sharePartitions; + private LinkedHashMap partitionsAcquired; + private LinkedHashMap partitionsAlreadyFetched; DelayedShareFetch( - ShareFetchData shareFetchData, + ShareFetch shareFetch, ReplicaManager replicaManager, - SharePartitionManager sharePartitionManager, + BiConsumer exceptionHandler, LinkedHashMap sharePartitions) { - super(shareFetchData.fetchParams().maxWaitMs, Optional.empty()); - this.shareFetchData = shareFetchData; + this(shareFetch, replicaManager, exceptionHandler, sharePartitions, PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)); + } + + DelayedShareFetch( + ShareFetch shareFetch, + ReplicaManager replicaManager, + BiConsumer exceptionHandler, + LinkedHashMap sharePartitions, + PartitionMaxBytesStrategy partitionMaxBytesStrategy) { + super(shareFetch.fetchParams().maxWaitMs, Optional.empty()); + this.shareFetch = shareFetch; this.replicaManager = replicaManager; this.partitionsAcquired = new LinkedHashMap<>(); this.partitionsAlreadyFetched = new LinkedHashMap<>(); - this.sharePartitionManager = sharePartitionManager; + this.exceptionHandler = exceptionHandler; this.sharePartitions = sharePartitions; + this.partitionMaxBytesStrategy = partitionMaxBytesStrategy; } @Override @@ -90,47 +103,57 @@ public void onExpiration() { */ @Override public void onComplete() { + // We are utilizing lock so that onComplete doesn't do a dirty read for instance variables - + // partitionsAcquired and partitionsAlreadyFetched, since these variables can get updated in a different tryComplete thread. + lock.lock(); log.trace("Completing the delayed share fetch request for group {}, member {}, " - + "topic partitions {}", shareFetchData.groupId(), shareFetchData.memberId(), + + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), partitionsAcquired.keySet()); - if (shareFetchData.future().isDone()) - return; + try { + LinkedHashMap topicPartitionData; + // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. + if (partitionsAcquired.isEmpty()) + topicPartitionData = acquirablePartitions(); + // tryComplete invoked forceComplete, so we can use the data from tryComplete. + else + topicPartitionData = partitionsAcquired; - Map topicPartitionData; - // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. - if (partitionsAcquired.isEmpty()) - topicPartitionData = acquirablePartitions(); - // tryComplete invoked forceComplete, so we can use the data from tryComplete. 
- else - topicPartitionData = partitionsAcquired; + if (topicPartitionData.isEmpty()) { + // No locks for share partitions could be acquired, so we complete the request with an empty response. + shareFetch.maybeComplete(Collections.emptyMap()); + return; + } + log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", + topicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); - if (topicPartitionData.isEmpty()) { - // No locks for share partitions could be acquired, so we complete the request with an empty response. - shareFetchData.future().complete(Collections.emptyMap()); - return; + completeShareFetchRequest(topicPartitionData); + } finally { + lock.unlock(); } - log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", - topicPartitionData, shareFetchData.groupId(), shareFetchData.fetchParams()); + } + private void completeShareFetchRequest(LinkedHashMap topicPartitionData) { try { - Map responseData; + LinkedHashMap responseData; if (partitionsAlreadyFetched.isEmpty()) - responseData = readFromLog(topicPartitionData); + responseData = readFromLog( + topicPartitionData, + partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size())); else // There shouldn't be a case when we have a partitionsAlreadyFetched value here and this variable is getting // updated in a different tryComplete thread. responseData = combineLogReadResponse(topicPartitionData, partitionsAlreadyFetched); - Map fetchPartitionsData = new LinkedHashMap<>(); + LinkedHashMap fetchPartitionsData = new LinkedHashMap<>(); for (Map.Entry entry : responseData.entrySet()) fetchPartitionsData.put(entry.getKey(), entry.getValue().toFetchPartitionData(false)); - shareFetchData.future().complete(ShareFetchUtils.processFetchResponse(shareFetchData, fetchPartitionsData, - sharePartitions, replicaManager)); + shareFetch.maybeComplete(ShareFetchUtils.processFetchResponse(shareFetch, fetchPartitionsData, + sharePartitions, replicaManager, exceptionHandler)); } catch (Exception e) { log.error("Error processing delayed share fetch request", e); - sharePartitionManager.handleFetchException(shareFetchData.groupId(), topicPartitionData.keySet(), shareFetchData.future(), e); + handleFetchException(shareFetch, topicPartitionData.keySet(), e); } finally { // Releasing the lock to move ahead with the next request in queue. releasePartitionLocks(topicPartitionData.keySet()); @@ -140,7 +163,7 @@ public void onComplete() { // we directly call delayedShareFetchPurgatory.checkAndComplete replicaManager.addToActionQueue(() -> topicPartitionData.keySet().forEach(topicIdPartition -> replicaManager.completeDelayedShareFetchRequest( - new DelayedShareFetchGroupKey(shareFetchData.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())))); + new DelayedShareFetchGroupKey(shareFetch.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())))); } } @@ -149,16 +172,16 @@ public void onComplete() { */ @Override public boolean tryComplete() { - Map topicPartitionData = acquirablePartitions(); + LinkedHashMap topicPartitionData = acquirablePartitions(); try { if (!topicPartitionData.isEmpty()) { // In case, fetch offset metadata doesn't exist for one or more topic partitions, we do a // replicaManager.readFromLog to populate the offset metadata and update the fetch offset metadata for // those topic partitions. 
- Map replicaManagerReadResponse = maybeReadFromLog(topicPartitionData); - maybeUpdateFetchOffsetMetadata(replicaManagerReadResponse); - if (anyPartitionHasLogReadError(replicaManagerReadResponse) || isMinBytesSatisfied(topicPartitionData)) { + LinkedHashMap replicaManagerReadResponse = maybeReadFromLog(topicPartitionData); + maybeUpdateFetchOffsetMetadata(topicPartitionData, replicaManagerReadResponse); + if (anyPartitionHasLogReadError(replicaManagerReadResponse) || isMinBytesSatisfied(topicPartitionData, partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size()))) { partitionsAcquired = topicPartitionData; partitionsAlreadyFetched = replicaManagerReadResponse; boolean completedByMe = forceComplete(); @@ -170,13 +193,13 @@ public boolean tryComplete() { return completedByMe; } else { log.debug("minBytes is not satisfied for the share fetch request for group {}, member {}, " + - "topic partitions {}", shareFetchData.groupId(), shareFetchData.memberId(), + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet()); releasePartitionLocks(topicPartitionData.keySet()); } } else { log.trace("Can't acquire records for any partition in the share fetch request for group {}, member {}, " + - "topic partitions {}", shareFetchData.groupId(), shareFetchData.memberId(), + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet()); } return false; @@ -193,28 +216,18 @@ public boolean tryComplete() { * Prepare fetch request structure for partitions in the share fetch request for which we can acquire records. */ // Visible for testing - Map acquirablePartitions() { + LinkedHashMap acquirablePartitions() { // Initialize the topic partitions for which the fetch should be attempted. - Map topicPartitionData = new LinkedHashMap<>(); + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); sharePartitions.forEach((topicIdPartition, sharePartition) -> { - int partitionMaxBytes = shareFetchData.partitionMaxBytes().getOrDefault(topicIdPartition, 0); // Add the share partition to the list of partitions to be fetched only if we can // acquire the fetch lock on it. if (sharePartition.maybeAcquireFetchLock()) { try { // If the share partition is already at capacity, we should not attempt to fetch. 
if (sharePartition.canAcquireRecords()) { - topicPartitionData.put( - topicIdPartition, - new FetchRequest.PartitionData( - topicIdPartition.topicId(), - sharePartition.nextFetchOffset(), - 0, - partitionMaxBytes, - Optional.empty() - ) - ); + topicPartitionData.put(topicIdPartition, sharePartition.nextFetchOffset()); } else { sharePartition.releaseFetchLock(); log.trace("Record lock partition limit exceeded for SharePartition {}, " + @@ -230,23 +243,28 @@ Map acquirablePartitions() { return topicPartitionData; } - private Map maybeReadFromLog(Map topicPartitionData) { - Map partitionsMissingFetchOffsetMetadata = new LinkedHashMap<>(); - topicPartitionData.forEach((topicIdPartition, partitionData) -> { + private LinkedHashMap maybeReadFromLog(LinkedHashMap topicPartitionData) { + LinkedHashMap partitionsNotMatchingFetchOffsetMetadata = new LinkedHashMap<>(); + topicPartitionData.forEach((topicIdPartition, fetchOffset) -> { SharePartition sharePartition = sharePartitions.get(topicIdPartition); - if (sharePartition.fetchOffsetMetadata().isEmpty()) { - partitionsMissingFetchOffsetMetadata.put(topicIdPartition, partitionData); + if (sharePartition.fetchOffsetMetadata(fetchOffset).isEmpty()) { + partitionsNotMatchingFetchOffsetMetadata.put(topicIdPartition, fetchOffset); } }); - if (partitionsMissingFetchOffsetMetadata.isEmpty()) { - return Collections.emptyMap(); + if (partitionsNotMatchingFetchOffsetMetadata.isEmpty()) { + return new LinkedHashMap<>(); } // We fetch data from replica manager corresponding to the topic partitions that have missing fetch offset metadata. - return readFromLog(partitionsMissingFetchOffsetMetadata); + // Although we are fetching partition max bytes for partitionsNotMatchingFetchOffsetMetadata, + // we will take acquired partitions size = topicPartitionData.size() because we do not want to let the + // leftover partitions to starve which will be fetched later. + return readFromLog( + partitionsNotMatchingFetchOffsetMetadata, + partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, partitionsNotMatchingFetchOffsetMetadata.keySet(), topicPartitionData.size())); } - private void maybeUpdateFetchOffsetMetadata( - Map replicaManagerReadResponseData) { + private void maybeUpdateFetchOffsetMetadata(LinkedHashMap topicPartitionData, + LinkedHashMap replicaManagerReadResponseData) { for (Map.Entry entry : replicaManagerReadResponseData.entrySet()) { TopicIdPartition topicIdPartition = entry.getKey(); SharePartition sharePartition = sharePartitions.get(topicIdPartition); @@ -256,55 +274,67 @@ private void maybeUpdateFetchOffsetMetadata( replicaManagerLogReadResult, topicIdPartition); continue; } - sharePartition.updateFetchOffsetMetadata(Optional.of(replicaManagerLogReadResult.info().fetchOffsetMetadata)); + sharePartition.updateFetchOffsetMetadata( + topicPartitionData.get(topicIdPartition), + replicaManagerLogReadResult.info().fetchOffsetMetadata); } } // minByes estimation currently assumes the common case where all fetched data is acquirable. 
- private boolean isMinBytesSatisfied(Map topicPartitionData) { + private boolean isMinBytesSatisfied(LinkedHashMap topicPartitionData, + LinkedHashMap partitionMaxBytes) { long accumulatedSize = 0; - for (Map.Entry entry : topicPartitionData.entrySet()) { + for (Map.Entry entry : topicPartitionData.entrySet()) { TopicIdPartition topicIdPartition = entry.getKey(); - FetchRequest.PartitionData partitionData = entry.getValue(); - LogOffsetMetadata endOffsetMetadata = endOffsetMetadataForTopicPartition(topicIdPartition); + long fetchOffset = entry.getValue(); + + LogOffsetMetadata endOffsetMetadata; + try { + endOffsetMetadata = endOffsetMetadataForTopicPartition(topicIdPartition); + } catch (Exception e) { + shareFetch.addErroneous(topicIdPartition, e); + exceptionHandler.accept( + new SharePartitionKey(shareFetch.groupId(), topicIdPartition), e); + continue; + } if (endOffsetMetadata == LogOffsetMetadata.UNKNOWN_OFFSET_METADATA) continue; SharePartition sharePartition = sharePartitions.get(topicIdPartition); - Optional optionalFetchOffsetMetadata = sharePartition.fetchOffsetMetadata(); + Optional optionalFetchOffsetMetadata = sharePartition.fetchOffsetMetadata(fetchOffset); if (optionalFetchOffsetMetadata.isEmpty() || optionalFetchOffsetMetadata.get() == LogOffsetMetadata.UNKNOWN_OFFSET_METADATA) continue; LogOffsetMetadata fetchOffsetMetadata = optionalFetchOffsetMetadata.get(); if (fetchOffsetMetadata.messageOffset > endOffsetMetadata.messageOffset) { log.debug("Satisfying delayed share fetch request for group {}, member {} since it is fetching later segments of " + - "topicIdPartition {}", shareFetchData.groupId(), shareFetchData.memberId(), topicIdPartition); + "topicIdPartition {}", shareFetch.groupId(), shareFetch.memberId(), topicIdPartition); return true; } else if (fetchOffsetMetadata.messageOffset < endOffsetMetadata.messageOffset) { if (fetchOffsetMetadata.onOlderSegment(endOffsetMetadata)) { // This can happen when the fetch operation is falling behind the current segment or the partition // has just rolled a new segment. log.debug("Satisfying delayed share fetch request for group {}, member {} immediately since it is fetching older " + - "segments of topicIdPartition {}", shareFetchData.groupId(), shareFetchData.memberId(), topicIdPartition); + "segments of topicIdPartition {}", shareFetch.groupId(), shareFetch.memberId(), topicIdPartition); return true; } else if (fetchOffsetMetadata.onSameSegment(endOffsetMetadata)) { // we take the partition fetch size as upper bound when accumulating the bytes. - long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionData.maxBytes); + long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionMaxBytes.get(topicIdPartition)); accumulatedSize += bytesAvailable; } } } - return accumulatedSize >= shareFetchData.fetchParams().minBytes; + return accumulatedSize >= shareFetch.fetchParams().minBytes; } private LogOffsetMetadata endOffsetMetadataForTopicPartition(TopicIdPartition topicIdPartition) { - Partition partition = replicaManager.getPartitionOrException(topicIdPartition.topicPartition()); + Partition partition = ShareFetchUtils.partition(replicaManager, topicIdPartition.topicPartition()); LogOffsetSnapshot offsetSnapshot = partition.fetchOffsetSnapshot(Optional.empty(), true); // The FetchIsolation type that we use for share fetch is FetchIsolation.HIGH_WATERMARK. In the future, we can // extend it to support other FetchIsolation types. 
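DelayedShareFetch now derives per-partition byte limits from partitionMaxBytesStrategy.maxBytes(requestMaxBytes, partitions, acquiredPartitionCount) instead of carrying a maxBytes value per partition in the request, and isMinBytesSatisfied above uses that limit as the upper bound while accumulating available bytes. The division itself is not shown in this diff; the sketch below assumes a uniform split of the request budget across the acquired partitions, with String standing in for TopicIdPartition.

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class UniformMaxBytesSketch {

    // Assumed behaviour of the uniform strategy: divide the request-level maxBytes
    // evenly across the partitions acquired for this share fetch.
    static LinkedHashMap<String, Integer> uniformMaxBytes(int requestMaxBytes,
                                                          Set<String> partitions,
                                                          int acquiredPartitionCount) {
        if (acquiredPartitionCount <= 0) {
            throw new IllegalArgumentException("acquiredPartitionCount must be positive");
        }
        LinkedHashMap<String, Integer> perPartition = new LinkedHashMap<>();
        int share = requestMaxBytes / acquiredPartitionCount;
        partitions.forEach(tp -> perPartition.put(tp, share));
        return perPartition;
    }

    public static void main(String[] args) {
        Set<String> acquired = new LinkedHashSet<>(List.of("topicA-0", "topicA-1", "topicB-0"));
        // A 1 MiB request budget split across three acquired partitions: 349525 bytes each.
        System.out.println(uniformMaxBytes(1024 * 1024, acquired, acquired.size()));
    }
}

Note that maybeReadFromLog passes only the partitions whose cached fetch-offset metadata does not match, yet still uses topicPartitionData.size() as the divisor, so the partitions fetched later keep their share of the budget instead of being starved.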
- FetchIsolation isolationType = shareFetchData.fetchParams().isolation; + FetchIsolation isolationType = shareFetch.fetchParams().isolation; if (isolationType == FetchIsolation.LOG_END) return offsetSnapshot.logEndOffset; else if (isolationType == FetchIsolation.HIGH_WATERMARK) @@ -314,17 +344,35 @@ else if (isolationType == FetchIsolation.HIGH_WATERMARK) } - private Map readFromLog(Map topicPartitionData) { + private LinkedHashMap readFromLog(LinkedHashMap topicPartitionFetchOffsets, + LinkedHashMap partitionMaxBytes) { + // Filter if there already exists any erroneous topic partition. + Set partitionsToFetch = shareFetch.filterErroneousTopicPartitions(topicPartitionFetchOffsets.keySet()); + if (partitionsToFetch.isEmpty()) { + return new LinkedHashMap<>(); + } + + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); + + topicPartitionFetchOffsets.forEach((topicIdPartition, fetchOffset) -> topicPartitionData.put(topicIdPartition, + new FetchRequest.PartitionData( + topicIdPartition.topicId(), + fetchOffset, + 0, + partitionMaxBytes.get(topicIdPartition), + Optional.empty()) + )); + Seq> responseLogResult = replicaManager.readFromLog( - shareFetchData.fetchParams(), + shareFetch.fetchParams(), CollectionConverters.asScala( - topicPartitionData.entrySet().stream().map(entry -> - new Tuple2<>(entry.getKey(), entry.getValue())).collect(Collectors.toList()) + partitionsToFetch.stream().map(topicIdPartition -> + new Tuple2<>(topicIdPartition, topicPartitionData.get(topicIdPartition))).collect(Collectors.toList()) ), QuotaFactory.UNBOUNDED_QUOTA, true); - Map responseData = new HashMap<>(); + LinkedHashMap responseData = new LinkedHashMap<>(); responseLogResult.foreach(tpLogResult -> { responseData.put(tpLogResult._1(), tpLogResult._2()); return BoxedUnit.UNIT; @@ -334,24 +382,50 @@ private Map readFromLog(Map replicaManagerReadResponse) { + private boolean anyPartitionHasLogReadError(LinkedHashMap replicaManagerReadResponse) { return replicaManagerReadResponse.values().stream() .anyMatch(logReadResult -> logReadResult.error().code() != Errors.NONE.code()); } + /** + * The handleFetchException method is used to handle the exception that occurred while reading from log. + * The method will handle the exception for each topic-partition in the request. The share partition + * might get removed from the cache. + *
<p>
          + * The replica read request might error out for one share partition + * but as we cannot determine which share partition errored out, we might remove all the share partitions + * in the request. + * + * @param shareFetch The share fetch request. + * @param topicIdPartitions The topic-partitions in the replica read request. + * @param throwable The exception that occurred while fetching messages. + */ + private void handleFetchException( + ShareFetch shareFetch, + Set topicIdPartitions, + Throwable throwable + ) { + topicIdPartitions.forEach(topicIdPartition -> exceptionHandler.accept( + new SharePartitionKey(shareFetch.groupId(), topicIdPartition), throwable)); + shareFetch.maybeCompleteWithException(topicIdPartitions, throwable); + } + // Visible for testing. - Map combineLogReadResponse(Map topicPartitionData, - Map existingFetchedData) { - Map missingLogReadTopicPartitions = new LinkedHashMap<>(); - topicPartitionData.forEach((topicIdPartition, partitionData) -> { + LinkedHashMap combineLogReadResponse(LinkedHashMap topicPartitionData, + LinkedHashMap existingFetchedData) { + LinkedHashMap missingLogReadTopicPartitions = new LinkedHashMap<>(); + topicPartitionData.forEach((topicIdPartition, fetchOffset) -> { if (!existingFetchedData.containsKey(topicIdPartition)) { - missingLogReadTopicPartitions.put(topicIdPartition, partitionData); + missingLogReadTopicPartitions.put(topicIdPartition, fetchOffset); } }); if (missingLogReadTopicPartitions.isEmpty()) { return existingFetchedData; } - Map missingTopicPartitionsLogReadResponse = readFromLog(missingLogReadTopicPartitions); + + LinkedHashMap missingTopicPartitionsLogReadResponse = readFromLog( + missingLogReadTopicPartitions, + partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, missingLogReadTopicPartitions.keySet(), topicPartitionData.size())); missingTopicPartitionsLogReadResponse.putAll(existingFetchedData); return missingTopicPartitionsLogReadResponse; } @@ -363,4 +437,9 @@ void releasePartitionLocks(Set topicIdPartitions) { sharePartition.releaseFetchLock(); }); } + + // Visible for testing. + Lock lock() { + return lock; + } } diff --git a/core/src/main/java/kafka/server/share/ShareFetchUtils.java b/core/src/main/java/kafka/server/share/ShareFetchUtils.java index 3515362152b02..70f4e702a3090 100644 --- a/core/src/main/java/kafka/server/share/ShareFetchUtils.java +++ b/core/src/main/java/kafka/server/share/ShareFetchUtils.java @@ -28,8 +28,9 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.requests.ListOffsetsRequest; +import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.slf4j.Logger; @@ -40,6 +41,7 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Optional; +import java.util.function.BiConsumer; import scala.Option; import scala.Some; @@ -55,11 +57,11 @@ public class ShareFetchUtils { * by acquiring records from the share partition. 
*/ static Map processFetchResponse( - ShareFetchData shareFetchData, + ShareFetch shareFetch, Map responseData, LinkedHashMap sharePartitions, - ReplicaManager replicaManager - ) { + ReplicaManager replicaManager, + BiConsumer exceptionHandler) { Map response = new HashMap<>(); // Acquired records count for the share fetch request. @@ -84,14 +86,23 @@ static Map processFetchR // response and let the client retry the fetch. This way we do not lose out on the data that // would be returned for other share partitions in the fetch request. if (fetchPartitionData.error.code() == Errors.OFFSET_OUT_OF_RANGE.code()) { - sharePartition.updateCacheAndOffsets(offsetForEarliestTimestamp(topicIdPartition, replicaManager)); + try { + sharePartition.updateCacheAndOffsets(offsetForEarliestTimestamp(topicIdPartition, + replicaManager, sharePartition.leaderEpoch())); + } catch (Exception e) { + log.error("Error while fetching offset for earliest timestamp for topicIdPartition: {}", topicIdPartition, e); + shareFetch.addErroneous(topicIdPartition, e); + exceptionHandler.accept(new SharePartitionKey(shareFetch.groupId(), topicIdPartition), e); + // Do not fill the response for this partition and continue. + continue; + } // We set the error code to NONE, as we have updated the start offset of the share partition // and the client can retry the fetch. partitionData.setErrorCode(Errors.NONE.code()); partitionData.setErrorMessage(Errors.NONE.message()); } } else { - ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire(shareFetchData.memberId(), shareFetchData.maxFetchRecords() - acquiredRecordsCount, fetchPartitionData); + ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire(shareFetch.memberId(), shareFetch.batchSize(), shareFetch.maxFetchRecords() - acquiredRecordsCount, fetchPartitionData); log.trace("Acquired records: {} for topicIdPartition: {}", shareAcquiredRecords, topicIdPartition); // Maybe, in the future, check if no records are acquired, and we want to retry // replica manager fetch. Depends on the share partition manager implementation, @@ -123,13 +134,13 @@ static Map processFetchR * * @return The offset for the earliest timestamp. */ - static long offsetForEarliestTimestamp(TopicIdPartition topicIdPartition, ReplicaManager replicaManager) { + static long offsetForEarliestTimestamp(TopicIdPartition topicIdPartition, ReplicaManager replicaManager, int leaderEpoch) { // Isolation level is only required when reading from the latest offset hence use Option.empty() for now. - Option timestampAndOffset = replicaManager.fetchOffsetForTimestamp( + Optional timestampAndOffset = replicaManager.fetchOffsetForTimestamp( topicIdPartition.topicPartition(), ListOffsetsRequest.EARLIEST_TIMESTAMP, Option.empty(), - Optional.empty(), true).timestampAndOffsetOpt(); + Optional.of(leaderEpoch), true).timestampAndOffsetOpt(); if (timestampAndOffset.isEmpty()) { - throw new OffsetNotAvailableException("offset for Earliest timestamp not found for topic partition: " + topicIdPartition); + throw new OffsetNotAvailableException("Offset for earliest timestamp not found for topic partition: " + topicIdPartition); } return timestampAndOffset.get().offset; } @@ -139,23 +150,41 @@ static long offsetForEarliestTimestamp(TopicIdPartition topicIdPartition, Replic * * @return The offset for the latest timestamp. 
*/ - static long offsetForLatestTimestamp(TopicIdPartition topicIdPartition, ReplicaManager replicaManager) { + static long offsetForLatestTimestamp(TopicIdPartition topicIdPartition, ReplicaManager replicaManager, int leaderEpoch) { // Isolation level is set to READ_UNCOMMITTED, matching with that used in share fetch requests - Option timestampAndOffset = replicaManager.fetchOffsetForTimestamp( + Optional timestampAndOffset = replicaManager.fetchOffsetForTimestamp( topicIdPartition.topicPartition(), ListOffsetsRequest.LATEST_TIMESTAMP, new Some<>(IsolationLevel.READ_UNCOMMITTED), - Optional.empty(), true).timestampAndOffsetOpt(); + Optional.of(leaderEpoch), true).timestampAndOffsetOpt(); if (timestampAndOffset.isEmpty()) { - throw new OffsetNotAvailableException("offset for Latest timestamp not found for topic partition: " + topicIdPartition); + throw new OffsetNotAvailableException("Offset for latest timestamp not found for topic partition: " + topicIdPartition); + } + return timestampAndOffset.get().offset; + } + + /** + * The method is used to get the offset for the given timestamp for the topic-partition. + * + * @return The offset for the given timestamp. + */ + static long offsetForTimestamp(TopicIdPartition topicIdPartition, ReplicaManager replicaManager, long timestampToSearch, int leaderEpoch) { + Optional timestampAndOffset = replicaManager.fetchOffsetForTimestamp( + topicIdPartition.topicPartition(), timestampToSearch, new Some<>(IsolationLevel.READ_UNCOMMITTED), Optional.of(leaderEpoch), true).timestampAndOffsetOpt(); + if (timestampAndOffset.isEmpty()) { + throw new OffsetNotAvailableException("Offset for timestamp " + timestampToSearch + " not found for topic partition: " + topicIdPartition); } return timestampAndOffset.get().offset; } static int leaderEpoch(ReplicaManager replicaManager, TopicPartition tp) { + return partition(replicaManager, tp).getLeaderEpoch(); + } + + static Partition partition(ReplicaManager replicaManager, TopicPartition tp) { Partition partition = replicaManager.getPartitionOrException(tp); if (!partition.isLeader()) { log.debug("The broker is not the leader for topic partition: {}-{}", tp.topic(), tp.partition()); throw new NotLeaderOrFollowerException(); } - return partition.getLeaderEpoch(); + return partition; } } diff --git a/core/src/main/java/kafka/server/share/SharePartition.java b/core/src/main/java/kafka/server/share/SharePartition.java index 71baea1017441..a56c55b1b7463 100644 --- a/core/src/main/java/kafka/server/share/SharePartition.java +++ b/core/src/main/java/kafka/server/share/SharePartition.java @@ -17,6 +17,7 @@ package kafka.server.share; import kafka.server.ReplicaManager; +import kafka.server.share.SharePartitionManager.SharePartitionListener; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicIdPartition; @@ -36,6 +37,7 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfig; import org.apache.kafka.coordinator.group.GroupConfigManager; +import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; @@ -75,6 +77,7 @@ import static kafka.server.share.ShareFetchUtils.offsetForEarliestTimestamp; import static kafka.server.share.ShareFetchUtils.offsetForLatestTimestamp; +import static 
kafka.server.share.ShareFetchUtils.offsetForTimestamp; /** * The SharePartition is used to track the state of a partition that is shared between multiple @@ -267,6 +270,11 @@ public static RecordState forId(byte id) { */ private final Persister persister; + /** + * The listener is used to notify the share partition manager when the share partition state changes. + */ + private final SharePartitionListener listener; + /** * The share partition start offset specifies the partition start offset from which the records * are cached in the cachedState of the sharePartition. @@ -280,9 +288,9 @@ public static RecordState forId(byte id) { private long endOffset; /** - * We maintain the latest fetch offset metadata to estimate the minBytes requirement more efficiently. + * We maintain the latest fetch offset and its metadata to estimate the minBytes requirement more efficiently. */ - private Optional fetchOffsetMetadata; + private final OffsetMetadata fetchOffsetMetadata; /** * The state epoch is used to track the version of the state of the share partition. @@ -311,10 +319,11 @@ public static RecordState forId(byte id) { Time time, Persister persister, ReplicaManager replicaManager, - GroupConfigManager groupConfigManager + GroupConfigManager groupConfigManager, + SharePartitionListener listener ) { this(groupId, topicIdPartition, leaderEpoch, maxInFlightMessages, maxDeliveryCount, defaultRecordLockDurationMs, - timer, time, persister, replicaManager, groupConfigManager, SharePartitionState.EMPTY); + timer, time, persister, replicaManager, groupConfigManager, SharePartitionState.EMPTY, listener); } SharePartition( @@ -329,7 +338,8 @@ public static RecordState forId(byte id) { Persister persister, ReplicaManager replicaManager, GroupConfigManager groupConfigManager, - SharePartitionState sharePartitionState + SharePartitionState sharePartitionState, + SharePartitionListener listener ) { this.groupId = groupId; this.topicIdPartition = topicIdPartition; @@ -347,6 +357,8 @@ public static RecordState forId(byte id) { this.partitionState = sharePartitionState; this.replicaManager = replicaManager; this.groupConfigManager = groupConfigManager; + this.fetchOffsetMetadata = new OffsetMetadata(); + this.listener = listener; } /** @@ -360,44 +372,47 @@ public static RecordState forId(byte id) { */ public CompletableFuture maybeInitialize() { log.debug("Maybe initialize share partition: {}-{}", groupId, topicIdPartition); - CompletableFuture future = new CompletableFuture<>(); // Check if the share partition is already initialized. - maybeCompleteInitialization(future); - if (future.isDone()) { - return future; + try { + if (initializedOrThrowException()) return CompletableFuture.completedFuture(null); + } catch (Exception e) { + return CompletableFuture.failedFuture(e); } + // If code reaches here then the share partition is not initialized. Initialize the share partition. // All the pending requests should wait to get completed before the share partition is initialized. - // Attain lock to avoid any concurrent requests to be processed. - lock.writeLock().lock(); + // Attain lock while updating the state to avoid any concurrent requests to be processed. try { - // Re-check the state to verify if previous requests has already initialized the share partition. 
- maybeCompleteInitialization(future); - if (future.isDone()) { - return future; - } + if (!emptyToInitialState()) return CompletableFuture.completedFuture(null); + } catch (Exception e) { + return CompletableFuture.failedFuture(e); + } - // Update state to initializing to avoid any concurrent requests to be processed. - partitionState = SharePartitionState.INITIALIZING; - // Initialize the share partition by reading the state from the persister. - persister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(this.groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(), - Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(topicIdPartition.partition(), leaderEpoch))))) - .build()) - .build() - ).whenComplete((result, exception) -> { + // The share partition is not initialized, hence try to initialize it. There shall be only one + // request trying to initialize the share partition. + CompletableFuture future = new CompletableFuture<>(); + // Initialize the share partition by reading the state from the persister. + persister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(this.groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(), + Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(topicIdPartition.partition(), leaderEpoch))))) + .build()) + .build() + ).whenComplete((result, exception) -> { + Throwable throwable = null; + lock.writeLock().lock(); + try { if (exception != null) { log.error("Failed to initialize the share partition: {}-{}", groupId, topicIdPartition, exception); - completeInitializationWithException(future, exception); + throwable = exception; return; } if (result == null || result.topicsData() == null || result.topicsData().size() != 1) { log.error("Failed to initialize the share partition: {}-{}. Invalid state found: {}.", groupId, topicIdPartition, result); - completeInitializationWithException(future, new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -405,7 +420,7 @@ public CompletableFuture maybeInitialize() { if (state.topicId() != topicIdPartition.topicId() || state.partitions().size() != 1) { log.error("Failed to initialize the share partition: {}-{}. Invalid topic partition response: {}.", groupId, topicIdPartition, result); - completeInitializationWithException(future, new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -413,7 +428,7 @@ public CompletableFuture maybeInitialize() { if (partitionData.partition() != topicIdPartition.partition()) { log.error("Failed to initialize the share partition: {}-{}. 
Invalid partition response: {}.", groupId, topicIdPartition, partitionData); - completeInitializationWithException(future, new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -421,16 +436,11 @@ public CompletableFuture maybeInitialize() { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); log.error("Failed to initialize the share partition: {}-{}. Exception occurred: {}.", groupId, topicIdPartition, partitionData); - completeInitializationWithException(future, ex); + throwable = ex; return; } - try { - startOffset = startOffsetDuringInitialization(partitionData.startOffset()); - } catch (Exception e) { - completeInitializationWithException(future, e); - return; - } + startOffset = startOffsetDuringInitialization(partitionData.startOffset()); stateEpoch = partitionData.stateEpoch(); List stateBatches = partitionData.stateBatches(); @@ -439,7 +449,7 @@ public CompletableFuture maybeInitialize() { log.error("Invalid state batch found for the share partition: {}-{}. The base offset: {}" + " is less than the start offset: {}.", groupId, topicIdPartition, stateBatch.firstOffset(), startOffset); - completeInitializationWithException(future, new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } InFlightBatch inFlightBatch = new InFlightBatch(EMPTY_MEMBER_ID, stateBatch.firstOffset(), @@ -451,23 +461,32 @@ public CompletableFuture maybeInitialize() { // If the cachedState is not empty, findNextFetchOffset flag is set to true so that any AVAILABLE records // in the cached state are not missed findNextFetchOffset.set(true); - updateEndOffsetAndResetFetchOffsetMetadata(cachedState.lastEntry().getValue().lastOffset()); + endOffset = cachedState.lastEntry().getValue().lastOffset(); // In case the persister read state RPC result contains no AVAILABLE records, we can update cached state // and start/end offsets. maybeUpdateCachedStateAndOffsets(); } else { - updateEndOffsetAndResetFetchOffsetMetadata(startOffset); + endOffset = startOffset; } // Set the partition state to Active and complete the future. partitionState = SharePartitionState.ACTIVE; - future.complete(null); - }); - } catch (Exception e) { - log.error("Failed to initialize the share partition: {}-{}", groupId, topicIdPartition, e); - completeInitializationWithException(future, e); - } finally { - lock.writeLock().unlock(); - } + } catch (Exception e) { + throwable = e; + } finally { + boolean isFailed = throwable != null; + if (isFailed) { + partitionState = SharePartitionState.FAILED; + } + // Release the lock. + lock.writeLock().unlock(); + // Complete the future. + if (isFailed) { + future.completeExceptionally(throwable); + } else { + future.complete(null); + } + } + }); return future; } @@ -561,6 +580,7 @@ public long nextFetchOffset() { * fetched from the leader. * * @param memberId The member id of the client that is fetching the record. + * @param batchSize The number of records per acquired records batch. 
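[Editor's illustration] The rewritten maybeInitialize() above records any failure in a local variable inside the persister callback, flips the partition state and releases the write lock in the finally block, and only then completes the returned future, so callbacks chained on the future never run while the lock is held. A generic sketch of that ordering follows, with hypothetical names (CompleteOutsideLockSketch, persisterRead); it illustrates the pattern, not the actual SharePartition code.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class CompleteOutsideLockSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private String state = "EMPTY";

    CompletableFuture<Void> initialize(CompletableFuture<String> persisterRead) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        persisterRead.whenComplete((result, exception) -> {
            Throwable throwable = null;
            lock.writeLock().lock();
            try {
                if (exception != null) {
                    throwable = exception;
                    return;
                }
                state = "ACTIVE:" + result;   // state mutation happens under the write lock
            } catch (Exception e) {
                throwable = e;
            } finally {
                boolean failed = throwable != null;
                if (failed) state = "FAILED";
                lock.writeLock().unlock();    // release before completing the future
                if (failed) future.completeExceptionally(throwable);
                else future.complete(null);
            }
        });
        return future;
    }

    public static void main(String[] args) {
        CompleteOutsideLockSketch sketch = new CompleteOutsideLockSketch();
        sketch.initialize(CompletableFuture.completedFuture("offsets"))
              .whenComplete((v, t) -> System.out.println("initialized, state=" + sketch.state));
    }
}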
* @param maxFetchRecords The maximum number of records that should be acquired, this is a soft * limit and the method might acquire more records than the maxFetchRecords, * if the records are already part of the same fetch batch. @@ -570,6 +590,7 @@ public long nextFetchOffset() { @SuppressWarnings("cyclomaticcomplexity") // Consider refactoring to avoid suppression public ShareAcquiredRecords acquire( String memberId, + int batchSize, int maxFetchRecords, FetchPartitionData fetchPartitionData ) { @@ -608,9 +629,8 @@ public ShareAcquiredRecords acquire( if (subMap.isEmpty()) { log.trace("No cached data exists for the share partition for requested fetch batch: {}-{}", groupId, topicIdPartition); - AcquiredRecords acquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), - firstBatch.baseOffset(), lastBatch.lastOffset(), maxFetchRecords); - return ShareAcquiredRecords.fromAcquiredRecords(acquiredRecords); + return acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), + firstBatch.baseOffset(), lastBatch.lastOffset(), batchSize, maxFetchRecords); } log.trace("Overlap exists with in-flight records. Acquire the records if available for" @@ -642,7 +662,7 @@ public ShareAcquiredRecords acquire( // acquire subset of offsets from the in-flight batch but only if the // complete batch is available yet. Hence, do a pre-check to avoid exploding // the in-flight offset tracking unnecessarily. - if (inFlightBatch.batchState() != RecordState.AVAILABLE) { + if (inFlightBatch.batchState() != RecordState.AVAILABLE || inFlightBatch.batchHasOngoingStateTransition()) { log.trace("The batch is not available to acquire in share partition: {}-{}, skipping: {}" + " skipping offset tracking for batch as well.", groupId, topicIdPartition, inFlightBatch); @@ -661,7 +681,7 @@ public ShareAcquiredRecords acquire( } // The in-flight batch is a full match hence change the state of the complete batch. - if (inFlightBatch.batchState() != RecordState.AVAILABLE) { + if (inFlightBatch.batchState() != RecordState.AVAILABLE || inFlightBatch.batchHasOngoingStateTransition()) { log.trace("The batch is not available to acquire in share partition: {}-{}, skipping: {}", groupId, topicIdPartition, inFlightBatch); continue; @@ -689,11 +709,11 @@ public ShareAcquiredRecords acquire( // missing records as well. if (acquiredCount < maxFetchRecords && subMap.lastEntry().getValue().lastOffset() < lastBatch.lastOffset()) { log.trace("There exists another batch which needs to be acquired as well"); - AcquiredRecords acquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), + ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), subMap.lastEntry().getValue().lastOffset() + 1, - lastBatch.lastOffset(), maxFetchRecords - acquiredCount); - result.add(acquiredRecords); - acquiredCount += (int) (acquiredRecords.lastOffset() - acquiredRecords.firstOffset() + 1); + lastBatch.lastOffset(), batchSize, maxFetchRecords - acquiredCount); + result.addAll(shareAcquiredRecords.acquiredRecords()); + acquiredCount += shareAcquiredRecords.count(); } return new ShareAcquiredRecords(result, acquiredCount); } finally { @@ -943,7 +963,7 @@ void updateCacheAndOffsets(long logStartOffset) { // If the cached state is empty, then the start and end offset will be the new log start offset. // This can occur during the initialization of share partition if LSO has moved. 
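[Editor's illustration] The acquire() changes above skip any in-flight batch or offset that is either not AVAILABLE or still has an uncommitted state transition, so records cannot be re-acquired while a previous acknowledgement is in flight. A toy sketch of that filter follows; the InFlight record and states are hypothetical simplifications of Kafka's in-flight tracking.

import java.util.LinkedHashMap;
import java.util.Map;

final class AcquireFilterSketch {
    enum RecordState { AVAILABLE, ACQUIRED, ACKNOWLEDGED, ARCHIVED }

    // firstOffset -> (state, hasOngoingStateTransition)
    record InFlight(RecordState state, boolean ongoingTransition) { }

    static int acquireAvailable(Map<Long, InFlight> inFlight) {
        int acquired = 0;
        for (Map.Entry<Long, InFlight> entry : inFlight.entrySet()) {
            InFlight batch = entry.getValue();
            // Mirror of the guard above: skip anything not AVAILABLE or still transitioning.
            if (batch.state() != RecordState.AVAILABLE || batch.ongoingTransition())
                continue;
            entry.setValue(new InFlight(RecordState.ACQUIRED, false));
            acquired++;
        }
        return acquired;
    }

    public static void main(String[] args) {
        Map<Long, InFlight> inFlight = new LinkedHashMap<>();
        inFlight.put(0L, new InFlight(RecordState.AVAILABLE, false));  // acquirable
        inFlight.put(10L, new InFlight(RecordState.AVAILABLE, true));  // transition in flight, skipped
        inFlight.put(20L, new InFlight(RecordState.ACQUIRED, false));  // already acquired, skipped
        System.out.println(acquireAvailable(inFlight));                // 1
    }
}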
startOffset = logStartOffset; - updateEndOffsetAndResetFetchOffsetMetadata(logStartOffset); + endOffset = logStartOffset; return; } @@ -961,7 +981,7 @@ void updateCacheAndOffsets(long logStartOffset) { // This case means that the cached state is completely fresh now. // Example scenario - batch of 0-10 in acquired state in cached state, then LSO moves to 15, // then endOffset should be 15 as well. - updateEndOffsetAndResetFetchOffsetMetadata(startOffset); + endOffset = startOffset; } // Note - @@ -1082,8 +1102,8 @@ boolean canAcquireRecords() { /** * Prior to fetching records from the leader, the fetch lock is acquired to ensure that the same - * share partition does not enter a fetch queue while another one is being fetched within the queue. - * The fetch lock is released once the records are fetched from the leader. + * share partition is not fetched concurrently by multiple clients. The fetch lock is released once + * the records are fetched and acquired. * * @return A boolean which indicates whether the fetch lock is acquired. */ @@ -1113,48 +1133,56 @@ void markFenced() { } } + /** + * Returns the share partition listener. + * + * @return The share partition listener. + */ + SharePartitionListener listener() { + return this.listener; + } + + int leaderEpoch() { + return leaderEpoch; + } + private boolean stateNotActive() { return partitionState() != SharePartitionState.ACTIVE; } - private void completeInitializationWithException(CompletableFuture future, Throwable exception) { + private boolean emptyToInitialState() { lock.writeLock().lock(); try { - partitionState = SharePartitionState.FAILED; + if (initializedOrThrowException()) return false; + partitionState = SharePartitionState.INITIALIZING; + return true; } finally { lock.writeLock().unlock(); } - future.completeExceptionally(exception); } - private void maybeCompleteInitialization(CompletableFuture future) { + private boolean initializedOrThrowException() { SharePartitionState currentState = partitionState(); - switch (currentState) { - case ACTIVE: - future.complete(null); - return; - case FAILED: - future.completeExceptionally(new IllegalStateException(String.format("Share partition failed to load %s-%s", groupId, topicIdPartition))); - return; - case INITIALIZING: - future.completeExceptionally(new LeaderNotAvailableException(String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition))); - return; - case FENCED: - future.completeExceptionally(new FencedStateEpochException(String.format("Share partition is fenced %s-%s", groupId, topicIdPartition))); - return; - case EMPTY: - // Do not complete the future as the share partition is not yet initialized. - break; - default: - throw new IllegalStateException("Unknown share partition state: " + currentState); - } + return switch (currentState) { + case ACTIVE -> true; + case FAILED -> throw new IllegalStateException( + String.format("Share partition failed to load %s-%s", groupId, topicIdPartition)); + case INITIALIZING -> throw new LeaderNotAvailableException( + String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition)); + case FENCED -> throw new FencedStateEpochException( + String.format("Share partition is fenced %s-%s", groupId, topicIdPartition)); + case EMPTY -> + // The share partition is not yet initialized. 
+ false; + }; } - private AcquiredRecords acquireNewBatchRecords( + private ShareAcquiredRecords acquireNewBatchRecords( String memberId, Iterable batches, long firstOffset, long lastOffset, + int batchSize, int maxFetchRecords ) { lock.writeLock().lock(); @@ -1178,25 +1206,74 @@ private AcquiredRecords acquireNewBatchRecords( lastAcquiredOffset = lastOffsetFromBatchWithRequestOffset(batches, firstAcquiredOffset + maxFetchRecords - 1); } - // Schedule acquisition lock timeout for the batch. - AcquisitionLockTimerTask timerTask = scheduleAcquisitionLockTimeout(memberId, firstAcquiredOffset, lastAcquiredOffset); - // Add the new batch to the in-flight records along with the acquisition lock timeout task for the batch. - cachedState.put(firstAcquiredOffset, new InFlightBatch( - memberId, - firstAcquiredOffset, - lastAcquiredOffset, - RecordState.ACQUIRED, - 1, - timerTask)); + // Create batches of acquired records. + List acquiredRecords = createBatches(memberId, batches, firstAcquiredOffset, lastAcquiredOffset, batchSize); // if the cachedState was empty before acquiring the new batches then startOffset needs to be updated if (cachedState.firstKey() == firstAcquiredOffset) { startOffset = firstAcquiredOffset; } - updateEndOffsetAndResetFetchOffsetMetadata(lastAcquiredOffset); - return new AcquiredRecords() - .setFirstOffset(firstAcquiredOffset) + endOffset = lastAcquiredOffset; + return new ShareAcquiredRecords(acquiredRecords, (int) (lastAcquiredOffset - firstAcquiredOffset + 1)); + } finally { + lock.writeLock().unlock(); + } + } + + private List createBatches( + String memberId, + Iterable batches, + long firstAcquiredOffset, + long lastAcquiredOffset, + int batchSize + ) { + lock.writeLock().lock(); + try { + List result = new ArrayList<>(); + long currentFirstOffset = firstAcquiredOffset; + // No split of batches is required if the batch size is greater than records which + // can be acquired, else split the batch into multiple batches. + if (lastAcquiredOffset - firstAcquiredOffset + 1 > batchSize) { + // The batch is split into multiple batches considering batch size. + // Note: Try reading only the baseOffset of the batch and avoid reading the lastOffset + // as lastOffset call of RecordBatch is expensive (loads headers). + for (RecordBatch batch : batches) { + long batchBaseOffset = batch.baseOffset(); + // Check if the batch is already past the last acquired offset then break. + if (batchBaseOffset > lastAcquiredOffset) { + // Break the loop and the last batch will be processed outside the loop. + break; + } + + // Create new batch once the batch size is reached. + if (batchBaseOffset - currentFirstOffset >= batchSize) { + result.add(new AcquiredRecords() + .setFirstOffset(currentFirstOffset) + .setLastOffset(batchBaseOffset - 1) + .setDeliveryCount((short) 1)); + currentFirstOffset = batchBaseOffset; + } + } + } + // Add the last batch or the only batch if the batch size is greater than the records which + // can be acquired. + result.add(new AcquiredRecords() + .setFirstOffset(currentFirstOffset) .setLastOffset(lastAcquiredOffset) - .setDeliveryCount((short) 1); + .setDeliveryCount((short) 1)); + + result.forEach(acquiredRecords -> { + // Schedule acquisition lock timeout for the batch. + AcquisitionLockTimerTask timerTask = scheduleAcquisitionLockTimeout(memberId, acquiredRecords.firstOffset(), acquiredRecords.lastOffset()); + // Add the new batch to the in-flight records along with the acquisition lock timeout task for the batch. 
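[Editor's illustration] emptyToInitialState() and initializedOrThrowException() above cooperate so that exactly one caller wins the right to initialize: the EMPTY-to-INITIALIZING flip happens under the write lock, ACTIVE short-circuits, and every other state surfaces an error. A compact stand-alone sketch of that guard is below, using a hypothetical enum and plain IllegalStateException in place of the Kafka-specific exception types.

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class InitializationGuardSketch {
    enum State { EMPTY, INITIALIZING, ACTIVE, FAILED, FENCED }

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private State state = State.EMPTY;

    // Returns true when the caller won the right to initialize, false when already ACTIVE,
    // and throws for FAILED/FENCED/INITIALIZING.
    boolean emptyToInitialState() {
        lock.writeLock().lock();
        try {
            if (initializedOrThrow()) return false;
            state = State.INITIALIZING;
            return true;
        } finally {
            lock.writeLock().unlock();
        }
    }

    private boolean initializedOrThrow() {
        return switch (state) {
            case ACTIVE -> true;
            case EMPTY -> false; // not yet initialized, caller may initialize
            case INITIALIZING -> throw new IllegalStateException("already initializing");
            case FAILED, FENCED -> throw new IllegalStateException("unusable state: " + state);
        };
    }

    public static void main(String[] args) {
        InitializationGuardSketch guard = new InitializationGuardSketch();
        System.out.println(guard.emptyToInitialState()); // true, first caller initializes
        try {
            guard.emptyToInitialState();                  // second caller is rejected
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}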
+ cachedState.put(acquiredRecords.firstOffset(), new InFlightBatch( + memberId, + acquiredRecords.firstOffset(), + acquiredRecords.lastOffset(), + RecordState.ACQUIRED, + 1, + timerTask)); + }); + return result; } finally { lock.writeLock().unlock(); } @@ -1224,10 +1301,9 @@ private int acquireSubsetBatchRecords( break; } - if (offsetState.getValue().state != RecordState.AVAILABLE) { - log.trace("The offset is not available skipping, offset: {} batch: {}" - + " for the share partition: {}-{}", offsetState.getKey(), inFlightBatch, - groupId, topicIdPartition); + if (offsetState.getValue().state != RecordState.AVAILABLE || offsetState.getValue().hasOngoingStateTransition()) { + log.trace("The offset {} is not available in share partition: {}-{}, skipping: {}", + offsetState.getKey(), groupId, topicIdPartition, inFlightBatch); continue; } @@ -1592,27 +1668,21 @@ private Optional acknowledgeCompleteBatch( return Optional.empty(); } - // The caller of this function is expected to hold lock.writeLock() when calling this method. - protected void updateEndOffsetAndResetFetchOffsetMetadata(long updatedEndOffset) { - endOffset = updatedEndOffset; - fetchOffsetMetadata = Optional.empty(); - } - - protected void updateFetchOffsetMetadata(Optional fetchOffsetMetadata) { + protected void updateFetchOffsetMetadata(long nextFetchOffset, LogOffsetMetadata logOffsetMetadata) { lock.writeLock().lock(); try { - this.fetchOffsetMetadata = fetchOffsetMetadata; + fetchOffsetMetadata.updateOffsetMetadata(nextFetchOffset, logOffsetMetadata); } finally { lock.writeLock().unlock(); } } - protected Optional fetchOffsetMetadata() { + protected Optional fetchOffsetMetadata(long nextFetchOffset) { lock.readLock().lock(); try { - if (findNextFetchOffset.get()) + if (fetchOffsetMetadata.offsetMetadata() == null || fetchOffsetMetadata.offset() != nextFetchOffset) return Optional.empty(); - return fetchOffsetMetadata; + return Optional.of(fetchOffsetMetadata.offsetMetadata()); } finally { lock.readLock().unlock(); } @@ -1650,8 +1720,13 @@ void rollbackOrProcessStateUpdates( future.complete(null); return; } + } finally { + lock.writeLock().unlock(); + } - writeShareGroupState(stateBatches).whenComplete((result, exception) -> { + writeShareGroupState(stateBatches).whenComplete((result, exception) -> { + lock.writeLock().lock(); + try { if (exception != null) { log.error("Failed to write state to persister for the share partition: {}-{}", groupId, topicIdPartition, exception); @@ -1670,10 +1745,10 @@ void rollbackOrProcessStateUpdates( // Update the cached state and start and end offsets after acknowledging/releasing the acquired records. maybeUpdateCachedStateAndOffsets(); future.complete(null); - }); - } finally { - lock.writeLock().unlock(); - } + } finally { + lock.writeLock().unlock(); + } + }); } private void maybeUpdateCachedStateAndOffsets() { @@ -1696,7 +1771,7 @@ private void maybeUpdateCachedStateAndOffsets() { long lastCachedOffset = cachedState.lastEntry().getValue().lastOffset(); if (lastOffsetAcknowledged == lastCachedOffset) { startOffset = lastCachedOffset + 1; // The next offset that will be fetched and acquired in the share partition - updateEndOffsetAndResetFetchOffsetMetadata(lastCachedOffset + 1); + endOffset = lastCachedOffset + 1; cachedState.clear(); // Nothing further to do. return; @@ -1731,10 +1806,7 @@ be removed once all the messages (0-99) are acknowledged (ACCEPT or REJECT). 
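[Editor's illustration] The createBatches() logic above splits an acquired offset range into acquired-records batches of roughly batchSize offsets, cutting only at the base offsets of the fetched record batches so a record batch is never split. The following self-contained sketch mirrors that splitting with plain longs; OffsetRange, splitIntoBatches and batchBaseOffsets are hypothetical stand-ins, not the Kafka types.

import java.util.ArrayList;
import java.util.List;

final class AcquiredBatchSplitSketch {
    record OffsetRange(long firstOffset, long lastOffset) { }

    // batchBaseOffsets: base offsets of the fetched record batches, in ascending order.
    static List<OffsetRange> splitIntoBatches(List<Long> batchBaseOffsets,
                                              long firstAcquiredOffset,
                                              long lastAcquiredOffset,
                                              int batchSize) {
        List<OffsetRange> result = new ArrayList<>();
        long currentFirstOffset = firstAcquiredOffset;
        // No split is needed when the whole range fits within a single batchSize window.
        if (lastAcquiredOffset - firstAcquiredOffset + 1 > batchSize) {
            for (long baseOffset : batchBaseOffsets) {
                if (baseOffset > lastAcquiredOffset)
                    break; // remaining offsets are covered by the trailing batch below
                if (baseOffset - currentFirstOffset >= batchSize) {
                    result.add(new OffsetRange(currentFirstOffset, baseOffset - 1));
                    currentFirstOffset = baseOffset;
                }
            }
        }
        // The trailing batch, or the only batch when no split was required.
        result.add(new OffsetRange(currentFirstOffset, lastAcquiredOffset));
        return result;
    }

    public static void main(String[] args) {
        // Record batches start at offsets 0, 10, 20, 30; acquire 0..34 with batchSize 15.
        System.out.println(splitIntoBatches(List.of(0L, 10L, 20L, 30L), 0, 34, 15));
        // Per the logic above: [0..19] and [20..34] -- splits land only on batch boundaries.
    }
}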
} if (lastKeyToRemove != -1) { - NavigableMap subMap = cachedState.subMap(firstKeyToRemove, true, lastKeyToRemove, true); - for (Long key : subMap.keySet()) { - cachedState.remove(key); - } + cachedState.subMap(firstKeyToRemove, true, lastKeyToRemove, true).clear(); } } finally { lock.writeLock().unlock(); @@ -2011,8 +2083,8 @@ private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFli stateBatches.add(new PersisterStateBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset(), updateResult.state.id, (short) updateResult.deliveryCount)); - // Update acquisition lock timeout task for the batch to null since it is completed now. - updateResult.updateAcquisitionLockTimeoutTask(null); + // Cancel the acquisition lock timeout task for the batch since it is completed now. + updateResult.cancelAndClearAcquisitionLockTimeoutTask(); if (updateResult.state != RecordState.ARCHIVED) { findNextFetchOffset.set(true); } @@ -2058,8 +2130,8 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), updateResult.state.id, (short) updateResult.deliveryCount)); - // Update acquisition lock timeout task for the offset to null since it is completed now. - updateResult.updateAcquisitionLockTimeoutTask(null); + // Cancel the acquisition lock timeout task for the offset since it is completed now. + updateResult.cancelAndClearAcquisitionLockTimeoutTask(); if (updateResult.state != RecordState.ARCHIVED) { findNextFetchOffset.set(true); } @@ -2071,16 +2143,21 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro if (partitionDataStartOffset != PartitionFactory.UNINITIALIZED_START_OFFSET) { return partitionDataStartOffset; } - GroupConfig.ShareGroupAutoOffsetReset offsetResetStrategy; + ShareGroupAutoOffsetResetStrategy offsetResetStrategy; if (groupConfigManager.groupConfig(groupId).isPresent()) { offsetResetStrategy = groupConfigManager.groupConfig(groupId).get().shareAutoOffsetReset(); } else { offsetResetStrategy = GroupConfig.defaultShareAutoOffsetReset(); } - if (offsetResetStrategy == GroupConfig.ShareGroupAutoOffsetReset.EARLIEST) - return offsetForEarliestTimestamp(topicIdPartition, replicaManager); - return offsetForLatestTimestamp(topicIdPartition, replicaManager); + if (offsetResetStrategy.type() == ShareGroupAutoOffsetResetStrategy.StrategyType.LATEST) { + return offsetForLatestTimestamp(topicIdPartition, replicaManager, leaderEpoch); + } else if (offsetResetStrategy.type() == ShareGroupAutoOffsetResetStrategy.StrategyType.EARLIEST) { + return offsetForEarliestTimestamp(topicIdPartition, replicaManager, leaderEpoch); + } else { + // offsetResetStrategy type is BY_DURATION + return offsetForTimestamp(topicIdPartition, replicaManager, offsetResetStrategy.timestamp(), leaderEpoch); + } } // Visible for testing. Should only be used for testing purposes. @@ -2196,10 +2273,7 @@ long lastOffset() { // Visible for testing. RecordState batchState() { - if (batchState == null) { - throw new IllegalStateException("The batch state is not available as the offset state is maintained"); - } - return batchState.state; + return inFlightState().state; } // Visible for testing. @@ -2220,10 +2294,7 @@ int batchDeliveryCount() { // Visible for testing. 
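[Editor's illustration] startOffsetDuringInitialization() above now dispatches on the share group's auto-offset-reset strategy: LATEST and EARLIEST map to the corresponding offset lookup helpers, while BY_DURATION resolves to a timestamp-based lookup. The sketch below shows that dispatch in isolation; the enum, the OffsetLookup interface and the "now minus duration" resolution are simplified stand-ins for ShareGroupAutoOffsetResetStrategy and the ShareFetchUtils helpers, not the actual implementation.

import java.time.Duration;
import java.time.Instant;

final class OffsetResetDispatchSketch {
    enum StrategyType { LATEST, EARLIEST, BY_DURATION }

    // Simplified stand-in for the real offset lookup against the replica manager.
    interface OffsetLookup {
        long earliest();
        long latest();
        long forTimestamp(long timestampMs);
    }

    static long resolveStartOffset(StrategyType type, Duration byDuration, OffsetLookup lookup) {
        return switch (type) {
            case LATEST -> lookup.latest();
            case EARLIEST -> lookup.earliest();
            // Assumed interpretation: BY_DURATION resolves to "now minus the configured
            // duration" and then a timestamp-based offset lookup, as offsetForTimestamp(...) above.
            case BY_DURATION -> lookup.forTimestamp(Instant.now().minus(byDuration).toEpochMilli());
        };
    }

    public static void main(String[] args) {
        OffsetLookup lookup = new OffsetLookup() {
            public long earliest() { return 0L; }
            public long latest() { return 1_000L; }
            public long forTimestamp(long timestampMs) { return 500L; }
        };
        System.out.println(resolveStartOffset(StrategyType.EARLIEST, null, lookup));                   // 0
        System.out.println(resolveStartOffset(StrategyType.BY_DURATION, Duration.ofHours(1), lookup)); // 500
    }
}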
AcquisitionLockTimerTask batchAcquisitionLockTimeoutTask() { - if (batchState == null) { - throw new IllegalStateException("The batch state is not available as the offset state is maintained"); - } - return batchState.acquisitionLockTimeoutTask; + return inFlightState().acquisitionLockTimeoutTask; } // Visible for testing. @@ -2231,11 +2302,19 @@ NavigableMap offsetState() { return offsetState; } - private void archiveBatch(String newMemberId) { + private InFlightState inFlightState() { if (batchState == null) { throw new IllegalStateException("The batch state is not available as the offset state is maintained"); } - batchState.archive(newMemberId); + return batchState; + } + + private boolean batchHasOngoingStateTransition() { + return inFlightState().hasOngoingStateTransition(); + } + + private void archiveBatch(String newMemberId) { + inFlightState().archive(newMemberId); } private InFlightState tryUpdateBatchState(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { @@ -2280,10 +2359,7 @@ private void maybeInitializeOffsetStateUpdate() { } private void updateAcquisitionLockTimeout(AcquisitionLockTimerTask acquisitionLockTimeoutTask) { - if (batchState == null) { - throw new IllegalStateException("The batch state is not available as the offset state is maintained"); - } - batchState.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; + inFlightState().acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; } @Override @@ -2342,7 +2418,10 @@ TimerTask acquisitionLockTimeoutTask() { return acquisitionLockTimeoutTask; } - void updateAcquisitionLockTimeoutTask(AcquisitionLockTimerTask acquisitionLockTimeoutTask) { + void updateAcquisitionLockTimeoutTask(AcquisitionLockTimerTask acquisitionLockTimeoutTask) throws IllegalArgumentException { + if (this.acquisitionLockTimeoutTask != null) { + throw new IllegalArgumentException("Existing acquisition lock timeout exists, cannot override."); + } this.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; } @@ -2351,6 +2430,15 @@ void cancelAndClearAcquisitionLockTimeoutTask() { acquisitionLockTimeoutTask = null; } + private boolean hasOngoingStateTransition() { + if (rollbackState == null) { + // This case could occur when the batch/offset hasn't transitioned even once or the state transitions have + // been committed. + return false; + } + return rollbackState.state != null; + } + /** * Try to update the state of the records. The state of the records can only be updated if the * new state is allowed to be transitioned from old state. The delivery count is not incremented @@ -2426,4 +2514,30 @@ public String toString() { ")"; } } + + /** + * FetchOffsetMetadata class is used to cache offset and its log metadata. + */ + static final class OffsetMetadata { + // This offset could be different from offsetMetadata.messageOffset if it's in the middle of a batch. 
+ private long offset; + private LogOffsetMetadata offsetMetadata; + + OffsetMetadata() { + offset = -1; + } + + long offset() { + return offset; + } + + LogOffsetMetadata offsetMetadata() { + return offsetMetadata; + } + + void updateOffsetMetadata(long offset, LogOffsetMetadata offsetMetadata) { + this.offset = offset; + this.offsetMetadata = offsetMetadata; + } + } } diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 4288dd55703d7..0f4be06b24631 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -16,6 +16,7 @@ */ package kafka.server.share; +import kafka.cluster.PartitionListener; import kafka.server.ReplicaManager; import org.apache.kafka.clients.consumer.AcknowledgeType; @@ -49,7 +50,7 @@ import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.share.persister.Persister; import org.apache.kafka.server.share.session.ShareSession; import org.apache.kafka.server.share.session.ShareSessionCache; @@ -71,10 +72,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; +import java.util.function.BiConsumer; /** * The SharePartitionManager is responsible for managing the SharePartitions and ShareSessions. @@ -236,6 +236,7 @@ private SharePartitionManager( * @param groupId The group id, this is used to identify the share group. * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. * @param fetchParams The fetch parameters from the share fetch request. + * @param batchSize The number of records per acquired records batch. * @param partitionMaxBytes The maximum number of bytes to fetch for each partition. * * @return A future that will be completed with the fetched messages. 
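[Editor's illustration] The OffsetMetadata holder above, together with fetchOffsetMetadata(nextFetchOffset) earlier in this file, caches log metadata keyed by the offset it was computed for and treats the cache as stale whenever a different next-fetch offset is requested. The sketch below isolates that validity rule; CachedOffsetMetadataSketch and Metadata are hypothetical stand-ins for the share partition and LogOffsetMetadata.

import java.util.Optional;

final class CachedOffsetMetadataSketch {
    // Stand-in for Kafka's LogOffsetMetadata.
    record Metadata(long segmentBaseOffset, int relativePositionInSegment) { }

    private long offset = -1;          // offset the cached metadata was computed for
    private Metadata metadata;         // null until the first update

    void update(long nextFetchOffset, Metadata newMetadata) {
        this.offset = nextFetchOffset;
        this.metadata = newMetadata;
    }

    // Return the cached metadata only if it was computed for exactly this offset;
    // otherwise report a miss so the caller re-reads it from the log layer.
    Optional<Metadata> get(long nextFetchOffset) {
        if (metadata == null || offset != nextFetchOffset)
            return Optional.empty();
        return Optional.of(metadata);
    }

    public static void main(String[] args) {
        CachedOffsetMetadataSketch cache = new CachedOffsetMetadataSketch();
        cache.update(42L, new Metadata(0L, 4096));
        System.out.println(cache.get(42L)); // hit: same next-fetch offset
        System.out.println(cache.get(43L)); // miss: the next fetch offset moved on
    }
}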
@@ -244,13 +245,14 @@ public CompletableFuture> fetchMessages( String groupId, String memberId, FetchParams fetchParams, + int batchSize, Map partitionMaxBytes ) { log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", partitionMaxBytes.keySet(), groupId, fetchParams); CompletableFuture> future = new CompletableFuture<>(); - processShareFetch(new ShareFetchData(fetchParams, groupId, memberId, future, partitionMaxBytes, maxFetchRecords)); + processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, partitionMaxBytes, batchSize, maxFetchRecords)); return future; } @@ -273,20 +275,20 @@ public CompletableFuture> futures = new HashMap<>(); + Map> futures = new HashMap<>(); acknowledgeTopics.forEach((topicIdPartition, acknowledgePartitionBatches) -> { SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); if (sharePartition != null) { - CompletableFuture future = new CompletableFuture<>(); + CompletableFuture future = new CompletableFuture<>(); sharePartition.acknowledge(memberId, acknowledgePartitionBatches).whenComplete((result, throwable) -> { if (throwable != null) { - handleFencedSharePartitionException(sharePartitionKey, throwable); - future.complete(Errors.forException(throwable)); + fencedSharePartitionHandler().accept(sharePartitionKey, throwable); + future.complete(throwable); return; } acknowledgePartitionBatches.forEach(batch -> batch.acknowledgeTypes().forEach(this.shareGroupMetrics::recordAcknowledgement)); - future.complete(Errors.NONE); + future.complete(null); }); // If we have an acknowledgement completed for a topic-partition, then we should check if @@ -296,19 +298,11 @@ public CompletableFuture allFutures = CompletableFuture.allOf( - futures.values().toArray(new CompletableFuture[0])); - return allFutures.thenApply(v -> { - Map result = new HashMap<>(); - futures.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(topicIdPartition.partition()) - .setErrorCode(future.join().code()))); - return result; - }); + return mapAcknowledgementFutures(futures); } /** @@ -343,22 +337,22 @@ public CompletableFuture> futuresMap = new HashMap<>(); + Map> futuresMap = new HashMap<>(); topicIdPartitions.forEach(topicIdPartition -> { SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); if (sharePartition == null) { log.error("No share partition found for groupId {} topicPartition {} while releasing acquired topic partitions", groupId, topicIdPartition); - futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION)); + futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION.exception())); } else { - CompletableFuture future = new CompletableFuture<>(); + CompletableFuture future = new CompletableFuture<>(); sharePartition.releaseAcquiredRecords(memberId).whenComplete((result, throwable) -> { if (throwable != null) { - handleFencedSharePartitionException(sharePartitionKey, throwable); - future.complete(Errors.forException(throwable)); + fencedSharePartitionHandler().accept(sharePartitionKey, throwable); + future.complete(throwable); return; } - future.complete(Errors.NONE); + future.complete(null); }); // If we have a release acquired request completed for a topic-partition, 
then we should check if // there is a pending share fetch request for the topic-partition and complete it. @@ -369,13 +363,24 @@ public CompletableFuture> mapAcknowledgementFutures(Map> futuresMap) { CompletableFuture allFutures = CompletableFuture.allOf( futuresMap.values().toArray(new CompletableFuture[0])); return allFutures.thenApply(v -> { Map result = new HashMap<>(); - futuresMap.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(topicIdPartition.partition()) - .setErrorCode(future.join().code()))); + futuresMap.forEach((topicIdPartition, future) -> { + ShareAcknowledgeResponseData.PartitionData partitionData = new ShareAcknowledgeResponseData.PartitionData() + .setPartitionIndex(topicIdPartition.partition()); + Throwable t = future.join(); + if (t != null) { + partitionData.setErrorCode(Errors.forException(t).code()) + .setErrorMessage(t.getMessage()); + } + result.put(topicIdPartition, partitionData); + }); return result; }); } @@ -498,30 +503,6 @@ public void acknowledgeSessionUpdate(String groupId, ShareRequestMetadata reqMet } } - /** - * The handleFetchException method is used to handle the exception that occurred while reading from log. - * The method will handle the exception for each topic-partition in the request. The share partition - * might get removed from the cache. - *
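[Editor's illustration] mapAcknowledgementFutures() above gathers one CompletableFuture<Throwable> per partition and folds them into a single future that yields a per-partition error (or success) once every acknowledgement has completed. A generic sketch of that fold follows; the String keys and error strings are placeholders for the real TopicIdPartition and PartitionData types.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

final class AcknowledgeFutureFoldSketch {
    static CompletableFuture<Map<String, String>> fold(Map<String, CompletableFuture<Throwable>> futures) {
        CompletableFuture<Void> all = CompletableFuture.allOf(
            futures.values().toArray(new CompletableFuture[0]));
        // Once every per-partition future is done, join() below is non-blocking.
        return all.thenApply(v -> {
            Map<String, String> result = new HashMap<>();
            futures.forEach((partition, future) -> {
                Throwable t = future.join(); // null means the acknowledgement succeeded
                result.put(partition, t == null ? "NONE" : t.getMessage());
            });
            return result;
        });
    }

    public static void main(String[] args) {
        Map<String, CompletableFuture<Throwable>> futures = new HashMap<>();
        futures.put("topic-0", CompletableFuture.completedFuture(null));
        futures.put("topic-1", CompletableFuture.completedFuture(new IllegalStateException("fenced")));
        // Prints both entries, e.g. {topic-0=NONE, topic-1=fenced}.
        fold(futures).thenAccept(System.out::println);
    }
}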

          - * The replica read request might error out for one share partition - * but as we cannot determine which share partition errored out, we might remove all the share partitions - * in the request. - * - * @param groupId The group id in the share fetch request. - * @param topicIdPartitions The topic-partitions in the replica read request. - * @param future The future to complete with the exception. - * @param throwable The exception that occurred while fetching messages. - */ - public void handleFetchException( - String groupId, - Set topicIdPartitions, - CompletableFuture> future, - Throwable throwable - ) { - topicIdPartitions.forEach(topicIdPartition -> handleFencedSharePartitionException(sharePartitionKey(groupId, topicIdPartition), throwable)); - maybeCompleteShareFetchWithException(future, topicIdPartitions, throwable); - } - /** * The cachedTopicIdPartitionsInShareSession method is used to get the cached topic-partitions in the share session. * @@ -564,20 +545,18 @@ private static String partitionsToLogString(Collection partiti } // Visible for testing. - void processShareFetch(ShareFetchData shareFetchData) { - if (shareFetchData.partitionMaxBytes().isEmpty()) { + void processShareFetch(ShareFetch shareFetch) { + if (shareFetch.partitionMaxBytes().isEmpty()) { // If there are no partitions to fetch then complete the future with an empty map. - shareFetchData.future().complete(Collections.emptyMap()); + shareFetch.maybeComplete(Collections.emptyMap()); return; } - // Initialize lazily, if required. - Map erroneous = null; List delayedShareFetchWatchKeys = new ArrayList<>(); LinkedHashMap sharePartitions = new LinkedHashMap<>(); - for (TopicIdPartition topicIdPartition : shareFetchData.partitionMaxBytes().keySet()) { + for (TopicIdPartition topicIdPartition : shareFetch.partitionMaxBytes().keySet()) { SharePartitionKey sharePartitionKey = sharePartitionKey( - shareFetchData.groupId(), + shareFetch.groupId(), topicIdPartition ); @@ -585,15 +564,8 @@ void processShareFetch(ShareFetchData shareFetchData) { try { sharePartition = getOrCreateSharePartition(sharePartitionKey); } catch (Exception e) { - // Complete the whole fetch request with an exception if there is an error processing. - // The exception currently can be thrown only if there is an error while initializing - // the share partition. But skip the processing for other share partitions in the request - // as this situation is not expected. - log.error("Error processing share fetch request", e); - if (erroneous == null) { - erroneous = new HashMap<>(); - } - erroneous.put(topicIdPartition, e); + log.debug("Error processing share fetch request", e); + shareFetch.addErroneous(topicIdPartition, e); // Continue iteration for other partitions in the request. continue; } @@ -601,37 +573,42 @@ void processShareFetch(ShareFetchData shareFetchData) { // We add a key corresponding to each share partition in the request in the group so that when there are // acknowledgements/acquisition lock timeout etc., we have a way to perform checkAndComplete for all // such requests which are delayed because of lack of data to acquire for the share partition. 
- delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(shareFetchData.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())); + DelayedShareFetchKey delayedShareFetchKey = new DelayedShareFetchGroupKey(shareFetch.groupId(), + topicIdPartition.topicId(), topicIdPartition.partition()); + delayedShareFetchWatchKeys.add(delayedShareFetchKey); // We add a key corresponding to each topic partition in the request so that when the HWM is updated // for any topic partition, we have a way to perform checkAndComplete for all such requests which are // delayed because of lack of data to acquire for the topic partition. delayedShareFetchWatchKeys.add(new DelayedShareFetchPartitionKey(topicIdPartition.topicId(), topicIdPartition.partition())); - // The share partition is initialized asynchronously, so we need to wait for it to be initialized. - // But if the share partition is already initialized, then the future will be completed immediately. - // Hence, it's safe to call the maybeInitialize method and then wait for the future to be completed. - // TopicPartitionData list will be populated only if the share partition is already initialized. - sharePartition.maybeInitialize().whenComplete((result, throwable) -> { + + CompletableFuture initializationFuture = sharePartition.maybeInitialize(); + final boolean initialized = initializationFuture.isDone(); + initializationFuture.whenComplete((result, throwable) -> { if (throwable != null) { - // TODO: Complete error handling for initialization. We have to record the error - // for respective share partition as completing the full request might result in - // some acquired records to not being sent: https://issues.apache.org/jira/browse/KAFKA-17510 - maybeCompleteInitializationWithException(sharePartitionKey, shareFetchData.future(), throwable); + handleInitializationException(sharePartitionKey, shareFetch, throwable); } + // Though the share partition is initialized asynchronously, but if already initialized or + // errored then future should be completed immediately. If the initialization is not completed + // immediately then the requests might be waiting in purgatory until the share partition + // is initialized. Hence, trigger the completion of all pending delayed share fetch requests + // for the share partition. + if (!initialized) + replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); }); sharePartitions.put(topicIdPartition, sharePartition); } // If all the partitions in the request errored out, then complete the fetch request with an exception. - if (erroneous != null && erroneous.size() == shareFetchData.partitionMaxBytes().size()) { - completeShareFetchWithException(shareFetchData.future(), erroneous); + if (shareFetch.errorInAllPartitions()) { + shareFetch.maybeComplete(Collections.emptyMap()); // Do not proceed with share fetch processing as all the partitions errored out. return; } - // TODO: If there exists some erroneous partitions then they will not be part of response. - // Add the share fetch to the delayed share fetch purgatory to process the fetch request. - addDelayedShareFetch(new DelayedShareFetch(shareFetchData, replicaManager, this, sharePartitions), delayedShareFetchWatchKeys); + // The request will be added irrespective of whether the share partition is initialized or not. + // Once the share partition is initialized, the delayed share fetch will be completed. 
+ addDelayedShareFetch(new DelayedShareFetch(shareFetch, replicaManager, fencedSharePartitionHandler(), sharePartitions), delayedShareFetchWatchKeys); } private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitionKey) { @@ -639,6 +616,12 @@ private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitio k -> { long start = time.hiResClockMs(); int leaderEpoch = ShareFetchUtils.leaderEpoch(replicaManager, sharePartitionKey.topicIdPartition().topicPartition()); + // Attach listener to Partition which shall invoke partition change handlers. + // However, as there could be multiple share partitions (per group name) for a single topic-partition, + // hence create separate listeners per share partition which holds the share partition key + // to identify the respective share partition. + SharePartitionListener listener = new SharePartitionListener(sharePartitionKey, replicaManager, partitionCacheMap); + replicaManager.maybeAddListener(sharePartitionKey.topicIdPartition().topicPartition(), listener); SharePartition partition = new SharePartition( sharePartitionKey.groupId(), sharePartitionKey.topicIdPartition(), @@ -650,70 +633,121 @@ private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitio time, persister, replicaManager, - groupConfigManager + groupConfigManager, + listener ); this.shareGroupMetrics.partitionLoadTime(start); return partition; }); } - private void maybeCompleteInitializationWithException( + private void handleInitializationException( SharePartitionKey sharePartitionKey, - CompletableFuture> future, + ShareFetch shareFetch, Throwable throwable) { if (throwable instanceof LeaderNotAvailableException) { log.debug("The share partition with key {} is not initialized yet", sharePartitionKey); - // Do not process the fetch request for this partition as the leader is not initialized yet. - // The fetch request will be retried in the next poll. - // TODO: Add the request to delayed fetch purgatory. + // Skip any handling for this error as the share partition is still loading. The request + // to fetch will be added in purgatory and will be completed once either timed out + // or the share partition initialization completes. return; } // Remove the partition from the cache as it's failed to initialize. - partitionCacheMap.remove(sharePartitionKey); - // The partition initialization failed, so complete the request with the exception. - // The server should not be in this state, so log the error on broker and surface the same - // to the client. The broker should not be in this state, investigate the root cause of the error. - log.error("Error initializing share partition with key {}", sharePartitionKey, throwable); - maybeCompleteShareFetchWithException(future, Collections.singletonList(sharePartitionKey.topicIdPartition()), throwable); + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + // The partition initialization failed, so add the partition to the erroneous partitions. + log.debug("Error initializing share partition with key {}", sharePartitionKey, throwable); + shareFetch.addErroneous(sharePartitionKey.topicIdPartition(), throwable); } - private void handleFencedSharePartitionException( + /** + * The method returns a BiConsumer that handles share partition exceptions. The BiConsumer accepts + * a share partition key and a throwable which specifies the exception. + * + * @return A BiConsumer that handles share partition exceptions. 
+ */ + private BiConsumer fencedSharePartitionHandler() { + return (sharePartitionKey, throwable) -> { + if (throwable instanceof NotLeaderOrFollowerException || throwable instanceof FencedStateEpochException || + throwable instanceof GroupIdNotFoundException || throwable instanceof UnknownTopicOrPartitionException) { + log.info("The share partition with key {} is fenced: {}", sharePartitionKey, throwable.getMessage()); + // The share partition is fenced hence remove the partition from map and let the client retry. + // But surface the error to the client so client might take some action i.e. re-fetch + // the metadata and retry the fetch on new leader. + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + } + }; + } + + private SharePartitionKey sharePartitionKey(String groupId, TopicIdPartition topicIdPartition) { + return new SharePartitionKey(groupId, topicIdPartition); + } + + private static void removeSharePartitionFromCache( SharePartitionKey sharePartitionKey, - Throwable throwable + Map map, + ReplicaManager replicaManager ) { - if (throwable instanceof NotLeaderOrFollowerException || throwable instanceof FencedStateEpochException || - throwable instanceof GroupIdNotFoundException || throwable instanceof UnknownTopicOrPartitionException) { - log.info("The share partition with key {} is fenced: {}", sharePartitionKey, throwable.getMessage()); - // The share partition is fenced hence remove the partition from map and let the client retry. - // But surface the error to the client so client might take some action i.e. re-fetch - // the metadata and retry the fetch on new leader. - SharePartition sharePartition = partitionCacheMap.remove(sharePartitionKey); - if (sharePartition != null) { - sharePartition.markFenced(); - } + SharePartition sharePartition = map.remove(sharePartitionKey); + if (sharePartition != null) { + sharePartition.markFenced(); + replicaManager.removeListener(sharePartitionKey.topicIdPartition().topicPartition(), sharePartition.listener()); } } - private void maybeCompleteShareFetchWithException(CompletableFuture> future, - Collection topicIdPartitions, Throwable throwable) { - if (!future.isDone()) { - future.complete(topicIdPartitions.stream().collect(Collectors.toMap( - tp -> tp, tp -> new PartitionData().setErrorCode(Errors.forException(throwable).code()).setErrorMessage(throwable.getMessage())))); + /** + * The SharePartitionListener is used to listen for partition events. The share partition is associated with + * the topic-partition, we need to handle the partition events for the share partition. + *
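[Editor's illustration] fencedSharePartitionHandler() above packages the "remove and fence on specific exceptions" policy as a BiConsumer so the fetch, acknowledge and delayed-fetch paths can share it. The reduced sketch below shows that shape; the exception types, cache and Partition class are simplified stand-ins, not the Kafka NotLeaderOrFollowerException/FencedStateEpochException handling itself.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;

final class FencedHandlerSketch {
    // Simplified stand-ins for SharePartitionKey and SharePartition.
    record Key(String groupId, String topicPartition) { }
    static final class Partition {
        private volatile boolean fenced;
        void markFenced() { fenced = true; }
        boolean isFenced() { return fenced; }
    }

    private final Map<Key, Partition> cache = new ConcurrentHashMap<>();

    // Only a known set of "this partition is no longer usable here" exceptions evicts the
    // entry; any other error leaves the cached partition in place.
    BiConsumer<Key, Throwable> fencedHandler() {
        return (key, throwable) -> {
            if (throwable instanceof IllegalStateException || throwable instanceof UnsupportedOperationException) {
                Partition partition = cache.remove(key);
                if (partition != null)
                    partition.markFenced(); // also the point where a partition listener would be deregistered
            }
        };
    }

    public static void main(String[] args) {
        FencedHandlerSketch sketch = new FencedHandlerSketch();
        Key key = new Key("group-a", "topic-0");
        sketch.cache.put(key, new Partition());
        sketch.fencedHandler().accept(key, new IllegalStateException("not leader"));
        System.out.println(sketch.cache.containsKey(key)); // false, the entry was evicted
    }
}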

          + * The partition cache map stores share partitions against share partition key which comprises + * group and topic-partition. Instead of maintaining a separate map for topic-partition to share partitions, + * we can maintain the share partition key in the listener and create a new listener for each share partition. + */ + static class SharePartitionListener implements PartitionListener { + + private final SharePartitionKey sharePartitionKey; + private final ReplicaManager replicaManager; + private final Map partitionCacheMap; + + SharePartitionListener( + SharePartitionKey sharePartitionKey, + ReplicaManager replicaManager, + Map partitionCacheMap + ) { + this.sharePartitionKey = sharePartitionKey; + this.replicaManager = replicaManager; + this.partitionCacheMap = partitionCacheMap; } - } - private void completeShareFetchWithException(CompletableFuture> future, - Map erroneous) { - future.complete(erroneous.entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, entry -> { - Throwable t = entry.getValue(); - return new PartitionData().setErrorCode(Errors.forException(t).code()).setErrorMessage(t.getMessage()); - }))); - } + @Override + public void onFailed(TopicPartition topicPartition) { + log.debug("The share partition failed listener is invoked for the topic-partition: {}, share-partition: {}", + topicPartition, sharePartitionKey); + onUpdate(topicPartition); + } - private SharePartitionKey sharePartitionKey(String groupId, TopicIdPartition topicIdPartition) { - return new SharePartitionKey(groupId, topicIdPartition); + @Override + public void onDeleted(TopicPartition topicPartition) { + log.debug("The share partition delete listener is invoked for the topic-partition: {}, share-partition: {}", + topicPartition, sharePartitionKey); + onUpdate(topicPartition); + } + + @Override + public void onBecomingFollower(TopicPartition topicPartition) { + log.debug("The share partition becoming follower listener is invoked for the topic-partition: {}, share-partition: {}", + topicPartition, sharePartitionKey); + onUpdate(topicPartition); + } + + private void onUpdate(TopicPartition topicPartition) { + if (!sharePartitionKey.topicIdPartition().topicPartition().equals(topicPartition)) { + log.error("The share partition listener is invoked for the wrong topic-partition: {}, share-partition: {}", + topicPartition, sharePartitionKey); + return; + } + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); + } } static class ShareGroupMetrics { diff --git a/core/src/main/scala/kafka/Kafka.scala b/core/src/main/scala/kafka/Kafka.scala index f32f23d3475e7..fa64ed05421bf 100755 --- a/core/src/main/scala/kafka/Kafka.scala +++ b/core/src/main/scala/kafka/Kafka.scala @@ -19,7 +19,7 @@ package kafka import java.util.Properties import joptsimple.OptionParser -import kafka.server.{KafkaConfig, KafkaRaftServer, KafkaServer, Server} +import kafka.server.{KafkaConfig, KafkaRaftServer, Server} import kafka.utils.Implicits._ import kafka.utils.Logging import org.apache.kafka.common.utils.{Exit, Java, LoggingSignalHandler, OperatingSystem, Time, Utils} @@ -61,27 +61,12 @@ object Kafka extends Logging { props } - // For Zk mode, the API forwarding is currently enabled only under migration flag. We can - // directly do a static IBP check to see API forwarding is enabled here because IBP check is - // static in Zk mode. 
- private def enableApiForwarding(config: KafkaConfig) = - config.migrationEnabled && config.interBrokerProtocolVersion.isApiForwardingEnabled - private def buildServer(props: Properties): Server = { val config = KafkaConfig.fromProps(props, doLog = false) - if (config.requiresZookeeper) { - new KafkaServer( - config, - Time.SYSTEM, - threadNamePrefix = None, - enableForwarding = enableApiForwarding(config) - ) - } else { - new KafkaRaftServer( - config, - Time.SYSTEM, - ) - } + new KafkaRaftServer( + config, + Time.SYSTEM, + ) } def main(args: Array[String]): Unit = { @@ -112,7 +97,7 @@ object Kafka extends Logging { try server.startup() catch { case e: Throwable => - // KafkaServer.startup() calls shutdown() in case of exceptions, so we invoke `exit` to set the status code + // KafkaBroker.startup() calls shutdown() in case of exceptions, so we invoke `exit` to set the status code fatal("Exiting Kafka due to fatal exception during startup.", e) Exit.exit(1) } diff --git a/core/src/main/scala/kafka/admin/AclCommand.scala b/core/src/main/scala/kafka/admin/AclCommand.scala deleted file mode 100644 index e83ffe30ec607..0000000000000 --- a/core/src/main/scala/kafka/admin/AclCommand.scala +++ /dev/null @@ -1,510 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.admin - -import java.util.Properties -import joptsimple._ -import joptsimple.util.EnumConverter -import kafka.utils._ -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} -import org.apache.kafka.common.acl._ -import org.apache.kafka.common.acl.AclOperation._ -import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} -import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourcePatternFilter, Resource => JResource, ResourceType => JResourceType} -import org.apache.kafka.common.security.auth.KafkaPrincipal -import org.apache.kafka.common.utils.{Exit, Utils, SecurityUtils => JSecurityUtils} -import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils} - -import scala.jdk.CollectionConverters._ -import scala.collection.mutable -import scala.io.StdIn - -object AclCommand extends Logging { - - private val ClusterResourceFilter = new ResourcePatternFilter(JResourceType.CLUSTER, JResource.CLUSTER_NAME, PatternType.LITERAL) - - private val Newline = scala.util.Properties.lineSeparator - - def main(args: Array[String]): Unit = { - - val opts = new AclCommandOptions(args) - - CommandLineUtils.maybePrintHelpOrVersion(opts, "This tool helps to manage acls on kafka.") - - opts.checkArgs() - - val aclCommandService = new AdminClientService(opts) - - try { - if (opts.options.has(opts.addOpt)) - aclCommandService.addAcls() - else if (opts.options.has(opts.removeOpt)) - aclCommandService.removeAcls() - else if (opts.options.has(opts.listOpt)) - aclCommandService.listAcls() - } catch { - case e: Throwable => - println(s"Error while executing ACL command: ${e.getMessage}") - println(Utils.stackTrace(e)) - Exit.exit(1) - } - } - - private class AdminClientService(val opts: AclCommandOptions) extends Logging { - - private def withAdminClient(opts: AclCommandOptions)(f: Admin => Unit): Unit = { - val props = if (opts.options.has(opts.commandConfigOpt)) - Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt)) - else - new Properties() - - if (opts.options.has(opts.bootstrapServerOpt)) { - props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt)) - } else { - props.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, opts.options.valueOf(opts.bootstrapControllerOpt)) - } - val adminClient = Admin.create(props) - - try { - f(adminClient) - } finally { - adminClient.close() - } - } - - def addAcls(): Unit = { - val resourceToAcl = getResourceToAcls(opts) - withAdminClient(opts) { adminClient => - for ((resource, acls) <- resourceToAcl) { - println(s"Adding ACLs for resource `$resource`: $Newline ${acls.map("\t" + _).mkString(Newline)} $Newline") - val aclBindings = acls.map(acl => new AclBinding(resource, acl)).asJavaCollection - adminClient.createAcls(aclBindings).all().get() - } - } - } - - def removeAcls(): Unit = { - withAdminClient(opts) { adminClient => - val filterToAcl = getResourceFilterToAcls(opts) - - for ((filter, acls) <- filterToAcl) { - if (acls.isEmpty) { - if (confirmAction(opts, s"Are you sure you want to delete all ACLs for resource filter `$filter`? (y/n)")) - removeAcls(adminClient, acls, filter) - } else { - if (confirmAction(opts, s"Are you sure you want to remove ACLs: $Newline ${acls.map("\t" + _).mkString(Newline)} $Newline from resource filter `$filter`? 
(y/n)")) - removeAcls(adminClient, acls, filter) - } - } - } - } - - def listAcls(): Unit = { - withAdminClient(opts) { adminClient => - listAcls(adminClient) - } - } - - private def listAcls(adminClient: Admin): Unit = { - val filters = getResourceFilter(opts, dieIfNoResourceFound = false) - val listPrincipals = getPrincipals(opts, opts.listPrincipalsOpt) - val resourceToAcls = getAcls(adminClient, filters) - - if (listPrincipals.isEmpty) { - printResourceAcls(resourceToAcls) - } else { - listPrincipals.foreach{principal => - println(s"ACLs for principal `$principal`") - val filteredResourceToAcls = resourceToAcls.map { case (resource, acls) => - resource -> acls.filter(acl => principal.toString.equals(acl.principal)) - }.filter { case (_, acls) => acls.nonEmpty } - printResourceAcls(filteredResourceToAcls) - } - } - } - - private def printResourceAcls(resourceToAcls: Map[ResourcePattern, Set[AccessControlEntry]]): Unit = { - for ((resource, acls) <- resourceToAcls) - println(s"Current ACLs for resource `$resource`: $Newline ${acls.map("\t" + _).mkString(Newline)} $Newline") - } - - private def removeAcls(adminClient: Admin, acls: Set[AccessControlEntry], filter: ResourcePatternFilter): Unit = { - if (acls.isEmpty) - adminClient.deleteAcls(List(new AclBindingFilter(filter, AccessControlEntryFilter.ANY)).asJava).all().get() - else { - val aclBindingFilters = acls.map(acl => new AclBindingFilter(filter, acl.toFilter)).toList.asJava - adminClient.deleteAcls(aclBindingFilters).all().get() - } - } - - private def getAcls(adminClient: Admin, filters: Set[ResourcePatternFilter]): Map[ResourcePattern, Set[AccessControlEntry]] = { - val aclBindings = - if (filters.isEmpty) adminClient.describeAcls(AclBindingFilter.ANY).values().get().asScala.toList - else { - val results = for (filter <- filters) yield { - adminClient.describeAcls(new AclBindingFilter(filter, AccessControlEntryFilter.ANY)).values().get().asScala.toList - } - results.reduceLeft(_ ++ _) - } - - val resourceToAcls = mutable.Map[ResourcePattern, Set[AccessControlEntry]]().withDefaultValue(Set()) - - aclBindings.foreach(aclBinding => resourceToAcls(aclBinding.pattern()) = resourceToAcls(aclBinding.pattern()) + aclBinding.entry()) - resourceToAcls.toMap - } - } - - private def getResourceToAcls(opts: AclCommandOptions): Map[ResourcePattern, Set[AccessControlEntry]] = { - val patternType = opts.options.valueOf(opts.resourcePatternType) - if (!patternType.isSpecific) - CommandLineUtils.printUsageAndExit(opts.parser, s"A '--resource-pattern-type' value of '$patternType' is not valid when adding acls.") - - val resourceToAcl = getResourceFilterToAcls(opts).map { - case (filter, acls) => - new ResourcePattern(filter.resourceType(), filter.name(), filter.patternType()) -> acls - } - - if (resourceToAcl.values.exists(_.isEmpty)) - CommandLineUtils.printUsageAndExit(opts.parser, "You must specify one of: --allow-principal, --deny-principal when trying to add ACLs.") - - resourceToAcl - } - - private def getResourceFilterToAcls(opts: AclCommandOptions): Map[ResourcePatternFilter, Set[AccessControlEntry]] = { - var resourceToAcls = Map.empty[ResourcePatternFilter, Set[AccessControlEntry]] - - //if none of the --producer or --consumer options are specified , just construct ACLs from CLI options. - if (!opts.options.has(opts.producerOpt) && !opts.options.has(opts.consumerOpt)) { - resourceToAcls ++= getCliResourceFilterToAcls(opts) - } - - //users are allowed to specify both --producer and --consumer options in a single command. 
- if (opts.options.has(opts.producerOpt)) - resourceToAcls ++= getProducerResourceFilterToAcls(opts) - - if (opts.options.has(opts.consumerOpt)) - resourceToAcls ++= getConsumerResourceFilterToAcls(opts).map { case (k, v) => k -> (v ++ resourceToAcls.getOrElse(k, Set.empty[AccessControlEntry])) } - - validateOperation(opts, resourceToAcls) - - resourceToAcls - } - - private def getProducerResourceFilterToAcls(opts: AclCommandOptions): Map[ResourcePatternFilter, Set[AccessControlEntry]] = { - val filters = getResourceFilter(opts) - - val topics = filters.filter(_.resourceType == JResourceType.TOPIC) - val transactionalIds = filters.filter(_.resourceType == JResourceType.TRANSACTIONAL_ID) - val enableIdempotence = opts.options.has(opts.idempotentOpt) - - val topicAcls = getAcl(opts, Set(WRITE, DESCRIBE, CREATE)) - val transactionalIdAcls = getAcl(opts, Set(WRITE, DESCRIBE)) - - //Write, Describe, Create permission on topics, Write, Describe on transactionalIds - topics.map(_ -> topicAcls).toMap ++ - transactionalIds.map(_ -> transactionalIdAcls).toMap ++ - (if (enableIdempotence) - Map(ClusterResourceFilter -> getAcl(opts, Set(IDEMPOTENT_WRITE))) - else - Map.empty) - } - - private def getConsumerResourceFilterToAcls(opts: AclCommandOptions): Map[ResourcePatternFilter, Set[AccessControlEntry]] = { - val filters = getResourceFilter(opts) - - val topics = filters.filter(_.resourceType == JResourceType.TOPIC) - val groups = filters.filter(_.resourceType == JResourceType.GROUP) - - //Read, Describe on topic, Read on consumerGroup - - val acls = getAcl(opts, Set(READ, DESCRIBE)) - - topics.map(_ -> acls).toMap[ResourcePatternFilter, Set[AccessControlEntry]] ++ - groups.map(_ -> getAcl(opts, Set(READ))).toMap[ResourcePatternFilter, Set[AccessControlEntry]] - } - - private def getCliResourceFilterToAcls(opts: AclCommandOptions): Map[ResourcePatternFilter, Set[AccessControlEntry]] = { - val acls = getAcl(opts) - val filters = getResourceFilter(opts) - filters.map(_ -> acls).toMap - } - - private def getAcl(opts: AclCommandOptions, operations: Set[AclOperation]): Set[AccessControlEntry] = { - val allowedPrincipals = getPrincipals(opts, opts.allowPrincipalsOpt) - - val deniedPrincipals = getPrincipals(opts, opts.denyPrincipalsOpt) - - val allowedHosts = getHosts(opts, opts.allowHostsOpt, opts.allowPrincipalsOpt) - - val deniedHosts = getHosts(opts, opts.denyHostsOpt, opts.denyPrincipalsOpt) - - val acls = new collection.mutable.HashSet[AccessControlEntry] - if (allowedHosts.nonEmpty && allowedPrincipals.nonEmpty) - acls ++= getAcls(allowedPrincipals, ALLOW, operations, allowedHosts) - - if (deniedHosts.nonEmpty && deniedPrincipals.nonEmpty) - acls ++= getAcls(deniedPrincipals, DENY, operations, deniedHosts) - - acls.toSet - } - - private def getAcl(opts: AclCommandOptions): Set[AccessControlEntry] = { - val operations = opts.options.valuesOf(opts.operationsOpt).asScala - .map(operation => JSecurityUtils.operation(operation.trim)).toSet - getAcl(opts, operations) - } - - def getAcls(principals: Set[KafkaPrincipal], permissionType: AclPermissionType, operations: Set[AclOperation], - hosts: Set[String]): Set[AccessControlEntry] = { - for { - principal <- principals - operation <- operations - host <- hosts - } yield new AccessControlEntry(principal.toString, host, operation, permissionType) - } - - private def getHosts(opts: AclCommandOptions, hostOptionSpec: OptionSpec[String], - principalOptionSpec: OptionSpec[String]): Set[String] = { - if (opts.options.has(hostOptionSpec)) - 
opts.options.valuesOf(hostOptionSpec).asScala.map(_.trim).toSet - else if (opts.options.has(principalOptionSpec)) - Set[String](AclEntry.WILDCARD_HOST) - else - Set.empty[String] - } - - private def getPrincipals(opts: AclCommandOptions, principalOptionSpec: OptionSpec[String]): Set[KafkaPrincipal] = { - if (opts.options.has(principalOptionSpec)) - opts.options.valuesOf(principalOptionSpec).asScala.map(s => JSecurityUtils.parseKafkaPrincipal(s.trim)).toSet - else - Set.empty[KafkaPrincipal] - } - - private def getResourceFilter(opts: AclCommandOptions, dieIfNoResourceFound: Boolean = true): Set[ResourcePatternFilter] = { - val patternType = opts.options.valueOf(opts.resourcePatternType) - - var resourceFilters = Set.empty[ResourcePatternFilter] - if (opts.options.has(opts.topicOpt)) - opts.options.valuesOf(opts.topicOpt).forEach(topic => resourceFilters += new ResourcePatternFilter(JResourceType.TOPIC, topic.trim, patternType)) - - if (patternType == PatternType.LITERAL && (opts.options.has(opts.clusterOpt) || opts.options.has(opts.idempotentOpt))) - resourceFilters += ClusterResourceFilter - - if (opts.options.has(opts.groupOpt)) - opts.options.valuesOf(opts.groupOpt).forEach(group => resourceFilters += new ResourcePatternFilter(JResourceType.GROUP, group.trim, patternType)) - - if (opts.options.has(opts.transactionalIdOpt)) - opts.options.valuesOf(opts.transactionalIdOpt).forEach(transactionalId => - resourceFilters += new ResourcePatternFilter(JResourceType.TRANSACTIONAL_ID, transactionalId, patternType)) - - if (opts.options.has(opts.delegationTokenOpt)) - opts.options.valuesOf(opts.delegationTokenOpt).forEach(token => resourceFilters += new ResourcePatternFilter(JResourceType.DELEGATION_TOKEN, token.trim, patternType)) - - if (opts.options.has(opts.userPrincipalOpt)) - opts.options.valuesOf(opts.userPrincipalOpt).forEach(user => resourceFilters += new ResourcePatternFilter(JResourceType.USER, user.trim, patternType)) - - if (resourceFilters.isEmpty && dieIfNoResourceFound) - CommandLineUtils.printUsageAndExit(opts.parser, "You must provide at least one resource: --topic or --cluster or --group or --delegation-token ") - - resourceFilters - } - - private def confirmAction(opts: AclCommandOptions, msg: String): Boolean = { - if (opts.options.has(opts.forceOpt)) - return true - println(msg) - StdIn.readLine().equalsIgnoreCase("y") - } - - private def validateOperation(opts: AclCommandOptions, resourceToAcls: Map[ResourcePatternFilter, Set[AccessControlEntry]]): Unit = { - for ((resource, acls) <- resourceToAcls) { - val validOps = AclEntry.supportedOperations(resource.resourceType).asScala.toSet + AclOperation.ALL - if ((acls.map(_.operation) -- validOps).nonEmpty) - CommandLineUtils.printUsageAndExit(opts.parser, s"ResourceType ${resource.resourceType} only supports operations ${validOps.map(JSecurityUtils.operationName).mkString(", ")}") - } - } - - class AclCommandOptions(args: Array[String]) extends CommandDefaultOptions(args) { - val CommandConfigDoc = "A property file containing configs to be passed to Admin Client." - - val bootstrapServerOpt: OptionSpec[String] = parser.accepts("bootstrap-server", "A list of host/port pairs to use for establishing the connection to the Kafka cluster." + - " This list should be in the form host1:port1,host2:port2,... 
This config is required for acl management using admin client API.") - .withRequiredArg - .describedAs("server to connect to") - .ofType(classOf[String]) - - val bootstrapControllerOpt: OptionSpec[String] = parser.accepts("bootstrap-controller", "A list of host/port pairs to use for establishing the connection to the Kafka cluster." + - " This list should be in the form host1:port1,host2:port2,... This config is required for acl management using admin client API.") - .withRequiredArg - .describedAs("controller to connect to") - .ofType(classOf[String]) - - val commandConfigOpt: OptionSpec[String] = parser.accepts("command-config", CommandConfigDoc) - .withOptionalArg() - .describedAs("command-config") - .ofType(classOf[String]) - - val topicOpt: OptionSpec[String] = parser.accepts("topic", "topic to which ACLs should be added or removed. " + - "A value of '*' indicates ACL should apply to all topics.") - .withRequiredArg - .describedAs("topic") - .ofType(classOf[String]) - - val clusterOpt: OptionSpecBuilder = parser.accepts("cluster", "Add/Remove cluster ACLs.") - val groupOpt: OptionSpec[String] = parser.accepts("group", "Consumer Group to which the ACLs should be added or removed. " + - "A value of '*' indicates the ACLs should apply to all groups.") - .withRequiredArg - .describedAs("group") - .ofType(classOf[String]) - - val transactionalIdOpt: OptionSpec[String] = parser.accepts("transactional-id", "The transactionalId to which ACLs should " + - "be added or removed. A value of '*' indicates the ACLs should apply to all transactionalIds.") - .withRequiredArg - .describedAs("transactional-id") - .ofType(classOf[String]) - - val idempotentOpt: OptionSpecBuilder = parser.accepts("idempotent", "Enable idempotence for the producer. This should be " + - "used in combination with the --producer option. Note that idempotence is enabled automatically if " + - "the producer is authorized to a particular transactional-id.") - - val delegationTokenOpt: OptionSpec[String] = parser.accepts("delegation-token", "Delegation token to which ACLs should be added or removed. " + - "A value of '*' indicates ACL should apply to all tokens.") - .withRequiredArg - .describedAs("delegation-token") - .ofType(classOf[String]) - - val resourcePatternType: OptionSpec[PatternType] = parser.accepts("resource-pattern-type", "The type of the resource pattern or pattern filter. " + - "When adding acls, this should be a specific pattern type, e.g. 'literal' or 'prefixed'. " + - "When listing or removing acls, a specific pattern type can be used to list or remove acls from specific resource patterns, " + - "or use the filter values of 'any' or 'match', where 'any' will match any pattern type, but will match the resource name exactly, " + - "where as 'match' will perform pattern matching to list or remove all acls that affect the supplied resource(s). 
" + - "WARNING: 'match', when used in combination with the '--remove' switch, should be used with care.") - .withRequiredArg() - .ofType(classOf[String]) - .withValuesConvertedBy(new PatternTypeConverter()) - .defaultsTo(PatternType.LITERAL) - - val addOpt: OptionSpecBuilder = parser.accepts("add", "Indicates you are trying to add ACLs.") - val removeOpt: OptionSpecBuilder = parser.accepts("remove", "Indicates you are trying to remove ACLs.") - val listOpt: OptionSpecBuilder = parser.accepts("list", "List ACLs for the specified resource, use --topic or --group or --cluster to specify a resource.") - - val operationsOpt: OptionSpec[String] = parser.accepts("operation", "Operation that is being allowed or denied. Valid operation names are: " + Newline + - AclEntry.ACL_OPERATIONS.asScala.map("\t" + JSecurityUtils.operationName(_)).mkString(Newline) + Newline) - .withRequiredArg - .ofType(classOf[String]) - .defaultsTo(JSecurityUtils.operationName(AclOperation.ALL)) - - val allowPrincipalsOpt: OptionSpec[String] = parser.accepts("allow-principal", "principal is in principalType:name format." + - " Note that principalType must be supported by the Authorizer being used." + - " For example, User:'*' is the wild card indicating all users.") - .withRequiredArg - .describedAs("allow-principal") - .ofType(classOf[String]) - - val denyPrincipalsOpt: OptionSpec[String] = parser.accepts("deny-principal", "principal is in principalType:name format. " + - "By default anyone not added through --allow-principal is denied access. " + - "You only need to use this option as negation to already allowed set. " + - "Note that principalType must be supported by the Authorizer being used. " + - "For example if you wanted to allow access to all users in the system but not test-user you can define an ACL that " + - "allows access to User:'*' and specify --deny-principal=User:test@EXAMPLE.COM. " + - "AND PLEASE REMEMBER DENY RULES TAKES PRECEDENCE OVER ALLOW RULES.") - .withRequiredArg - .describedAs("deny-principal") - .ofType(classOf[String]) - - val listPrincipalsOpt: OptionSpec[String] = parser.accepts("principal", "List ACLs for the specified principal. principal is in principalType:name format." + - " Note that principalType must be supported by the Authorizer being used. Multiple --principal option can be passed.") - .withOptionalArg() - .describedAs("principal") - .ofType(classOf[String]) - - val allowHostsOpt: OptionSpec[String] = parser.accepts("allow-host", "Host from which principals listed in --allow-principal will have access. " + - "If you have specified --allow-principal then the default for this option will be set to '*' which allows access from all hosts.") - .withRequiredArg - .describedAs("allow-host") - .ofType(classOf[String]) - - val denyHostsOpt: OptionSpec[String] = parser.accepts("deny-host", "Host from which principals listed in --deny-principal will be denied access. " + - "If you have specified --deny-principal then the default for this option will be set to '*' which denies access from all hosts.") - .withRequiredArg - .describedAs("deny-host") - .ofType(classOf[String]) - - val producerOpt: OptionSpecBuilder = parser.accepts("producer", "Convenience option to add/remove ACLs for producer role. " + - "This will generate ACLs that allows WRITE,DESCRIBE and CREATE on topic.") - - val consumerOpt: OptionSpecBuilder = parser.accepts("consumer", "Convenience option to add/remove ACLs for consumer role. 
" + - "This will generate ACLs that allows READ,DESCRIBE on topic and READ on group.") - - val forceOpt: OptionSpecBuilder = parser.accepts("force", "Assume Yes to all queries and do not prompt.") - - val userPrincipalOpt: OptionSpec[String] = parser.accepts("user-principal", "Specifies a user principal as a resource in relation with the operation. For instance " + - "one could grant CreateTokens or DescribeTokens permission on a given user principal.") - .withRequiredArg() - .describedAs("user-principal") - .ofType(classOf[String]) - - options = parser.parse(args: _*) - - def checkArgs(): Unit = { - if (options.has(bootstrapServerOpt) && options.has(bootstrapControllerOpt)) - CommandLineUtils.printUsageAndExit(parser, "Only one of --bootstrap-server or --bootstrap-controller must be specified") - - if (!options.has(bootstrapServerOpt) && !options.has(bootstrapControllerOpt)) - CommandLineUtils.printUsageAndExit(parser, "One of --bootstrap-server or --bootstrap-controller must be specified") - - val actions = Seq(addOpt, removeOpt, listOpt).count(options.has) - if (actions != 1) - CommandLineUtils.printUsageAndExit(parser, "Command must include exactly one action: --list, --add, --remove. ") - - CommandLineUtils.checkInvalidArgs(parser, options, listOpt, producerOpt, consumerOpt, allowHostsOpt, allowPrincipalsOpt, denyHostsOpt, denyPrincipalsOpt) - - //when --producer or --consumer is specified , user should not specify operations as they are inferred and we also disallow --deny-principals and --deny-hosts. - CommandLineUtils.checkInvalidArgs(parser, options, producerOpt, operationsOpt, denyPrincipalsOpt, denyHostsOpt) - CommandLineUtils.checkInvalidArgs(parser, options, consumerOpt, operationsOpt, denyPrincipalsOpt, denyHostsOpt) - - if (options.has(listPrincipalsOpt) && !options.has(listOpt)) - CommandLineUtils.printUsageAndExit(parser, "The --principal option is only available if --list is set") - - if (options.has(producerOpt) && !options.has(topicOpt)) - CommandLineUtils.printUsageAndExit(parser, "With --producer you must specify a --topic") - - if (options.has(idempotentOpt) && !options.has(producerOpt)) - CommandLineUtils.printUsageAndExit(parser, "The --idempotent option is only available if --producer is set") - - if (options.has(consumerOpt) && (!options.has(topicOpt) || !options.has(groupOpt) || (!options.has(producerOpt) && (options.has(clusterOpt) || options.has(transactionalIdOpt))))) - CommandLineUtils.printUsageAndExit(parser, "With --consumer you must specify a --topic and a --group and no --cluster or --transactional-id option should be specified.") - } - } -} - -class PatternTypeConverter extends EnumConverter[PatternType](classOf[PatternType]) { - - override def convert(value: String): PatternType = { - val patternType = super.convert(value) - if (patternType.isUnknown) - throw new ValueConversionException("Unknown resource-pattern-type: " + value) - - patternType - } - - override def valuePattern: String = PatternType.values - .filter(_ != PatternType.UNKNOWN) - .mkString("|") -} diff --git a/core/src/main/scala/kafka/admin/ConfigCommand.scala b/core/src/main/scala/kafka/admin/ConfigCommand.scala index 97c727826a95d..aa38a6c85c4f9 100644 --- a/core/src/main/scala/kafka/admin/ConfigCommand.scala +++ b/core/src/main/scala/kafka/admin/ConfigCommand.scala @@ -18,29 +18,24 @@ package kafka.admin import java.nio.charset.StandardCharsets -import java.util.concurrent.TimeUnit -import java.util.{Collections, Optional, Properties} +import 
java.util.concurrent.{ExecutionException, TimeUnit} +import java.util.{Collections, Properties} import joptsimple._ -import kafka.server.{DynamicBrokerConfig, DynamicConfig, KafkaConfig} +import kafka.server.DynamicConfig import kafka.utils.Implicits._ import kafka.utils.Logging -import kafka.zk.{AdminZkClient, KafkaZkClient} -import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, Config => JConfig, ScramMechanism => PublicScramMechanism} -import org.apache.kafka.common.config.{ConfigResource, TopicConfig} -import org.apache.kafka.common.config.types.Password -import org.apache.kafka.common.errors.InvalidConfigurationException +import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} +import org.apache.kafka.common.config.ConfigResource +import org.apache.kafka.common.errors.{InvalidConfigurationException, UnsupportedVersionException} import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} -import org.apache.kafka.common.security.JaasUtils -import org.apache.kafka.common.security.scram.internals.{ScramCredentialUtils, ScramFormatter, ScramMechanism} -import org.apache.kafka.common.utils.{Exit, Sanitizer, Time, Utils} -import org.apache.kafka.server.config.{ConfigType, QuotaConfig, ZkConfigs, ZooKeeperInternals} -import org.apache.kafka.security.{PasswordEncoder, PasswordEncoderConfigs} +import org.apache.kafka.common.security.scram.internals.ScramMechanism +import org.apache.kafka.common.utils.{Exit, Utils} +import org.apache.kafka.server.config.{ConfigType, QuotaConfig} import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils} import org.apache.kafka.storage.internals.log.LogConfig -import org.apache.zookeeper.client.ZKClientConfig -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ import scala.collection._ @@ -63,24 +58,12 @@ import scala.collection._ * when describing or altering default configuration for users, clients, brokers, or ips, respectively. * Alternatively, --user-defaults, --client-defaults, --broker-defaults, or --ip-defaults may be specified in place of * --entity-type --entity-default, respectively. - * - * For most use cases, this script communicates with a kafka cluster (specified via the - * `--bootstrap-server` option). There are three exceptions where direct communication with a - * ZooKeeper ensemble (specified via the `--zookeeper` option) is allowed: - * - * 1. Describe/alter user configs where the config is a SCRAM mechanism name (i.e. a SCRAM credential for a user) - * 2. Describe/alter broker configs for a particular broker when that broker is down - * 3. Describe/alter broker default configs when all brokers are down - * - * For example, this allows password configs to be stored encrypted in ZK before brokers are started, - * avoiding cleartext passwords in `server.properties`. 
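With the ZooKeeper escape hatches described above removed, every configuration change goes through the Admin client. A hedged sketch of the incremental update path that ConfigCommand now relies on for topics, brokers, groups and client metrics is shown below; the broker address, topic name and retention value are illustrative only.

    // Hedged sketch of an incremental config update via the Admin client.
    import java.util.{Collections, Properties}
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, ConfigEntry}
    import org.apache.kafka.common.config.ConfigResource

    object IncrementalAlterSketch {
      def main(args: Array[String]): Unit = {
        val props = new Properties()
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // assumed broker address
        val admin = Admin.create(props)
        try {
          val resource = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic") // illustrative topic
          val ops: java.util.Collection[AlterConfigOp] =
            Collections.singletonList(new AlterConfigOp(new ConfigEntry("retention.ms", "86400000"), AlterConfigOp.OpType.SET))
          // SET adds or overwrites the entry; DELETE (not shown) would revert it to the default
          admin.incrementalAlterConfigs(Collections.singletonMap(resource, ops)).all().get()
        } finally {
          admin.close()
        }
      }
    }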
*/ object ConfigCommand extends Logging { - val BrokerDefaultEntityName = "" + private val BrokerDefaultEntityName = "" val BrokerLoggerConfigType = "broker-loggers" private val BrokerSupportedConfigTypes = ConfigType.ALL.asScala :+ BrokerLoggerConfigType :+ ConfigType.CLIENT_METRICS :+ ConfigType.GROUP - private val ZkSupportedConfigTypes = Seq(ConfigType.USER, ConfigType.BROKER) private val DefaultScramIterations = 4096 def main(args: Array[String]): Unit = { @@ -91,20 +74,18 @@ object ConfigCommand extends Logging { "This tool helps to manipulate and describe entity config for a topic, client, user, broker, ip, client-metrics or group") opts.checkArgs() - - if (opts.options.has(opts.zkConnectOpt)) { - System.out.println(s"Warning: --zookeeper is deprecated and will be removed in a future version of Kafka.") - System.out.println(s"Use --bootstrap-server instead to specify a broker to connect to.") - processCommandWithZk(opts.options.valueOf(opts.zkConnectOpt), opts) - } else { - processCommand(opts) - } + processCommand(opts) } catch { case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) => logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e) System.err.println(e.getMessage) Exit.exit(1) + case e: UnsupportedVersionException => + logger.debug(s"Unsupported API encountered in server when executing config command with args '${args.mkString(" ")}'") + System.err.println(e.getMessage) + Exit.exit(1) + case t: Throwable => logger.debug(s"Error while executing config command with args '${args.mkString(" ")}'", t) System.err.println(s"Error while executing config command with args '${args.mkString(" ")}'") @@ -113,178 +94,6 @@ object ConfigCommand extends Logging { } } - private def processCommandWithZk(zkConnectString: String, opts: ConfigCommandOptions): Unit = { - val zkClientConfig = ZkSecurityMigrator.createZkClientConfigFromOption(opts.options, opts.zkTlsConfigFile) - .getOrElse(new ZKClientConfig()) - val zkClient = KafkaZkClient(zkConnectString, JaasUtils.isZkSaslEnabled || KafkaConfig.zkTlsClientAuthEnabled(zkClientConfig), 30000, 30000, - Int.MaxValue, Time.SYSTEM, zkClientConfig = zkClientConfig, name = "ConfigCommand", enableEntityConfigControllerCheck = false) - val adminZkClient = new AdminZkClient(zkClient) - try { - if (opts.options.has(opts.alterOpt)) - alterConfigWithZk(zkClient, opts, adminZkClient) - else if (opts.options.has(opts.describeOpt)) - describeConfigWithZk(zkClient, opts, adminZkClient) - } finally { - zkClient.close() - } - } - - def alterConfigWithZk(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient): Unit = { - val configsToBeAdded = parseConfigsToBeAdded(opts) - val configsToBeDeleted = parseConfigsToBeDeleted(opts) - val entity = parseEntity(opts) - val entityType = entity.root.entityType - val entityName = entity.fullSanitizedName - val errorMessage = s"--bootstrap-server option must be specified to update $entityType configs: {add: $configsToBeAdded, delete: $configsToBeDeleted}" - var isUserClientId = false - - if (entityType == ConfigType.USER) { - isUserClientId = entity.child.exists(e => ConfigType.CLIENT.equals(e.entityType)) - if (!configsToBeAdded.isEmpty || configsToBeDeleted.nonEmpty) { - val info = "User configuration updates using ZooKeeper are only supported for SCRAM credential updates." 
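The ZooKeeper-only SCRAM path being removed here has an Admin API counterpart, alterUserScramCredentials, which the retained ConfigCommand imports already reference. A hedged sketch under that assumption follows; the user name, password and bootstrap address are placeholders, not values from this patch.

    // Hedged sketch: upsert a SCRAM credential through the Admin client instead of ZooKeeper.
    import java.util.{Collections, Properties}
    import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ScramCredentialInfo, ScramMechanism, UserScramCredentialAlteration, UserScramCredentialUpsertion}

    object ScramUpsertSketch {
      def main(args: Array[String]): Unit = {
        val props = new Properties()
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // assumed broker address
        val admin = Admin.create(props)
        try {
          val credential = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096) // 4096 matches DefaultScramIterations above
          val upsert = new UserScramCredentialUpsertion("alice", credential, "alice-secret") // illustrative user and password
          admin.alterUserScramCredentials(Collections.singletonList[UserScramCredentialAlteration](upsert)).all().get()
        } finally {
          admin.close()
        }
      }
    }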
- val scramMechanismNames = ScramMechanism.values.map(_.mechanismName) - // make sure every added/deleted configs are SCRAM related, other configs are not supported using zookeeper - require(configsToBeAdded.stringPropertyNames.asScala.forall(scramMechanismNames.contains), - s"$errorMessage. $info") - require(configsToBeDeleted.forall(scramMechanismNames.contains), s"$errorMessage. $info") - } - preProcessScramCredentials(configsToBeAdded) - } else if (entityType == ConfigType.BROKER) { - // Dynamic broker configs can be updated using ZooKeeper only if the corresponding broker is not running. - if (!configsToBeAdded.isEmpty || configsToBeDeleted.nonEmpty) { - validateBrokersNotRunning(entityName, adminZkClient, zkClient, errorMessage) - - val perBrokerConfig = entityName != ZooKeeperInternals.DEFAULT_STRING - preProcessBrokerConfigs(configsToBeAdded, perBrokerConfig) - } - } - - // compile the final set of configs - val configs = adminZkClient.fetchEntityConfig(entityType, entityName) - - // fail the command if any of the configs to be deleted does not exist - val invalidConfigs = configsToBeDeleted.filterNot(configs.containsKey(_)) - if (invalidConfigs.nonEmpty) - throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}") - - configs ++= configsToBeAdded - configsToBeDeleted.foreach(configs.remove(_)) - - adminZkClient.changeConfigs(entityType, entityName, configs, isUserClientId) - - System.out.println(s"Completed updating config for entity: $entity.") - } - - private def validateBrokersNotRunning(entityName: String, - adminZkClient: AdminZkClient, - zkClient: KafkaZkClient, - errorMessage: String): Unit = { - val perBrokerConfig = entityName != ZooKeeperInternals.DEFAULT_STRING - val info = "Broker configuration operations using ZooKeeper are only supported if the affected broker(s) are not running." - if (perBrokerConfig) { - adminZkClient.parseBroker(entityName).foreach { brokerId => - require(zkClient.getBroker(brokerId).isEmpty, s"$errorMessage - broker $brokerId is running. $info") - } - } else { - val runningBrokersCount = zkClient.getAllBrokersInCluster.size - require(runningBrokersCount == 0, s"$errorMessage - $runningBrokersCount brokers are running. 
$info") - } - } - - private def preProcessScramCredentials(configsToBeAdded: Properties): Unit = { - def scramCredential(mechanism: ScramMechanism, credentialStr: String): String = { - val pattern = "(?:iterations=([0-9]*),)?password=(.*)".r - val (iterations, password) = credentialStr match { - case pattern(iterations, password) => (if (iterations != null) iterations.toInt else DefaultScramIterations, password) - case _ => throw new IllegalArgumentException(s"Invalid credential property $mechanism=$credentialStr") - } - if (iterations < mechanism.minIterations()) - throw new IllegalArgumentException(s"Iterations $iterations is less than the minimum ${mechanism.minIterations()} required for $mechanism") - val credential = new ScramFormatter(mechanism).generateCredential(password, iterations) - ScramCredentialUtils.credentialToString(credential) - } - for (mechanism <- ScramMechanism.values) { - configsToBeAdded.getProperty(mechanism.mechanismName) match { - case null => - case value => - configsToBeAdded.setProperty(mechanism.mechanismName, scramCredential(mechanism, value)) - } - } - } - - def createPasswordEncoder(encoderConfigs: java.util.Map[String, String]): PasswordEncoder = { - val encoderSecret = Optional.ofNullable(encoderConfigs.get(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG)) - .orElseThrow(() => new IllegalArgumentException("Password encoder secret not specified")) - PasswordEncoder.encrypting(new Password(encoderSecret), - null, - encoderConfigs.getOrDefault(PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_CONFIG, PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT), - Optional.ofNullable(encoderConfigs.get(PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_CONFIG)) - .map[Int](Integer.parseInt) - .orElse(PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_DEFAULT), - Optional.ofNullable(encoderConfigs.get(PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_CONFIG)) - .map[Int](Integer.parseInt) - .orElse(PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_DEFAULT) - ) - } - - /** - * Pre-process broker configs provided to convert them to persistent format. - * Password configs are encrypted using the secret `PasswordEncoderConfigs.SECRET`. - * The secret is removed from `configsToBeAdded` and will not be persisted in ZooKeeper. - */ - private def preProcessBrokerConfigs(configsToBeAdded: Properties, perBrokerConfig: Boolean): Unit = { - val passwordEncoderConfigs = new Properties - passwordEncoderConfigs ++= configsToBeAdded.asScala.filter { case (key, _) => key.startsWith("password.encoder.") } - if (!passwordEncoderConfigs.isEmpty) { - info(s"Password encoder configs ${passwordEncoderConfigs.keySet} will be used for encrypting" + - " passwords, but will not be stored in ZooKeeper.") - passwordEncoderConfigs.asScala.keySet.foreach(configsToBeAdded.remove) - } - - DynamicBrokerConfig.validateConfigs(configsToBeAdded, perBrokerConfig) - val passwordConfigs = configsToBeAdded.asScala.keySet.filter(DynamicBrokerConfig.isPasswordConfig) - if (passwordConfigs.nonEmpty) { - require(passwordEncoderConfigs.containsKey(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG), - s"${PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG} must be specified to update $passwordConfigs." + - " Other password encoder configs like cipher algorithm and iterations may also be specified" + - " to override the default encoding parameters. Password encoder configs will not be persisted" + - " in ZooKeeper." 
- ) - val passwordConfigsMap = new java.util.HashMap[String, String] - passwordEncoderConfigs.forEach { (key, value) => - passwordConfigsMap.put(key.toString, value.toString) - } - val passwordEncoder = createPasswordEncoder(passwordConfigsMap) - passwordConfigs.foreach { configName => - val encodedValue = passwordEncoder.encode(new Password(configsToBeAdded.getProperty(configName))) - configsToBeAdded.setProperty(configName, encodedValue) - } - } - } - - def describeConfigWithZk(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient): Unit = { - val configEntity = parseEntity(opts) - val entityType = configEntity.root.entityType - val describeAllUsers = entityType == ConfigType.USER && configEntity.root.sanitizedName.isEmpty && configEntity.child.isEmpty - val entityName = configEntity.fullSanitizedName - val errorMessage = s"--bootstrap-server option must be specified to describe $entityType" - if (entityType == ConfigType.BROKER) { - // Dynamic broker configs can be described using ZooKeeper only if the corresponding broker is not running. - validateBrokersNotRunning(entityName, adminZkClient, zkClient, errorMessage) - } - - val entities = configEntity.getAllEntities(zkClient) - for (entity <- entities) { - val configs = adminZkClient.fetchEntityConfig(entity.root.entityType, entity.fullSanitizedName) - // When describing all users, don't include empty user nodes with only quota overrides. - if (!configs.isEmpty || !describeAllUsers) { - System.out.println("Configs for %s are %s" - .format(entity, configs.asScala.map(kv => kv._1 + "=" + kv._2).mkString(","))) - } - } - } - - @nowarn("cat=deprecation") def parseConfigsToBeAdded(opts: ConfigCommandOptions): Properties = { val props = new Properties if (opts.options.has(opts.addConfigFile)) { @@ -303,11 +112,6 @@ object ConfigCommand extends Logging { //Create properties, parsing square brackets from values if necessary configsToBeAdded.foreach(pair => props.setProperty(pair(0).trim, pair(1).replaceAll("\\[?\\]?", "").trim)) } - if (props.containsKey(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)) { - System.out.println(s"WARNING: The configuration ${TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG}=${props.getProperty(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)} is specified. " + - "This configuration will be ignored if the version is newer than the inter.broker.protocol.version specified in the broker or " + - "if the inter.broker.protocol.version is 3.0 or newer. 
This configuration is deprecated and it will be removed in Apache Kafka 4.0.") - } validatePropsKey(props) props } @@ -356,7 +160,6 @@ object ConfigCommand extends Logging { } } - @nowarn("cat=deprecation") def alterConfig(adminClient: Admin, opts: ConfigCommandOptions): Unit = { val entityTypes = opts.entityTypes val entityNames = opts.entityNames @@ -367,27 +170,25 @@ object ConfigCommand extends Logging { val configsToBeDeleted = parseConfigsToBeDeleted(opts) entityTypeHead match { - case ConfigType.TOPIC => - alterResourceConfig(adminClient, entityTypeHead, entityNameHead, configsToBeDeleted, configsToBeAdded, ConfigResource.Type.TOPIC) - - case ConfigType.BROKER => - val oldConfig = getResourceConfig(adminClient, entityTypeHead, entityNameHead, includeSynonyms = false, describeAll = false) - .map { entry => (entry.name, entry) }.toMap - - // fail the command if any of the configs to be deleted does not exist - val invalidConfigs = configsToBeDeleted.filterNot(oldConfig.contains) - if (invalidConfigs.nonEmpty) - throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}") - - val newEntries = oldConfig ++ configsToBeAdded -- configsToBeDeleted - val sensitiveEntries = newEntries.filter(_._2.value == null) - if (sensitiveEntries.nonEmpty) - throw new InvalidConfigurationException(s"All sensitive broker config entries must be specified for --alter, missing entries: ${sensitiveEntries.keySet}") - val newConfig = new JConfig(newEntries.asJava.values) - - val configResource = new ConfigResource(ConfigResource.Type.BROKER, entityNameHead) - val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false) - adminClient.alterConfigs(Map(configResource -> newConfig).asJava, alterOptions).all().get(60, TimeUnit.SECONDS) + case ConfigType.TOPIC | ConfigType.CLIENT_METRICS | ConfigType.BROKER | ConfigType.GROUP => + val configResourceType = entityTypeHead match { + case ConfigType.TOPIC => ConfigResource.Type.TOPIC + case ConfigType.CLIENT_METRICS => ConfigResource.Type.CLIENT_METRICS + case ConfigType.BROKER => ConfigResource.Type.BROKER + case ConfigType.GROUP => ConfigResource.Type.GROUP + } + try { + alterResourceConfig(adminClient, entityTypeHead, entityNameHead, configsToBeDeleted, configsToBeAdded, configResourceType) + } catch { + case e: ExecutionException => + e.getCause match { + case _: UnsupportedVersionException => + throw new UnsupportedVersionException(s"The ${ApiKeys.INCREMENTAL_ALTER_CONFIGS} API is not supported by the cluster. The API is supported starting from version 2.3.0." 
+ + " You may want to use an older version of this tool to interact with your cluster, or upgrade your brokers to version 2.3.0 or newer to avoid this error.") + case _ => throw e + } + case e: Throwable => throw e + } case BrokerLoggerConfigType => val validLoggers = getResourceConfig(adminClient, entityTypeHead, entityNameHead, includeSynonyms = true, describeAll = false).map(_.name) @@ -398,10 +199,10 @@ object ConfigCommand extends Logging { val configResource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, entityNameHead) val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false) - val alterLogLevelEntries = (configsToBeAdded.values.map(new AlterConfigOp(_, AlterConfigOp.OpType.SET)) - ++ configsToBeDeleted.map { k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE) } - ).asJavaCollection - adminClient.incrementalAlterConfigs(Map(configResource -> alterLogLevelEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS) + val addEntries = configsToBeAdded.values.map(k => new AlterConfigOp(k, AlterConfigOp.OpType.SET)) + val deleteEntries = configsToBeDeleted.map(k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE)) + val alterEntries = (deleteEntries ++ addEntries).asJavaCollection + adminClient.incrementalAlterConfigs(Map(configResource -> alterEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS) case ConfigType.USER | ConfigType.CLIENT => val hasQuotaConfigsToAdd = configsToBeAdded.keys.exists(QuotaConfig.isClientOrUserQuotaConfig) @@ -445,13 +246,8 @@ object ConfigCommand extends Logging { throw new IllegalArgumentException(s"Only connection quota configs can be added for '${ConfigType.IP}' using --bootstrap-server. Unexpected config names: ${unknownConfigs.mkString(",")}") alterQuotaConfigs(adminClient, entityTypes, entityNames, configsToBeAddedMap, configsToBeDeleted) - case ConfigType.CLIENT_METRICS => - alterResourceConfig(adminClient, entityTypeHead, entityNameHead, configsToBeDeleted, configsToBeAdded, ConfigResource.Type.CLIENT_METRICS) - - case ConfigType.GROUP => - alterResourceConfig(adminClient, entityTypeHead, entityNameHead, configsToBeDeleted, configsToBeAdded, ConfigResource.Type.GROUP) - - case _ => throw new IllegalArgumentException(s"Unsupported entity type: $entityTypeHead") + case _ => + throw new IllegalArgumentException(s"Unsupported entity type: $entityTypeHead") } if (entityNameHead.nonEmpty) @@ -575,9 +371,9 @@ object ConfigCommand extends Logging { val configResource = new ConfigResource(resourceType, entityNameHead) val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false) - val alterEntries = (configsToBeAdded.values.map(new AlterConfigOp(_, AlterConfigOp.OpType.SET)) - ++ configsToBeDeleted.map { k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE) } - ).asJavaCollection + val addEntries = configsToBeAdded.values.map(k => new AlterConfigOp(k, AlterConfigOp.OpType.SET)) + val deleteEntries = configsToBeDeleted.map(k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE)) + val alterEntries = (deleteEntries ++ addEntries).asJavaCollection adminClient.incrementalAlterConfigs(Map(configResource -> alterEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS) } @@ -693,119 +489,8 @@ object ConfigCommand extends Logging { adminClient.describeClientQuotas(ClientQuotaFilter.containsOnly(components.asJava)).entities.get(30, TimeUnit.SECONDS).asScala } - case class Entity(entityType: String, sanitizedName: 
Option[String]) { - val entityPath: String = sanitizedName match { - case Some(n) => entityType + "/" + n - case None => entityType - } - override def toString: String = { - val typeName = entityType match { - case ConfigType.USER => "user-principal" - case ConfigType.CLIENT => "client-id" - case ConfigType.TOPIC => "topic" - case ConfigType.GROUP => "group" - case t => t - } - sanitizedName match { - case Some(ZooKeeperInternals.DEFAULT_STRING) => "default " + typeName - case Some(n) => - val desanitized = if (entityType == ConfigType.USER || entityType == ConfigType.CLIENT) Sanitizer.desanitize(n) else n - s"$typeName '$desanitized'" - case None => entityType - } - } - } - - case class ConfigEntity(root: Entity, child: Option[Entity]) { - val fullSanitizedName: String = root.sanitizedName.getOrElse("") + child.map(s => "/" + s.entityPath).getOrElse("") - - def getAllEntities(zkClient: KafkaZkClient) : Seq[ConfigEntity] = { - // Describe option examples: - // Describe entity with specified name: - // --entity-type topics --entity-name topic1 (topic1) - // Describe all entities of a type (topics/brokers/users/clients): - // --entity-type topics (all topics) - // Describe quotas: - // --entity-type users --entity-name user1 --entity-type clients --entity-name client2 () - // --entity-type users --entity-name userA --entity-type clients (all clients of userA) - // --entity-type users --entity-type clients (all s)) - // Describe default quotas: - // --entity-type users --entity-default (Default user) - // --entity-type users --entity-default --entity-type clients --entity-default (Default ) - (root.sanitizedName, child) match { - case (None, _) => - val rootEntities = zkClient.getAllEntitiesWithConfig(root.entityType) - .map(name => ConfigEntity(Entity(root.entityType, Some(name)), child)) - child match { - case Some(s) => - rootEntities.flatMap(rootEntity => - ConfigEntity(rootEntity.root, Some(Entity(s.entityType, None))).getAllEntities(zkClient)) - case None => rootEntities - } - case (_, Some(childEntity)) => - childEntity.sanitizedName match { - case Some(_) => Seq(this) - case None => - zkClient.getAllEntitiesWithConfig(root.entityPath + "/" + childEntity.entityType) - .map(name => ConfigEntity(root, Some(Entity(childEntity.entityType, Some(name))))) - - } - case (_, None) => - Seq(this) - } - } - - override def toString: String = { - root.toString + child.map(s => ", " + s.toString).getOrElse("") - } - } - - def parseEntity(opts: ConfigCommandOptions): ConfigEntity = { - val entityTypes = opts.entityTypes - val entityNames = opts.entityNames - if (entityTypes.head == ConfigType.USER || entityTypes.head == ConfigType.CLIENT) - parseClientQuotaEntity(opts, entityTypes, entityNames) - else { - // Exactly one entity type and at-most one entity name expected for other entities - val name = entityNames.headOption match { - case Some("") => Some(ZooKeeperInternals.DEFAULT_STRING) - case v => v - } - ConfigEntity(Entity(entityTypes.head, name), None) - } - } - - private def parseClientQuotaEntity(opts: ConfigCommandOptions, types: List[String], names: List[String]): ConfigEntity = { - if (opts.options.has(opts.alterOpt) && names.size != types.size) - throw new IllegalArgumentException("--entity-name or --entity-default must be specified with each --entity-type for --alter") - - val reverse = types.size == 2 && types.head == ConfigType.CLIENT - val entityTypes = if (reverse) types.reverse else types - val sortedNames = (if (reverse && names.length == 2) names.reverse else names).iterator - - 
def sanitizeName(entityType: String, name: String) = { - if (name.isEmpty) - ZooKeeperInternals.DEFAULT_STRING - else { - entityType match { - case ConfigType.USER | ConfigType.CLIENT => Sanitizer.sanitize(name) - case _ => throw new IllegalArgumentException("Invalid entity type " + entityType) - } - } - } - - val entities = entityTypes.map(t => Entity(t, if (sortedNames.hasNext) Some(sanitizeName(t, sortedNames.next())) else None)) - ConfigEntity(entities.head, entities.lift(1)) - } class ConfigCommandOptions(args: Array[String]) extends CommandDefaultOptions(args) { - - val zkConnectOpt: OptionSpec[String] = parser.accepts("zookeeper", "DEPRECATED. The connection string for the zookeeper connection in the form host:port. " + - "Multiple URLS can be given to allow fail-over. Required when configuring SCRAM credentials for users or " + - "dynamic broker configs when the relevant broker(s) are down. Not allowed otherwise.") - .withRequiredArg - .describedAs("urls") - .ofType(classOf[String]) val bootstrapServerOpt: OptionSpec[String] = parser.accepts("bootstrap-server", "The Kafka servers to connect to.") .withRequiredArg .describedAs("server to connect to") @@ -831,7 +516,7 @@ object ConfigCommand extends Logging { .ofType(classOf[String]) private val entityDefault: OptionSpecBuilder = parser.accepts("entity-default", "Default entity name for clients/users/brokers/ips (applies to corresponding entity type)") - val nl: String = System.lineSeparator() + private val nl: String = System.lineSeparator() val addConfig: OptionSpec[String] = parser.accepts("add-config", "Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: 'k1=v1,k2=[v1,v2,v2],k3=v3'. The following is a list of valid configurations: " + "For entity-type '" + ConfigType.TOPIC + "': " + LogConfig.configNames.asScala.map("\t" + _).mkString(nl, nl, nl) + "For entity-type '" + ConfigType.BROKER + "': " + DynamicConfig.Broker.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + @@ -879,10 +564,6 @@ object ConfigCommand extends Logging { val clientMetrics: OptionSpec[String] = parser.accepts("client-metrics", "The client metrics config resource name.") .withRequiredArg .ofType(classOf[String]) - val zkTlsConfigFile: OptionSpec[String] = parser.accepts("zk-tls-config-file", - "Identifies the file where ZooKeeper client TLS connectivity properties are defined. 
Any properties other than " + - ZkConfigs.ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP.asScala.keys.toList.sorted.mkString(", ") + " are ignored.") - .withRequiredArg().describedAs("ZooKeeper TLS configuration").ofType(classOf[String]) options = parser.parse(args : _*) private val entityFlags = List((topic, ConfigType.TOPIC), @@ -930,10 +611,12 @@ object ConfigCommand extends Logging { if (entityTypeVals.size != entityTypeVals.distinct.size) throw new IllegalArgumentException(s"Duplicate entity type(s) specified: ${entityTypeVals.diff(entityTypeVals.distinct).mkString(",")}") - val (allowedEntityTypes, connectOptString) = if (options.has(bootstrapServerOpt) || options.has(bootstrapControllerOpt)) - (BrokerSupportedConfigTypes, "--bootstrap-server or --bootstrap-controller") - else - (ZkSupportedConfigTypes, "--zookeeper") + val (allowedEntityTypes, connectOptString) = + if (options.has(bootstrapServerOpt) || options.has(bootstrapControllerOpt)) { + (BrokerSupportedConfigTypes, "--bootstrap-server or --bootstrap-controller") + } else { + throw new IllegalArgumentException("Either --bootstrap-server or --bootstrap-controller must be specified.") + } entityTypeVals.foreach(entityTypeVal => if (!allowedEntityTypes.contains(entityTypeVal)) @@ -952,19 +635,9 @@ object ConfigCommand extends Logging { val hasEntityDefault = entityNames.exists(_.isEmpty) val numConnectOptions = (if (options.has(bootstrapServerOpt)) 1 else 0) + - (if (options.has(bootstrapControllerOpt)) 1 else 0) + - (if (options.has(zkConnectOpt)) 1 else 0) - if (numConnectOptions == 0) - throw new IllegalArgumentException("One of the required --bootstrap-server, --boostrap-controller, or --zookeeper arguments must be specified") - else if (numConnectOptions > 1) - throw new IllegalArgumentException("Only one of --bootstrap-server, --boostrap-controller, and --zookeeper can be specified") - - if (options.has(allOpt) && options.has(zkConnectOpt)) { - throw new IllegalArgumentException(s"--bootstrap-server must be specified for --all") - } - if (options.has(zkTlsConfigFile) && !options.has(zkConnectOpt)) { - throw new IllegalArgumentException("Only the --zookeeper option can be used with the --zk-tls-config-file option.") - } + (if (options.has(bootstrapControllerOpt)) 1 else 0) + if (numConnectOptions > 1) + throw new IllegalArgumentException("Only one of --bootstrap-server or --bootstrap-controller can be specified") if (hasEntityName && (entityTypeVals.contains(ConfigType.BROKER) || entityTypeVals.contains(BrokerLoggerConfigType))) { Seq(entityName, broker, brokerLogger).filter(options.has(_)).map(options.valueOf(_)).foreach { brokerId => try brokerId.toInt catch { diff --git a/core/src/main/scala/kafka/admin/RackAwareMode.scala b/core/src/main/scala/kafka/admin/RackAwareMode.scala deleted file mode 100644 index 45555b60bfcea..0000000000000 --- a/core/src/main/scala/kafka/admin/RackAwareMode.scala +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.admin - -/** - * Mode to control how rack aware replica assignment will be executed - */ -object RackAwareMode { - - /** - * Ignore all rack information in replica assignment. This is an optional mode used in command line. - */ - case object Disabled extends RackAwareMode - - /** - * Assume every broker has rack, or none of the brokers has rack. If only partial brokers have rack, fail fast - * in replica assignment. This is the default mode in command line tools (TopicCommand and ReassignPartitionsCommand). - */ - case object Enforced extends RackAwareMode - - /** - * Use rack information if every broker has a rack. Otherwise, fallback to Disabled mode. This is used in auto topic - * creation. - */ - case object Safe extends RackAwareMode -} - -sealed trait RackAwareMode diff --git a/core/src/main/scala/kafka/admin/ZkSecurityMigrator.scala b/core/src/main/scala/kafka/admin/ZkSecurityMigrator.scala deleted file mode 100644 index 77662c3b11464..0000000000000 --- a/core/src/main/scala/kafka/admin/ZkSecurityMigrator.scala +++ /dev/null @@ -1,307 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.admin - -import joptsimple.{OptionSet, OptionSpec, OptionSpecBuilder} -import kafka.server.KafkaConfig -import kafka.utils.{Logging, ToolsUtils} -import kafka.zk.{ControllerZNode, KafkaZkClient, ZkData, ZkSecurityMigratorUtils} -import org.apache.kafka.common.security.JaasUtils -import org.apache.kafka.common.utils.{Exit, Time, Utils} -import org.apache.kafka.server.config.ZkConfigs -import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils} -import org.apache.zookeeper.AsyncCallback.{ChildrenCallback, StatCallback} -import org.apache.zookeeper.KeeperException -import org.apache.zookeeper.KeeperException.Code -import org.apache.zookeeper.client.ZKClientConfig -import org.apache.zookeeper.data.Stat - -import scala.annotation.tailrec -import scala.collection.mutable -import scala.jdk.CollectionConverters._ -import scala.concurrent._ -import scala.concurrent.duration._ - -/** - * This tool is to be used when making access to ZooKeeper authenticated or - * the other way around, when removing authenticated access. 
The exact steps - * to migrate a Kafka cluster from unsecure to secure with respect to ZooKeeper - * access are the following: - * - * 1- Perform a rolling upgrade of Kafka servers, setting zookeeper.set.acl to false - * and passing a valid JAAS login file via the system property - * java.security.auth.login.config - * 2- Perform a second rolling upgrade keeping the system property for the login file - * and now setting zookeeper.set.acl to true - * 3- Finally run this tool. There is a script under ./bin. Run - * ./bin/zookeeper-security-migration.sh --help - * to see the configuration parameters. An example of running it is the following: - * ./bin/zookeeper-security-migration.sh --zookeeper.acl=secure --zookeeper.connect=localhost:2181 - * - * To convert a cluster from secure to unsecure, we need to perform the following - * steps: - * 1- Perform a rolling upgrade setting zookeeper.set.acl to false for each server - * 2- Run this migration tool, setting zookeeper.acl to unsecure - * 3- Perform another rolling upgrade to remove the system property setting the - * login file (java.security.auth.login.config). - */ - -object ZkSecurityMigrator extends Logging { - private val usageMessage = ("ZooKeeper Migration Tool Help. This tool updates the ACLs of " - + "znodes as part of the process of setting up ZooKeeper " - + "authentication.") - private val tlsConfigFileOption = "zk-tls-config-file" - - def run(args: Array[String]): Unit = { - val jaasFile = System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) - val opts = new ZkSecurityMigratorOptions(args) - - CommandLineUtils.maybePrintHelpOrVersion(opts, usageMessage) - - // Must have either SASL or TLS mutual authentication enabled to use this tool. - // Instantiate the client config we will use so that we take into account config provided via the CLI option - // and system properties passed via -D parameters if no CLI option is given. - val zkClientConfig = createZkClientConfigFromOption(opts.options, opts.zkTlsConfigFile).getOrElse(new ZKClientConfig()) - val tlsClientAuthEnabled = KafkaConfig.zkTlsClientAuthEnabled(zkClientConfig) - if (jaasFile == null && !tlsClientAuthEnabled) { - val errorMsg = s"No JAAS configuration file has been specified and no TLS client certificate has been specified. 
Please make sure that you set " + - s"the system property ${JaasUtils.JAVA_LOGIN_CONFIG_PARAM} or provide a ZooKeeper client TLS configuration via --$tlsConfigFileOption " + - s"identifying at least ${ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG}, ${ZkConfigs.ZK_CLIENT_CNXN_SOCKET_CONFIG}, and ${ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG}" - System.err.println("ERROR: %s".format(errorMsg)) - throw new IllegalArgumentException("Incorrect configuration") - } - - if (!tlsClientAuthEnabled && !JaasUtils.isZkSaslEnabled) { - val errorMsg = "Security isn't enabled, most likely the file isn't set properly: %s".format(jaasFile) - System.out.println("ERROR: %s".format(errorMsg)) - throw new IllegalArgumentException("Incorrect configuration") - } - - val zkAcl = opts.options.valueOf(opts.zkAclOpt) match { - case "secure" => - info("zookeeper.acl option is secure") - true - case "unsecure" => - info("zookeeper.acl option is unsecure") - false - case _ => - ToolsUtils.printUsageAndExit(opts.parser, usageMessage) - } - val zkUrl = opts.options.valueOf(opts.zkUrlOpt) - val zkSessionTimeout = opts.options.valueOf(opts.zkSessionTimeoutOpt).intValue - val zkConnectionTimeout = opts.options.valueOf(opts.zkConnectionTimeoutOpt).intValue - val zkClient = KafkaZkClient(zkUrl, zkAcl, zkSessionTimeout, zkConnectionTimeout, - Int.MaxValue, Time.SYSTEM, zkClientConfig = zkClientConfig, name = "ZkSecurityMigrator", enableEntityConfigControllerCheck = false) - val enablePathCheck = opts.options.has(opts.enablePathCheckOpt) - val migrator = new ZkSecurityMigrator(zkClient) - migrator.run(enablePathCheck) - } - - def main(args: Array[String]): Unit = { - try { - run(args) - } catch { - case e: Exception => - e.printStackTrace() - // must exit with non-zero status so system tests will know we failed - Exit.exit(1) - } - } - - def createZkClientConfigFromFile(filename: String) : ZKClientConfig = { - val zkTlsConfigFileProps = Utils.loadProps(filename, ZkConfigs.ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP.asScala.keys.toList.asJava) - val zkClientConfig = new ZKClientConfig() // Initializes based on any system properties that have been set - // Now override any set system properties with explicitly-provided values from the config file - // Emit INFO logs due to camel-case property names encouraging mistakes -- help people see mistakes they make - info(s"Found ${zkTlsConfigFileProps.size()} ZooKeeper client configuration properties in file $filename") - zkTlsConfigFileProps.asScala.foreachEntry { (key, value) => - info(s"Setting $key") - KafkaConfig.setZooKeeperClientProperty(zkClientConfig, key, value) - } - zkClientConfig - } - - private[admin] def createZkClientConfigFromOption(options: OptionSet, option: OptionSpec[String]) : Option[ZKClientConfig] = - if (!options.has(option)) - None - else - Some(createZkClientConfigFromFile(options.valueOf(option))) - - private class ZkSecurityMigratorOptions(args: Array[String]) extends CommandDefaultOptions(args) { - val zkAclOpt: OptionSpec[String] = parser.accepts("zookeeper.acl", "Indicates whether to make the Kafka znodes in ZooKeeper secure or unsecure." - + " The options are 'secure' and 'unsecure'").withRequiredArg().ofType(classOf[String]) - val zkUrlOpt: OptionSpec[String] = parser.accepts("zookeeper.connect", "Sets the ZooKeeper connect string (ensemble). This parameter " + - "takes a comma-separated list of host:port pairs.").withRequiredArg().defaultsTo("localhost:2181"). 
- ofType(classOf[String]) - val zkSessionTimeoutOpt: OptionSpec[Integer] = parser.accepts("zookeeper.session.timeout", "Sets the ZooKeeper session timeout."). - withRequiredArg().ofType(classOf[java.lang.Integer]).defaultsTo(30000) - val zkConnectionTimeoutOpt: OptionSpec[Integer] = parser.accepts("zookeeper.connection.timeout", "Sets the ZooKeeper connection timeout."). - withRequiredArg().ofType(classOf[java.lang.Integer]).defaultsTo(30000) - val enablePathCheckOpt: OptionSpecBuilder = parser.accepts("enable.path.check", "Checks if all the root paths exist in ZooKeeper " + - "before migration. If not, exit the command.") - val zkTlsConfigFile: OptionSpec[String] = parser.accepts(tlsConfigFileOption, - "Identifies the file where ZooKeeper client TLS connectivity properties are defined. Any properties other than " + - ZkConfigs.ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP.asScala.keys.mkString(", ") + " are ignored.") - .withRequiredArg().describedAs("ZooKeeper TLS configuration").ofType(classOf[String]) - options = parser.parse(args : _*) - } -} - -class ZkSecurityMigrator(zkClient: KafkaZkClient) extends Logging { - private val zkSecurityMigratorUtils = new ZkSecurityMigratorUtils(zkClient) - private val futures = new mutable.Queue[Future[String]] - - private def setAcl(path: String, setPromise: Promise[String]): Unit = { - info("Setting ACL for path %s".format(path)) - zkSecurityMigratorUtils.currentZooKeeper.setACL(path, zkClient.defaultAcls(path).asJava, -1, SetACLCallback, setPromise) - } - - private def retrieveChildren(path: String, childrenPromise: Promise[String]): Unit = { - info("Getting children to set ACLs for path %s".format(path)) - zkSecurityMigratorUtils.currentZooKeeper.getChildren(path, false, GetChildrenCallback, childrenPromise) - } - - private def setAclIndividually(path: String): Unit = { - val setPromise = Promise[String]() - futures.synchronized { - futures += setPromise.future - } - setAcl(path, setPromise) - } - - private def setAclsRecursively(path: String): Unit = { - val setPromise = Promise[String]() - val childrenPromise = Promise[String]() - futures.synchronized { - futures += setPromise.future - futures += childrenPromise.future - } - setAcl(path, setPromise) - retrieveChildren(path, childrenPromise) - } - - private object GetChildrenCallback extends ChildrenCallback { - def processResult(rc: Int, - path: String, - ctx: Object, - children: java.util.List[String]): Unit = { - val zkHandle = zkSecurityMigratorUtils.currentZooKeeper - val promise = ctx.asInstanceOf[Promise[String]] - Code.get(rc) match { - case Code.OK => - // Set ACL for each child - children.asScala.map { child => - path match { - case "/" => s"/$child" - case path => s"$path/$child" - } - }.foreach(setAclsRecursively) - promise success "done" - case Code.CONNECTIONLOSS => - zkHandle.getChildren(path, false, GetChildrenCallback, ctx) - case Code.NONODE => - warn("Node is gone, it could be have been legitimately deleted: %s".format(path)) - promise success "done" - case Code.SESSIONEXPIRED => - // Starting a new session isn't really a problem, but it'd complicate - // the logic of the tool, so we quit and let the user re-run it. 
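An aside on the mechanics being deleted here: the migrator never blocks inside a ZooKeeper callback. Each setACL/getChildren call is issued through ZooKeeper's asynchronous API, paired with a Scala Promise, and the tool later drains the queue of futures before closing the client. The standalone sketch below reproduces that walk-the-tree-with-promises pattern against the raw ZooKeeper client; the AclWalker name and the CREATOR_ALL_ACL placeholder are invented for the example (the real tool derives ACLs from zkClient.defaultAcls), the 6-second wait mirrors the Await timeout used above, and the sketch deliberately omits the CONNECTIONLOSS retry handling the real callbacks perform.

```scala
import java.util.concurrent.ConcurrentLinkedQueue

import org.apache.zookeeper.{AsyncCallback, KeeperException, ZooDefs, ZooKeeper}
import org.apache.zookeeper.data.Stat

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.jdk.CollectionConverters._

// Recursively set an ACL on a znode subtree using the async API, tracking completion with promises.
class AclWalker(zk: ZooKeeper) {
  private val pending = new ConcurrentLinkedQueue[Future[Unit]]()
  private val acl = ZooDefs.Ids.CREATOR_ALL_ACL // stand-in for the per-path default ACLs

  def secure(path: String): Unit = {
    val setDone = Promise[Unit]()
    val childrenDone = Promise[Unit]()
    pending.add(setDone.future)
    pending.add(childrenDone.future)
    // Asynchronously set the ACL on this znode.
    zk.setACL(path, acl, -1, new AsyncCallback.StatCallback {
      override def processResult(rc: Int, p: String, ctx: Object, stat: Stat): Unit =
        completeOrFail(rc, setDone)
    }, null)
    // Asynchronously list the children and recurse into each of them.
    zk.getChildren(path, false, new AsyncCallback.ChildrenCallback {
      override def processResult(rc: Int, p: String, ctx: Object, children: java.util.List[String]): Unit = {
        if (KeeperException.Code.get(rc) == KeeperException.Code.OK)
          children.asScala.foreach(child => secure(if (p == "/") s"/$child" else s"$p/$child"))
        completeOrFail(rc, childrenDone)
      }
    }, null)
  }

  private def completeOrFail(rc: Int, promise: Promise[Unit]): Unit =
    KeeperException.Code.get(rc) match {
      case KeeperException.Code.OK | KeeperException.Code.NONODE => promise.success(())
      case code => promise.failure(KeeperException.create(code))
    }

  // Block until every queued operation, including ones enqueued while waiting, has finished.
  def awaitAll(): Unit = {
    var next = pending.poll()
    while (next != null) {
      Await.result(next, 6.seconds)
      next = pending.poll()
    }
  }
}
```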
- System.out.println("ZooKeeper session expired while changing ACLs") - promise failure KeeperException.create(Code.get(rc)) - case _ => - System.out.println("Unexpected return code: %d".format(rc)) - promise failure KeeperException.create(Code.get(rc)) - } - } - } - - private object SetACLCallback extends StatCallback { - def processResult(rc: Int, - path: String, - ctx: Object, - stat: Stat): Unit = { - val zkHandle = zkSecurityMigratorUtils.currentZooKeeper - val promise = ctx.asInstanceOf[Promise[String]] - - Code.get(rc) match { - case Code.OK => - info("Successfully set ACLs for %s".format(path)) - promise success "done" - case Code.CONNECTIONLOSS => - zkHandle.setACL(path, zkClient.defaultAcls(path).asJava, -1, SetACLCallback, ctx) - case Code.NONODE => - warn("Znode is gone, it could be have been legitimately deleted: %s".format(path)) - promise success "done" - case Code.SESSIONEXPIRED => - // Starting a new session isn't really a problem, but it'd complicate - // the logic of the tool, so we quit and let the user re-run it. - System.out.println("ZooKeeper session expired while changing ACLs") - promise failure KeeperException.create(Code.get(rc)) - case _ => - System.out.println("Unexpected return code: %d".format(rc)) - promise failure KeeperException.create(Code.get(rc)) - } - } - } - - private def run(enablePathCheck: Boolean): Unit = { - try { - setAclIndividually("/") - checkPathExistenceAndMaybeExit(enablePathCheck) - for (path <- ZkData.SecureRootPaths) { - debug("Going to set ACL for %s".format(path)) - if (path == ControllerZNode.path && !zkClient.pathExists(path)) { - debug("Ignoring to set ACL for %s, because it doesn't exist".format(path)) - } else { - zkClient.makeSurePersistentPathExists(path) - setAclsRecursively(path) - } - } - - @tailrec - def recurse(): Unit = { - val future = futures.synchronized { - futures.headOption - } - future match { - case Some(a) => - Await.result(a, 6000 millis) - futures.synchronized { futures.dequeue() } - recurse() - case None => - } - } - recurse() - - } finally { - zkClient.close() - } - } - - private def checkPathExistenceAndMaybeExit(enablePathCheck: Boolean): Unit = { - val nonExistingSecureRootPaths = ZkData.SecureRootPaths.filterNot(zkClient.pathExists) - if (nonExistingSecureRootPaths.nonEmpty) { - println(s"Warning: The following secure root paths do not exist in ZooKeeper: ${nonExistingSecureRootPaths.mkString(",")}") - println("That might be due to an incorrect chroot is specified when executing the command.") - if (enablePathCheck) { - println("Exit the command.") - // must exit with non-zero status so system tests will know we failed - Exit.exit(1) - } - } - } -} diff --git a/core/src/main/scala/kafka/cluster/Broker.scala b/core/src/main/scala/kafka/cluster/Broker.scala index 968c1579c229f..794b641a847ce 100755 --- a/core/src/main/scala/kafka/cluster/Broker.scala +++ b/core/src/main/scala/kafka/cluster/Broker.scala @@ -17,28 +17,17 @@ package kafka.cluster -import java.util import kafka.common.BrokerEndPointNotAvailableException -import kafka.server.KafkaConfig import org.apache.kafka.common.feature.{Features, SupportedVersionRange} import org.apache.kafka.common.feature.Features._ -import org.apache.kafka.common.{ClusterResource, Endpoint, Node} +import org.apache.kafka.common.Node import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.metadata.{BrokerRegistration, VersionRange} -import 
org.apache.kafka.server.authorizer.AuthorizerServerInfo import org.apache.kafka.server.network.BrokerEndPoint import scala.collection.Seq -import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOptional object Broker { - private[kafka] case class ServerInfo(clusterResource: ClusterResource, - brokerId: Int, - endpoints: util.List[Endpoint], - interBrokerEndpoint: Endpoint, - earlyStartListeners: util.Set[String]) extends AuthorizerServerInfo def apply(id: Int, endPoints: Seq[EndPoint], rack: Option[String]): Broker = { new Broker(id, endPoints, rack, emptySupportedFeatures) @@ -47,22 +36,6 @@ object Broker { def apply(id: Int, endPoint: EndPoint, rack: Option[String]): Broker = { new Broker(id, Seq(endPoint), rack, emptySupportedFeatures) } - - private def supportedFeatures(features: java.util.Map[String, VersionRange]): java.util - .Map[String, SupportedVersionRange] = { - features.asScala.map { case (name, range) => - name -> new SupportedVersionRange(range.min(), range.max()) - }.asJava - } - - def fromBrokerRegistration(registration: BrokerRegistration): Broker = { - new Broker( - registration.id(), - registration.listeners().values().asScala.map(EndPoint.fromJava).toSeq, - registration.rack().toScala, - Features.supportedFeatures(supportedFeatures(registration.supportedFeatures())) - ) - } } /** @@ -111,12 +84,4 @@ case class Broker(id: Int, endPoints: Seq[EndPoint], rack: Option[String], featu endPointsMap.getOrElse(listenerName, throw new BrokerEndPointNotAvailableException(s"End point with listener name ${listenerName.value} not found for broker $id")) } - - def toServerInfo(clusterId: String, config: KafkaConfig): AuthorizerServerInfo = { - val clusterResource: ClusterResource = new ClusterResource(clusterId) - val interBrokerEndpoint: Endpoint = endPoint(config.interBrokerListenerName).toJava - val brokerEndpoints: util.List[Endpoint] = endPoints.toList.map(_.toJava).asJava - Broker.ServerInfo(clusterResource, id, brokerEndpoints, interBrokerEndpoint, - config.earlyStartListeners.map(_.value()).asJava) - } } diff --git a/core/src/main/scala/kafka/cluster/Partition.scala b/core/src/main/scala/kafka/cluster/Partition.scala index e432ead8edb27..4315a981bac2f 100755 --- a/core/src/main/scala/kafka/cluster/Partition.scala +++ b/core/src/main/scala/kafka/cluster/Partition.scala @@ -19,16 +19,14 @@ package kafka.cluster import java.util.concurrent.locks.ReentrantReadWriteLock import java.util.Optional import java.util.concurrent.{CompletableFuture, CopyOnWriteArrayList} -import kafka.common.UnexpectedAppendOffsetException -import kafka.controller.{KafkaController, StateChangeLogger} +import kafka.controller.StateChangeLogger import kafka.log._ import kafka.log.remote.RemoteLogManager import kafka.server._ -import kafka.server.metadata.{KRaftMetadataCache, ZkMetadataCache} +import kafka.server.metadata.KRaftMetadataCache import kafka.server.share.DelayedShareFetch import kafka.utils.CoreUtils.{inReadLock, inWriteLock} import kafka.utils._ -import kafka.zookeeper.ZooKeeperClientException import org.apache.kafka.common.{DirectoryId, IsolationLevel, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.errors._ import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState @@ -37,22 +35,23 @@ import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrParti import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.protocol.Errors import 
org.apache.kafka.common.record.FileRecords.TimestampAndOffset -import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} +import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch} import org.apache.kafka.common.requests._ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.utils.Time import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.server.common.{MetadataVersion, RequestLocal} -import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogReadInfo, LogStartOffsetIncrementReason, VerificationGuard} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, VerificationGuard} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey -import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} +import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, UnexpectedAppendOffsetException} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints import org.slf4j.event.Level -import scala.collection.{Map, Seq} +import scala.collection.Seq import scala.jdk.CollectionConverters._ +import scala.jdk.javaapi.OptionConverters /** * Listener receives notification from an Online Partition. @@ -82,6 +81,11 @@ trait PartitionListener { * that the partition was deleted but only that this broker does not host a replica of it any more. */ def onDeleted(partition: TopicPartition): Unit = {} + + /** + * Called when the Partition on this broker is transitioned to follower. + */ + def onBecomingFollower(partition: TopicPartition): Unit = {} } trait AlterPartitionListener { @@ -149,7 +153,6 @@ object Partition { new Partition(topicPartition, _topicId = topicId, replicaLagTimeMaxMs = replicaManager.config.replicaLagTimeMaxMs, - interBrokerProtocolVersion = replicaManager.metadataCache.metadataVersion(), localBrokerId = replicaManager.config.brokerId, localBrokerEpochSupplier = replicaManager.brokerEpochSupplier, time = time, @@ -302,7 +305,6 @@ case class CommittedPartitionState( */ class Partition(val topicPartition: TopicPartition, val replicaLagTimeMaxMs: Long, - interBrokerProtocolVersion: MetadataVersion, localBrokerId: Int, localBrokerEpochSupplier: () => Long, time: Time, @@ -355,12 +357,6 @@ class Partition(val topicPartition: TopicPartition, } } - /* Epoch of the controller that last changed the leader. This needs to be initialized correctly upon broker startup. - * One way of doing that is through the controller's start replica state change command. When a new broker starts up - * the controller sends it a start replica command containing the leader for each partition that the broker hosts. - * In addition to the leader, the controller can also send the epoch of the controller that elected the leader for - * each partition. 
*/ - private var controllerEpoch: Int = KafkaController.InitialControllerEpoch this.logIdent = s"[Partition $topicPartition broker=$localBrokerId] " private val tags = Map("topic" -> topic, "partition" -> partitionId.toString).asJava @@ -701,6 +697,15 @@ class Partition(val topicPartition: TopicPartition, } } + /** + * Invoke the partition listeners when the partition has been transitioned to follower. + */ + def invokeOnBecomingFollowerListeners(): Unit = { + listeners.forEach { listener => + listener.onBecomingFollower(topicPartition) + } + } + private def clear(): Unit = { remoteReplicasMap.clear() assignmentState = SimpleAssignmentState(Seq.empty) @@ -726,7 +731,7 @@ class Partition(val topicPartition: TopicPartition, topicId: Option[Uuid], targetDirectoryId: Option[Uuid] = None): Boolean = { val (leaderHWIncremented, isNewLeader) = inWriteLock(leaderIsrUpdateLock) { - // Partition state changes are expected to have an partition epoch larger or equal + // Partition state changes are expected to have a partition epoch larger or equal // to the current partition epoch. The latter is allowed because the partition epoch // is also updated by the AlterPartition response so the new epoch might be known // before a LeaderAndIsr request is received or before an update is received via @@ -737,10 +742,6 @@ class Partition(val topicPartition: TopicPartition, return false } - // Record the epoch of the controller that made the leadership decision. This is useful while updating the isr - // to maintain the decision maker controller's epoch in the zookeeper path. - controllerEpoch = partitionState.controllerEpoch - val currentTimeMs = time.milliseconds val isNewLeader = !isLeader val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch @@ -765,14 +766,7 @@ class Partition(val topicPartition: TopicPartition, LeaderRecoveryState.RECOVERED ) - try { - createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetDirectoryId) - } catch { - case e: ZooKeeperClientException => - stateChangeLogger.error(s"A ZooKeeper client exception has occurred and makeLeader will be skipping the " + - s"state change for the partition $topicPartition with leader epoch: $leaderEpoch.", e) - return false - } + createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetDirectoryId) val leaderLog = localLogOrException @@ -792,7 +786,7 @@ class Partition(val topicPartition: TopicPartition, // to ensure that these followers can truncate to the right offset, we must cache the new // leader epoch and the start offset since it should be larger than any epoch that a follower // would try to query. - leaderLog.maybeAssignEpochStartOffset(partitionState.leaderEpoch, leaderEpochStartOffset) + leaderLog.assignEpochStartOffset(partitionState.leaderEpoch, leaderEpochStartOffset) // Initialize lastCaughtUpTime of replicas as well as their lastFetchTimeMs and // lastFetchLeaderLogEndOffset. @@ -849,10 +843,6 @@ class Partition(val topicPartition: TopicPartition, return false } - // Record the epoch of the controller that made the leadership decision. This is useful while updating the isr - // to maintain the decision maker controller's epoch in the zookeeper path - controllerEpoch = partitionState.controllerEpoch - val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch // The leader should be updated before updateAssignmentAndIsr where we clear the ISR. Or it is possible to meet // the under min isr condition during the makeFollower process and emits the wrong metric. 
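The new onBecomingFollower hook added in this file gives components outside Partition a way to observe leader-to-follower transitions; the invokeOnBecomingFollowerListeners() method added above fans the event out to every registered PartitionListener. As a hedged illustration of how a caller might use the hook, here is a hypothetical listener that merely records which partitions this broker currently follows; the class name and its bookkeeping are invented for the example, not part of this change.

```scala
import java.util.concurrent.ConcurrentHashMap

import org.apache.kafka.common.TopicPartition

// Hypothetical PartitionListener: remembers partitions that transitioned to follower on this broker.
class FollowerTransitionTracker extends PartitionListener {
  private val nowFollowing = ConcurrentHashMap.newKeySet[TopicPartition]()

  override def onBecomingFollower(partition: TopicPartition): Unit = {
    // React to the leader -> follower transition, e.g. invalidate per-leader state.
    nowFollowing.add(partition)
  }

  override def onDeleted(partition: TopicPartition): Unit = {
    // The partition is no longer hosted here, so stop tracking it.
    nowFollowing.remove(partition)
  }

  def isFollowing(partition: TopicPartition): Boolean = nowFollowing.contains(partition)
}
```

A listener like this would be registered through the partition's existing listener-registration API (not shown in this hunk), alongside the callbacks that were already defined.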
@@ -870,14 +860,7 @@ class Partition(val topicPartition: TopicPartition, LeaderRecoveryState.of(partitionState.leaderRecoveryState) ) - try { - createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetLogDirectoryId) - } catch { - case e: ZooKeeperClientException => - stateChangeLogger.error(s"A ZooKeeper client exception has occurred. makeFollower will be skipping the " + - s"state change for the partition $topicPartition with leader epoch: $leaderEpoch.", e) - return false - } + createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetLogDirectoryId) val followerLog = localLogOrException if (isNewLeaderEpoch) { @@ -1088,11 +1071,6 @@ class Partition(val topicPartition: TopicPartition, !kRaftMetadataCache.isBrokerShuttingDown(followerReplicaId) && isBrokerEpochIsrEligible(storedBrokerEpoch, cachedBrokerEpoch) - // In ZK mode, we just ensure the broker is alive. Although we do not check for shutting down brokers here, - // the controller will block them from being added to ISR. - case zkMetadataCache: ZkMetadataCache => - zkMetadataCache.hasAliveBroker(followerReplicaId) - case _ => true } } @@ -1384,12 +1362,13 @@ class Partition(val topicPartition: TopicPartition, // Avoid writing to leader if there are not enough insync replicas to make it safe if (inSyncSize < minIsr && requiredAcks == -1) { - throw new NotEnoughReplicasException(s"The size of the current ISR ${partitionState.isr} " + - s"is insufficient to satisfy the min.isr requirement of $minIsr for partition $topicPartition") + throw new NotEnoughReplicasException(s"The size of the current ISR : $inSyncSize " + + s"is insufficient to satisfy the min.isr requirement of $minIsr for partition $topicPartition, " + + s"live replica(s) broker.id are : $inSyncReplicaIds") } val info = leaderLog.appendAsLeader(records, leaderEpoch = this.leaderEpoch, origin, - interBrokerProtocolVersion, requestLocal, verificationGuard) + requestLocal, verificationGuard) // we may need to increment high watermark since ISR could be down to 1 (info, maybeIncrementLeaderHW(leaderLog)) @@ -1614,7 +1593,7 @@ class Partition(val topicPartition: TopicPartition, def getOffsetByTimestamp: OffsetResultHolder = { logManager.getLog(topicPartition) .map(log => log.fetchOffsetByTimestamp(timestamp, remoteLogManager)) - .getOrElse(OffsetResultHolder(timestampAndOffsetOpt = None)) + .getOrElse(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]())) } // If we're in the lagging HW state after a leader election, throw OffsetNotAvailable for "latest" offset @@ -1622,13 +1601,13 @@ class Partition(val topicPartition: TopicPartition, timestamp match { case ListOffsetsRequest.LATEST_TIMESTAMP => maybeOffsetsError.map(e => throw e) - .getOrElse(OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, lastFetchableOffset, Optional.of(leaderEpoch))))) + .getOrElse(new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, lastFetchableOffset, Optional.of(leaderEpoch)))) case ListOffsetsRequest.EARLIEST_TIMESTAMP | ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP => getOffsetByTimestamp case _ => val offsetResultHolder = getOffsetByTimestamp - offsetResultHolder.maybeOffsetsError = maybeOffsetsError - offsetResultHolder.lastFetchableOffset = Some(lastFetchableOffset) + offsetResultHolder.maybeOffsetsError(OptionConverters.toJava(maybeOffsetsError)) + offsetResultHolder.lastFetchableOffset(Optional.of(lastFetchableOffset)) offsetResultHolder } } @@ -1811,7 +1790,7 @@ class 
Partition(val topicPartition: TopicPartition, ): PendingShrinkIsr = { // When shrinking the ISR, we cannot assume that the update will succeed as this could // erroneously advance the HW if the `AlterPartition` were to fail. Hence the "maximal ISR" - // for `PendingShrinkIsr` is the the current ISR. + // for `PendingShrinkIsr` is the current ISR. val isrToSend = partitionState.isr -- outOfSyncReplicaIds val isrWithBrokerEpoch = addBrokerEpochToIsr(isrToSend.toList).asJava val newLeaderAndIsr = new LeaderAndIsr( @@ -1862,8 +1841,7 @@ class Partition(val topicPartition: TopicPartition, debug(s"Submitting ISR state change $proposedIsrState") val future = alterIsrManager.submit( new TopicIdPartition(topicId.getOrElse(Uuid.ZERO_UUID), topicPartition), - proposedIsrState.sentLeaderAndIsr, - controllerEpoch + proposedIsrState.sentLeaderAndIsr ) future.whenComplete { (leaderAndIsr, e) => var hwIncremented = false @@ -1946,7 +1924,7 @@ class Partition(val topicPartition: TopicPartition, false case Errors.NEW_LEADER_ELECTED => // The operation completed successfully but this replica got removed from the replica set by the controller - // while completing a ongoing reassignment. This replica is no longer the leader but it does not know it + // while completing an ongoing reassignment. This replica is no longer the leader but it does not know it // yet. It should remain in the current pending state until the metadata overrides it. // This is only raised in KRaft mode. debug(s"The alter partition request successfully updated the partition state to $proposedIsrState but " + diff --git a/core/src/main/scala/kafka/common/GenerateBrokerIdException.scala b/core/src/main/scala/kafka/common/GenerateBrokerIdException.scala deleted file mode 100644 index 13784fe50554e..0000000000000 --- a/core/src/main/scala/kafka/common/GenerateBrokerIdException.scala +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.common - -/** - * Thrown when there is a failure to generate a zookeeper sequenceId to use as brokerId - */ -class GenerateBrokerIdException(message: String, cause: Throwable) extends RuntimeException(message, cause) { - def this(message: String) = this(message, null) - def this(cause: Throwable) = this(null, cause) - def this() = this(null, null) -} diff --git a/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala b/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala deleted file mode 100644 index dfded33f009e4..0000000000000 --- a/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.common - -/** - * Thrown when a log cleaning task is requested to be aborted. - */ -class LogCleaningAbortedException extends RuntimeException() { -} diff --git a/core/src/main/scala/kafka/common/OffsetsOutOfOrderException.scala b/core/src/main/scala/kafka/common/OffsetsOutOfOrderException.scala deleted file mode 100644 index f8daaa4a181b2..0000000000000 --- a/core/src/main/scala/kafka/common/OffsetsOutOfOrderException.scala +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.common - -/** - * Indicates the follower received records with non-monotonically increasing offsets - */ -class OffsetsOutOfOrderException(message: String) extends RuntimeException(message) { -} - diff --git a/core/src/main/scala/kafka/common/StateChangeFailedException.scala b/core/src/main/scala/kafka/common/StateChangeFailedException.scala deleted file mode 100644 index fd56796041bc5..0000000000000 --- a/core/src/main/scala/kafka/common/StateChangeFailedException.scala +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.common - -class StateChangeFailedException(message: String, cause: Throwable) extends RuntimeException(message, cause) { - def this(message: String) = this(message, null) - def this() = this(null, null) -} \ No newline at end of file diff --git a/core/src/main/scala/kafka/common/TopicAlreadyMarkedForDeletionException.scala b/core/src/main/scala/kafka/common/TopicAlreadyMarkedForDeletionException.scala deleted file mode 100644 index c83cea96b5de0..0000000000000 --- a/core/src/main/scala/kafka/common/TopicAlreadyMarkedForDeletionException.scala +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.common - -class TopicAlreadyMarkedForDeletionException(message: String) extends RuntimeException(message) { -} \ No newline at end of file diff --git a/core/src/main/scala/kafka/common/UnexpectedAppendOffsetException.scala b/core/src/main/scala/kafka/common/UnexpectedAppendOffsetException.scala deleted file mode 100644 index e719a93006d31..0000000000000 --- a/core/src/main/scala/kafka/common/UnexpectedAppendOffsetException.scala +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.common - -/** - * Indicates the follower or the future replica received records from the leader (or current - * replica) with first offset less than expected next offset. 
- * @param firstOffset The first offset of the records to append - * @param lastOffset The last offset of the records to append - */ -class UnexpectedAppendOffsetException(val message: String, - val firstOffset: Long, - val lastOffset: Long) extends RuntimeException(message) { -} diff --git a/core/src/main/scala/kafka/common/ZkNodeChangeNotificationListener.scala b/core/src/main/scala/kafka/common/ZkNodeChangeNotificationListener.scala deleted file mode 100644 index 1f7da18d9ac87..0000000000000 --- a/core/src/main/scala/kafka/common/ZkNodeChangeNotificationListener.scala +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.common - -import java.nio.charset.StandardCharsets.UTF_8 -import java.util.concurrent.LinkedBlockingQueue -import java.util.concurrent.atomic.AtomicBoolean - -import kafka.utils.Logging -import kafka.zk.{KafkaZkClient, StateChangeHandlers} -import kafka.zookeeper.{StateChangeHandler, ZNodeChildChangeHandler} -import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.util.ShutdownableThread - -import scala.collection.Seq -import scala.util.{Failure, Try} - -/** - * Handle the notificationMessage. - */ -trait NotificationHandler { - def processNotification(notificationMessage: Array[Byte]): Unit -} - -/** - * A listener that subscribes to seqNodeRoot for any child changes where all children are assumed to be sequence node - * with seqNodePrefix. When a child is added under seqNodeRoot this class gets notified, it looks at lastExecutedChange - * number to avoid duplicate processing and if it finds an unprocessed child, it reads its data and calls supplied - * notificationHandler's processNotification() method with the child's data as argument. As part of processing these changes it also - * purges any children with currentTime - createTime > changeExpirationMs. 
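The listener class documented here is deliberately generic: anything that wants to react to sequential change znodes plugs in a NotificationHandler and receives the raw payload bytes of each unprocessed child. A minimal, hypothetical implementation of that contract is sketched below; the payload interpretation, znode path, and prefix in the wiring comment are assumptions for illustration, not values taken from this patch.

```scala
import java.nio.charset.StandardCharsets.UTF_8

// Hypothetical handler: treats each notification payload as a UTF-8 resource name
// and asks an external component to refresh its cached view of that resource.
class ResourceRefreshHandler(refresh: String => Unit) extends NotificationHandler {
  override def processNotification(notificationMessage: Array[Byte]): Unit =
    refresh(new String(notificationMessage, UTF_8))
}

// Illustrative wiring against the listener defined in this file:
//   val listener = new ZkNodeChangeNotificationListener(zkClient, "/my-changes", "change_",
//     new ResourceRefreshHandler(name => println(s"refreshing $name")))
//   listener.init()
```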
- * - * @param zkClient - * @param seqNodeRoot - * @param seqNodePrefix - * @param notificationHandler - * @param changeExpirationMs - * @param time - */ -class ZkNodeChangeNotificationListener(private val zkClient: KafkaZkClient, - private val seqNodeRoot: String, - private val seqNodePrefix: String, - private val notificationHandler: NotificationHandler, - private val changeExpirationMs: Long = 15 * 60 * 1000, - private val time: Time = Time.SYSTEM) extends Logging { - private var lastExecutedChange = -1L - private val queue = new LinkedBlockingQueue[ChangeNotification] - private val thread = new ChangeEventProcessThread(s"$seqNodeRoot-event-process-thread") - private val isClosed = new AtomicBoolean(false) - - def init(): Unit = { - zkClient.registerStateChangeHandler(ZkStateChangeHandler) - zkClient.registerZNodeChildChangeHandler(ChangeNotificationHandler) - addChangeNotification() - thread.start() - } - - def close(): Unit = { - isClosed.set(true) - zkClient.unregisterStateChangeHandler(ZkStateChangeHandler.name) - zkClient.unregisterZNodeChildChangeHandler(ChangeNotificationHandler.path) - queue.clear() - thread.shutdown() - } - - /** - * Process notifications - */ - private def processNotifications(): Unit = { - try { - val notifications = zkClient.getChildren(seqNodeRoot).sorted - if (notifications.nonEmpty) { - info(s"Processing notification(s) to $seqNodeRoot") - val now = time.milliseconds - for (notification <- notifications) { - val changeId = changeNumber(notification) - if (changeId > lastExecutedChange) { - processNotification(notification) - lastExecutedChange = changeId - } - } - purgeObsoleteNotifications(now, notifications) - } - } catch { - case e: InterruptedException => if (!isClosed.get) error(s"Error while processing notification change for path = $seqNodeRoot", e) - case e: Exception => error(s"Error while processing notification change for path = $seqNodeRoot", e) - } - } - - private def processNotification(notification: String): Unit = { - val changeZnode = seqNodeRoot + "/" + notification - val (data, _) = zkClient.getDataAndStat(changeZnode) - data match { - case Some(d) => Try(notificationHandler.processNotification(d)) match { - case Failure(e) => error(s"error processing change notification ${new String(d, UTF_8)} from $changeZnode", e) - case _ => - } - case None => warn(s"read null data from $changeZnode") - } - } - - private def addChangeNotification(): Unit = { - if (!isClosed.get && queue.peek() == null) - queue.put(new ChangeNotification) - } - - private class ChangeNotification { - def process(): Unit = processNotifications() - } - - /** - * Purges expired notifications. 
- * - * @param now - * @param notifications - */ - private def purgeObsoleteNotifications(now: Long, notifications: Seq[String]): Unit = { - for (notification <- notifications.sorted) { - val notificationNode = seqNodeRoot + "/" + notification - val (data, stat) = zkClient.getDataAndStat(notificationNode) - if (data.isDefined) { - if (now - stat.getCtime > changeExpirationMs) { - debug(s"Purging change notification $notificationNode") - zkClient.deletePath(notificationNode) - } - } - } - } - - /* get the change number from a change notification znode */ - private def changeNumber(name: String): Long = name.substring(seqNodePrefix.length).toLong - - private class ChangeEventProcessThread(name: String) extends ShutdownableThread(name) { - override def doWork(): Unit = queue.take().process() - } - - private object ChangeNotificationHandler extends ZNodeChildChangeHandler { - override val path: String = seqNodeRoot - override def handleChildChange(): Unit = addChangeNotification() - } - - object ZkStateChangeHandler extends StateChangeHandler { - override val name: String = StateChangeHandlers.zkNodeChangeListenerHandler(seqNodeRoot) - override def afterInitializingSession(): Unit = addChangeNotification() - } -} - diff --git a/core/src/main/scala/kafka/controller/ControllerChannelContext.scala b/core/src/main/scala/kafka/controller/ControllerChannelContext.scala deleted file mode 100644 index 0ab43256290a4..0000000000000 --- a/core/src/main/scala/kafka/controller/ControllerChannelContext.scala +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.controller - -import kafka.cluster.Broker -import org.apache.kafka.common.{TopicPartition, Uuid} - -trait ControllerChannelContext { - def isTopicDeletionInProgress(topicName: String): Boolean - - def topicIds: collection.Map[String, Uuid] - - def liveBrokerIdAndEpochs: collection.Map[Int, Long] - - def liveOrShuttingDownBrokers: collection.Set[Broker] - - def isTopicQueuedUpForDeletion(topic: String): Boolean - - def isReplicaOnline(brokerId: Int, partition: TopicPartition): Boolean - - def partitionReplicaAssignment(partition: TopicPartition): collection.Seq[Int] - - def leaderEpoch(topicPartition: TopicPartition): Int - - def liveOrShuttingDownBrokerIds: collection.Set[Int] - - def partitionLeadershipInfo(topicPartition: TopicPartition): Option[LeaderIsrAndControllerEpoch] -} diff --git a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala b/core/src/main/scala/kafka/controller/ControllerChannelManager.scala deleted file mode 100755 index cea7368378dda..0000000000000 --- a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala +++ /dev/null @@ -1,774 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
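ControllerChannelContext, deleted just above, was a read-only view over the ZK controller's metadata that the request-batching code queried while building broker RPCs. Purely to illustrate the shape of that contract, here is a hypothetical hard-coded stub of the kind a unit test might have used; every value in it is invented for the example.

```scala
package kafka.controller

import kafka.cluster.Broker
import org.apache.kafka.common.{TopicPartition, Uuid}

// Hypothetical test stub: a fixed broker set, no topics pending deletion, no leadership info.
class StaticChannelContext(brokers: Set[Broker]) extends ControllerChannelContext {
  override def isTopicDeletionInProgress(topicName: String): Boolean = false
  override def topicIds: collection.Map[String, Uuid] = Map.empty
  override def liveBrokerIdAndEpochs: collection.Map[Int, Long] = brokers.map(b => b.id -> 0L).toMap
  override def liveOrShuttingDownBrokers: collection.Set[Broker] = brokers
  override def isTopicQueuedUpForDeletion(topic: String): Boolean = false
  override def isReplicaOnline(brokerId: Int, partition: TopicPartition): Boolean = brokers.exists(_.id == brokerId)
  override def partitionReplicaAssignment(partition: TopicPartition): collection.Seq[Int] = Seq.empty
  override def leaderEpoch(topicPartition: TopicPartition): Int = 0
  override def liveOrShuttingDownBrokerIds: collection.Set[Int] = brokers.map(_.id)
  override def partitionLeadershipInfo(topicPartition: TopicPartition): Option[LeaderIsrAndControllerEpoch] = None
}
```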
-*/ -package kafka.controller - -import com.yammer.metrics.core.{Gauge, Timer} -import kafka.cluster.Broker -import kafka.server.KafkaConfig -import kafka.utils._ -import org.apache.kafka.clients._ -import org.apache.kafka.common._ -import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState -import org.apache.kafka.common.message.StopReplicaRequestData.{StopReplicaPartitionState, StopReplicaTopicState} -import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataBroker, UpdateMetadataEndpoint, UpdateMetadataPartitionState} -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network._ -import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests._ -import org.apache.kafka.common.security.JaasContext -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.utils.{LogContext, Time} -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.common.MetadataVersion._ -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.util.ShutdownableThread - -import java.net.SocketTimeoutException -import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue, TimeUnit} -import scala.collection.{Seq, Set, mutable} -import scala.jdk.CollectionConverters._ - -object ControllerChannelManager { - private val QueueSizeMetricName = "QueueSize" - private val RequestRateAndQueueTimeMetricName = "RequestRateAndQueueTimeMs" -} - -class ControllerChannelManager(controllerEpoch: () => Int, - config: KafkaConfig, - time: Time, - metrics: Metrics, - stateChangeLogger: StateChangeLogger, - threadNamePrefix: Option[String] = None) extends Logging { - import ControllerChannelManager._ - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - - protected val brokerStateInfo = new mutable.HashMap[Int, ControllerBrokerStateInfo] - private val brokerLock = new Object - this.logIdent = "[Channel manager on controller " + config.brokerId + "]: " - - metricsGroup.newGauge("TotalQueueSize", - () => brokerLock synchronized { - brokerStateInfo.values.iterator.map(_.messageQueue.size).sum - } - ) - - def startup(initialBrokers: Set[Broker]):Unit = { - initialBrokers.foreach(addNewBroker) - - brokerLock synchronized { - brokerStateInfo.foreach(brokerState => startRequestSendThread(brokerState._1)) - } - } - - def shutdown():Unit = { - brokerLock synchronized { - brokerStateInfo.values.toList.foreach(removeExistingBroker) - } - } - - def sendRequest(brokerId: Int, request: AbstractControlRequest.Builder[_ <: AbstractControlRequest], - callback: AbstractResponse => Unit = null): Unit = { - brokerLock synchronized { - val stateInfoOpt = brokerStateInfo.get(brokerId) - stateInfoOpt match { - case Some(stateInfo) => - stateInfo.messageQueue.put(QueueItem(request.apiKey, request, callback, time.milliseconds())) - case None => - warn(s"Not sending request ${request.apiKey.name} with controllerId=${request.controllerId()}, " + - s"controllerEpoch=${request.controllerEpoch()}, brokerEpoch=${request.brokerEpoch()} " + - s"to broker $brokerId, since it is offline.") - } - } - } - - def addBroker(broker: Broker): Unit = { - // be careful here. 
Maybe the startup() API has already started the request send thread - brokerLock synchronized { - if (!brokerStateInfo.contains(broker.id)) { - addNewBroker(broker) - startRequestSendThread(broker.id) - } - } - } - - def removeBroker(brokerId: Int): Unit = { - brokerLock synchronized { - removeExistingBroker(brokerStateInfo(brokerId)) - } - } - - private def addNewBroker(broker: Broker): Unit = { - val messageQueue = new LinkedBlockingQueue[QueueItem] - debug(s"Controller ${config.brokerId} trying to connect to broker ${broker.id}") - val controllerToBrokerListenerName = config.controlPlaneListenerName.getOrElse(config.interBrokerListenerName) - val controllerToBrokerSecurityProtocol = config.controlPlaneSecurityProtocol.getOrElse(config.interBrokerSecurityProtocol) - val brokerNode = broker.node(controllerToBrokerListenerName) - val logContext = new LogContext(s"[Controller id=${config.brokerId}, targetBrokerId=${brokerNode.idString}] ") - val (networkClient, reconfigurableChannelBuilder) = { - val channelBuilder = ChannelBuilders.clientChannelBuilder( - controllerToBrokerSecurityProtocol, - JaasContext.Type.SERVER, - config, - controllerToBrokerListenerName, - config.saslMechanismInterBrokerProtocol, - time, - config.saslInterBrokerHandshakeRequestEnable, - logContext - ) - val reconfigurableChannelBuilder = channelBuilder match { - case reconfigurable: Reconfigurable => - config.addReconfigurable(reconfigurable) - Some(reconfigurable) - case _ => None - } - val selector = new Selector( - NetworkReceive.UNLIMITED, - Selector.NO_IDLE_TIMEOUT_MS, - metrics, - time, - "controller-channel", - Map("broker-id" -> brokerNode.idString).asJava, - false, - channelBuilder, - logContext - ) - val networkClient = new NetworkClient( - selector, - new ManualMetadataUpdater(Seq(brokerNode).asJava), - config.brokerId.toString, - 1, - 0, - 0, - Selectable.USE_DEFAULT_BUFFER_SIZE, - Selectable.USE_DEFAULT_BUFFER_SIZE, - config.requestTimeoutMs, - config.connectionSetupTimeoutMs, - config.connectionSetupTimeoutMaxMs, - time, - false, - new ApiVersions, - logContext, - MetadataRecoveryStrategy.NONE - ) - (networkClient, reconfigurableChannelBuilder) - } - val threadName = threadNamePrefix match { - case None => s"Controller-${config.brokerId}-to-broker-${broker.id}-send-thread" - case Some(name) => s"$name:Controller-${config.brokerId}-to-broker-${broker.id}-send-thread" - } - - val requestRateAndQueueTimeMetrics = metricsGroup.newTimer( - RequestRateAndQueueTimeMetricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS, brokerMetricTags(broker.id) - ) - - val requestThread = new RequestSendThread(config.brokerId, controllerEpoch, messageQueue, networkClient, - brokerNode, config, time, requestRateAndQueueTimeMetrics, stateChangeLogger, threadName) - requestThread.setDaemon(false) - - val queueSizeGauge = metricsGroup.newGauge(QueueSizeMetricName, () => messageQueue.size, brokerMetricTags(broker.id)) - - brokerStateInfo.put(broker.id, ControllerBrokerStateInfo(networkClient, brokerNode, messageQueue, - requestThread, queueSizeGauge, requestRateAndQueueTimeMetrics, reconfigurableChannelBuilder)) - } - - private def brokerMetricTags(brokerId: Int) = Map("broker-id" -> brokerId.toString).asJava - - private def removeExistingBroker(brokerState: ControllerBrokerStateInfo): Unit = { - try { - // Shutdown the RequestSendThread before closing the NetworkClient to avoid the concurrent use of the - // non-threadsafe classes as described in KAFKA-4959. 
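Summarizing the pattern in the class being removed: the controller kept one message queue and one dedicated sender thread per broker, so a slow or unreachable broker could only stall its own queue. The sketch below reproduces that shape in miniature; BrokerSender, its generic Req/Resp parameters, and the plain java.lang.Thread are simplifications invented for illustration (the real code used NetworkClient, ShutdownableThread, and per-broker metrics, and retried sends on connection loss rather than failing an item).

```scala
import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue, TimeUnit}

// One queue plus one sender thread per destination broker; callbacks run on the sender thread.
final class BrokerSender[Req, Resp](brokerId: Int, send: Req => Resp) {
  private val queue: BlockingQueue[(Req, Resp => Unit)] = new LinkedBlockingQueue()
  @volatile private var running = true

  private val thread = new Thread(s"to-broker-$brokerId-send-thread") {
    override def run(): Unit = {
      while (running) {
        val item = queue.poll(100, TimeUnit.MILLISECONDS)
        if (item != null) {
          val (request, callback) = item
          callback(send(request)) // a real sender would retry on connection loss instead of completing here
        }
      }
    }
  }
  thread.start()

  def enqueue(request: Req)(callback: Resp => Unit): Unit = queue.put((request, callback))

  def shutdown(): Unit = {
    running = false
    thread.join()
  }
}
```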
- // The call to shutdownLatch.await() in ShutdownableThread.shutdown() serves as a synchronization barrier that - // hands off the NetworkClient from the RequestSendThread to the ZkEventThread. - brokerState.reconfigurableChannelBuilder.foreach(config.removeReconfigurable) - brokerState.requestSendThread.shutdown() - brokerState.networkClient.close() - brokerState.messageQueue.clear() - metricsGroup.removeMetric(QueueSizeMetricName, brokerMetricTags(brokerState.brokerNode.id)) - metricsGroup.removeMetric(RequestRateAndQueueTimeMetricName, brokerMetricTags(brokerState.brokerNode.id)) - brokerStateInfo.remove(brokerState.brokerNode.id) - } catch { - case e: Throwable => error("Error while removing broker by the controller", e) - } - } - - private def startRequestSendThread(brokerId: Int): Unit = { - val requestThread = brokerStateInfo(brokerId).requestSendThread - if (requestThread.getState == Thread.State.NEW) - requestThread.start() - } -} - -case class QueueItem(apiKey: ApiKeys, request: AbstractControlRequest.Builder[_ <: AbstractControlRequest], - callback: AbstractResponse => Unit, enqueueTimeMs: Long) - -class RequestSendThread(val controllerId: Int, - controllerEpoch: () => Int, - val queue: BlockingQueue[QueueItem], - val networkClient: NetworkClient, - val brokerNode: Node, - val config: KafkaConfig, - val time: Time, - val requestRateAndQueueTimeMetrics: Timer, - val stateChangeLogger: StateChangeLogger, - name: String) - extends ShutdownableThread(name, true, s"[RequestSendThread controllerId=$controllerId] ") - with Logging { - - logIdent = logPrefix - - private val socketTimeoutMs = config.controllerSocketTimeoutMs - - override def doWork(): Unit = { - - def backoff(): Unit = pause(100, TimeUnit.MILLISECONDS) - - val QueueItem(apiKey, requestBuilder, callback, enqueueTimeMs) = queue.take() - requestRateAndQueueTimeMetrics.update(time.milliseconds() - enqueueTimeMs, TimeUnit.MILLISECONDS) - - var clientResponse: ClientResponse = null - try { - var isSendSuccessful = false - while (isRunning && !isSendSuccessful) { - // if a broker goes down for a long time, then at some point the controller's zookeeper listener will trigger a - // removeBroker which will invoke shutdown() on this thread. At that point, we will stop retrying. - try { - if (!brokerReady()) { - isSendSuccessful = false - backoff() - } - else { - val clientRequest = networkClient.newClientRequest(brokerNode.idString, requestBuilder, - time.milliseconds(), true) - clientResponse = NetworkClientUtils.sendAndReceive(networkClient, clientRequest, time) - isSendSuccessful = true - } - } catch { - case e: Throwable => // if the send was not successful, reconnect to broker and resend the message - warn(s"Controller $controllerId epoch ${controllerEpoch()} fails to send request " + - s"$requestBuilder " + - s"to broker $brokerNode. 
Reconnecting to broker.", e) - networkClient.close(brokerNode.idString) - isSendSuccessful = false - backoff() - } - } - if (clientResponse != null) { - val requestHeader = clientResponse.requestHeader - val api = requestHeader.apiKey - if (api != ApiKeys.LEADER_AND_ISR && api != ApiKeys.STOP_REPLICA && api != ApiKeys.UPDATE_METADATA) - throw new KafkaException(s"Unexpected apiKey received: $apiKey") - - val response = clientResponse.responseBody - - stateChangeLogger.withControllerEpoch(controllerEpoch()).trace(s"Received response " + - s"$response for request $api with correlation id " + - s"${requestHeader.correlationId} sent to broker $brokerNode") - - if (callback != null) { - callback(response) - } - } - } catch { - case e: Throwable => - error(s"Controller $controllerId fails to send a request to broker $brokerNode", e) - // If there is any socket error (eg, socket timeout), the connection is no longer usable and needs to be recreated. - networkClient.close(brokerNode.idString) - } - } - - private def brokerReady(): Boolean = { - try { - if (!NetworkClientUtils.isReady(networkClient, brokerNode, time.milliseconds())) { - if (!NetworkClientUtils.awaitReady(networkClient, brokerNode, time, socketTimeoutMs)) - throw new SocketTimeoutException(s"Failed to connect within $socketTimeoutMs ms") - - info(s"Controller $controllerId connected to $brokerNode for sending state change requests") - } - - true - } catch { - case e: Throwable => - warn(s"Controller $controllerId's connection to broker $brokerNode was unsuccessful", e) - networkClient.close(brokerNode.idString) - false - } - } - - override def initiateShutdown(): Boolean = { - if (super.initiateShutdown()) { - networkClient.initiateClose() - true - } else - false - } -} - -class ControllerBrokerRequestBatch( - config: KafkaConfig, - controllerChannelManager: ControllerChannelManager, - controllerEventManager: ControllerEventManager, - controllerContext: ControllerContext, - stateChangeLogger: StateChangeLogger -) extends AbstractControllerBrokerRequestBatch( - config, - () => controllerContext, - () => config.interBrokerProtocolVersion, - stateChangeLogger -) { - - private def sendEvent(event: ControllerEvent): Unit = { - controllerEventManager.put(event) - } - def sendRequest(brokerId: Int, - request: AbstractControlRequest.Builder[_ <: AbstractControlRequest], - callback: AbstractResponse => Unit = null): Unit = { - controllerChannelManager.sendRequest(brokerId, request, callback) - } - - override def handleLeaderAndIsrResponse(response: LeaderAndIsrResponse, broker: Int): Unit = { - sendEvent(LeaderAndIsrResponseReceived(response, broker)) - } - - override def handleUpdateMetadataResponse(response: UpdateMetadataResponse, broker: Int): Unit = { - sendEvent(UpdateMetadataResponseReceived(response, broker)) - } - - override def handleStopReplicaResponse(stopReplicaResponse: StopReplicaResponse, brokerId: Int, - partitionErrorsForDeletingTopics: Map[TopicPartition, Errors]): Unit = { - if (partitionErrorsForDeletingTopics.nonEmpty) - sendEvent(TopicDeletionStopReplicaResponseReceived(brokerId, stopReplicaResponse.error, partitionErrorsForDeletingTopics)) - } -} - -/** - * Structure to send RPCs from controller to broker to inform about the metadata and leadership - * changes in the system. - * @param config Kafka config present in the controller. 
- * @param metadataProvider Provider to provide the relevant metadata to build the state needed to - * send RPCs - * @param metadataVersionProvider Provider to provide the metadata version used by the controller. - * @param stateChangeLogger logger to log the various events while sending requests and receiving - * responses from the brokers - * @param kraftController whether the controller is KRaft controller - */ -abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig, - metadataProvider: () => ControllerChannelContext, - metadataVersionProvider: () => MetadataVersion, - stateChangeLogger: StateChangeLogger, - kraftController: Boolean = false) extends Logging { - val controllerId: Int = config.brokerId - private val leaderAndIsrRequestMap = mutable.Map.empty[Int, mutable.Map[TopicPartition, LeaderAndIsrPartitionState]] - private val stopReplicaRequestMap = mutable.Map.empty[Int, mutable.Map[TopicPartition, StopReplicaPartitionState]] - private val updateMetadataRequestBrokerSet = mutable.Set.empty[Int] - private val updateMetadataRequestPartitionInfoMap = mutable.Map.empty[TopicPartition, UpdateMetadataPartitionState] - private var updateType: AbstractControlRequest.Type = AbstractControlRequest.Type.UNKNOWN - private var metadataInstance: ControllerChannelContext = _ - - def sendRequest(brokerId: Int, - request: AbstractControlRequest.Builder[_ <: AbstractControlRequest], - callback: AbstractResponse => Unit = null): Unit - - def newBatch(): Unit = { - // raise error if the previous batch is not empty - if (leaderAndIsrRequestMap.nonEmpty) - throw new IllegalStateException("Controller to broker state change requests batch is not empty while creating " + - s"a new one. Some LeaderAndIsr state changes $leaderAndIsrRequestMap might be lost ") - if (stopReplicaRequestMap.nonEmpty) - throw new IllegalStateException("Controller to broker state change requests batch is not empty while creating a " + - s"new one. Some StopReplica state changes $stopReplicaRequestMap might be lost ") - if (updateMetadataRequestBrokerSet.nonEmpty) - throw new IllegalStateException("Controller to broker state change requests batch is not empty while creating a " + - s"new one. 
Some UpdateMetadata state changes to brokers $updateMetadataRequestBrokerSet with partition info " + - s"$updateMetadataRequestPartitionInfoMap might be lost ") - metadataInstance = metadataProvider() - } - - def setUpdateType(updateType: AbstractControlRequest.Type): Unit = { - this.updateType = updateType - } - - def clear(): Unit = { - leaderAndIsrRequestMap.clear() - stopReplicaRequestMap.clear() - updateMetadataRequestBrokerSet.clear() - updateMetadataRequestPartitionInfoMap.clear() - metadataInstance = null - updateType = AbstractControlRequest.Type.UNKNOWN - } - - def addLeaderAndIsrRequestForBrokers(brokerIds: Seq[Int], - topicPartition: TopicPartition, - leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch, - replicaAssignment: ReplicaAssignment, - isNew: Boolean): Unit = { - - brokerIds.filter(_ >= 0).foreach { brokerId => - val result = leaderAndIsrRequestMap.getOrElseUpdate(brokerId, mutable.Map.empty) - val alreadyNew = result.get(topicPartition).exists(_.isNew) - val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr - val partitionState = new LeaderAndIsrPartitionState() - .setTopicName(topicPartition.topic) - .setPartitionIndex(topicPartition.partition) - .setControllerEpoch(leaderIsrAndControllerEpoch.controllerEpoch) - .setLeader(leaderAndIsr.leader) - .setLeaderEpoch(leaderAndIsr.leaderEpoch) - .setIsr(leaderAndIsr.isr) - .setPartitionEpoch(leaderAndIsr.partitionEpoch) - .setReplicas(replicaAssignment.replicas.map(Integer.valueOf).asJava) - .setAddingReplicas(replicaAssignment.addingReplicas.map(Integer.valueOf).asJava) - .setRemovingReplicas(replicaAssignment.removingReplicas.map(Integer.valueOf).asJava) - .setIsNew(isNew || alreadyNew) - - if (metadataVersionProvider.apply().isAtLeast(IBP_3_2_IV0)) { - partitionState.setLeaderRecoveryState(leaderAndIsr.leaderRecoveryState.value) - } - - result.put(topicPartition, partitionState) - } - - addUpdateMetadataRequestForBrokers(metadataInstance.liveOrShuttingDownBrokerIds.toSeq, Set(topicPartition)) - } - - def addStopReplicaRequestForBrokers(brokerIds: Seq[Int], - topicPartition: TopicPartition, - deletePartition: Boolean): Unit = { - // A sentinel (-2) is used as an epoch if the topic is queued for deletion. It overrides - // any existing epoch. 
- val leaderEpoch = metadataInstance.leaderEpoch(topicPartition) - - brokerIds.filter(_ >= 0).foreach { brokerId => - val result = stopReplicaRequestMap.getOrElseUpdate(brokerId, mutable.Map.empty) - val alreadyDelete = result.get(topicPartition).exists(_.deletePartition) - result.put(topicPartition, new StopReplicaPartitionState() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setDeletePartition(alreadyDelete || deletePartition)) - } - } - - /** Send UpdateMetadataRequest to the given brokers for the given partitions and partitions that are being deleted */ - def addUpdateMetadataRequestForBrokers(brokerIds: Seq[Int], - partitions: collection.Set[TopicPartition]): Unit = { - updateMetadataRequestBrokerSet ++= brokerIds.filter(_ >= 0) - partitions.foreach { partition => - val beingDeleted = metadataInstance.isTopicQueuedUpForDeletion(partition.topic()) - metadataInstance.partitionLeadershipInfo(partition) match { - case Some(LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)) => - val replicas = metadataInstance.partitionReplicaAssignment(partition) - val offlineReplicas = replicas.filter(!metadataInstance.isReplicaOnline(_, partition)) - val updatedLeaderAndIsr = - if (beingDeleted) LeaderAndIsr.duringDelete(leaderAndIsr.isr) - else leaderAndIsr - addUpdateMetadataRequestForBrokers(brokerIds, controllerEpoch, partition, - updatedLeaderAndIsr.leader, updatedLeaderAndIsr.leaderEpoch, updatedLeaderAndIsr.partitionEpoch, - updatedLeaderAndIsr.isr.asScala.map(_.toInt).toList, replicas, offlineReplicas) - case None => - info(s"Leader not yet assigned for partition $partition. Skip sending UpdateMetadataRequest.") - } - } - } - - def addUpdateMetadataRequestForBrokers(brokerIds: Seq[Int]): Unit = { - updateMetadataRequestBrokerSet ++= brokerIds.filter(_ >= 0) - } - - def addUpdateMetadataRequestForBrokers(brokerIds: Seq[Int], - controllerEpoch: Int, - partition: TopicPartition, - leader: Int, - leaderEpoch: Int, - partitionEpoch: Int, - isrs: List[Int], - replicas: Seq[Int], - offlineReplicas: Seq[Int]): Unit = { - updateMetadataRequestBrokerSet ++= brokerIds.filter(_ >= 0) - val partitionStateInfo = new UpdateMetadataPartitionState() - .setTopicName(partition.topic) - .setPartitionIndex(partition.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(leader) - .setLeaderEpoch(leaderEpoch) - .setIsr(isrs.map(Integer.valueOf).asJava) - .setZkVersion(partitionEpoch) - .setReplicas(replicas.map(Integer.valueOf).asJava) - .setOfflineReplicas(offlineReplicas.map(Integer.valueOf).asJava) - updateMetadataRequestPartitionInfoMap.put(partition, partitionStateInfo) - } - - private def sendLeaderAndIsrRequest(controllerEpoch: Int, stateChangeLog: StateChangeLogger): Unit = { - val metadataVersion = metadataVersionProvider.apply() - val leaderAndIsrRequestVersion: Short = - if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 7 - else if (metadataVersion.isAtLeast(IBP_3_2_IV0)) 6 - else if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 5 - else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 4 - else if (metadataVersion.isAtLeast(IBP_2_4_IV0)) 3 - else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 2 - else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 1 - else 0 - - leaderAndIsrRequestMap.foreachEntry { (broker, leaderAndIsrPartitionStates) => - if (metadataInstance.liveOrShuttingDownBrokerIds.contains(broker)) { - val leaderIds = mutable.Set.empty[Int] - var numBecomeLeaders = 0 - leaderAndIsrPartitionStates.foreachEntry { (topicPartition, state) => - leaderIds += state.leader 
- val typeOfRequest = if (broker == state.leader) { - numBecomeLeaders += 1 - "become-leader" - } else { - "become-follower" - } - if (stateChangeLog.isTraceEnabled) - stateChangeLog.trace(s"Sending $typeOfRequest LeaderAndIsr request $state to broker $broker for partition $topicPartition") - } - stateChangeLog.info(s"Sending LeaderAndIsr request to broker $broker with $numBecomeLeaders become-leader " + - s"and ${leaderAndIsrPartitionStates.size - numBecomeLeaders} become-follower partitions") - val leaders = metadataInstance.liveOrShuttingDownBrokers.filter(b => leaderIds.contains(b.id)).map { - _.node(config.interBrokerListenerName) - } - val brokerEpoch = metadataInstance.liveBrokerIdAndEpochs(broker) - val topicIds = leaderAndIsrPartitionStates.keys - .map(_.topic) - .toSet[String] - .map(topic => (topic, metadataInstance.topicIds.getOrElse(topic, Uuid.ZERO_UUID))) - .toMap - val leaderAndIsrRequestBuilder = new LeaderAndIsrRequest.Builder( - leaderAndIsrRequestVersion, - controllerId, - controllerEpoch, - brokerEpoch, - leaderAndIsrPartitionStates.values.toBuffer.asJava, - topicIds.asJava, - leaders.asJava, - kraftController, - updateType - ) - sendRequest(broker, leaderAndIsrRequestBuilder, (r: AbstractResponse) => { - val leaderAndIsrResponse = r.asInstanceOf[LeaderAndIsrResponse] - handleLeaderAndIsrResponse(leaderAndIsrResponse, broker) - }) - } - } - leaderAndIsrRequestMap.clear() - } - - def handleLeaderAndIsrResponse(response: LeaderAndIsrResponse, broker: Int): Unit - - private def sendUpdateMetadataRequests(controllerEpoch: Int, stateChangeLog: StateChangeLogger): Unit = { - stateChangeLog.info(s"Sending UpdateMetadata request to brokers $updateMetadataRequestBrokerSet " + - s"for ${updateMetadataRequestPartitionInfoMap.size} partitions") - - val partitionStates = updateMetadataRequestPartitionInfoMap.values.toBuffer - val metadataVersion = metadataVersionProvider.apply() - val updateMetadataRequestVersion: Short = - if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 8 - else if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 7 - else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 6 - else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 5 - else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 4 - else if (metadataVersion.isAtLeast(IBP_0_10_2_IV0)) 3 - else if (metadataVersion.isAtLeast(IBP_0_10_0_IV1)) 2 - else if (metadataVersion.isAtLeast(IBP_0_9_0)) 1 - else 0 - - val liveBrokers = metadataInstance.liveOrShuttingDownBrokers.iterator.map { broker => - val endpoints = if (updateMetadataRequestVersion == 0) { - // Version 0 of UpdateMetadataRequest only supports PLAINTEXT - val securityProtocol = SecurityProtocol.PLAINTEXT - val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - val node = broker.node(listenerName) - Seq(new UpdateMetadataEndpoint() - .setHost(node.host) - .setPort(node.port) - .setSecurityProtocol(securityProtocol.id) - .setListener(listenerName.value)) - } else { - broker.endPoints.map { endpoint => - new UpdateMetadataEndpoint() - .setHost(endpoint.host) - .setPort(endpoint.port) - .setSecurityProtocol(endpoint.securityProtocol.id) - .setListener(endpoint.listenerName.value) - } - } - new UpdateMetadataBroker() - .setId(broker.id) - .setEndpoints(endpoints.asJava) - .setRack(broker.rack.orNull) - }.toBuffer - - updateMetadataRequestBrokerSet.intersect(metadataInstance.liveOrShuttingDownBrokerIds).foreach { broker => - val brokerEpoch = metadataInstance.liveBrokerIdAndEpochs(broker) - val topicIds = partitionStates.map(_.topicName()) - .distinct - 
.filter(metadataInstance.topicIds.contains) - .map(topic => (topic, metadataInstance.topicIds(topic))).toMap - val updateMetadataRequestBuilder = new UpdateMetadataRequest.Builder( - updateMetadataRequestVersion, - controllerId, - controllerEpoch, - brokerEpoch, - partitionStates.asJava, - liveBrokers.asJava, - topicIds.asJava, - kraftController, - updateType - ) - sendRequest(broker, updateMetadataRequestBuilder, (r: AbstractResponse) => { - val updateMetadataResponse = r.asInstanceOf[UpdateMetadataResponse] - handleUpdateMetadataResponse(updateMetadataResponse, broker) - }) - - } - updateMetadataRequestBrokerSet.clear() - updateMetadataRequestPartitionInfoMap.clear() - } - - def handleUpdateMetadataResponse(response: UpdateMetadataResponse, broker: Int): Unit - - private def sendStopReplicaRequests(controllerEpoch: Int, stateChangeLog: StateChangeLogger): Unit = { - val traceEnabled = stateChangeLog.isTraceEnabled - val metadataVersion = metadataVersionProvider.apply() - val stopReplicaRequestVersion: Short = - if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 4 - else if (metadataVersion.isAtLeast(IBP_2_6_IV0)) 3 - else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 2 - else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 1 - else 0 - - def responseCallback(brokerId: Int, isPartitionDeleted: TopicPartition => Boolean) - (response: AbstractResponse): Unit = { - val stopReplicaResponse = response.asInstanceOf[StopReplicaResponse] - val partitionErrorsForDeletingTopics = mutable.Map.empty[TopicPartition, Errors] - stopReplicaResponse.partitionErrors.forEach { pe => - val tp = new TopicPartition(pe.topicName, pe.partitionIndex) - if (metadataInstance.isTopicDeletionInProgress(pe.topicName) && - isPartitionDeleted(tp)) { - partitionErrorsForDeletingTopics += tp -> Errors.forCode(pe.errorCode) - } - } - if (partitionErrorsForDeletingTopics.nonEmpty) - handleStopReplicaResponse(stopReplicaResponse, brokerId, partitionErrorsForDeletingTopics.toMap) - } - - stopReplicaRequestMap.foreachEntry { (brokerId, partitionStates) => - if (metadataInstance.liveOrShuttingDownBrokerIds.contains(brokerId)) { - if (traceEnabled) - partitionStates.foreachEntry { (topicPartition, partitionState) => - stateChangeLog.trace(s"Sending StopReplica request $partitionState to " + - s"broker $brokerId for partition $topicPartition") - } - - val brokerEpoch = metadataInstance.liveBrokerIdAndEpochs(brokerId) - if (stopReplicaRequestVersion >= 3) { - val stopReplicaTopicState = mutable.Map.empty[String, StopReplicaTopicState] - partitionStates.foreachEntry { (topicPartition, partitionState) => - val topicState = stopReplicaTopicState.getOrElseUpdate(topicPartition.topic, - new StopReplicaTopicState().setTopicName(topicPartition.topic)) - topicState.partitionStates().add(partitionState) - } - - stateChangeLog.info(s"Sending StopReplica request for ${partitionStates.size} " + - s"replicas to broker $brokerId") - val stopReplicaRequestBuilder = new StopReplicaRequest.Builder( - stopReplicaRequestVersion, controllerId, controllerEpoch, brokerEpoch, - false, stopReplicaTopicState.values.toBuffer.asJava, kraftController) - sendRequest(brokerId, stopReplicaRequestBuilder, - responseCallback(brokerId, tp => partitionStates.get(tp).exists(_.deletePartition))) - } else { - var numPartitionStateWithDelete = 0 - var numPartitionStateWithoutDelete = 0 - val topicStatesWithDelete = mutable.Map.empty[String, StopReplicaTopicState] - val topicStatesWithoutDelete = mutable.Map.empty[String, StopReplicaTopicState] - - partitionStates.foreachEntry { 
(topicPartition, partitionState) => - val topicStates = if (partitionState.deletePartition()) { - numPartitionStateWithDelete += 1 - topicStatesWithDelete - } else { - numPartitionStateWithoutDelete += 1 - topicStatesWithoutDelete - } - val topicState = topicStates.getOrElseUpdate(topicPartition.topic, - new StopReplicaTopicState().setTopicName(topicPartition.topic)) - topicState.partitionStates().add(partitionState) - } - - if (topicStatesWithDelete.nonEmpty) { - stateChangeLog.info(s"Sending StopReplica request (delete = true) for " + - s"$numPartitionStateWithDelete replicas to broker $brokerId") - val stopReplicaRequestBuilder = new StopReplicaRequest.Builder( - stopReplicaRequestVersion, controllerId, controllerEpoch, brokerEpoch, - true, topicStatesWithDelete.values.toBuffer.asJava, kraftController) - sendRequest(brokerId, stopReplicaRequestBuilder, responseCallback(brokerId, _ => true)) - } - - if (topicStatesWithoutDelete.nonEmpty) { - stateChangeLog.info(s"Sending StopReplica request (delete = false) for " + - s"$numPartitionStateWithoutDelete replicas to broker $brokerId") - val stopReplicaRequestBuilder = new StopReplicaRequest.Builder( - stopReplicaRequestVersion, controllerId, controllerEpoch, brokerEpoch, - false, topicStatesWithoutDelete.values.toBuffer.asJava, kraftController) - sendRequest(brokerId, stopReplicaRequestBuilder) - } - } - } - } - - stopReplicaRequestMap.clear() - } - - def handleStopReplicaResponse(stopReplicaResponse: StopReplicaResponse, brokerId: Int, - partitionErrorsForDeletingTopics: Map[TopicPartition, Errors]): Unit - - def sendRequestsToBrokers(controllerEpoch: Int): Unit = { - try { - val stateChangeLog = stateChangeLogger.withControllerEpoch(controllerEpoch) - sendLeaderAndIsrRequest(controllerEpoch, stateChangeLog) - sendUpdateMetadataRequests(controllerEpoch, stateChangeLog) - sendStopReplicaRequests(controllerEpoch, stateChangeLog) - this.updateType = AbstractControlRequest.Type.UNKNOWN - } catch { - case e: Throwable => - if (leaderAndIsrRequestMap.nonEmpty) { - error("Haven't been able to send leader and isr requests, current state of " + - s"the map is $leaderAndIsrRequestMap. Exception message: $e") - } - if (updateMetadataRequestBrokerSet.nonEmpty) { - error(s"Haven't been able to send metadata update requests to brokers $updateMetadataRequestBrokerSet, " + - s"current state of the partition info is $updateMetadataRequestPartitionInfoMap. Exception message: $e") - } - if (stopReplicaRequestMap.nonEmpty) { - error("Haven't been able to send stop replica requests, current state of " + - s"the map is $stopReplicaRequestMap. 
Exception message: $e") - } - throw new IllegalStateException(e) - } - } -} - -case class ControllerBrokerStateInfo(networkClient: NetworkClient, - brokerNode: Node, - messageQueue: BlockingQueue[QueueItem], - requestSendThread: RequestSendThread, - queueSizeGauge: Gauge[Int], - requestRateAndTimeMetrics: Timer, - reconfigurableChannelBuilder: Option[Reconfigurable]) - diff --git a/core/src/main/scala/kafka/controller/ControllerContext.scala b/core/src/main/scala/kafka/controller/ControllerContext.scala index 1042071641607..cd56510e9a818 100644 --- a/core/src/main/scala/kafka/controller/ControllerContext.scala +++ b/core/src/main/scala/kafka/controller/ControllerContext.scala @@ -17,11 +17,7 @@ package kafka.controller -import kafka.cluster.Broker -import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.metadata.LeaderAndIsr - -import scala.collection.{Map, Seq, Set, mutable} +import scala.collection.Seq object ReplicaAssignment { def apply(replicas: Seq[Int]): ReplicaAssignment = { @@ -42,506 +38,15 @@ case class ReplicaAssignment private (replicas: Seq[Int], addingReplicas: Seq[Int], removingReplicas: Seq[Int]) { - lazy val originReplicas: Seq[Int] = replicas.diff(addingReplicas) lazy val targetReplicas: Seq[Int] = replicas.diff(removingReplicas) def isBeingReassigned: Boolean = { addingReplicas.nonEmpty || removingReplicas.nonEmpty } - def reassignTo(target: Seq[Int]): ReplicaAssignment = { - val fullReplicaSet = (target ++ originReplicas).distinct - ReplicaAssignment( - fullReplicaSet, - fullReplicaSet.diff(originReplicas), - fullReplicaSet.diff(target) - ) - } - - def removeReplica(replica: Int): ReplicaAssignment = { - ReplicaAssignment( - replicas.filterNot(_ == replica), - addingReplicas.filterNot(_ == replica), - removingReplicas.filterNot(_ == replica) - ) - } - override def toString: String = s"ReplicaAssignment(" + s"replicas=${replicas.mkString(",")}, " + s"addingReplicas=${addingReplicas.mkString(",")}, " + s"removingReplicas=${removingReplicas.mkString(",")})" } -class ControllerContext extends ControllerChannelContext { - val stats = new ControllerStats - var offlinePartitionCount = 0 - var preferredReplicaImbalanceCount = 0 - val shuttingDownBrokerIds = mutable.Set.empty[Int] - private val liveBrokers = mutable.Set.empty[Broker] - private val liveBrokerEpochs = mutable.Map.empty[Int, Long] - var epoch: Int = KafkaController.InitialControllerEpoch - var epochZkVersion: Int = KafkaController.InitialControllerEpochZkVersion - - val allTopics = mutable.Set.empty[String] - var topicIds = mutable.Map.empty[String, Uuid] - var topicNames = mutable.Map.empty[Uuid, String] - val partitionAssignments = mutable.Map.empty[String, mutable.Map[Int, ReplicaAssignment]] - private val partitionLeadershipInfo = mutable.Map.empty[TopicPartition, LeaderIsrAndControllerEpoch] - val partitionsBeingReassigned = mutable.Set.empty[TopicPartition] - val partitionStates = mutable.Map.empty[TopicPartition, PartitionState] - val replicaStates = mutable.Map.empty[PartitionAndReplica, ReplicaState] - val replicasOnOfflineDirs = mutable.Map.empty[Int, Set[TopicPartition]] - - val topicsToBeDeleted = mutable.Set.empty[String] - - /** The following topicsWithDeletionStarted variable is used to properly update the offlinePartitionCount metric. - * When a topic is going through deletion, we don't want to keep track of its partition state - * changes in the offlinePartitionCount metric. 
This goal means if some partitions of a topic are already - * in OfflinePartition state when deletion starts, we need to change the corresponding partition - * states to NonExistentPartition first before starting the deletion. - * - * However we can NOT change partition states to NonExistentPartition at the time of enqueuing topics - * for deletion. The reason is that when a topic is enqueued for deletion, it may be ineligible for - * deletion due to ongoing partition reassignments. Hence there might be a delay between enqueuing - * a topic for deletion and the actual start of deletion. In this delayed interval, partitions may still - * transition to or out of the OfflinePartition state. - * - * Hence we decide to change partition states to NonExistentPartition only when the actual deletion have started. - * For topics whose deletion have actually started, we keep track of them in the following topicsWithDeletionStarted - * variable. And once a topic is in the topicsWithDeletionStarted set, we are sure there will no longer - * be partition reassignments to any of its partitions, and only then it's safe to move its partitions to - * NonExistentPartition state. Once a topic is in the topicsWithDeletionStarted set, we will stop monitoring - * its partition state changes in the offlinePartitionCount metric - */ - val topicsWithDeletionStarted = mutable.Set.empty[String] - val topicsIneligibleForDeletion = mutable.Set.empty[String] - - private def clearTopicsState(): Unit = { - allTopics.clear() - topicIds.clear() - topicNames.clear() - partitionAssignments.clear() - partitionLeadershipInfo.clear() - partitionsBeingReassigned.clear() - replicasOnOfflineDirs.clear() - partitionStates.clear() - offlinePartitionCount = 0 - preferredReplicaImbalanceCount = 0 - replicaStates.clear() - } - - def addTopicId(topic: String, id: Uuid): Unit = { - if (!allTopics.contains(topic)) - throw new IllegalStateException(s"topic $topic is not contained in all topics.") - - topicIds.get(topic).foreach { existingId => - if (!existingId.equals(id)) - throw new IllegalStateException(s"topic ID map already contained ID for topic " + - s"$topic and new ID $id did not match existing ID $existingId") - } - topicNames.get(id).foreach { existingName => - if (!existingName.equals(topic)) - throw new IllegalStateException(s"topic name map already contained ID " + - s"$id and new name $topic did not match existing name $existingName") - } - topicIds.put(topic, id) - topicNames.put(id, topic) - } - - def partitionReplicaAssignment(topicPartition: TopicPartition): Seq[Int] = { - partitionAssignments.getOrElse(topicPartition.topic, mutable.Map.empty).get(topicPartition.partition) match { - case Some(partitionAssignment) => partitionAssignment.replicas - case None => Seq.empty - } - } - - def partitionFullReplicaAssignment(topicPartition: TopicPartition): ReplicaAssignment = { - partitionAssignments.getOrElse(topicPartition.topic, mutable.Map.empty) - .getOrElse(topicPartition.partition, ReplicaAssignment.empty) - } - - def updatePartitionFullReplicaAssignment(topicPartition: TopicPartition, newAssignment: ReplicaAssignment): Unit = { - val assignments = partitionAssignments.getOrElseUpdate(topicPartition.topic, mutable.Map.empty) - val previous = assignments.put(topicPartition.partition, newAssignment) - val leadershipInfo = partitionLeadershipInfo.get(topicPartition) - updatePreferredReplicaImbalanceMetric(topicPartition, previous, leadershipInfo, - Some(newAssignment), leadershipInfo) - } - - def 
partitionReplicaAssignmentForTopic(topic : String): Map[TopicPartition, Seq[Int]] = { - partitionAssignments.getOrElse(topic, Map.empty).map { - case (partition, assignment) => (new TopicPartition(topic, partition), assignment.replicas) - }.toMap - } - - def partitionFullReplicaAssignmentForTopic(topic : String): Map[TopicPartition, ReplicaAssignment] = { - partitionAssignments.getOrElse(topic, Map.empty).map { - case (partition, assignment) => (new TopicPartition(topic, partition), assignment) - }.toMap - } - - def allPartitions: Set[TopicPartition] = { - partitionAssignments.flatMap { - case (topic, topicReplicaAssignment) => topicReplicaAssignment.map { - case (partition, _) => new TopicPartition(topic, partition) - } - }.toSet - } - - def setLiveBrokers(brokerAndEpochs: Map[Broker, Long]): Unit = { - clearLiveBrokers() - addLiveBrokers(brokerAndEpochs) - } - - private def clearLiveBrokers(): Unit = { - liveBrokers.clear() - liveBrokerEpochs.clear() - } - - def addLiveBrokers(brokerAndEpochs: Map[Broker, Long]): Unit = { - liveBrokers ++= brokerAndEpochs.keySet - liveBrokerEpochs ++= brokerAndEpochs.map { case (broker, brokerEpoch) => (broker.id, brokerEpoch) } - } - - def removeLiveBrokers(brokerIds: Set[Int]): Unit = { - liveBrokers --= liveBrokers.filter(broker => brokerIds.contains(broker.id)) - liveBrokerEpochs --= brokerIds - } - - def updateBrokerMetadata(oldMetadata: Broker, newMetadata: Broker): Unit = { - liveBrokers -= oldMetadata - liveBrokers += newMetadata - } - - // getter - def liveBrokerIds: Set[Int] = liveBrokerEpochs.keySet.diff(shuttingDownBrokerIds) - // To just check if a broker is live, we should use this method instead of liveBrokerIds.contains(brokerId) - // which is more expensive because it constructs the set of live broker IDs. - // See KAFKA-17061 for the details. 
- def isLiveBroker(brokerId: Int): Boolean = liveBrokerEpochs.contains(brokerId) && !shuttingDownBrokerIds(brokerId) - def liveOrShuttingDownBrokerIds: Set[Int] = liveBrokerEpochs.keySet - def liveOrShuttingDownBrokers: Set[Broker] = liveBrokers - def liveBrokerIdAndEpochs: Map[Int, Long] = liveBrokerEpochs - def liveOrShuttingDownBroker(brokerId: Int): Option[Broker] = liveOrShuttingDownBrokers.find(_.id == brokerId) - - def partitionsOnBroker(brokerId: Int): Set[TopicPartition] = { - partitionAssignments.flatMap { - case (topic, topicReplicaAssignment) => topicReplicaAssignment.filter { - case (_, partitionAssignment) => partitionAssignment.replicas.contains(brokerId) - }.map { - case (partition, _) => new TopicPartition(topic, partition) - } - }.toSet - } - - def isReplicaOnline(brokerId: Int, topicPartition: TopicPartition): Boolean = { - isReplicaOnline(brokerId, topicPartition, includeShuttingDownBrokers = false) - } - - def isReplicaOnline(brokerId: Int, topicPartition: TopicPartition, includeShuttingDownBrokers: Boolean): Boolean = { - val brokerOnline = { - if (includeShuttingDownBrokers) liveOrShuttingDownBrokerIds.contains(brokerId) - else isLiveBroker(brokerId) - } - brokerOnline && !replicasOnOfflineDirs.getOrElse(brokerId, Set.empty).contains(topicPartition) - } - - def replicasOnBrokers(brokerIds: Set[Int]): Set[PartitionAndReplica] = { - brokerIds.flatMap { brokerId => - partitionAssignments.flatMap { - case (topic, topicReplicaAssignment) => topicReplicaAssignment.collect { - case (partition, partitionAssignment) if partitionAssignment.replicas.contains(brokerId) => - PartitionAndReplica(new TopicPartition(topic, partition), brokerId) - } - } - } - } - - def replicasForTopic(topic: String): Set[PartitionAndReplica] = { - partitionAssignments.getOrElse(topic, mutable.Map.empty).flatMap { - case (partition, assignment) => assignment.replicas.map { r => - PartitionAndReplica(new TopicPartition(topic, partition), r) - } - }.toSet - } - - def partitionsForTopic(topic: String): collection.Set[TopicPartition] = { - partitionAssignments.getOrElse(topic, mutable.Map.empty).map { - case (partition, _) => new TopicPartition(topic, partition) - }.toSet - } - - /** - * Get all online and offline replicas. 
- * - * @return a tuple consisting of first the online replicas and followed by the offline replicas - */ - def onlineAndOfflineReplicas: (Set[PartitionAndReplica], Set[PartitionAndReplica]) = { - val onlineReplicas = mutable.Set.empty[PartitionAndReplica] - val offlineReplicas = mutable.Set.empty[PartitionAndReplica] - for ((topic, partitionAssignments) <- partitionAssignments; - (partitionId, assignment) <- partitionAssignments) { - val partition = new TopicPartition(topic, partitionId) - for (replica <- assignment.replicas) { - val partitionAndReplica = PartitionAndReplica(partition, replica) - if (isReplicaOnline(replica, partition)) - onlineReplicas.add(partitionAndReplica) - else - offlineReplicas.add(partitionAndReplica) - } - } - (onlineReplicas, offlineReplicas) - } - - def replicasForPartition(partitions: collection.Set[TopicPartition]): collection.Set[PartitionAndReplica] = { - partitions.flatMap { p => - val replicas = partitionReplicaAssignment(p) - replicas.map(PartitionAndReplica(p, _)) - } - } - - def resetContext(): Unit = { - topicsToBeDeleted.clear() - topicsWithDeletionStarted.clear() - topicsIneligibleForDeletion.clear() - shuttingDownBrokerIds.clear() - epoch = 0 - epochZkVersion = 0 - clearTopicsState() - clearLiveBrokers() - } - - def setAllTopics(topics: Set[String]): Unit = { - allTopics.clear() - allTopics ++= topics - } - - def removeTopic(topic: String): Unit = { - // Metric is cleaned when the topic is queued up for deletion so - // we don't clean it twice. We clean it only if it is deleted - // directly. - if (!topicsToBeDeleted.contains(topic)) - cleanPreferredReplicaImbalanceMetric(topic) - topicsToBeDeleted -= topic - topicsWithDeletionStarted -= topic - allTopics -= topic - topicIds.remove(topic).foreach { topicId => - topicNames.remove(topicId) - } - partitionAssignments.remove(topic).foreach { assignments => - assignments.keys.foreach { partition => - partitionLeadershipInfo.remove(new TopicPartition(topic, partition)) - } - } - } - - def queueTopicDeletion(topicToBeAddedIntoDeletionList: Set[String]): Unit = { - // queueTopicDeletion could be called multiple times for same topic. - // e.g. 1) delete topic-A => 2) delete topic-B before A's deletion completes. - // In this case, at 2), queueTopicDeletion will be called with Set(topic-A, topic-B). - // However we should call cleanPreferredReplicaImbalanceMetric only once for same topic - // because otherwise, preferredReplicaImbalanceCount could be decremented wrongly at 2nd call. - // So we need to take a diff with already queued topics here. 
- val newlyDeletedTopics = topicToBeAddedIntoDeletionList.diff(topicsToBeDeleted) - topicsToBeDeleted ++= newlyDeletedTopics - newlyDeletedTopics.foreach(cleanPreferredReplicaImbalanceMetric) - } - - def beginTopicDeletion(topics: Set[String]): Unit = { - topicsWithDeletionStarted ++= topics - } - - def isTopicDeletionInProgress(topic: String): Boolean = { - topicsWithDeletionStarted.contains(topic) - } - - def isTopicQueuedUpForDeletion(topic: String): Boolean = { - topicsToBeDeleted.contains(topic) - } - - def isTopicEligibleForDeletion(topic: String): Boolean = { - topicsToBeDeleted.contains(topic) && !topicsIneligibleForDeletion.contains(topic) - } - - def topicsQueuedForDeletion: Set[String] = { - topicsToBeDeleted - } - - def replicasInState(topic: String, state: ReplicaState): Set[PartitionAndReplica] = { - replicasForTopic(topic).filter(replica => replicaStates(replica) == state).toSet - } - - def areAllReplicasInState(topic: String, state: ReplicaState): Boolean = { - replicasForTopic(topic).forall(replica => replicaStates(replica) == state) - } - - def isAnyReplicaInState(topic: String, state: ReplicaState): Boolean = { - replicasForTopic(topic).exists(replica => replicaStates(replica) == state) - } - - def checkValidReplicaStateChange(replicas: Seq[PartitionAndReplica], targetState: ReplicaState): (Seq[PartitionAndReplica], Seq[PartitionAndReplica]) = { - replicas.partition(replica => isValidReplicaStateTransition(replica, targetState)) - } - - def checkValidPartitionStateChange(partitions: Seq[TopicPartition], targetState: PartitionState): (Seq[TopicPartition], Seq[TopicPartition]) = { - partitions.partition(p => isValidPartitionStateTransition(p, targetState)) - } - - def putReplicaState(replica: PartitionAndReplica, state: ReplicaState): Unit = { - replicaStates.put(replica, state) - } - - def removeReplicaState(replica: PartitionAndReplica): Unit = { - replicaStates.remove(replica) - } - - def putReplicaStateIfNotExists(replica: PartitionAndReplica, state: ReplicaState): Unit = { - replicaStates.getOrElseUpdate(replica, state) - } - - def putPartitionState(partition: TopicPartition, targetState: PartitionState): Unit = { - val currentState = partitionStates.put(partition, targetState).getOrElse(NonExistentPartition) - updatePartitionStateMetrics(partition, currentState, targetState) - } - - private def updatePartitionStateMetrics(partition: TopicPartition, - currentState: PartitionState, - targetState: PartitionState): Unit = { - if (!isTopicDeletionInProgress(partition.topic)) { - if (currentState != OfflinePartition && targetState == OfflinePartition) { - offlinePartitionCount = offlinePartitionCount + 1 - } else if (currentState == OfflinePartition && targetState != OfflinePartition) { - offlinePartitionCount = offlinePartitionCount - 1 - } - } - } - - def putPartitionStateIfNotExists(partition: TopicPartition, state: PartitionState): Unit = { - if (partitionStates.getOrElseUpdate(partition, state) == state) - updatePartitionStateMetrics(partition, NonExistentPartition, state) - } - - def replicaState(replica: PartitionAndReplica): ReplicaState = { - replicaStates(replica) - } - - def partitionState(partition: TopicPartition): PartitionState = { - partitionStates(partition) - } - - def partitionsInState(state: PartitionState): Set[TopicPartition] = { - partitionStates.filter { case (_, s) => s == state }.keySet.toSet - } - - def partitionsInStates(states: Set[PartitionState]): Set[TopicPartition] = { - partitionStates.filter { case (_, s) => states.contains(s) 
}.keySet.toSet - } - - def partitionsInState(topic: String, state: PartitionState): Set[TopicPartition] = { - partitionsForTopic(topic).filter { partition => state == partitionState(partition) }.toSet - } - - def partitionsInStates(topic: String, states: Set[PartitionState]): Set[TopicPartition] = { - partitionsForTopic(topic).filter { partition => states.contains(partitionState(partition)) }.toSet - } - - def putPartitionLeadershipInfo(partition: TopicPartition, - leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch): Unit = { - val previous = partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch) - val replicaAssignment = partitionFullReplicaAssignment(partition) - updatePreferredReplicaImbalanceMetric(partition, Some(replicaAssignment), previous, - Some(replicaAssignment), Some(leaderIsrAndControllerEpoch)) - } - - def leaderEpoch(partition: TopicPartition): Int = { - // A sentinel (-2) is used as an epoch if the topic is queued for deletion. It overrides - // any existing epoch. - if (isTopicQueuedUpForDeletion(partition.topic)) { - LeaderAndIsr.EPOCH_DURING_DELETE - } else { - partitionLeadershipInfo.get(partition) - .map(_.leaderAndIsr.leaderEpoch) - .getOrElse(LeaderAndIsr.NO_EPOCH) - } - } - - def partitionLeadershipInfo(partition: TopicPartition): Option[LeaderIsrAndControllerEpoch] = { - partitionLeadershipInfo.get(partition) - } - - def partitionsLeadershipInfo: Map[TopicPartition, LeaderIsrAndControllerEpoch] = - partitionLeadershipInfo - - def partitionsWithLeaders: Set[TopicPartition] = - partitionLeadershipInfo.keySet.filter(tp => !isTopicQueuedUpForDeletion(tp.topic)) - - def partitionsWithOfflineLeader: Set[TopicPartition] = { - partitionLeadershipInfo.filter { case (topicPartition, leaderIsrAndControllerEpoch) => - !isReplicaOnline(leaderIsrAndControllerEpoch.leaderAndIsr.leader, topicPartition) && - !isTopicQueuedUpForDeletion(topicPartition.topic) - }.keySet - } - - def partitionLeadersOnBroker(brokerId: Int): Set[TopicPartition] = { - partitionLeadershipInfo.filter { case (topicPartition, leaderIsrAndControllerEpoch) => - !isTopicQueuedUpForDeletion(topicPartition.topic) && - leaderIsrAndControllerEpoch.leaderAndIsr.leader == brokerId && - partitionReplicaAssignment(topicPartition).size > 1 - }.keySet - } - - def topicName(topicId: Uuid): Option[String] = { - topicNames.get(topicId) - } - - def clearPartitionLeadershipInfo(): Unit = partitionLeadershipInfo.clear() - - def partitionWithLeadersCount: Int = partitionLeadershipInfo.size - - private def updatePreferredReplicaImbalanceMetric(partition: TopicPartition, - oldReplicaAssignment: Option[ReplicaAssignment], - oldLeadershipInfo: Option[LeaderIsrAndControllerEpoch], - newReplicaAssignment: Option[ReplicaAssignment], - newLeadershipInfo: Option[LeaderIsrAndControllerEpoch]): Unit = { - if (!isTopicQueuedUpForDeletion(partition.topic)) { - oldReplicaAssignment.foreach { replicaAssignment => - oldLeadershipInfo.foreach { leadershipInfo => - if (!hasPreferredLeader(replicaAssignment, leadershipInfo)) - preferredReplicaImbalanceCount -= 1 - } - } - - newReplicaAssignment.foreach { replicaAssignment => - newLeadershipInfo.foreach { leadershipInfo => - if (!hasPreferredLeader(replicaAssignment, leadershipInfo)) - preferredReplicaImbalanceCount += 1 - } - } - } - } - - private def cleanPreferredReplicaImbalanceMetric(topic: String): Unit = { - partitionAssignments.getOrElse(topic, mutable.Map.empty).foreachEntry { (partition, replicaAssignment) => - partitionLeadershipInfo.get(new TopicPartition(topic, 
partition)).foreach { leadershipInfo => - if (!hasPreferredLeader(replicaAssignment, leadershipInfo)) - preferredReplicaImbalanceCount -= 1 - } - } - } - - private def hasPreferredLeader(replicaAssignment: ReplicaAssignment, - leadershipInfo: LeaderIsrAndControllerEpoch): Boolean = { - val preferredReplica = replicaAssignment.replicas.head - if (replicaAssignment.isBeingReassigned && replicaAssignment.addingReplicas.contains(preferredReplica)) - // reassigning partitions are not counted as imbalanced until the new replica joins the ISR (completes reassignment) - !leadershipInfo.leaderAndIsr.isr.contains(preferredReplica) - else - leadershipInfo.leaderAndIsr.leader == preferredReplica - } - - private def isValidReplicaStateTransition(replica: PartitionAndReplica, targetState: ReplicaState): Boolean = - targetState.validPreviousStates.contains(replicaStates(replica)) - - private def isValidPartitionStateTransition(partition: TopicPartition, targetState: PartitionState): Boolean = - targetState.validPreviousStates.contains(partitionStates(partition)) -} diff --git a/core/src/main/scala/kafka/controller/ControllerEventManager.scala b/core/src/main/scala/kafka/controller/ControllerEventManager.scala deleted file mode 100644 index f4ea593f15e84..0000000000000 --- a/core/src/main/scala/kafka/controller/ControllerEventManager.scala +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.controller - -import com.yammer.metrics.core.Timer - -import java.util -import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.{CountDownLatch, LinkedBlockingQueue, TimeUnit} -import java.util.concurrent.locks.ReentrantLock -import kafka.utils.CoreUtils.inLock -import kafka.utils.Logging -import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.util.ShutdownableThread - -import scala.collection._ - -object ControllerEventManager { - val ControllerEventThreadName = "controller-event-thread" - private val EventQueueTimeMetricName = "EventQueueTimeMs" - private val EventQueueSizeMetricName = "EventQueueSize" -} - -trait ControllerEventProcessor { - def process(event: ControllerEvent): Unit - def preempt(event: ControllerEvent): Unit -} - -class QueuedEvent(val event: ControllerEvent, - val enqueueTimeMs: Long) { - private val processingStarted = new CountDownLatch(1) - private val spent = new AtomicBoolean(false) - - def process(processor: ControllerEventProcessor): Unit = { - if (spent.getAndSet(true)) - return - processingStarted.countDown() - processor.process(event) - } - - def preempt(processor: ControllerEventProcessor): Unit = { - if (spent.getAndSet(true)) - return - processor.preempt(event) - } - - def awaitProcessing(): Unit = { - processingStarted.await() - } - - override def toString: String = { - s"QueuedEvent(event=$event, enqueueTimeMs=$enqueueTimeMs)" - } -} - -class ControllerEventManager(controllerId: Int, - processor: ControllerEventProcessor, - time: Time, - rateAndTimeMetrics: Map[ControllerState, Timer], - eventQueueTimeTimeoutMs: Long = 300000) { - import ControllerEventManager._ - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - - @volatile private var _state: ControllerState = ControllerState.Idle - private val putLock = new ReentrantLock() - private val queue = new LinkedBlockingQueue[QueuedEvent] - // Visible for test - private[controller] var thread = new ControllerEventThread(ControllerEventThreadName) - - private val eventQueueTimeHist = metricsGroup.newHistogram(EventQueueTimeMetricName) - - metricsGroup.newGauge(EventQueueSizeMetricName, () => queue.size) - - def state: ControllerState = _state - - def start(): Unit = thread.start() - - def close(): Unit = { - try { - thread.initiateShutdown() - clearAndPut(ShutdownEventThread) - thread.awaitShutdown() - } finally { - metricsGroup.removeMetric(EventQueueTimeMetricName) - metricsGroup.removeMetric(EventQueueSizeMetricName) - } - } - - def put(event: ControllerEvent): QueuedEvent = inLock(putLock) { - val queuedEvent = new QueuedEvent(event, time.milliseconds()) - queue.put(queuedEvent) - queuedEvent - } - - def clearAndPut(event: ControllerEvent): QueuedEvent = inLock(putLock) { - val preemptedEvents = new util.ArrayList[QueuedEvent]() - queue.drainTo(preemptedEvents) - preemptedEvents.forEach(_.preempt(processor)) - put(event) - } - - def isEmpty: Boolean = queue.isEmpty - - class ControllerEventThread(name: String) - extends ShutdownableThread( - name, false, s"[ControllerEventThread controllerId=$controllerId] ") - with Logging { - - logIdent = logPrefix - - override def doWork(): Unit = { - val dequeued = pollFromEventQueue() - dequeued.event match { - case ShutdownEventThread => // The shutting down of the thread has been initiated at this point. Ignore this event. 
- case controllerEvent => - _state = controllerEvent.state - - eventQueueTimeHist.update(time.milliseconds() - dequeued.enqueueTimeMs) - - try { - def process(): Unit = dequeued.process(processor) - - rateAndTimeMetrics.get(state) match { - case Some(timer) => timer.time(() => process()) - case None => process() - } - } catch { - case e: Throwable => error(s"Uncaught error processing event $controllerEvent", e) - } - - _state = ControllerState.Idle - } - } - } - - private def pollFromEventQueue(): QueuedEvent = { - val count = eventQueueTimeHist.count() - if (count != 0) { - val event = queue.poll(eventQueueTimeTimeoutMs, TimeUnit.MILLISECONDS) - if (event == null) { - eventQueueTimeHist.clear() - queue.take() - } else { - event - } - } else { - queue.take() - } - } - -} diff --git a/core/src/main/scala/kafka/controller/ControllerState.scala b/core/src/main/scala/kafka/controller/ControllerState.scala deleted file mode 100644 index 0474f6362db43..0000000000000 --- a/core/src/main/scala/kafka/controller/ControllerState.scala +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.controller - -import scala.collection.Seq - -sealed abstract class ControllerState { - - def value: Byte - - def rateAndTimeMetricName: Option[String] = - if (hasRateAndTimeMetric) Some(s"${toString}RateAndTimeMs") else None - - protected def hasRateAndTimeMetric: Boolean = true -} - -object ControllerState { - - // Note: `rateAndTimeMetricName` is based on the case object name by default. Changing a name is a breaking change - // unless `rateAndTimeMetricName` is overridden. - - case object Idle extends ControllerState { - def value = 0 - override protected def hasRateAndTimeMetric: Boolean = false - } - - case object ControllerChange extends ControllerState { - def value = 1 - } - - case object BrokerChange extends ControllerState { - def value = 2 - // The LeaderElectionRateAndTimeMs metric existed before `ControllerState` was introduced and we keep the name - // for backwards compatibility. The alternative would be to have the same metric under two different names. 
- override def rateAndTimeMetricName: Option[String] = Some("LeaderElectionRateAndTimeMs") - } - - case object TopicChange extends ControllerState { - def value = 3 - } - - case object TopicDeletion extends ControllerState { - def value = 4 - } - - case object AlterPartitionReassignment extends ControllerState { - def value = 5 - - override def rateAndTimeMetricName: Option[String] = Some("PartitionReassignmentRateAndTimeMs") - } - - case object AutoLeaderBalance extends ControllerState { - def value = 6 - } - - case object ManualLeaderBalance extends ControllerState { - def value = 7 - } - - case object ControlledShutdown extends ControllerState { - def value = 8 - } - - case object IsrChange extends ControllerState { - def value = 9 - } - - case object LeaderAndIsrResponseReceived extends ControllerState { - def value = 10 - } - - case object LogDirChange extends ControllerState { - def value = 11 - } - - case object ControllerShutdown extends ControllerState { - def value = 12 - } - - case object UncleanLeaderElectionEnable extends ControllerState { - def value = 13 - } - - case object TopicUncleanLeaderElectionEnable extends ControllerState { - def value = 14 - } - - case object ListPartitionReassignment extends ControllerState { - def value = 15 - } - - case object UpdateMetadataResponseReceived extends ControllerState { - def value = 16 - - override protected def hasRateAndTimeMetric: Boolean = false - } - - case object UpdateFeatures extends ControllerState { - def value = 17 - } - - val values: Seq[ControllerState] = Seq(Idle, ControllerChange, BrokerChange, TopicChange, TopicDeletion, - AlterPartitionReassignment, AutoLeaderBalance, ManualLeaderBalance, ControlledShutdown, IsrChange, - LeaderAndIsrResponseReceived, LogDirChange, ControllerShutdown, UncleanLeaderElectionEnable, - TopicUncleanLeaderElectionEnable, ListPartitionReassignment, UpdateMetadataResponseReceived, - UpdateFeatures) -} diff --git a/core/src/main/scala/kafka/controller/Election.scala b/core/src/main/scala/kafka/controller/Election.scala deleted file mode 100644 index d9d76e3876682..0000000000000 --- a/core/src/main/scala/kafka/controller/Election.scala +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.controller - -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.metadata.LeaderAndIsr - -import scala.collection.Seq -import scala.jdk.CollectionConverters._ - -case class ElectionResult(topicPartition: TopicPartition, leaderAndIsr: Option[LeaderAndIsr], liveReplicas: Seq[Int]) - -object Election { - - private def leaderForOffline(partition: TopicPartition, - leaderAndIsrOpt: Option[LeaderAndIsr], - uncleanLeaderElectionEnabled: Boolean, - isLeaderRecoverySupported: Boolean, - controllerContext: ControllerContext): ElectionResult = { - - val assignment = controllerContext.partitionReplicaAssignment(partition) - val liveReplicas = assignment.filter(replica => controllerContext.isReplicaOnline(replica, partition)) - leaderAndIsrOpt match { - case Some(leaderAndIsr) => - val isr = leaderAndIsr.isr.asScala.map(_.toInt) - val leaderOpt = PartitionLeaderElectionAlgorithms.offlinePartitionLeaderElection( - assignment, isr, liveReplicas.toSet, uncleanLeaderElectionEnabled, controllerContext) - val newLeaderAndIsrOpt = leaderOpt.map { leader => - val newIsr = if (isr.contains(leader)) isr.filter(replica => controllerContext.isReplicaOnline(replica, partition)) - else List(leader) - val newIsrAsJava = newIsr.map(Integer.valueOf).asJava - if (!isr.contains(leader) && isLeaderRecoverySupported) { - // The new leader is not in the old ISR so mark the partition a RECOVERING - leaderAndIsr.newRecoveringLeaderAndIsr(leader, newIsrAsJava) - } else { - // Elect a new leader but keep the previous leader recovery state - leaderAndIsr.newLeaderAndIsr(leader, newIsrAsJava) - } - } - ElectionResult(partition, newLeaderAndIsrOpt, liveReplicas) - - case None => - ElectionResult(partition, None, liveReplicas) - } - } - - /** - * Elect leaders for new or offline partitions. - * - * @param controllerContext Context with the current state of the cluster - * @param isLeaderRecoverySupported true leader recovery is support and should be set if election is unclean - * @param partitionsWithUncleanLeaderRecoveryState A sequence of tuples representing the partitions - * that need election, their leader/ISR state, and whether - * or not unclean leader election is enabled - * - * @return The election results - */ - def leaderForOffline( - controllerContext: ControllerContext, - isLeaderRecoverySupported: Boolean, - partitionsWithUncleanLeaderRecoveryState: Seq[(TopicPartition, Option[LeaderAndIsr], Boolean)] - ): Seq[ElectionResult] = { - partitionsWithUncleanLeaderRecoveryState.map { - case (partition, leaderAndIsrOpt, uncleanLeaderElectionEnabled) => - leaderForOffline(partition, leaderAndIsrOpt, uncleanLeaderElectionEnabled, isLeaderRecoverySupported, controllerContext) - } - } - - private def leaderForReassign(partition: TopicPartition, - leaderAndIsr: LeaderAndIsr, - controllerContext: ControllerContext): ElectionResult = { - val targetReplicas = controllerContext.partitionFullReplicaAssignment(partition).targetReplicas - val liveReplicas = targetReplicas.filter(replica => controllerContext.isReplicaOnline(replica, partition)) - val isr = leaderAndIsr.isr - val leaderOpt = PartitionLeaderElectionAlgorithms.reassignPartitionLeaderElection(targetReplicas, isr.asScala.map(_.toInt), liveReplicas.toSet) - val newLeaderAndIsrOpt = leaderOpt.map(leader => leaderAndIsr.newLeader(leader)) - ElectionResult(partition, newLeaderAndIsrOpt, targetReplicas) - } - - /** - * Elect leaders for partitions that are undergoing reassignment. 
- * - * @param controllerContext Context with the current state of the cluster - * @param leaderAndIsrs A sequence of tuples representing the partitions that need election - * and their respective leader/ISR states - * - * @return The election results - */ - def leaderForReassign(controllerContext: ControllerContext, - leaderAndIsrs: Seq[(TopicPartition, LeaderAndIsr)]): Seq[ElectionResult] = { - leaderAndIsrs.map { case (partition, leaderAndIsr) => - leaderForReassign(partition, leaderAndIsr, controllerContext) - } - } - - private def leaderForPreferredReplica(partition: TopicPartition, - leaderAndIsr: LeaderAndIsr, - controllerContext: ControllerContext): ElectionResult = { - val assignment = controllerContext.partitionReplicaAssignment(partition) - val liveReplicas = assignment.filter(replica => controllerContext.isReplicaOnline(replica, partition)) - val isr = leaderAndIsr.isr - val leaderOpt = PartitionLeaderElectionAlgorithms.preferredReplicaPartitionLeaderElection(assignment, isr.asScala.map(_.toInt), liveReplicas.toSet) - val newLeaderAndIsrOpt = leaderOpt.map(leader => leaderAndIsr.newLeader(leader)) - ElectionResult(partition, newLeaderAndIsrOpt, assignment) - } - - /** - * Elect preferred leaders. - * - * @param controllerContext Context with the current state of the cluster - * @param leaderAndIsrs A sequence of tuples representing the partitions that need election - * and their respective leader/ISR states - * - * @return The election results - */ - def leaderForPreferredReplica(controllerContext: ControllerContext, - leaderAndIsrs: Seq[(TopicPartition, LeaderAndIsr)]): Seq[ElectionResult] = { - leaderAndIsrs.map { case (partition, leaderAndIsr) => - leaderForPreferredReplica(partition, leaderAndIsr, controllerContext) - } - } - - private def leaderForControlledShutdown(partition: TopicPartition, - leaderAndIsr: LeaderAndIsr, - shuttingDownBrokerIds: Set[Int], - controllerContext: ControllerContext): ElectionResult = { - val assignment = controllerContext.partitionReplicaAssignment(partition) - val liveOrShuttingDownReplicas = assignment.filter(replica => - controllerContext.isReplicaOnline(replica, partition, includeShuttingDownBrokers = true)) - val isr = leaderAndIsr.isr.asScala.map(_.toInt) - val leaderOpt = PartitionLeaderElectionAlgorithms.controlledShutdownPartitionLeaderElection(assignment, isr, - liveOrShuttingDownReplicas.toSet, shuttingDownBrokerIds) - val newIsr = isr.filter(replica => !shuttingDownBrokerIds.contains(replica)).map(Integer.valueOf).asJava - val newLeaderAndIsrOpt = leaderOpt.map(leader => leaderAndIsr.newLeaderAndIsr(leader, newIsr)) - ElectionResult(partition, newLeaderAndIsrOpt, liveOrShuttingDownReplicas) - } - - /** - * Elect leaders for partitions whose current leaders are shutting down. 
- * - * @param controllerContext Context with the current state of the cluster - * @param leaderAndIsrs A sequence of tuples representing the partitions that need election - * and their respective leader/ISR states - * - * @return The election results - */ - def leaderForControlledShutdown(controllerContext: ControllerContext, - leaderAndIsrs: Seq[(TopicPartition, LeaderAndIsr)]): Seq[ElectionResult] = { - val shuttingDownBrokerIds = controllerContext.shuttingDownBrokerIds.toSet - leaderAndIsrs.map { case (partition, leaderAndIsr) => - leaderForControlledShutdown(partition, leaderAndIsr, shuttingDownBrokerIds, controllerContext) - } - } -} diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala deleted file mode 100644 index 5d886a3040136..0000000000000 --- a/core/src/main/scala/kafka/controller/KafkaController.scala +++ /dev/null @@ -1,2969 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.controller - -import com.yammer.metrics.core.{Meter, Timer} - -import java.util.concurrent.TimeUnit -import kafka.common._ -import kafka.cluster.Broker -import kafka.controller.KafkaController.{ActiveBrokerCountMetricName, ActiveControllerCountMetricName, AlterReassignmentsCallback, ControllerStateMetricName, ElectLeadersCallback, FencedBrokerCountMetricName, GlobalPartitionCountMetricName, GlobalTopicCountMetricName, ListReassignmentsCallback, OfflinePartitionsCountMetricName, PreferredReplicaImbalanceCountMetricName, ReplicasIneligibleToDeleteCountMetricName, ReplicasToDeleteCountMetricName, TopicsIneligibleToDeleteCountMetricName, TopicsToDeleteCountMetricName, UpdateFeaturesCallback, ZkMigrationStateMetricName} -import kafka.coordinator.transaction.ZkProducerIdManager -import kafka.server._ -import kafka.server.metadata.ZkFinalizedFeatureCache -import kafka.utils._ -import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult -import kafka.zk.TopicZNode.TopicIdReplicaAssignment -import kafka.zk.{FeatureZNodeStatus, _} -import kafka.zookeeper.{StateChangeHandler, ZNodeChangeHandler, ZNodeChildChangeHandler} -import org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType -import org.apache.kafka.common.ElectionType -import org.apache.kafka.common.KafkaException -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.Uuid -import org.apache.kafka.common.errors.{BrokerNotAvailableException, ControllerMovedException, StaleBrokerEpochException} -import org.apache.kafka.common.message.{AllocateProducerIdsRequestData, AllocateProducerIdsResponseData, AlterPartitionRequestData, AlterPartitionResponseData} -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.protocol.Errors -import 
org.apache.kafka.common.requests.{AbstractControlRequest, ApiError, LeaderAndIsrResponse, UpdateFeaturesRequest, UpdateMetadataResponse} -import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationState -import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.common.{AdminOperationException, ProducerIdsBlock} -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.util.KafkaScheduler -import org.apache.zookeeper.KeeperException -import org.apache.zookeeper.KeeperException.Code - -import scala.collection.{Map, Seq, Set, immutable, mutable} -import scala.collection.mutable.ArrayBuffer -import scala.jdk.CollectionConverters._ -import scala.util.{Failure, Success, Try} - -sealed trait ElectionTrigger -case object AutoTriggered extends ElectionTrigger -case object ZkTriggered extends ElectionTrigger -case object AdminClientTriggered extends ElectionTrigger - -object KafkaController extends Logging { - val InitialControllerEpoch = 0 - val InitialControllerEpochZkVersion = 0 - - type ElectLeadersCallback = Map[TopicPartition, Either[ApiError, Int]] => Unit - type ListReassignmentsCallback = Either[Map[TopicPartition, ReplicaAssignment], ApiError] => Unit - type AlterReassignmentsCallback = Either[Map[TopicPartition, ApiError], ApiError] => Unit - type UpdateFeaturesCallback = Either[ApiError, Map[String, ApiError]] => Unit - - private val ActiveControllerCountMetricName = "ActiveControllerCount" - private val OfflinePartitionsCountMetricName = "OfflinePartitionsCount" - private val PreferredReplicaImbalanceCountMetricName = "PreferredReplicaImbalanceCount" - private val ControllerStateMetricName = "ControllerState" - private val GlobalTopicCountMetricName = "GlobalTopicCount" - private val GlobalPartitionCountMetricName = "GlobalPartitionCount" - private val TopicsToDeleteCountMetricName = "TopicsToDeleteCount" - private val ReplicasToDeleteCountMetricName = "ReplicasToDeleteCount" - private val TopicsIneligibleToDeleteCountMetricName = "TopicsIneligibleToDeleteCount" - private val ReplicasIneligibleToDeleteCountMetricName = "ReplicasIneligibleToDeleteCount" - private val ActiveBrokerCountMetricName = "ActiveBrokerCount" - private val FencedBrokerCountMetricName = "FencedBrokerCount" - private val ZkMigrationStateMetricName = "ZkMigrationState" - - // package private for testing - private[controller] val MetricNames = Set( - ZkMigrationStateMetricName, - ActiveControllerCountMetricName, - OfflinePartitionsCountMetricName, - PreferredReplicaImbalanceCountMetricName, - ControllerStateMetricName, - GlobalTopicCountMetricName, - GlobalPartitionCountMetricName, - TopicsToDeleteCountMetricName, - ReplicasToDeleteCountMetricName, - TopicsIneligibleToDeleteCountMetricName, - ReplicasIneligibleToDeleteCountMetricName, - ActiveBrokerCountMetricName, - FencedBrokerCountMetricName - ) -} - -class KafkaController(val config: KafkaConfig, - zkClient: KafkaZkClient, - time: Time, - metrics: Metrics, - initialBrokerInfo: BrokerInfo, - initialBrokerEpoch: Long, - tokenManager: DelegationTokenManager, - brokerFeatures: BrokerFeatures, - featureCache: ZkFinalizedFeatureCache, - threadNamePrefix: Option[String] = None) - extends ControllerEventProcessor with Logging { - - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - - this.logIdent = s"[Controller id=${config.brokerId}] " - - @volatile private var brokerInfo = 
initialBrokerInfo - @volatile private var _brokerEpoch = initialBrokerEpoch - - private val isAlterPartitionEnabled = config.interBrokerProtocolVersion.isAlterPartitionSupported - private val stateChangeLogger = new StateChangeLogger(config.brokerId, inControllerContext = true, None) - val controllerContext = new ControllerContext - var controllerChannelManager = new ControllerChannelManager( - () => controllerContext.epoch, - config, - time, - metrics, - stateChangeLogger, - threadNamePrefix - ) - - // have a separate scheduler for the controller to be able to start and stop independently of the kafka server - // visible for testing - private[controller] val kafkaScheduler = new KafkaScheduler(1) - - // visible for testing - private[controller] val eventManager = new ControllerEventManager(config.brokerId, this, time, - controllerContext.stats.rateAndTimeMetrics) - - private val brokerRequestBatch = new ControllerBrokerRequestBatch(config, controllerChannelManager, - eventManager, controllerContext, stateChangeLogger) - val replicaStateMachine: ReplicaStateMachine = new ZkReplicaStateMachine(config, stateChangeLogger, controllerContext, zkClient, - new ControllerBrokerRequestBatch(config, controllerChannelManager, eventManager, controllerContext, stateChangeLogger)) - val partitionStateMachine: PartitionStateMachine = new ZkPartitionStateMachine(config, stateChangeLogger, controllerContext, zkClient, - new ControllerBrokerRequestBatch(config, controllerChannelManager, eventManager, controllerContext, stateChangeLogger)) - private val topicDeletionManager = new TopicDeletionManager(config, controllerContext, replicaStateMachine, - partitionStateMachine, new ControllerDeletionClient(this, zkClient)) - - private val controllerChangeHandler = new ControllerChangeHandler(eventManager) - private val brokerChangeHandler = new BrokerChangeHandler(eventManager) - private val brokerModificationsHandlers: mutable.Map[Int, BrokerModificationsHandler] = mutable.Map.empty - private val topicChangeHandler = new TopicChangeHandler(eventManager) - private val topicDeletionHandler = new TopicDeletionHandler(eventManager) - private val partitionModificationsHandlers: mutable.Map[String, PartitionModificationsHandler] = mutable.Map.empty - private val partitionReassignmentHandler = new PartitionReassignmentHandler(eventManager) - private val preferredReplicaElectionHandler = new PreferredReplicaElectionHandler(eventManager) - private val isrChangeNotificationHandler = new IsrChangeNotificationHandler(eventManager) - private val logDirEventNotificationHandler = new LogDirEventNotificationHandler(eventManager) - - @volatile var activeControllerId = -1 - @volatile private var offlinePartitionCount = 0 - @volatile private var preferredReplicaImbalanceCount = 0 - @volatile private var globalTopicCount = 0 - @volatile private var globalPartitionCount = 0 - @volatile private var topicsToDeleteCount = 0 - @volatile private var replicasToDeleteCount = 0 - @volatile private var ineligibleTopicsToDeleteCount = 0 - @volatile private var ineligibleReplicasToDeleteCount = 0 - @volatile private var activeBrokerCount = 0 - - /* single-thread scheduler to clean expired tokens */ - private val tokenCleanScheduler = new KafkaScheduler(1, true, "delegation-token-cleaner") - - metricsGroup.newGauge(ZkMigrationStateMetricName, () => ZkMigrationState.ZK.value().intValue()) - metricsGroup.newGauge(ActiveControllerCountMetricName, () => if (isActive) 1 else 0) - metricsGroup.newGauge(OfflinePartitionsCountMetricName, () => 
offlinePartitionCount) - metricsGroup.newGauge(PreferredReplicaImbalanceCountMetricName, () => preferredReplicaImbalanceCount) - metricsGroup.newGauge(ControllerStateMetricName, () => state.value) - metricsGroup.newGauge(GlobalTopicCountMetricName, () => globalTopicCount) - metricsGroup.newGauge(GlobalPartitionCountMetricName, () => globalPartitionCount) - metricsGroup.newGauge(TopicsToDeleteCountMetricName, () => topicsToDeleteCount) - metricsGroup.newGauge(ReplicasToDeleteCountMetricName, () => replicasToDeleteCount) - metricsGroup.newGauge(TopicsIneligibleToDeleteCountMetricName, () => ineligibleTopicsToDeleteCount) - metricsGroup.newGauge(ReplicasIneligibleToDeleteCountMetricName, () => ineligibleReplicasToDeleteCount) - metricsGroup.newGauge(ActiveBrokerCountMetricName, () => activeBrokerCount) - // FencedBrokerCount metric is always 0 in the ZK controller. - metricsGroup.newGauge(FencedBrokerCountMetricName, () => 0) - - /** - * Returns true if this broker is the current controller. - */ - def isActive: Boolean = activeControllerId == config.brokerId - - def brokerEpoch: Long = _brokerEpoch - - def epoch: Int = controllerContext.epoch - - /** - * Invoked when the controller module of a Kafka server is started up. This does not assume that the current broker - * is the controller. It merely registers the session expiration listener and starts the controller leader - * elector - */ - def startup(): Unit = { - zkClient.registerStateChangeHandler(new StateChangeHandler { - override val name: String = StateChangeHandlers.ControllerHandler - override def afterInitializingSession(): Unit = { - eventManager.put(RegisterBrokerAndReelect) - } - override def beforeInitializingSession(): Unit = { - val queuedEvent = eventManager.clearAndPut(Expire) - - // Block initialization of the new session until the expiration event is being handled, - // which ensures that all pending events have been processed before creating the new session - queuedEvent.awaitProcessing() - } - }) - eventManager.put(Startup) - eventManager.start() - } - - /** - * Invoked when the controller module of a Kafka server is shutting down. If the broker was the current controller, - * it shuts down the partition and replica state machines. If not, those are a no-op. In addition to that, it also - * shuts down the controller channel manager, if one exists (i.e. if it was the current controller) - */ - def shutdown(): Unit = { - try { - eventManager.close() - onControllerResignation() - } finally { - removeMetrics() - } - } - - /** - * On controlled shutdown, the controller first determines the partitions that the - * shutting down broker leads, and moves leadership of those partitions to another broker - * that is in that partition's ISR. - * - * @param id Id of the broker to shutdown. - * @param brokerEpoch The broker epoch in the controlled shutdown request - * @return The number of partitions that the broker still leads. 
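 *
 * A minimal caller-side sketch (illustrative only; the value names below are hypothetical,
 * while the callback type matches the Try[Set[TopicPartition]] => Unit parameter declared
 * underneath, and scala.util.{Failure, Success} are already imported by this file):
 * {{{
 *   controller.controlledShutdown(brokerId, brokerEpoch, {
 *     case Success(partitionsStillLed) =>
 *       // the broker still leads these partitions; a caller would typically retry until empty
 *     case Failure(e) =>
 *       // e.g. StaleBrokerEpochException when the supplied broker epoch is out of date
 *   })
 * }}}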
- */ - def controlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit): Unit = { - val controlledShutdownEvent = ControlledShutdown(id, brokerEpoch, controlledShutdownCallback) - eventManager.put(controlledShutdownEvent) - } - - private[kafka] def updateBrokerInfo(newBrokerInfo: BrokerInfo): Unit = { - this.brokerInfo = newBrokerInfo - zkClient.updateBrokerInfo(newBrokerInfo) - } - - private[kafka] def enableDefaultUncleanLeaderElection(): Unit = { - eventManager.put(UncleanLeaderElectionEnable) - } - - private[kafka] def enableTopicUncleanLeaderElection(topic: String): Unit = { - if (isActive) { - eventManager.put(TopicUncleanLeaderElectionEnable(topic)) - } - } - - def isTopicQueuedForDeletion(topic: String): Boolean = { - topicDeletionManager.isTopicQueuedUpForDeletion(topic) - } - - private def state: ControllerState = eventManager.state - - /** - * This callback is invoked by the zookeeper leader elector on electing the current broker as the new controller. - * It does the following things on the become-controller state change - - * 1. Initializes the controller's context object that holds cache objects for current topics, live brokers and - * leaders for all existing partitions. - * 2. Starts the controller's channel manager - * 3. Starts the replica state machine - * 4. Starts the partition state machine - * If it encounters any unexpected exception/error while becoming controller, it resigns as the current controller. - * This ensures another controller election will be triggered and there will always be an actively serving controller - */ - private def onControllerFailover(): Unit = { - maybeSetupFeatureVersioning() - - info("Registering handlers") - - // before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks - val childChangeHandlers = Seq(brokerChangeHandler, topicChangeHandler, topicDeletionHandler, logDirEventNotificationHandler, - isrChangeNotificationHandler) - childChangeHandlers.foreach(zkClient.registerZNodeChildChangeHandler) - - val nodeChangeHandlers = Seq(preferredReplicaElectionHandler, partitionReassignmentHandler) - nodeChangeHandlers.foreach(zkClient.registerZNodeChangeHandlerAndCheckExistence) - - info("Deleting log dir event notifications") - zkClient.deleteLogDirEventNotifications(controllerContext.epochZkVersion) - info("Deleting isr change notifications") - zkClient.deleteIsrChangeNotifications(controllerContext.epochZkVersion) - info("Initializing controller context") - initializeControllerContext() - info("Fetching topic deletions in progress") - val (topicsToBeDeleted, topicsIneligibleForDeletion) = fetchTopicDeletionsInProgress() - info("Initializing topic deletion manager") - topicDeletionManager.init(topicsToBeDeleted, topicsIneligibleForDeletion) - - // We need to send UpdateMetadataRequest after the controller context is initialized and before the state machines - // are started. This is because brokers need to receive the list of live brokers from UpdateMetadataRequest before - // they can process the LeaderAndIsrRequests that are generated by replicaStateMachine.startup() and - partitionStateMachine.startup(). 
- info("Sending update metadata request") - sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty) - - replicaStateMachine.startup() - partitionStateMachine.startup() - - info(s"Ready to serve as the new controller with epoch $epoch") - - initializePartitionReassignments() - topicDeletionManager.tryTopicDeletion() - val pendingPreferredReplicaElections = fetchPendingPreferredReplicaElections() - onReplicaElection(pendingPreferredReplicaElections, ElectionType.PREFERRED, ZkTriggered) - info("Starting the controller scheduler") - kafkaScheduler.startup() - if (config.autoLeaderRebalanceEnable) { - scheduleAutoLeaderRebalanceTask(delay = 5, unit = TimeUnit.SECONDS) - } - - if (config.tokenAuthEnabled) { - info("starting the token expiry check scheduler") - tokenCleanScheduler.startup() - tokenCleanScheduler.schedule("delete-expired-tokens", - () => tokenManager.expireTokens(), - 0L, - config.delegationTokenExpiryCheckIntervalMs) - } - } - - private def createFeatureZNode(newNode: FeatureZNode): Int = { - info(s"Creating FeatureZNode at path: ${FeatureZNode.path} with contents: $newNode") - zkClient.createFeatureZNode(newNode) - val (_, newVersion) = zkClient.getDataAndVersion(FeatureZNode.path) - newVersion - } - - private def updateFeatureZNode(updatedNode: FeatureZNode): Int = { - info(s"Updating FeatureZNode at path: ${FeatureZNode.path} with contents: $updatedNode") - zkClient.updateFeatureZNode(updatedNode) - } - - /** - * This method enables the feature versioning system (KIP-584). - * - * Development in Kafka (from a high level) is organized into features. Each feature is tracked by - * a name and a range of version numbers or a version number. A feature can be of two types: - * - * 1. Supported feature: - * A supported feature is represented by a name (string) and a range of versions (defined by a - * SupportedVersionRange). It refers to a feature that a particular broker advertises support for. - * Each broker advertises the version ranges of its own supported features in its own - * BrokerIdZNode. The contents of the advertisement are specific to the particular broker and - * do not represent any guarantee of a cluster-wide availability of the feature for any particular - * range of versions. - * - * 2. Finalized feature: - * A finalized feature is represented by a name (string) and a specified version level (defined - * by a Short). Whenever the feature versioning system (KIP-584) is - * enabled, the finalized features are stored in the cluster-wide common FeatureZNode. - * In comparison to a supported feature, the key difference is that a finalized feature exists - * in ZK only when it is guaranteed to be supported by any random broker in the cluster for a - * specified range of version levels. Also, the controller is the only entity modifying the - * information about finalized features. - * - * This method sets up the FeatureZNode with enabled status, which means that the finalized - * features stored in the FeatureZNode are active. The enabled status should be written by the - * controller to the FeatureZNode only when the broker IBP config is greater than or equal to - * IBP_2_7_IV0. - * - * There are multiple cases handled here: - * - * 1. New cluster bootstrap: - * A new Kafka cluster (i.e. it is deployed first time) is almost always started with IBP config - * setting greater than or equal to IBP_2_7_IV0. We would like to start the cluster with all - * the possible supported features finalized immediately. 
Assuming this is the case, the - * controller will start up and notice that the FeatureZNode is absent in the new cluster, - * it will then create a FeatureZNode (with enabled status) containing the entire list of - * supported features as its finalized features. - * - * 2. Broker binary upgraded, but IBP config set to lower than IBP_2_7_IV0: - * Imagine there was an existing Kafka cluster with IBP config less than IBP_2_7_IV0, and the - * broker binary has now been upgraded to a newer version that supports the feature versioning - * system (KIP-584). But the IBP config is still set to lower than IBP_2_7_IV0, and may be - * set to a higher value later. In this case, we want to start with no finalized features and - * allow the user to finalize them whenever they are ready i.e. in the future whenever the - * user sets IBP config to be greater than or equal to IBP_2_7_IV0, then the user could start - * finalizing the features. This process ensures we do not enable all the possible features - * immediately after an upgrade, which could be harmful to Kafka. - * This is how we handle such a case: - * - Before the IBP config upgrade (i.e. IBP config set to less than IBP_2_7_IV0), the - * controller will start up and check if the FeatureZNode is absent. - * - If the node is absent, it will react by creating a FeatureZNode with disabled status - * and empty finalized features. - * - Otherwise, if a node already exists in enabled status then the controller will just - * flip the status to disabled and clear the finalized features. - * - After the IBP config upgrade (i.e. IBP config set to greater than or equal to - * IBP_2_7_IV0), when the controller starts up it will check if the FeatureZNode exists - * and whether it is disabled. - * - If the node is in disabled status, the controller won’t upgrade all features immediately. - * Instead it will just switch the FeatureZNode status to enabled status. This lets the - * user finalize the features later. - * - Otherwise, if a node already exists in enabled status then the controller will leave - * the node unmodified. - * - * 3. Broker binary upgraded, with existing cluster IBP config >= IBP_2_7_IV0: - * Imagine there was an existing Kafka cluster with IBP config >= IBP_2_7_IV0, and the broker - * binary has just been upgraded to a newer version (that supports IBP config IBP_2_7_IV0 and - * higher). The controller will start up and find that a FeatureZNode is already present with - * enabled status and existing finalized features. In such a case, the controller leaves the node - * unmodified. - * - * 4. Broker downgrade: - * Imagine that a Kafka cluster exists already and the IBP config is greater than or equal to - * IBP_2_7_IV0. Then, the user decided to downgrade the cluster by setting IBP config to a - * value less than IBP_2_7_IV0. This means the user is also disabling the feature versioning - * system (KIP-584). In this case, when the controller starts up with the lower IBP config, it - * will switch the FeatureZNode status to disabled with empty features. 
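 *
 * A condensed summary of the four cases above (the FeatureZNode state the controller
 * writes, keyed by the broker IBP config and the pre-existing node; this restates the
 * comment above rather than adding new behavior):
 * {{{
 *   IBP >= IBP_2_7_IV0, node absent    -> create node: Enabled, all default finalized features
 *   IBP >= IBP_2_7_IV0, node Disabled  -> update node: Enabled, features left empty (user finalizes later)
 *   IBP >= IBP_2_7_IV0, node Enabled   -> leave the node unmodified
 *   IBP <  IBP_2_7_IV0, any node state -> write node:  Disabled, empty features
 * }}}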
- */ - private def enableFeatureVersioning(): Unit = { - val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(FeatureZNode.path) - if (version == ZkVersion.UnknownVersion) { - val newVersion = createFeatureZNode( - FeatureZNode(config.interBrokerProtocolVersion, - FeatureZNodeStatus.Enabled, - brokerFeatures.defaultFinalizedFeatures.asScala.map { case (k, v) => (k, v.shortValue()) } - )) - featureCache.waitUntilFeatureEpochOrThrow(newVersion, config.zkConnectionTimeoutMs) - } else { - val existingFeatureZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get) - val newFeatures = existingFeatureZNode.status match { - case FeatureZNodeStatus.Enabled => existingFeatureZNode.features - case FeatureZNodeStatus.Disabled => - if (existingFeatureZNode.features.nonEmpty) { - warn(s"FeatureZNode at path: ${FeatureZNode.path} with disabled status" + - s" contains non-empty features: ${existingFeatureZNode.features}") - } - Map.empty[String, Short] - } - val newFeatureZNode = FeatureZNode(config.interBrokerProtocolVersion, FeatureZNodeStatus.Enabled, newFeatures) - if (!newFeatureZNode.equals(existingFeatureZNode)) { - val newVersion = updateFeatureZNode(newFeatureZNode) - featureCache.waitUntilFeatureEpochOrThrow(newVersion, config.zkConnectionTimeoutMs) - } - } - } - - /** - * Disables the feature versioning system (KIP-584). - * - * Sets up the FeatureZNode with disabled status. This status means the feature versioning system - * (KIP-584) is disabled, and, the finalized features stored in the FeatureZNode are not relevant. - * This status should be written by the controller to the FeatureZNode only when the broker - * IBP config is less than IBP_2_7_IV0. - * - * NOTE: - * 1. When this method returns, existing finalized features (if any) will be cleared from the - * FeatureZNode. - * 2. This method, unlike enableFeatureVersioning() need not wait for the FinalizedFeatureCache - * to be updated, because, such updates to the cache (via FinalizedFeatureChangeListener) - * are disabled when IBP config is < than IBP_2_7_IV0. - */ - private def disableFeatureVersioning(): Unit = { - val newNode = FeatureZNode(config.interBrokerProtocolVersion, FeatureZNodeStatus.Disabled, Map.empty[String, Short]) - val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(FeatureZNode.path) - if (version == ZkVersion.UnknownVersion) { - createFeatureZNode(newNode) - } else { - val existingFeatureZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get) - if (existingFeatureZNode.status == FeatureZNodeStatus.Disabled && - existingFeatureZNode.features.nonEmpty) { - warn(s"FeatureZNode at path: ${FeatureZNode.path} with disabled status" + - s" contains non-empty features: ${existingFeatureZNode.features}") - } - if (!newNode.equals(existingFeatureZNode)) { - updateFeatureZNode(newNode) - } - } - } - - private def maybeSetupFeatureVersioning(): Unit = { - if (config.isFeatureVersioningSupported) { - enableFeatureVersioning() - } else { - disableFeatureVersioning() - } - } - - private def scheduleAutoLeaderRebalanceTask(delay: Long, unit: TimeUnit): Unit = { - kafkaScheduler.scheduleOnce("auto-leader-rebalance-task", - () => eventManager.put(AutoPreferredReplicaLeaderElection), - unit.toMillis(delay)) - } - - /** - * This callback is invoked by the zookeeper leader elector when the current broker resigns as the controller. 
This is - * required to clean up internal controller data structures - */ - private def onControllerResignation(): Unit = { - debug("Resigning") - // de-register listeners - zkClient.unregisterZNodeChildChangeHandler(isrChangeNotificationHandler.path) - zkClient.unregisterZNodeChangeHandler(partitionReassignmentHandler.path) - zkClient.unregisterZNodeChangeHandler(preferredReplicaElectionHandler.path) - zkClient.unregisterZNodeChildChangeHandler(logDirEventNotificationHandler.path) - unregisterBrokerModificationsHandler(brokerModificationsHandlers.keySet) - - // shutdown leader rebalance scheduler - kafkaScheduler.shutdown() - - // stop token expiry check scheduler - tokenCleanScheduler.shutdown() - - // de-register partition ISR listener for on-going partition reassignment task - unregisterPartitionReassignmentIsrChangeHandlers() - // shutdown partition state machine - partitionStateMachine.shutdown() - zkClient.unregisterZNodeChildChangeHandler(topicChangeHandler.path) - unregisterPartitionModificationsHandlers(partitionModificationsHandlers.keys.toSeq) - zkClient.unregisterZNodeChildChangeHandler(topicDeletionHandler.path) - // shutdown replica state machine - replicaStateMachine.shutdown() - zkClient.unregisterZNodeChildChangeHandler(brokerChangeHandler.path) - - controllerChannelManager.shutdown() - controllerContext.resetContext() - - info("Resigned") - } - - private def removeMetrics(): Unit = { - KafkaController.MetricNames.foreach(metricsGroup.removeMetric) - } - - /* - * This callback is invoked by the controller's LogDirEventNotificationListener with the list of broker ids who - * have experienced new log directory failures. In response the controller should send LeaderAndIsrRequest - * to all these brokers to query the state of their replicas. Replicas with an offline log directory respond with - * KAFKA_STORAGE_ERROR, which will be handled by the LeaderAndIsrResponseReceived event. - */ - private def onBrokerLogDirFailure(brokerIds: Seq[Int]): Unit = { - // send LeaderAndIsrRequest for all replicas on those brokers to see if they are still online. - info(s"Handling log directory failure for brokers ${brokerIds.mkString(",")}") - val replicasOnBrokers = controllerContext.replicasOnBrokers(brokerIds.toSet) - replicaStateMachine.handleStateChanges(replicasOnBrokers.toSeq, OnlineReplica) - } - - /** - * This callback is invoked by the replica state machine's broker change listener, with the list of newly started - * brokers as input. It does the following - - * 1. Sends update metadata request to all live and shutting down brokers - * 2. Triggers the OnlinePartition state change for all new/offline partitions - * 3. It checks whether there are reassigned replicas assigned to any newly started brokers. If - * so, it performs the reassignment logic for each topic/partition. - * - * Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point for two reasons: - * 1. The partition state machine, when triggering online state change, will refresh leader and ISR for only those - * partitions currently new or offline (rather than every partition this controller is aware of) - * 2. Even if we do refresh the cache, there is no guarantee that by the time the leader and ISR request reaches - * every broker that it is still valid. Brokers check the leader epoch to determine validity of the request. 
- */ - private def onBrokerStartup(newBrokers: Seq[Int]): Unit = { - info(s"New broker startup callback for ${newBrokers.mkString(",")}") - newBrokers.foreach(controllerContext.replicasOnOfflineDirs.remove) - val newBrokersSet = newBrokers.toSet - val existingBrokers = controllerContext.liveOrShuttingDownBrokerIds.diff(newBrokersSet) - // Send update metadata request to all the existing brokers in the cluster so that they know about the new brokers - // via this update. No need to include any partition states in the request since there are no partition state changes. - sendUpdateMetadataRequest(existingBrokers.toSeq, Set.empty) - // Send update metadata request to all the new brokers in the cluster with a full set of partition states for initialization. - // In cases of controlled shutdown leaders will not be elected when a new broker comes up. So at least in the - // common controlled shutdown case, the metadata will reach the new brokers faster. - sendUpdateMetadataRequest(newBrokers, controllerContext.partitionsWithLeaders) - // the very first thing to do when a new broker comes up is send it the entire list of partitions that it is - // supposed to host. Based on that the broker starts the high watermark threads for the input list of partitions - val allReplicasOnNewBrokers = controllerContext.replicasOnBrokers(newBrokersSet) - replicaStateMachine.handleStateChanges(allReplicasOnNewBrokers.toSeq, OnlineReplica) - // when a new broker comes up, the controller needs to trigger leader election for all new and offline partitions - // to see if these brokers can become leaders for some/all of those - partitionStateMachine.triggerOnlinePartitionStateChange() - // check if reassignment of some partitions need to be restarted - maybeResumeReassignments { (_, assignment) => - assignment.targetReplicas.exists(newBrokersSet.contains) - } - // check if topic deletion needs to be resumed. If at least one replica that belongs to the topic being deleted exists - // on the newly restarted brokers, there is a chance that topic deletion can resume - val replicasForTopicsToBeDeleted = allReplicasOnNewBrokers.filter(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic)) - if (replicasForTopicsToBeDeleted.nonEmpty) { - info(s"Some replicas ${replicasForTopicsToBeDeleted.mkString(",")} for topics scheduled for deletion " + - s"${controllerContext.topicsToBeDeleted.mkString(",")} are on the newly restarted brokers " + - s"${newBrokers.mkString(",")}. 
Signaling restart of topic deletion for these topics") - topicDeletionManager.resumeDeletionForTopics(replicasForTopicsToBeDeleted.map(_.topic)) - } - registerBrokerModificationsHandler(newBrokers) - } - - private def maybeResumeReassignments(shouldResume: (TopicPartition, ReplicaAssignment) => Boolean): Unit = { - controllerContext.partitionsBeingReassigned.foreach { tp => - val currentAssignment = controllerContext.partitionFullReplicaAssignment(tp) - if (shouldResume(tp, currentAssignment)) - onPartitionReassignment(tp, currentAssignment) - } - } - - private def registerBrokerModificationsHandler(brokerIds: Iterable[Int]): Unit = { - debug(s"Register BrokerModifications handler for $brokerIds") - brokerIds.foreach { brokerId => - val brokerModificationsHandler = new BrokerModificationsHandler(eventManager, brokerId) - zkClient.registerZNodeChangeHandlerAndCheckExistence(brokerModificationsHandler) - brokerModificationsHandlers.put(brokerId, brokerModificationsHandler) - } - } - - private def unregisterBrokerModificationsHandler(brokerIds: Iterable[Int]): Unit = { - debug(s"Unregister BrokerModifications handler for $brokerIds") - brokerIds.foreach { brokerId => - brokerModificationsHandlers.remove(brokerId).foreach(handler => zkClient.unregisterZNodeChangeHandler(handler.path)) - } - } - - /* - * This callback is invoked by the replica state machine's broker change listener with the list of failed brokers - * as input. It will call onReplicaBecomeOffline(...) with the list of replicas on those failed brokers as input. - */ - private def onBrokerFailure(deadBrokers: Seq[Int]): Unit = { - info(s"Broker failure callback for ${deadBrokers.mkString(",")}") - deadBrokers.foreach(controllerContext.replicasOnOfflineDirs.remove) - val deadBrokersThatWereShuttingDown = - deadBrokers.filter(id => controllerContext.shuttingDownBrokerIds.remove(id)) - if (deadBrokersThatWereShuttingDown.nonEmpty) - info(s"Removed ${deadBrokersThatWereShuttingDown.mkString(",")} from list of shutting down brokers.") - val allReplicasOnDeadBrokers = controllerContext.replicasOnBrokers(deadBrokers.toSet) - onReplicasBecomeOffline(allReplicasOnDeadBrokers) - - unregisterBrokerModificationsHandler(deadBrokers) - } - - private def onBrokerUpdate(updatedBrokerId: Int): Unit = { - info(s"Broker info update callback for $updatedBrokerId") - sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty) - } - - /** - * This method marks the given replicas as offline. It does the following - - * 1. Marks the given partitions as offline - * 2. Triggers the OnlinePartition state change for all new/offline partitions - * 3. Invokes the OfflineReplica state change on the input list of newly offline replicas - * 4. If no partitions are affected then send UpdateMetadataRequest to live or shutting down brokers - * - * Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point. This is because - * the partition state machine will refresh our cache for us when performing leader election for all new/offline - * partitions coming online. 
- */ - private def onReplicasBecomeOffline(newOfflineReplicas: Set[PartitionAndReplica]): Unit = { - val (newOfflineReplicasForDeletion, newOfflineReplicasNotForDeletion) = - newOfflineReplicas.partition(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic)) - - val partitionsWithOfflineLeader = controllerContext.partitionsWithOfflineLeader - - // trigger OfflinePartition state for all partitions whose current leader is one amongst the newOfflineReplicas - partitionStateMachine.handleStateChanges(partitionsWithOfflineLeader.toSeq, OfflinePartition) - // trigger OnlinePartition state changes for offline or new partitions - val onlineStateChangeResults = partitionStateMachine.triggerOnlinePartitionStateChange() - // trigger OfflineReplica state change for those newly offline replicas - replicaStateMachine.handleStateChanges(newOfflineReplicasNotForDeletion.toSeq, OfflineReplica) - - // fail deletion of topics that are affected by the offline replicas - if (newOfflineReplicasForDeletion.nonEmpty) { - // it is required to mark the respective replicas in TopicDeletionFailed state since the replica cannot be - // deleted when its log directory is offline. This will prevent the replica from being in TopicDeletionStarted state indefinitely - // since topic deletion cannot be retried until at least one replica is in TopicDeletionStarted state - topicDeletionManager.failReplicaDeletion(newOfflineReplicasForDeletion) - } - - // If no partition has changed leader or ISR, no UpdateMetadataRequest is sent through PartitionStateMachine - // and ReplicaStateMachine. In that case, we want to send an UpdateMetadataRequest explicitly to - // propagate the information about the new offline brokers. - if (newOfflineReplicasNotForDeletion.isEmpty && onlineStateChangeResults.values.forall(_.isLeft)) { - sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty) - } - } - - /** - * This callback is invoked by the topic change callback with the list of failed brokers as input. - * It does the following - - * 1. Move the newly created partitions to the NewPartition state - * 2. Move the newly created partitions from NewPartition->OnlinePartition state - */ - private def onNewPartitionCreation(newPartitions: Set[TopicPartition]): Unit = { - info(s"New partition creation callback for ${newPartitions.mkString(",")}") - partitionStateMachine.handleStateChanges(newPartitions.toSeq, NewPartition) - replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions).toSeq, NewReplica) - partitionStateMachine.handleStateChanges( - newPartitions.toSeq, - OnlinePartition, - Some(OfflinePartitionLeaderElectionStrategy(false)) - ) - replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions).toSeq, OnlineReplica) - } - - /** - * This callback is invoked: - * 1. By the AlterPartitionReassignments API - * 2. By the reassigned partitions listener which is triggered when the /admin/reassign/partitions znode is created - * 3. When an ongoing reassignment finishes - this is detected by a change in the partition's ISR znode - * 4. Whenever a new broker comes up which is part of an ongoing reassignment - * 5. On controller startup/failover - * - * Reassigning replicas for a partition goes through a few steps listed in the code. 
- * RS = current assigned replica set - * ORS = Original replica set for partition - * TRS = Reassigned (target) replica set - * AR = The replicas we are adding as part of this reassignment - * RR = The replicas we are removing as part of this reassignment - * - * A reassignment may have up to three phases, each with its own steps: - - * Phase U (Assignment update): Regardless of the trigger, the first step is in the reassignment process - * is to update the existing assignment state. We always update the state in Zookeeper before - * we update memory so that it can be resumed upon controller fail-over. - * - * U1. Update ZK with RS = ORS + TRS, AR = TRS - ORS, RR = ORS - TRS. - * U2. Update memory with RS = ORS + TRS, AR = TRS - ORS and RR = ORS - TRS - * U3. If we are cancelling or replacing an existing reassignment, send StopReplica to all members - * of AR in the original reassignment if they are not in TRS from the new assignment - * - * To complete the reassignment, we need to bring the new replicas into sync, so depending on the state - * of the ISR, we will execute one of the following steps. - * - * Phase A (when TRS != ISR): The reassignment is not yet complete - * - * A1. Bump the leader epoch for the partition and send LeaderAndIsr updates to RS. - * A2. Start new replicas AR by moving replicas in AR to NewReplica state. - * - * Phase B (when TRS = ISR): The reassignment is complete - * - * B1. Move all replicas in AR to OnlineReplica state. - * B2. Set RS = TRS, AR = [], RR = [] in memory. - * B3. Send a LeaderAndIsr request with RS = TRS. This will prevent the leader from adding any replica in TRS - ORS back in the isr. - * If the current leader is not in TRS or isn't alive, we move the leader to a new replica in TRS. - * We may send the LeaderAndIsr to more than the TRS replicas due to the - * way the partition state machine works (it reads replicas from ZK) - * B4. Move all replicas in RR to OfflineReplica state. As part of OfflineReplica state change, we shrink the - * isr to remove RR in ZooKeeper and send a LeaderAndIsr ONLY to the Leader to notify it of the shrunk isr. - * After that, we send a StopReplica (delete = false) to the replicas in RR. - * B5. Move all replicas in RR to NonExistentReplica state. This will send a StopReplica (delete = true) to - * the replicas in RR to physically delete the replicas on disk. - * B6. Update ZK with RS=TRS, AR=[], RR=[]. - * B7. Remove the ISR reassign listener and maybe update the /admin/reassign_partitions path in ZK to remove this partition from it if present. - * B8. After electing leader, the replicas and isr information changes. So resend the update metadata request to every broker. - * - * In general, there are two goals we want to aim for: - * 1. Every replica present in the replica set of a LeaderAndIsrRequest gets the request sent to it - * 2. Replicas that are removed from a partition's assignment get StopReplica sent to them - * - * For example, if ORS = {1,2,3} and TRS = {4,5,6}, the values in the topic and leader/isr paths in ZK - * may go through the following transitions. 
- * RS AR RR leader isr - * {1,2,3} {} {} 1 {1,2,3} (initial state) - * {4,5,6,1,2,3} {4,5,6} {1,2,3} 1 {1,2,3} (step A2) - * {4,5,6,1,2,3} {4,5,6} {1,2,3} 1 {1,2,3,4,5,6} (phase B) - * {4,5,6,1,2,3} {4,5,6} {1,2,3} 4 {1,2,3,4,5,6} (step B3) - * {4,5,6,1,2,3} {4,5,6} {1,2,3} 4 {4,5,6} (step B4) - * {4,5,6} {} {} 4 {4,5,6} (step B6) - * - * Note that we have to update RS in ZK with TRS last since it's the only place where we store ORS persistently. - * This way, if the controller crashes before that step, we can still recover. - */ - private def onPartitionReassignment(topicPartition: TopicPartition, reassignment: ReplicaAssignment): Unit = { - // While a reassignment is in progress, deletion is not allowed - topicDeletionManager.markTopicIneligibleForDeletion(Set(topicPartition.topic), reason = "topic reassignment in progress") - - updateCurrentReassignment(topicPartition, reassignment) - - val addingReplicas = reassignment.addingReplicas - val removingReplicas = reassignment.removingReplicas - - if (!isReassignmentComplete(topicPartition, reassignment)) { - // A1. Send LeaderAndIsr request to every replica in ORS + TRS (with the new RS, AR and RR). - updateLeaderEpochAndSendRequest(topicPartition, reassignment) - // A2. replicas in AR -> NewReplica - startNewReplicasForReassignedPartition(topicPartition, addingReplicas) - } else { - // B1. replicas in AR -> OnlineReplica - replicaStateMachine.handleStateChanges(addingReplicas.map(PartitionAndReplica(topicPartition, _)), OnlineReplica) - // B2. Set RS = TRS, AR = [], RR = [] in memory. - val completedReassignment = ReplicaAssignment(reassignment.targetReplicas) - controllerContext.updatePartitionFullReplicaAssignment(topicPartition, completedReassignment) - // B3. Send LeaderAndIsr request with a potential new leader (if current leader not in TRS) and - // a new RS (using TRS) and same isr to every broker in ORS + TRS or TRS - moveReassignedPartitionLeaderIfRequired(topicPartition, completedReassignment) - // B4. replicas in RR -> Offline (force those replicas out of isr) - // B5. replicas in RR -> NonExistentReplica (force those replicas to be deleted) - stopRemovedReplicasOfReassignedPartition(topicPartition, removingReplicas) - // B6. Update ZK with RS = TRS, AR = [], RR = []. - updateReplicaAssignmentForPartition(topicPartition, completedReassignment) - // B7. Remove the ISR reassign listener and maybe update the /admin/reassign_partitions path in ZK to remove this partition from it. - removePartitionFromReassigningPartitions(topicPartition, completedReassignment) - // B8. After electing a leader in B3, the replicas and isr information changes, so resend the update metadata request to every broker - sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(topicPartition)) - // signal delete topic thread if reassignment for some partitions belonging to topics being deleted just completed - topicDeletionManager.resumeDeletionForTopics(Set(topicPartition.topic)) - } - } - - /** - * Update the current assignment state in Zookeeper and in memory. If a reassignment is already in - * progress, then the new reassignment will supplant it and some replicas will be shutdown. - * - * Note that due to the way we compute the original replica set, we cannot guarantee that a - * cancellation will restore the original replica order. 
Target replicas are always listed - * first in the replica set in the desired order, which means we have no way to get to the - * original order if the reassignment overlaps with the current assignment. For example, - * with an initial assignment of [1, 2, 3] and a reassignment of [3, 4, 2], then the replicas - * will be encoded as [3, 4, 2, 1] while the reassignment is in progress. If the reassignment - * is cancelled, there is no way to restore the original order. - * - * @param topicPartition The reassigning partition - * @param reassignment The new reassignment - */ - private def updateCurrentReassignment(topicPartition: TopicPartition, reassignment: ReplicaAssignment): Unit = { - val currentAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition) - - if (currentAssignment != reassignment) { - debug(s"Updating assignment of partition $topicPartition from $currentAssignment to $reassignment") - - // U1. Update assignment state in zookeeper - updateReplicaAssignmentForPartition(topicPartition, reassignment) - // U2. Update assignment state in memory - controllerContext.updatePartitionFullReplicaAssignment(topicPartition, reassignment) - - // If there is a reassignment already in progress, then some of the currently adding replicas - // may be eligible for immediate removal, in which case we need to stop the replicas. - val unneededReplicas = currentAssignment.replicas.diff(reassignment.replicas) - if (unneededReplicas.nonEmpty) - stopRemovedReplicasOfReassignedPartition(topicPartition, unneededReplicas) - } - - if (!isAlterPartitionEnabled) { - val reassignIsrChangeHandler = new PartitionReassignmentIsrChangeHandler(eventManager, topicPartition) - zkClient.registerZNodeChangeHandler(reassignIsrChangeHandler) - } - - controllerContext.partitionsBeingReassigned.add(topicPartition) - } - - /** - * Trigger a partition reassignment provided that the topic exists and is not being deleted. - * - * This is called when a reassignment is initially received either through Zookeeper or through the - * AlterPartitionReassignments API - * - * The `partitionsBeingReassigned` field in the controller context will be updated by this - * call after the reassignment completes validation and is successfully stored in the topic - * assignment zNode. - * - * @param reassignments The reassignments to begin processing - * @return A map of any errors in the reassignment. If the error is NONE for a given partition, - * then the reassignment was submitted successfully. 
- */ - private def maybeTriggerPartitionReassignment(reassignments: Map[TopicPartition, ReplicaAssignment]): Map[TopicPartition, ApiError] = { - reassignments.map { case (tp, reassignment) => - val topic = tp.topic - - val apiError = if (topicDeletionManager.isTopicQueuedUpForDeletion(topic)) { - info(s"Skipping reassignment of $tp since the topic is currently being deleted") - new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist.") - } else { - val assignedReplicas = controllerContext.partitionReplicaAssignment(tp) - if (assignedReplicas.nonEmpty) { - try { - onPartitionReassignment(tp, reassignment) - ApiError.NONE - } catch { - case e: ControllerMovedException => - info(s"Failed completing reassignment of partition $tp because controller has moved to another broker") - throw e - case e: Throwable => - error(s"Error completing reassignment of partition $tp", e) - new ApiError(Errors.UNKNOWN_SERVER_ERROR) - } - } else { - new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist.") - } - } - - tp -> apiError - } - } - - /** - * Attempt to elect a replica as leader for each of the given partitions. - * @param partitions The partitions to have a new leader elected - * @param electionType The type of election to perform - * @param electionTrigger The reason for trigger this election - * @return A map of failed and successful elections. The keys are the topic partitions and the corresponding values are - * either the exception that was thrown or new leader & ISR. - */ - private[this] def onReplicaElection( - partitions: Set[TopicPartition], - electionType: ElectionType, - electionTrigger: ElectionTrigger - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - info(s"Starting replica leader election ($electionType) for partitions ${partitions.mkString(",")} triggered by $electionTrigger") - try { - val strategy = electionType match { - case ElectionType.PREFERRED => PreferredReplicaPartitionLeaderElectionStrategy - case ElectionType.UNCLEAN => - /* Let's be conservative and only trigger unclean election if the election type is unclean and it was - * triggered by the admin client - */ - OfflinePartitionLeaderElectionStrategy(allowUnclean = electionTrigger == AdminClientTriggered) - } - - val results = partitionStateMachine.handleStateChanges( - partitions.toSeq, - OnlinePartition, - Some(strategy) - ) - if (electionTrigger != AdminClientTriggered) { - results.foreach { - case (tp, Left(throwable)) => - if (throwable.isInstanceOf[ControllerMovedException]) { - info(s"Error completing replica leader election ($electionType) for partition $tp because controller has moved to another broker.", throwable) - throw throwable - } else { - error(s"Error completing replica leader election ($electionType) for partition $tp", throwable) - } - case (_, Right(_)) => // Ignored; No need to log or throw exception for the success cases - } - } - - results - } finally { - if (electionTrigger != AdminClientTriggered) { - removePartitionsFromPreferredReplicaElection(partitions, electionTrigger == AutoTriggered) - } - } - } - - private def initializeControllerContext(): Unit = { - // update controller cache with delete topic information - val curBrokerAndEpochs = zkClient.getAllBrokerAndEpochsInCluster - val (compatibleBrokerAndEpochs, incompatibleBrokerAndEpochs) = partitionOnFeatureCompatibility(curBrokerAndEpochs) - if (incompatibleBrokerAndEpochs.nonEmpty) { - warn("Ignoring registration of new brokers due to incompatibilities with finalized features: " + - 
incompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(",")) - } - controllerContext.setLiveBrokers(compatibleBrokerAndEpochs) - info(s"Initialized broker epochs cache: ${controllerContext.liveBrokerIdAndEpochs}") - controllerContext.setAllTopics(zkClient.getAllTopicsInCluster(true)) - registerPartitionModificationsHandlers(controllerContext.allTopics.toSeq) - val replicaAssignmentAndTopicIds = zkClient.getReplicaAssignmentAndTopicIdForTopics(controllerContext.allTopics.toSet) - processTopicIds(replicaAssignmentAndTopicIds) - - replicaAssignmentAndTopicIds.foreach { case TopicIdReplicaAssignment(_, _, assignments) => - assignments.foreach { case (topicPartition, replicaAssignment) => - controllerContext.updatePartitionFullReplicaAssignment(topicPartition, replicaAssignment) - if (replicaAssignment.isBeingReassigned) - controllerContext.partitionsBeingReassigned.add(topicPartition) - } - } - controllerContext.clearPartitionLeadershipInfo() - controllerContext.shuttingDownBrokerIds.clear() - // register broker modifications handlers - registerBrokerModificationsHandler(controllerContext.liveOrShuttingDownBrokerIds) - // update the leader and isr cache for all existing partitions from Zookeeper - updateLeaderAndIsrCache() - // start the channel manager - controllerChannelManager.startup(controllerContext.liveOrShuttingDownBrokers) - info(s"Currently active brokers in the cluster: ${controllerContext.liveBrokerIds}") - info(s"Currently shutting brokers in the cluster: ${controllerContext.shuttingDownBrokerIds}") - info(s"Current list of topics in the cluster: ${controllerContext.allTopics}") - } - - private def fetchPendingPreferredReplicaElections(): Set[TopicPartition] = { - val partitionsUndergoingPreferredReplicaElection = zkClient.getPreferredReplicaElection - // check if they are already completed or topic was deleted - val partitionsThatCompletedPreferredReplicaElection = partitionsUndergoingPreferredReplicaElection.filter { partition => - val replicas = controllerContext.partitionReplicaAssignment(partition) - val topicDeleted = replicas.isEmpty - val successful = - if (!topicDeleted) controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader == replicas.head else false - successful || topicDeleted - } - val pendingPreferredReplicaElectionsIgnoringTopicDeletion = partitionsUndergoingPreferredReplicaElection -- partitionsThatCompletedPreferredReplicaElection - val pendingPreferredReplicaElectionsSkippedFromTopicDeletion = pendingPreferredReplicaElectionsIgnoringTopicDeletion.filter(partition => topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic)) - val pendingPreferredReplicaElections = pendingPreferredReplicaElectionsIgnoringTopicDeletion -- pendingPreferredReplicaElectionsSkippedFromTopicDeletion - info(s"Partitions undergoing preferred replica election: ${partitionsUndergoingPreferredReplicaElection.mkString(",")}") - info(s"Partitions that completed preferred replica election: ${partitionsThatCompletedPreferredReplicaElection.mkString(",")}") - info(s"Skipping preferred replica election for partitions due to topic deletion: ${pendingPreferredReplicaElectionsSkippedFromTopicDeletion.mkString(",")}") - info(s"Resuming preferred replica election for partitions: ${pendingPreferredReplicaElections.mkString(",")}") - pendingPreferredReplicaElections - } - - /** - * Initialize pending reassignments. 
This includes reassignments sent through /admin/reassign_partitions, - * which will supplant any API reassignments already in progress. - */ - private def initializePartitionReassignments(): Unit = { - // New reassignments may have been submitted through Zookeeper while the controller was failing over - val zkPartitionsResumed = processZkPartitionReassignment() - // We may also have some API-based reassignments that need to be restarted - maybeResumeReassignments { (tp, _) => - !zkPartitionsResumed.contains(tp) - } - } - - private def fetchTopicDeletionsInProgress(): (Set[String], Set[String]) = { - val topicsToBeDeleted = zkClient.getTopicDeletions.toSet - val topicsWithOfflineReplicas = controllerContext.allTopics.filter { topic => { - val replicasForTopic = controllerContext.replicasForTopic(topic) - replicasForTopic.exists(r => !controllerContext.isReplicaOnline(r.replica, r.topicPartition)) - }} - val topicsForWhichPartitionReassignmentIsInProgress = controllerContext.partitionsBeingReassigned.map(_.topic) - val topicsIneligibleForDeletion = topicsWithOfflineReplicas | topicsForWhichPartitionReassignmentIsInProgress - info(s"List of topics to be deleted: ${topicsToBeDeleted.mkString(",")}") - info(s"List of topics ineligible for deletion: ${topicsIneligibleForDeletion.mkString(",")}") - (topicsToBeDeleted, topicsIneligibleForDeletion) - } - - private def updateLeaderAndIsrCache(partitions: Seq[TopicPartition] = controllerContext.allPartitions.toSeq): Unit = { - val leaderIsrAndControllerEpochs = zkClient.getTopicPartitionStates(partitions) - leaderIsrAndControllerEpochs.foreachEntry { (partition, leaderIsrAndControllerEpoch) => - controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch) - } - } - - private def isReassignmentComplete(partition: TopicPartition, assignment: ReplicaAssignment): Boolean = { - if (!assignment.isBeingReassigned) { - true - } else { - zkClient.getTopicPartitionStates(Seq(partition)).get(partition).exists { leaderIsrAndControllerEpoch => - val isr = leaderIsrAndControllerEpoch.leaderAndIsr.isr.asScala.toSet.map(Int.unbox) - val targetReplicas = assignment.targetReplicas.toSet - targetReplicas.subsetOf(isr) - } - } - } - - private def moveReassignedPartitionLeaderIfRequired(topicPartition: TopicPartition, - newAssignment: ReplicaAssignment): Unit = { - val reassignedReplicas = newAssignment.replicas - val currentLeader = controllerContext.partitionLeadershipInfo(topicPartition).get.leaderAndIsr.leader - - if (!reassignedReplicas.contains(currentLeader)) { - info(s"Leader $currentLeader for partition $topicPartition being reassigned, " + - s"is not in the new list of replicas ${reassignedReplicas.mkString(",")}. 
Re-electing leader") - // move the leader to one of the alive and caught up new replicas - partitionStateMachine.handleStateChanges(Seq(topicPartition), OnlinePartition, Some(ReassignPartitionLeaderElectionStrategy)) - } else if (controllerContext.isReplicaOnline(currentLeader, topicPartition)) { - info(s"Leader $currentLeader for partition $topicPartition being reassigned, " + - s"is already in the new list of replicas ${reassignedReplicas.mkString(",")} and is alive") - // shrink replication factor and update the leader epoch in zookeeper to use on the next LeaderAndIsrRequest - updateLeaderEpochAndSendRequest(topicPartition, newAssignment) - } else { - info(s"Leader $currentLeader for partition $topicPartition being reassigned, " + - s"is already in the new list of replicas ${reassignedReplicas.mkString(",")} but is dead") - partitionStateMachine.handleStateChanges(Seq(topicPartition), OnlinePartition, Some(ReassignPartitionLeaderElectionStrategy)) - } - } - - private def stopRemovedReplicasOfReassignedPartition(topicPartition: TopicPartition, - removedReplicas: Seq[Int]): Unit = { - // first move the replica to offline state (the controller removes it from the ISR) - val replicasToBeDeleted = removedReplicas.map(PartitionAndReplica(topicPartition, _)) - replicaStateMachine.handleStateChanges(replicasToBeDeleted, OfflineReplica) - // send stop replica command to the old replicas - replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionStarted) - // TODO: Eventually partition reassignment could use a callback that does retries if deletion failed - replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionSuccessful) - replicaStateMachine.handleStateChanges(replicasToBeDeleted, NonExistentReplica) - } - - private def updateReplicaAssignmentForPartition(topicPartition: TopicPartition, assignment: ReplicaAssignment): Unit = { - val topicAssignment = mutable.Map() ++= - controllerContext.partitionFullReplicaAssignmentForTopic(topicPartition.topic) += - (topicPartition -> assignment) - - val setDataResponse = zkClient.setTopicAssignmentRaw(topicPartition.topic, - controllerContext.topicIds.get(topicPartition.topic), - topicAssignment, controllerContext.epochZkVersion) - setDataResponse.resultCode match { - case Code.OK => - info(s"Successfully updated assignment of partition $topicPartition to $assignment") - case Code.NONODE => - throw new IllegalStateException(s"Failed to update assignment for $topicPartition since the topic " + - "has no current assignment") - case _ => throw new KafkaException(setDataResponse.resultException.get) - } - } - - private def startNewReplicasForReassignedPartition(topicPartition: TopicPartition, newReplicas: Seq[Int]): Unit = { - // send the start replica request to the brokers in the reassigned replicas list that are not in the assigned - // replicas list - newReplicas.foreach { replica => - replicaStateMachine.handleStateChanges(Seq(PartitionAndReplica(topicPartition, replica)), NewReplica) - } - } - - private def updateLeaderEpochAndSendRequest(topicPartition: TopicPartition, - assignment: ReplicaAssignment): Unit = { - val stateChangeLog = stateChangeLogger.withControllerEpoch(controllerContext.epoch) - updateLeaderEpoch(topicPartition) match { - case Some(updatedLeaderIsrAndControllerEpoch) => - try { - brokerRequestBatch.newBatch() - // the isNew flag, when set to true, makes sure that when a replica possibly resided - // in a logDir that is offline, we refrain from just creating a new replica in a good - // logDir. 
This is exactly the behavior we want for the original replicas, but not - // for the replicas we add in this reassignment. For new replicas, we want to be able - // to assign to one of the good logDirs. - brokerRequestBatch.addLeaderAndIsrRequestForBrokers(assignment.originReplicas, topicPartition, - updatedLeaderIsrAndControllerEpoch, assignment, isNew = false) - brokerRequestBatch.addLeaderAndIsrRequestForBrokers(assignment.addingReplicas, topicPartition, - updatedLeaderIsrAndControllerEpoch, assignment, isNew = true) - brokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch) - } catch { - case e: IllegalStateException => - handleIllegalState(e) - } - stateChangeLog.info(s"Sent LeaderAndIsr request $updatedLeaderIsrAndControllerEpoch with " + - s"new replica assignment $assignment to leader ${updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader} " + - s"for partition being reassigned $topicPartition") - - case None => // fail the reassignment - stateChangeLog.error(s"Failed to send LeaderAndIsr request with new replica assignment " + - s"$assignment to leader for partition being reassigned $topicPartition") - } - } - - private def registerPartitionModificationsHandlers(topics: Seq[String]): Unit = { - topics.foreach { topic => - val partitionModificationsHandler = new PartitionModificationsHandler(eventManager, topic) - partitionModificationsHandlers.put(topic, partitionModificationsHandler) - } - partitionModificationsHandlers.values.foreach(zkClient.registerZNodeChangeHandler) - } - - private[controller] def unregisterPartitionModificationsHandlers(topics: Seq[String]): Unit = { - topics.foreach { topic => - partitionModificationsHandlers.remove(topic).foreach(handler => zkClient.unregisterZNodeChangeHandler(handler.path)) - } - } - - private def unregisterPartitionReassignmentIsrChangeHandlers(): Unit = { - if (!isAlterPartitionEnabled) { - controllerContext.partitionsBeingReassigned.foreach { tp => - val path = TopicPartitionStateZNode.path(tp) - zkClient.unregisterZNodeChangeHandler(path) - } - } - } - - private def removePartitionFromReassigningPartitions(topicPartition: TopicPartition, - assignment: ReplicaAssignment): Unit = { - if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) { - if (!isAlterPartitionEnabled) { - val path = TopicPartitionStateZNode.path(topicPartition) - zkClient.unregisterZNodeChangeHandler(path) - } - maybeRemoveFromZkReassignment((tp, replicas) => tp == topicPartition && replicas == assignment.replicas) - controllerContext.partitionsBeingReassigned.remove(topicPartition) - } else { - throw new IllegalStateException("Cannot remove a reassigning partition because it is not present in memory") - } - } - - /** - * Remove partitions from an active zk-based reassignment (if one exists). 
- * - * @param shouldRemoveReassignment Predicate indicating which partition reassignments should be removed - */ - private def maybeRemoveFromZkReassignment(shouldRemoveReassignment: (TopicPartition, Seq[Int]) => Boolean): Unit = { - if (!zkClient.reassignPartitionsInProgress) - return - - val reassigningPartitions = zkClient.getPartitionReassignment - val (removingPartitions, updatedPartitionsBeingReassigned) = reassigningPartitions.partition { case (tp, replicas) => - shouldRemoveReassignment(tp, replicas) - } - info(s"Removing partitions $removingPartitions from the list of reassigned partitions in zookeeper") - - // write the new list to zookeeper - if (updatedPartitionsBeingReassigned.isEmpty) { - info(s"No more partitions need to be reassigned. Deleting zk path ${ReassignPartitionsZNode.path}") - zkClient.deletePartitionReassignment(controllerContext.epochZkVersion) - // Ensure we detect future reassignments - eventManager.put(ZkPartitionReassignment) - } else { - try { - zkClient.setOrCreatePartitionReassignment(updatedPartitionsBeingReassigned, controllerContext.epochZkVersion) - } catch { - case e: KeeperException => throw new AdminOperationException(e) - } - } - } - - private def removePartitionsFromPreferredReplicaElection(partitionsToBeRemoved: Set[TopicPartition], - isTriggeredByAutoRebalance : Boolean): Unit = { - for (partition <- partitionsToBeRemoved) { - // check the status - val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader - val preferredReplica = controllerContext.partitionReplicaAssignment(partition).head - if (currentLeader == preferredReplica) { - info(s"Partition $partition completed preferred replica leader election. New leader is $preferredReplica") - } else { - warn(s"Partition $partition failed to complete preferred replica leader election to $preferredReplica. " + - s"Leader is still $currentLeader") - } - } - if (!isTriggeredByAutoRebalance) { - zkClient.deletePreferredReplicaElection(controllerContext.epochZkVersion) - // Ensure we detect future preferred replica leader elections - eventManager.put(ReplicaLeaderElection(None, ElectionType.PREFERRED, ZkTriggered)) - } - } - - /** - * Send the leader information for selected partitions to selected brokers so that they can correctly respond to - * metadata requests - * - * @param brokers The brokers that the update metadata request should be sent to - */ - private[controller] def sendUpdateMetadataRequest(brokers: Seq[Int], partitions: Set[TopicPartition]): Unit = { - try { - brokerRequestBatch.newBatch() - brokerRequestBatch.addUpdateMetadataRequestForBrokers(brokers, partitions) - brokerRequestBatch.sendRequestsToBrokers(epoch) - } catch { - case e: IllegalStateException => - handleIllegalState(e) - } - } - - /** - * Does not change leader or isr, but just increments the leader epoch - * - * @param partition partition - * @return the new leaderAndIsr with an incremented leader epoch, or None if leaderAndIsr is empty. 
- */ - private def updateLeaderEpoch(partition: TopicPartition): Option[LeaderIsrAndControllerEpoch] = { - debug(s"Updating leader epoch for partition $partition") - var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None - var zkWriteCompleteOrUnnecessary = false - while (!zkWriteCompleteOrUnnecessary) { - // refresh leader and isr from zookeeper again - zkWriteCompleteOrUnnecessary = zkClient.getTopicPartitionStates(Seq(partition)).get(partition) match { - case Some(leaderIsrAndControllerEpoch) => - val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr - val controllerEpoch = leaderIsrAndControllerEpoch.controllerEpoch - if (controllerEpoch > epoch) - throw new StateChangeFailedException("Leader and isr path written by another controller. This probably " + - s"means the current controller with epoch $epoch went through a soft failure and another " + - s"controller was elected with epoch $controllerEpoch. Aborting state change by this controller") - // increment the leader epoch even if there are no leader or isr changes to allow the leader to cache the expanded - // assigned replica list - val newLeaderAndIsr = leaderAndIsr.newEpoch - // update the new leadership decision in zookeeper or retry - val UpdateLeaderAndIsrResult(finishedUpdates, _) = - zkClient.updateLeaderAndIsr(immutable.Map(partition -> newLeaderAndIsr), epoch, controllerContext.epochZkVersion) - - finishedUpdates.get(partition) match { - case Some(Right(leaderAndIsr)) => - val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, epoch) - controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch) - finalLeaderIsrAndControllerEpoch = Some(leaderIsrAndControllerEpoch) - info(s"Updated leader epoch for partition $partition to ${leaderAndIsr.leaderEpoch}, zkVersion=${leaderAndIsr.partitionEpoch}") - true - case Some(Left(e)) => throw e - case None => false - } - case None => - throw new IllegalStateException(s"Cannot update leader epoch for partition $partition as " + - "leaderAndIsr path is empty. 
This could mean we somehow tried to reassign a partition that doesn't exist") - } - } - finalLeaderIsrAndControllerEpoch - } - - private def checkAndTriggerAutoLeaderRebalance(): Unit = { - trace("Checking need to trigger auto leader balancing") - val preferredReplicasForTopicsByBrokers: Map[Int, Map[TopicPartition, Seq[Int]]] = - controllerContext.allPartitions.filterNot { - tp => topicDeletionManager.isTopicQueuedUpForDeletion(tp.topic) - }.map { tp => - (tp, controllerContext.partitionReplicaAssignment(tp) ) - }.toMap.groupBy { case (_, assignedReplicas) => assignedReplicas.head } - - // for each broker, check if a preferred replica election needs to be triggered - preferredReplicasForTopicsByBrokers.foreachEntry { (leaderBroker, topicPartitionsForBroker) => - val topicsNotInPreferredReplica = topicPartitionsForBroker.filter { case (topicPartition, _) => - val leadershipInfo = controllerContext.partitionLeadershipInfo(topicPartition) - leadershipInfo.exists(_.leaderAndIsr.leader != leaderBroker) - } - debug(s"Topics not in preferred replica for broker $leaderBroker $topicsNotInPreferredReplica") - - val imbalanceRatio = topicsNotInPreferredReplica.size.toDouble / topicPartitionsForBroker.size - trace(s"Leader imbalance ratio for broker $leaderBroker is $imbalanceRatio") - - // check ratio and if greater than desired ratio, trigger a rebalance for the topic partitions - // that need to be on this broker - if (imbalanceRatio > (config.leaderImbalancePerBrokerPercentage.toDouble / 100)) { - val candidatePartitions = topicsNotInPreferredReplica.keys.filter(tp => - !topicDeletionManager.isTopicQueuedUpForDeletion(tp.topic) && - controllerContext.allTopics.contains(tp.topic) && - canPreferredReplicaBeLeader(tp) - ) - onReplicaElection(candidatePartitions.toSet, ElectionType.PREFERRED, AutoTriggered) - } - } - } - - private def canPreferredReplicaBeLeader(tp: TopicPartition): Boolean = { - val assignment = controllerContext.partitionReplicaAssignment(tp) - val liveReplicas = assignment.filter(replica => controllerContext.isReplicaOnline(replica, tp)) - val isr = controllerContext.partitionLeadershipInfo(tp).get.leaderAndIsr.isr.asScala.toSeq.map(_.toInt) - PartitionLeaderElectionAlgorithms - .preferredReplicaPartitionLeaderElection(assignment, isr, liveReplicas.toSet) - .nonEmpty - } - - private def processAutoPreferredReplicaLeaderElection(): Unit = { - if (!isActive) return - try { - info("Processing automatic preferred replica leader election") - checkAndTriggerAutoLeaderRebalance() - } finally { - scheduleAutoLeaderRebalanceTask(delay = config.leaderImbalanceCheckIntervalSeconds, unit = TimeUnit.SECONDS) - } - } - - private def processUncleanLeaderElectionEnable(): Unit = { - if (!isActive) return - info("Unclean leader election has been enabled by default") - partitionStateMachine.triggerOnlinePartitionStateChange() - } - - private def processTopicUncleanLeaderElectionEnable(topic: String): Unit = { - if (!isActive) return - info(s"Unclean leader election has been enabled for topic $topic") - partitionStateMachine.triggerOnlinePartitionStateChange(topic) - } - - private def processControlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit): Unit = { - val controlledShutdownResult = Try { doControlledShutdown(id, brokerEpoch) } - controlledShutdownCallback(controlledShutdownResult) - } - - private def doControlledShutdown(id: Int, brokerEpoch: Long): Set[TopicPartition] = { - if (!isActive) { - throw new ControllerMovedException("Controller 
moved to another broker. Aborting controlled shutdown") - } - - // broker epoch in the request is unknown if the controller hasn't been upgraded to use KIP-380 - // so we will keep the previous behavior and don't reject the request - if (brokerEpoch != AbstractControlRequest.UNKNOWN_BROKER_EPOCH) { - val cachedBrokerEpoch = controllerContext.liveBrokerIdAndEpochs(id) - if (brokerEpoch < cachedBrokerEpoch) { - val stateBrokerEpochErrorMessage = "Received controlled shutdown request from an old broker epoch " + - s"$brokerEpoch for broker $id. Current broker epoch is $cachedBrokerEpoch." - info(stateBrokerEpochErrorMessage) - throw new StaleBrokerEpochException(stateBrokerEpochErrorMessage) - } - } - - info(s"Shutting down broker $id") - - if (!controllerContext.liveOrShuttingDownBrokerIds.contains(id)) - throw new BrokerNotAvailableException(s"Broker id $id does not exist.") - - controllerContext.shuttingDownBrokerIds.add(id) - debug(s"All shutting down brokers: ${controllerContext.shuttingDownBrokerIds.mkString(",")}") - debug(s"Live brokers: ${controllerContext.liveBrokerIds.mkString(",")}") - - val partitionsToActOn = controllerContext.partitionsOnBroker(id).filter { partition => - controllerContext.partitionReplicaAssignment(partition).size > 1 && - controllerContext.partitionLeadershipInfo(partition).isDefined && - !topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic) - } - val (partitionsLedByBroker, partitionsFollowedByBroker) = partitionsToActOn.partition { partition => - controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader == id - } - partitionStateMachine.handleStateChanges(partitionsLedByBroker.toSeq, OnlinePartition, Some(ControlledShutdownPartitionLeaderElectionStrategy)) - try { - brokerRequestBatch.newBatch() - partitionsFollowedByBroker.foreach { partition => - brokerRequestBatch.addStopReplicaRequestForBrokers(Seq(id), partition, deletePartition = false) - } - brokerRequestBatch.sendRequestsToBrokers(epoch) - } catch { - case e: IllegalStateException => - handleIllegalState(e) - } - // If the broker is a follower, updates the isr in ZK and notifies the current leader - replicaStateMachine.handleStateChanges(partitionsFollowedByBroker.map(partition => - PartitionAndReplica(partition, id)).toSeq, OfflineReplica) - trace(s"All leaders = ${controllerContext.partitionsLeadershipInfo.mkString(",")}") - controllerContext.partitionLeadersOnBroker(id) - } - - private def processUpdateMetadataResponseReceived(updateMetadataResponse: UpdateMetadataResponse, brokerId: Int): Unit = { - if (!isActive) return - - if (updateMetadataResponse.error != Errors.NONE) { - stateChangeLogger.error(s"Received error ${updateMetadataResponse.error} in UpdateMetadata " + - s"response $updateMetadataResponse from broker $brokerId") - } - } - - private def processLeaderAndIsrResponseReceived(leaderAndIsrResponse: LeaderAndIsrResponse, brokerId: Int): Unit = { - if (!isActive) return - - if (leaderAndIsrResponse.error != Errors.NONE) { - stateChangeLogger.error(s"Received error ${leaderAndIsrResponse.error} in LeaderAndIsr " + - s"response $leaderAndIsrResponse from broker $brokerId") - return - } - - val offlineReplicas = new ArrayBuffer[TopicPartition]() - val onlineReplicas = new ArrayBuffer[TopicPartition]() - - leaderAndIsrResponse.partitionErrors(controllerContext.topicNames.asJava).forEach{ case (tp, error) => - if (error.code() == Errors.KAFKA_STORAGE_ERROR.code) - offlineReplicas += tp - else if (error.code() == Errors.NONE.code) - onlineReplicas += tp - } - - 
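// Illustrative sketch (not part of the removed KafkaController.scala): the offline-log-dir
// bookkeeping applied just below, with plain strings standing in for TopicPartition so it
// runs on its own.
object OfflineReplicaBookkeepingSketch extends App {
  val previousOffline = Set("topicA-0", "topicB-1") // replicas already cached as offline for this broker
  val reportedOnline  = Set("topicB-1")             // partitions that came back with Errors.NONE
  val reportedOffline = Set("topicC-2")             // partitions that came back with KAFKA_STORAGE_ERROR

  // Same set algebra as: mutable.Set() ++= previousOfflineReplicas --= onlineReplicas ++= offlineReplicas
  val currentOffline = previousOffline -- reportedOnline ++ reportedOffline
  // Only replicas that were not already known to be offline trigger onReplicasBecomeOffline
  val newlyOffline = currentOffline.diff(previousOffline)

  println(s"currentOffline=$currentOffline newlyOffline=$newlyOffline")
  // e.g. currentOffline=Set(topicA-0, topicC-2), newlyOffline=Set(topicC-2)
}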
val previousOfflineReplicas = controllerContext.replicasOnOfflineDirs.getOrElse(brokerId, Set.empty[TopicPartition]) - val currentOfflineReplicas = mutable.Set() ++= previousOfflineReplicas --= onlineReplicas ++= offlineReplicas - controllerContext.replicasOnOfflineDirs.put(brokerId, currentOfflineReplicas) - val newOfflineReplicas = currentOfflineReplicas.diff(previousOfflineReplicas) - - if (newOfflineReplicas.nonEmpty) { - stateChangeLogger.info(s"Mark replicas ${newOfflineReplicas.mkString(",")} on broker $brokerId as offline") - onReplicasBecomeOffline(newOfflineReplicas.map(PartitionAndReplica(_, brokerId))) - } - } - - private def processTopicDeletionStopReplicaResponseReceived(replicaId: Int, - requestError: Errors, - partitionErrors: Map[TopicPartition, Errors]): Unit = { - if (!isActive) return - debug(s"Delete topic callback invoked on StopReplica response received from broker $replicaId: " + - s"request error = $requestError, partition errors = $partitionErrors") - - val partitionsInError = if (requestError != Errors.NONE) - partitionErrors.keySet - else - partitionErrors.filter { case (_, error) => error != Errors.NONE }.keySet - - val replicasInError = partitionsInError.map(PartitionAndReplica(_, replicaId)) - // move all the failed replicas to ReplicaDeletionIneligible - topicDeletionManager.failReplicaDeletion(replicasInError) - if (replicasInError.size != partitionErrors.size) { - // some replicas could have been successfully deleted - val deletedReplicas = partitionErrors.keySet.diff(partitionsInError) - topicDeletionManager.completeReplicaDeletion(deletedReplicas.map(PartitionAndReplica(_, replicaId))) - } - } - - private def processStartup(): Unit = { - zkClient.registerZNodeChangeHandlerAndCheckExistence(controllerChangeHandler) - elect() - } - - private def updateMetrics(): Unit = { - if (isActive) { - offlinePartitionCount = controllerContext.offlinePartitionCount - preferredReplicaImbalanceCount = controllerContext.preferredReplicaImbalanceCount - globalTopicCount = controllerContext.allTopics.size - globalPartitionCount = controllerContext.partitionWithLeadersCount - topicsToDeleteCount = controllerContext.topicsToBeDeleted.size - replicasToDeleteCount = controllerContext.topicsToBeDeleted.map { topic => - // For each enqueued topic, count the number of replicas that are not yet deleted - controllerContext.replicasForTopic(topic).count { replica => - controllerContext.replicaState(replica) != ReplicaDeletionSuccessful - } - }.sum - ineligibleTopicsToDeleteCount = controllerContext.topicsIneligibleForDeletion.size - ineligibleReplicasToDeleteCount = controllerContext.topicsToBeDeleted.map { topic => - // For each enqueued topic, count the number of replicas that are ineligible - controllerContext.replicasForTopic(topic).count { replica => - controllerContext.replicaState(replica) == ReplicaDeletionIneligible - } - }.sum - activeBrokerCount = controllerContext.liveOrShuttingDownBrokerIds.size - } else { - offlinePartitionCount = 0 - preferredReplicaImbalanceCount = 0 - globalTopicCount = 0 - globalPartitionCount = 0 - topicsToDeleteCount = 0 - replicasToDeleteCount = 0 - ineligibleTopicsToDeleteCount = 0 - ineligibleReplicasToDeleteCount = 0 - activeBrokerCount = 0 - } - } - - // visible for testing - private[controller] def handleIllegalState(e: IllegalStateException): Nothing = { - // Resign if the controller is in an illegal state - error("Forcing the controller to resign") - brokerRequestBatch.clear() - triggerControllerMove() - throw e - } - - private def 
triggerControllerMove(): Unit = { - activeControllerId = zkClient.getControllerId.getOrElse(-1) - if (!isActive) { - warn("Controller has already moved when trying to trigger controller movement") - return - } - try { - val expectedControllerEpochZkVersion = controllerContext.epochZkVersion - activeControllerId = -1 - onControllerResignation() - zkClient.deleteController(expectedControllerEpochZkVersion) - } catch { - case _: ControllerMovedException => - warn("Controller has already moved when trying to trigger controller movement") - } - } - - private def maybeResign(): Unit = { - val wasActiveBeforeChange = isActive - zkClient.registerZNodeChangeHandlerAndCheckExistence(controllerChangeHandler) - activeControllerId = zkClient.getControllerId.getOrElse(-1) - if (wasActiveBeforeChange && !isActive) { - onControllerResignation() - } - } - - private def elect(): Unit = { - activeControllerId = zkClient.getControllerId.getOrElse(-1) - /* - * We can get here during the initial startup and the handleDeleted ZK callback. Because of the potential race condition, - * it's possible that the controller has already been elected when we get here. This check will prevent the following - * createEphemeralPath method from getting into an infinite loop if this broker is already the controller. - */ - if (activeControllerId != -1) { - debug(s"Broker $activeControllerId has been elected as the controller, so stopping the election process.") - return - } - - try { - val (epoch, epochZkVersion) = zkClient.registerControllerAndIncrementControllerEpoch(config.brokerId) - controllerContext.epoch = epoch - controllerContext.epochZkVersion = epochZkVersion - activeControllerId = config.brokerId - - info(s"${config.brokerId} successfully elected as the controller. Epoch incremented to ${controllerContext.epoch} " + - s"and epoch zk version is now ${controllerContext.epochZkVersion}") - - onControllerFailover() - } catch { - case e: ControllerMovedException => - maybeResign() - - if (activeControllerId != -1) - debug(s"Broker $activeControllerId was elected as controller instead of broker ${config.brokerId}", e) - else - warn("A controller has been elected but just resigned, this will result in another round of election", e) - case t: Throwable => - error(s"Error while electing or becoming controller on broker ${config.brokerId}. " + - s"Trigger controller movement immediately", t) - triggerControllerMove() - } - } - - /** - * Partitions the provided map of brokers and epochs into 2 new maps: - * - The first map contains only those brokers whose features were found to be compatible with - * the existing finalized features. - * - The second map contains only those brokers whose features were found to be incompatible with - * the existing finalized features. - * - * @param brokersAndEpochs the map to be partitioned - * @return two maps: first contains compatible brokers and second contains - * incompatible brokers as explained above - */ - private def partitionOnFeatureCompatibility(brokersAndEpochs: Map[Broker, Long]): (Map[Broker, Long], Map[Broker, Long]) = { - // There can not be any feature incompatibilities when the feature versioning system is disabled - // or when the finalized feature cache is empty. Otherwise, we check if the non-empty contents - // of the cache are compatible with the supported features of each broker. 
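// Illustrative sketch (not part of the removed KafkaController.scala): one way to read the
// compatibility partition below, using simplified stand-in types and Int version levels.
// "Incompatible" is assumed here to mean that some finalized feature level falls outside a
// broker's supported min..max range (or the broker does not advertise the feature at all);
// the real check is BrokerFeatures.hasIncompatibleFeatures.
object FeatureCompatibilitySketch extends App {
  final case class SupportedRange(min: Int, max: Int)
  final case class BrokerStandIn(id: Int, supported: Map[String, SupportedRange])

  val finalized: Map[String, Int] = Map("feature.x" -> 7)

  def hasIncompatibility(broker: BrokerStandIn): Boolean =
    finalized.exists { case (feature, level) =>
      broker.supported.get(feature) match {
        case Some(range) => level < range.min || level > range.max
        case None        => true
      }
    }

  val brokersAndEpochs: Map[BrokerStandIn, Long] = Map(
    BrokerStandIn(1, Map("feature.x" -> SupportedRange(1, 10))) -> 25L,
    BrokerStandIn(2, Map("feature.x" -> SupportedRange(1, 5)))  -> 26L
  )

  // Mirrors partitionOnFeatureCompatibility: first map keeps compatible brokers, second the rest.
  val (compatible, incompatible) = brokersAndEpochs.partition { case (broker, _) => !hasIncompatibility(broker) }
  println(s"compatible brokers=${compatible.keys.map(_.id)} incompatible brokers=${incompatible.keys.map(_.id)}")
}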
- brokersAndEpochs.partition { - case (broker, _) => - !config.isFeatureVersioningSupported || - !featureCache.getFeatureOption.exists( - latestFinalizedFeatures => - BrokerFeatures.hasIncompatibleFeatures(broker.features, - latestFinalizedFeatures.finalizedFeatures().asScala. - map(kv => (kv._1, kv._2.toShort: java.lang.Short)).toMap.asJava)) - } - } - - private def processBrokerChange(): Unit = { - if (!isActive) return - val curBrokerAndEpochs = zkClient.getAllBrokerAndEpochsInCluster - val curBrokerIdAndEpochs = curBrokerAndEpochs map { case (broker, epoch) => (broker.id, epoch) } - val curBrokerIds = curBrokerIdAndEpochs.keySet - val liveOrShuttingDownBrokerIds = controllerContext.liveOrShuttingDownBrokerIds - val newBrokerIds = curBrokerIds.diff(liveOrShuttingDownBrokerIds) - val deadBrokerIds = liveOrShuttingDownBrokerIds.diff(curBrokerIds) - val bouncedBrokerIds = (curBrokerIds & liveOrShuttingDownBrokerIds) - .filter(brokerId => curBrokerIdAndEpochs(brokerId) > controllerContext.liveBrokerIdAndEpochs(brokerId)) - val newBrokerAndEpochs = curBrokerAndEpochs.filter { case (broker, _) => newBrokerIds.contains(broker.id) } - val bouncedBrokerAndEpochs = curBrokerAndEpochs.filter { case (broker, _) => bouncedBrokerIds.contains(broker.id) } - val newBrokerIdsSorted = newBrokerIds.toSeq.sorted - val deadBrokerIdsSorted = deadBrokerIds.toSeq.sorted - val liveBrokerIdsSorted = curBrokerIds.toSeq.sorted - val bouncedBrokerIdsSorted = bouncedBrokerIds.toSeq.sorted - info(s"Newly added brokers: ${newBrokerIdsSorted.mkString(",")}, " + - s"deleted brokers: ${deadBrokerIdsSorted.mkString(",")}, " + - s"bounced brokers: ${bouncedBrokerIdsSorted.mkString(",")}, " + - s"all live brokers: ${liveBrokerIdsSorted.mkString(",")}") - - newBrokerAndEpochs.keySet.foreach(controllerChannelManager.addBroker) - bouncedBrokerIds.foreach(controllerChannelManager.removeBroker) - bouncedBrokerAndEpochs.keySet.foreach(controllerChannelManager.addBroker) - deadBrokerIds.foreach(controllerChannelManager.removeBroker) - - if (newBrokerIds.nonEmpty) { - val (newCompatibleBrokerAndEpochs, newIncompatibleBrokerAndEpochs) = - partitionOnFeatureCompatibility(newBrokerAndEpochs) - if (newIncompatibleBrokerAndEpochs.nonEmpty) { - warn("Ignoring registration of new brokers due to incompatibilities with finalized features: " + - newIncompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(",")) - } - controllerContext.addLiveBrokers(newCompatibleBrokerAndEpochs) - onBrokerStartup(newBrokerIdsSorted) - } - if (bouncedBrokerIds.nonEmpty) { - controllerContext.removeLiveBrokers(bouncedBrokerIds) - onBrokerFailure(bouncedBrokerIdsSorted) - val (bouncedCompatibleBrokerAndEpochs, bouncedIncompatibleBrokerAndEpochs) = - partitionOnFeatureCompatibility(bouncedBrokerAndEpochs) - if (bouncedIncompatibleBrokerAndEpochs.nonEmpty) { - warn("Ignoring registration of bounced brokers due to incompatibilities with finalized features: " + - bouncedIncompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(",")) - } - controllerContext.addLiveBrokers(bouncedCompatibleBrokerAndEpochs) - onBrokerStartup(bouncedBrokerIdsSorted) - } - if (deadBrokerIds.nonEmpty) { - controllerContext.removeLiveBrokers(deadBrokerIds) - onBrokerFailure(deadBrokerIdsSorted) - } - - if (newBrokerIds.nonEmpty || deadBrokerIds.nonEmpty || bouncedBrokerIds.nonEmpty) { - info(s"Updated broker epochs cache: ${controllerContext.liveBrokerIdAndEpochs}") - } - } - - private def processBrokerModification(brokerId: 
Int): Unit = { - if (!isActive) return - val newMetadataOpt = zkClient.getBroker(brokerId) - val oldMetadataOpt = controllerContext.liveOrShuttingDownBroker(brokerId) - if (newMetadataOpt.nonEmpty && oldMetadataOpt.nonEmpty) { - val oldMetadata = oldMetadataOpt.get - val newMetadata = newMetadataOpt.get - if (newMetadata.endPoints != oldMetadata.endPoints || !oldMetadata.features.equals(newMetadata.features)) { - info(s"Updated broker metadata: $oldMetadata -> $newMetadata") - controllerContext.updateBrokerMetadata(oldMetadata, newMetadata) - onBrokerUpdate(brokerId) - } - } - } - - private def processTopicChange(): Unit = { - if (!isActive) return - val topics = zkClient.getAllTopicsInCluster(true) - val newTopics = topics -- controllerContext.allTopics - val deletedTopics = controllerContext.allTopics.diff(topics) - controllerContext.setAllTopics(topics) - - registerPartitionModificationsHandlers(newTopics.toSeq) - val addedPartitionReplicaAssignment = zkClient.getReplicaAssignmentAndTopicIdForTopics(newTopics) - deletedTopics.foreach(controllerContext.removeTopic) - processTopicIds(addedPartitionReplicaAssignment) - - addedPartitionReplicaAssignment.foreach { case TopicIdReplicaAssignment(_, _, newAssignments) => - newAssignments.foreach { case (topicAndPartition, newReplicaAssignment) => - controllerContext.updatePartitionFullReplicaAssignment(topicAndPartition, newReplicaAssignment) - } - } - info(s"New topics: [$newTopics], deleted topics: [$deletedTopics], new partition replica assignment " + - s"[$addedPartitionReplicaAssignment]") - if (addedPartitionReplicaAssignment.nonEmpty) { - val partitionAssignments = addedPartitionReplicaAssignment - .map { case TopicIdReplicaAssignment(_, _, partitionsReplicas) => partitionsReplicas.keySet } - .reduce((s1, s2) => s1.union(s2)) - onNewPartitionCreation(partitionAssignments) - } - } - - private def processTopicIds(topicIdAssignments: Set[TopicIdReplicaAssignment]): Unit = { - // Create topic IDs for topics missing them if we are using topic IDs - // Otherwise, maintain what we have in the topicZNode - val updatedTopicIdAssignments = if (config.usesTopicId) { - val (withTopicIds, withoutTopicIds) = topicIdAssignments.partition(_.topicId.isDefined) - withTopicIds ++ zkClient.setTopicIds(withoutTopicIds, controllerContext.epochZkVersion) - } else { - topicIdAssignments - } - - // Add topic IDs to controller context - // If we don't have IBP 2.8, but are running 2.8 code, put any topic IDs from the ZNode in controller context - // This is to avoid losing topic IDs during operations like partition reassignments while the cluster is in a mixed state - updatedTopicIdAssignments.foreach { topicIdAssignment => - topicIdAssignment.topicId.foreach { topicId => - controllerContext.addTopicId(topicIdAssignment.topic, topicId) - } - } - } - - private def processLogDirEventNotification(): Unit = { - if (!isActive) return - val sequenceNumbers = zkClient.getAllLogDirEventNotifications - try { - val brokerIds = zkClient.getBrokerIdsFromLogDirEvents(sequenceNumbers) - onBrokerLogDirFailure(brokerIds) - } finally { - // delete processed children - zkClient.deleteLogDirEventNotifications(sequenceNumbers, controllerContext.epochZkVersion) - } - } - - private def processPartitionModifications(topic: String): Unit = { - def restorePartitionReplicaAssignment( - topic: String, - newPartitionReplicaAssignment: Map[TopicPartition, ReplicaAssignment] - ): Unit = { - info("Restoring the partition replica assignment for topic %s".format(topic)) - - val 
existingPartitions = zkClient.getChildren(TopicPartitionsZNode.path(topic)) - val existingPartitionReplicaAssignment = newPartitionReplicaAssignment - .filter(p => existingPartitions.contains(p._1.partition.toString)) - .map { case (tp, _) => - tp -> controllerContext.partitionFullReplicaAssignment(tp) - }.toMap - - zkClient.setTopicAssignment(topic, - controllerContext.topicIds.get(topic), - existingPartitionReplicaAssignment, - controllerContext.epochZkVersion) - } - - if (!isActive) return - val partitionReplicaAssignment = zkClient.getFullReplicaAssignmentForTopics(immutable.Set(topic)) - val partitionsToBeAdded = partitionReplicaAssignment.filter { case (topicPartition, _) => - controllerContext.partitionReplicaAssignment(topicPartition).isEmpty - } - - if (topicDeletionManager.isTopicQueuedUpForDeletion(topic)) { - if (partitionsToBeAdded.nonEmpty) { - warn("Skipping adding partitions %s for topic %s since it is currently being deleted" - .format(partitionsToBeAdded.map(_._1.partition).mkString(","), topic)) - - restorePartitionReplicaAssignment(topic, partitionReplicaAssignment) - } else { - // This can happen if existing partition replica assignment are restored to prevent increasing partition count during topic deletion - info("Ignoring partition change during topic deletion as no new partitions are added") - } - } else if (partitionsToBeAdded.nonEmpty) { - info(s"New partitions to be added $partitionsToBeAdded") - partitionsToBeAdded.foreachEntry { (topicPartition, assignedReplicas) => - controllerContext.updatePartitionFullReplicaAssignment(topicPartition, assignedReplicas) - } - onNewPartitionCreation(partitionsToBeAdded.keySet) - } - } - - private def processTopicDeletion(): Unit = { - if (!isActive) return - var topicsToBeDeleted = zkClient.getTopicDeletions.toSet - debug(s"Delete topics listener fired for topics ${topicsToBeDeleted.mkString(",")} to be deleted") - val nonExistentTopics = topicsToBeDeleted -- controllerContext.allTopics - if (nonExistentTopics.nonEmpty) { - warn(s"Ignoring request to delete non-existing topics ${nonExistentTopics.mkString(",")}") - zkClient.deleteTopicDeletions(nonExistentTopics.toSeq, controllerContext.epochZkVersion) - } - topicsToBeDeleted --= nonExistentTopics - if (config.deleteTopicEnable) { - if (topicsToBeDeleted.nonEmpty) { - info(s"Starting topic deletion for topics ${topicsToBeDeleted.mkString(",")}") - // mark topic ineligible for deletion if other state changes are in progress - topicsToBeDeleted.foreach { topic => - val partitionReassignmentInProgress = - controllerContext.partitionsBeingReassigned.map(_.topic).contains(topic) - if (partitionReassignmentInProgress) - topicDeletionManager.markTopicIneligibleForDeletion(Set(topic), - reason = "topic reassignment in progress") - } - // add topic to deletion list - topicDeletionManager.enqueueTopicsForDeletion(topicsToBeDeleted) - } - } else { - // If delete topic is disabled remove entries under zookeeper path : /admin/delete_topics - info(s"Removing $topicsToBeDeleted since delete topic is disabled") - zkClient.deleteTopicDeletions(topicsToBeDeleted.toSeq, controllerContext.epochZkVersion) - } - } - - private def processZkPartitionReassignment(): Set[TopicPartition] = { - // We need to register the watcher if the path doesn't exist in order to detect future - // reassignments and we get the `path exists` check for free - if (isActive && zkClient.registerZNodeChangeHandlerAndCheckExistence(partitionReassignmentHandler)) { - val reassignmentResults = 
mutable.Map.empty[TopicPartition, ApiError] - val partitionsToReassign = mutable.Map.empty[TopicPartition, ReplicaAssignment] - - zkClient.getPartitionReassignment.foreachEntry { (tp, targetReplicas) => - maybeBuildReassignment(tp, Some(targetReplicas)) match { - case Some(context) => partitionsToReassign.put(tp, context) - case None => reassignmentResults.put(tp, new ApiError(Errors.NO_REASSIGNMENT_IN_PROGRESS)) - } - } - - reassignmentResults ++= maybeTriggerPartitionReassignment(partitionsToReassign) - val (partitionsReassigned, partitionsFailed) = reassignmentResults.partition(_._2.error == Errors.NONE) - if (partitionsFailed.nonEmpty) { - warn(s"Failed reassignment through zk with the following errors: $partitionsFailed") - maybeRemoveFromZkReassignment((tp, _) => partitionsFailed.contains(tp)) - } - partitionsReassigned.keySet - } else { - Set.empty - } - } - - /** - * Process a partition reassignment from the AlterPartitionReassignment API. If there is an - * existing reassignment through zookeeper for any of the requested partitions, they will be - * cancelled prior to beginning the new reassignment. Any zk-based reassignment for partitions - * which are NOT included in this call will not be affected. - * - * @param reassignments Map of reassignments passed through the AlterReassignments API. A null value - * means that we should cancel an in-progress reassignment. - * @param callback Callback to send AlterReassignments response - */ - private def processApiPartitionReassignment(reassignments: Map[TopicPartition, Option[Seq[Int]]], - callback: AlterReassignmentsCallback): Unit = { - if (!isActive) { - callback(Right(new ApiError(Errors.NOT_CONTROLLER))) - } else { - val reassignmentResults = mutable.Map.empty[TopicPartition, ApiError] - val partitionsToReassign = mutable.Map.empty[TopicPartition, ReplicaAssignment] - - reassignments.foreachEntry { (tp, targetReplicas) => - val maybeApiError = targetReplicas.flatMap(validateReplicas(tp, _)) - maybeApiError match { - case None => - maybeBuildReassignment(tp, targetReplicas) match { - case Some(context) => partitionsToReassign.put(tp, context) - case None => reassignmentResults.put(tp, new ApiError(Errors.NO_REASSIGNMENT_IN_PROGRESS)) - } - case Some(err) => - reassignmentResults.put(tp, err) - } - } - - // The latest reassignment (whether by API or through zk) always takes precedence, - // so remove from active zk reassignment (if one exists) - maybeRemoveFromZkReassignment((tp, _) => partitionsToReassign.contains(tp)) - - reassignmentResults ++= maybeTriggerPartitionReassignment(partitionsToReassign) - callback(Left(reassignmentResults)) - } - } - - private def validateReplicas(topicPartition: TopicPartition, replicas: Seq[Int]): Option[ApiError] = { - val replicaSet = replicas.toSet - if (replicas.isEmpty) - Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, - s"Empty replica list specified in partition reassignment.")) - else if (replicas.size != replicaSet.size) { - Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, - s"Duplicate replica ids in partition reassignment replica list: $replicas")) - } else if (replicas.exists(_ < 0)) - Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, - s"Invalid broker id in replica list: $replicas")) - else { - // Ensure that any new replicas are among the live brokers - val currentAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition) - val newAssignment = currentAssignment.reassignTo(replicas) - val areNewReplicasAlive = 
newAssignment.addingReplicas.toSet.subsetOf(controllerContext.liveBrokerIds) - if (!areNewReplicasAlive) - Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT, - s"Replica assignment has brokers that are not alive. Replica list: " + - s"${newAssignment.addingReplicas}, live broker list: ${controllerContext.liveBrokerIds}")) - else None - } - } - - private def maybeBuildReassignment(topicPartition: TopicPartition, - targetReplicasOpt: Option[Seq[Int]]): Option[ReplicaAssignment] = { - val replicaAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition) - if (replicaAssignment.isBeingReassigned) { - val targetReplicas = targetReplicasOpt.getOrElse(replicaAssignment.originReplicas) - Some(replicaAssignment.reassignTo(targetReplicas)) - } else { - targetReplicasOpt.map { targetReplicas => - replicaAssignment.reassignTo(targetReplicas) - } - } - } - - private def processPartitionReassignmentIsrChange(topicPartition: TopicPartition): Unit = { - if (!isActive) return - - if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) { - maybeCompleteReassignment(topicPartition) - } - } - - private def maybeCompleteReassignment(topicPartition: TopicPartition): Unit = { - val reassignment = controllerContext.partitionFullReplicaAssignment(topicPartition) - if (isReassignmentComplete(topicPartition, reassignment)) { - // resume the partition reassignment process - info(s"Target replicas ${reassignment.targetReplicas} have all caught up with the leader for " + - s"reassigning partition $topicPartition") - onPartitionReassignment(topicPartition, reassignment) - } - } - - private def processListPartitionReassignments(partitionsOpt: Option[Set[TopicPartition]], callback: ListReassignmentsCallback): Unit = { - if (!isActive) { - callback(Right(new ApiError(Errors.NOT_CONTROLLER))) - } else { - val results: mutable.Map[TopicPartition, ReplicaAssignment] = mutable.Map.empty - val partitionsToList = partitionsOpt match { - case Some(partitions) => partitions - case None => controllerContext.partitionsBeingReassigned - } - - partitionsToList.foreach { tp => - val assignment = controllerContext.partitionFullReplicaAssignment(tp) - if (assignment.isBeingReassigned) { - results += tp -> assignment - } - } - - callback(Left(results)) - } - } - - /** - * Returns the new finalized version for the feature, if there are no feature - * incompatibilities seen with all known brokers for the provided feature update. - * Otherwise returns an ApiError object containing Errors.INVALID_REQUEST. - * - * @param update the feature update to be processed (this can not be meant to delete the feature) - * - * @return the new finalized version or error, as described above. 
- */ - private def newFinalizedVersionOrIncompatibilityError(update: UpdateFeaturesRequest.FeatureUpdateItem): - Either[Short, ApiError] = { - if (update.isDeleteRequest) { - throw new IllegalArgumentException(s"Provided feature update can not be meant to delete the feature: $update") - } - - val supportedVersionRange = brokerFeatures.supportedFeatures.get(update.feature) - if (supportedVersionRange == null) { - Right(new ApiError(Errors.INVALID_REQUEST, - "Could not apply finalized feature update because the provided feature" + - " is not supported.")) - } else { - val newVersion = update.versionLevel() - if (supportedVersionRange.isIncompatibleWith(newVersion)) { - Right(new ApiError(Errors.INVALID_REQUEST, - "Could not apply finalized feature update because the provided" + - s" versionLevel:${update.versionLevel} is lower than the" + - s" supported minVersion:${supportedVersionRange.min}.")) - } else { - val newFinalizedFeature = Utils.mkMap(Utils.mkEntry(update.feature, newVersion: java.lang.Short)) - val numIncompatibleBrokers = controllerContext.liveOrShuttingDownBrokers.count(broker => { - BrokerFeatures.hasIncompatibleFeatures(broker.features, newFinalizedFeature) - }) - if (numIncompatibleBrokers == 0) { - Left(newVersion) - } else { - Right(new ApiError(Errors.INVALID_REQUEST, - "Could not apply finalized feature update because" + - " brokers were found to have incompatible versions for the feature.")) - } - } - } - } - - /** - * Validates a feature update on an existing finalized version. - * If the validation succeeds, then, the return value contains: - * 1. the new finalized version for the feature, if the feature update was not meant to delete the feature. - * 2. Option.empty, if the feature update was meant to delete the feature. - * - * If the validation fails, then returned value contains a suitable ApiError. - * - * @param update the feature update to be processed. - * @param existingVersion the existing finalized version which can be empty when no - * finalized version exists for the associated feature - * - * @return the new finalized version to be updated into ZK or error - * as described above. - */ - private def validateFeatureUpdate(update: UpdateFeaturesRequest.FeatureUpdateItem, - existingVersion: Option[Short]): Either[Option[Short], ApiError] = { - def newVersionRangeOrError(update: UpdateFeaturesRequest.FeatureUpdateItem): Either[Option[Short], ApiError] = { - newFinalizedVersionOrIncompatibilityError(update) - .fold(versionRange => Left(Some(versionRange)), error => Right(error)) - } - - if (update.feature.isEmpty) { - // Check that the feature name is not empty. - Right(new ApiError(Errors.INVALID_REQUEST, "Feature name can not be empty.")) - } else if (update.upgradeType.equals(UpgradeType.UNKNOWN)) { - Right(new ApiError(Errors.INVALID_REQUEST, "Received unknown upgrade type.")) - } else { - - // We handle deletion requests separately from non-deletion requests. - if (update.isDeleteRequest) { - if (existingVersion.isEmpty) { - // Disallow deletion of a non-existing finalized feature. - Right(new ApiError(Errors.INVALID_REQUEST, - "Can not delete non-existing finalized feature.")) - } else { - Left(Option.empty) - } - } else if (update.versionLevel() < 1) { - // Disallow deletion of a finalized feature without SAFE downgrade type. 
- Right(new ApiError(Errors.INVALID_REQUEST, - s"Can not provide versionLevel: ${update.versionLevel} less" + - s" than 1 without setting the SAFE downgradeType in the request.")) - } else { - existingVersion.map(existing => - if (update.versionLevel == existing) { - // Disallow a case where target versionLevel matches existing versionLevel. - Right(new ApiError(Errors.INVALID_REQUEST, - s"Can not ${if (update.upgradeType.equals(UpgradeType.SAFE_DOWNGRADE)) "downgrade" else "upgrade"}" + - s" a finalized feature from existing versionLevel:$existing" + - " to the same value.")) - } else if (update.versionLevel < existing && !update.upgradeType.equals(UpgradeType.SAFE_DOWNGRADE)) { - // Disallow downgrade of a finalized feature without the downgradeType set. - Right(new ApiError(Errors.INVALID_REQUEST, - s"Can not downgrade finalized feature from existing" + - s" versionLevel:$existing to provided" + - s" versionLevel:${update.versionLevel} without setting the" + - " downgradeType to SAFE in the request.")) - } else if (!update.upgradeType.equals(UpgradeType.UPGRADE) && update.versionLevel > existing) { - // Disallow a request that sets downgradeType without specifying a - // versionLevel that's lower than the existing versionLevel. - Right(new ApiError(Errors.INVALID_REQUEST, - s"When the downgradeType is set to SAFE in the request, the provided" + - s" versionLevel:${update.versionLevel} can not be greater than" + - s" existing versionLevel:$existing.")) - } else { - newVersionRangeOrError(update) - } - ).getOrElse(newVersionRangeOrError(update)) - } - } - } - - private def processFeatureUpdates(request: UpdateFeaturesRequest, - callback: UpdateFeaturesCallback): Unit = { - if (isActive) { - processFeatureUpdatesWithActiveController(request, callback) - } else { - callback(Left(new ApiError(Errors.NOT_CONTROLLER))) - } - } - - private def processFeatureUpdatesWithActiveController(request: UpdateFeaturesRequest, - callback: UpdateFeaturesCallback): Unit = { - val updates = request.featureUpdates - val existingFeatures = featureCache.getFeatureOption - .map(featuresAndEpoch => featuresAndEpoch.finalizedFeatures().asScala.map(kv => (kv._1, kv._2.toShort)).toMap) - .getOrElse(Map[String, Short]()) - // A map with key being feature name and value being finalized version. - // This contains the target features to be eventually written to FeatureZNode. - val targetFeatures = scala.collection.mutable.Map[String, Short]() ++ existingFeatures - // A map with key being feature name and value being error encountered when the FeatureUpdate - // was applied. - val errors = scala.collection.mutable.Map[String, ApiError]() - - // Below we process each FeatureUpdate using the following logic: - // - If a FeatureUpdate is found to be valid, then: - // - The corresponding entry in errors map would be updated to contain Errors.NONE. - // - If the FeatureUpdate is an add or update request, then the targetFeatures map is updated - // to contain the new finalized version for the feature. - // - Otherwise if the FeatureUpdate is a delete request, then the feature is removed from the - // targetFeatures map. - // - Otherwise if a FeatureUpdate is found to be invalid, then: - // - The corresponding entry in errors map would be updated with the appropriate ApiError. - // - The entry in targetFeatures map is left untouched. 
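// Illustrative sketch (not part of the removed KafkaController.scala): the shape of the
// per-update processing described in the comment above, with simplified stand-in types
// (a plain String in place of ApiError and a trivial stand-in validation rule).
object FeatureUpdateFoldSketch extends App {
  sealed trait UpdateStandIn { def feature: String }
  final case class Upgrade(feature: String, level: Int) extends UpdateStandIn
  final case class Delete(feature: String) extends UpdateStandIn

  val existingFeatures = Map("feature.x" -> 2, "feature.y" -> 1)
  val updates = Seq(Upgrade("feature.x", 3), Delete("feature.y"), Upgrade("feature.z", 0))

  // Left(Some(level)) = set a new finalized level, Left(None) = delete, Right(reason) = reject.
  def validate(update: UpdateStandIn): Either[Option[Int], String] = update match {
    case Upgrade(_, level) if level < 1 => Right("versionLevel must be at least 1")
    case Upgrade(_, level)              => Left(Some(level))
    case Delete(_)                      => Left(None)
  }

  val targetFeatures = collection.mutable.Map[String, Int]() ++= existingFeatures
  val errors = collection.mutable.Map[String, String]()

  updates.foreach { update =>
    validate(update) match {
      case Left(Some(level)) => targetFeatures += (update.feature -> level); errors += (update.feature -> "NONE")
      case Left(None)        => targetFeatures -= update.feature;            errors += (update.feature -> "NONE")
      case Right(reason)     => errors += (update.feature -> reason) // targetFeatures left untouched
    }
  }

  // The FeatureZNode would only be rewritten if targetFeatures now differs from existingFeatures.
  println(s"targetFeatures=$targetFeatures errors=$errors")
}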
- updates.asScala.iterator.foreach { update => - validateFeatureUpdate(update, existingFeatures.get(update.feature())) match { - case Left(newVersionRangeOrNone) => - newVersionRangeOrNone match { - case Some(newVersionRange) => targetFeatures += (update.feature() -> newVersionRange) - case None => targetFeatures -= update.feature() - } - errors += (update.feature() -> new ApiError(Errors.NONE)) - case Right(featureUpdateFailureReason) => - errors += (update.feature() -> featureUpdateFailureReason) - } - } - - // If the existing and target features are the same, then, we skip the update to the - // FeatureZNode as no changes to the node are required. Otherwise, we replace the contents - // of the FeatureZNode with the new features. This may result in partial or full modification - // of the existing finalized features in ZK. - try { - if (!existingFeatures.equals(targetFeatures)) { - val newNode = FeatureZNode(config.interBrokerProtocolVersion, FeatureZNodeStatus.Enabled, targetFeatures) - val newVersion = updateFeatureZNode(newNode) - featureCache.waitUntilFeatureEpochOrThrow(newVersion, request.data().timeoutMs()) - } - } catch { - // For all features that correspond to valid FeatureUpdate (i.e. error is Errors.NONE), - // we set the error as Errors.FEATURE_UPDATE_FAILED since the FeatureZNode update has failed - // for these. For the rest, the existing error is left untouched. - case e: Exception => - warn(s"Processing of feature updates: $request failed due to error: $e") - errors.foreach { case (feature, apiError) => - if (apiError.error() == Errors.NONE) { - errors(feature) = new ApiError(Errors.FEATURE_UPDATE_FAILED) - } - } - } finally { - callback(Right(errors)) - } - } - - private def processIsrChangeNotification(): Unit = { - def processUpdateNotifications(partitions: Seq[TopicPartition]): Unit = { - val liveBrokers: Seq[Int] = controllerContext.liveOrShuttingDownBrokerIds.toSeq - debug(s"Sending MetadataRequest to Brokers: $liveBrokers for TopicPartitions: $partitions") - sendUpdateMetadataRequest(liveBrokers, partitions.toSet) - } - - if (!isActive) return - val sequenceNumbers = zkClient.getAllIsrChangeNotifications - try { - val partitions = zkClient.getPartitionsFromIsrChangeNotifications(sequenceNumbers) - if (partitions.nonEmpty) { - updateLeaderAndIsrCache(partitions) - processUpdateNotifications(partitions) - - // During a partial upgrade, the controller may be on an IBP which assumes - // ISR changes through the `AlterPartition` API while some brokers are on an older - // IBP which assumes notification through Zookeeper. In this case, since the - // controller will not have registered watches for reassigning partitions, we - // can still rely on the batch ISR change notification path in order to - // complete the reassignment. 
- partitions.filter(controllerContext.partitionsBeingReassigned.contains).foreach { topicPartition => - maybeCompleteReassignment(topicPartition) - } - } - } finally { - // delete the notifications - zkClient.deleteIsrChangeNotifications(sequenceNumbers, controllerContext.epochZkVersion) - } - } - - def electLeaders( - partitions: Set[TopicPartition], - electionType: ElectionType, - callback: ElectLeadersCallback - ): Unit = { - eventManager.put(ReplicaLeaderElection(Some(partitions), electionType, AdminClientTriggered, callback)) - } - - def listPartitionReassignments(partitions: Option[Set[TopicPartition]], - callback: ListReassignmentsCallback): Unit = { - eventManager.put(ListPartitionReassignments(partitions, callback)) - } - - def updateFeatures(request: UpdateFeaturesRequest, - callback: UpdateFeaturesCallback): Unit = { - eventManager.put(UpdateFeatures(request, callback)) - } - - def alterPartitionReassignments(partitions: Map[TopicPartition, Option[Seq[Int]]], - callback: AlterReassignmentsCallback): Unit = { - eventManager.put(ApiPartitionReassignment(partitions, callback)) - } - - private def processReplicaLeaderElection( - partitionsFromAdminClientOpt: Option[Set[TopicPartition]], - electionType: ElectionType, - electionTrigger: ElectionTrigger, - callback: ElectLeadersCallback - ): Unit = { - if (!isActive) { - callback(partitionsFromAdminClientOpt.fold(Map.empty[TopicPartition, Either[ApiError, Int]]) { partitions => - partitions.iterator.map(partition => partition -> Left(new ApiError(Errors.NOT_CONTROLLER, null))).toMap - }) - } else { - // We need to register the watcher if the path doesn't exist in order to detect future preferred replica - // leader elections and we get the `path exists` check for free - if (electionTrigger == AdminClientTriggered || zkClient.registerZNodeChangeHandlerAndCheckExistence(preferredReplicaElectionHandler)) { - val partitions = partitionsFromAdminClientOpt match { - case Some(partitions) => partitions - case None => zkClient.getPreferredReplicaElection - } - - val allPartitions = controllerContext.allPartitions - val (knownPartitions, unknownPartitions) = partitions.partition(tp => allPartitions.contains(tp)) - unknownPartitions.foreach { p => - info(s"Skipping replica leader election ($electionType) for partition $p by $electionTrigger since it doesn't exist.") - } - - val (partitionsBeingDeleted, livePartitions) = knownPartitions.partition(partition => - topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic)) - if (partitionsBeingDeleted.nonEmpty) { - warn(s"Skipping replica leader election ($electionType) for partitions $partitionsBeingDeleted " + - s"by $electionTrigger since the respective topics are being deleted") - } - - // partition those that have a valid leader - val (electablePartitions, alreadyValidLeader) = livePartitions.partition { partition => - electionType match { - case ElectionType.PREFERRED => - val assignedReplicas = controllerContext.partitionReplicaAssignment(partition) - val preferredReplica = assignedReplicas.head - val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader - currentLeader != preferredReplica - - case ElectionType.UNCLEAN => - val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader - currentLeader == LeaderAndIsr.NO_LEADER || !controllerContext.isLiveBroker(currentLeader) - } - } - - val results = onReplicaElection(electablePartitions, electionType, electionTrigger).map { - case (k, Left(ex)) => - if 
(ex.isInstanceOf[StateChangeFailedException]) { - val error = if (electionType == ElectionType.PREFERRED) { - Errors.PREFERRED_LEADER_NOT_AVAILABLE - } else { - Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE - } - k -> Left(new ApiError(error, ex.getMessage)) - } else { - k -> Left(ApiError.fromThrowable(ex)) - } - case (k, Right(leaderAndIsr)) => k -> Right(leaderAndIsr.leader) - } ++ - alreadyValidLeader.map(_ -> Left(new ApiError(Errors.ELECTION_NOT_NEEDED))) ++ - partitionsBeingDeleted.map( - _ -> Left(new ApiError(Errors.INVALID_TOPIC_EXCEPTION, "The topic is being deleted")) - ) ++ - unknownPartitions.map( - _ -> Left(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist.")) - ) - - debug(s"Waiting for any successful result for election type ($electionType) by $electionTrigger for partitions: $results") - callback(results) - } - } - } - - def alterPartitions( - alterPartitionRequest: AlterPartitionRequestData, - alterPartitionRequestVersion: Short, - callback: AlterPartitionResponseData => Unit - ): Unit = { - eventManager.put(AlterPartitionReceived( - alterPartitionRequest, - alterPartitionRequestVersion, - callback - )) - } - - private def processAlterPartition( - alterPartitionRequest: AlterPartitionRequestData, - alterPartitionRequestVersion: Short, - callback: AlterPartitionResponseData => Unit - ): Unit = { - val partitionResponses = try { - tryProcessAlterPartition( - alterPartitionRequest, - alterPartitionRequestVersion, - callback - ) - } catch { - case e: Throwable => - error(s"Error when processing AlterPartition: $alterPartitionRequest", e) - callback(new AlterPartitionResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code)) - mutable.Map.empty - } - - // After we have returned the result of the `AlterPartition` request, we should check whether - // there are any reassignments which can be completed by a successful ISR expansion. 
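// Illustrative sketch (not part of the removed KafkaController.scala): the reassignment
// completion check that a successful ISR expansion can unlock, assuming completion means
// every target replica has made it into the ISR (matching the "target replicas have all
// caught up with the leader" message logged when maybeCompleteReassignment proceeds).
object ReassignmentCompletionSketch extends App {
  // Reassigning a partition to target replicas (2, 3, 4); broker 4 is the newly added replica.
  val targetReplicas = Set(2, 3, 4)

  def isComplete(isr: Set[Int]): Boolean = targetReplicas.subsetOf(isr)

  println(isComplete(Set(1, 2, 3)))    // false: broker 4 has not caught up yet
  println(isComplete(Set(1, 2, 3, 4))) // true: the ISR expansion lets the reassignment finish
}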
- partitionResponses.foreachEntry { (topicPartition, partitionResponse) => - if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) { - val isSuccessfulUpdate = partitionResponse.isRight - if (isSuccessfulUpdate) { - maybeCompleteReassignment(topicPartition) - } - } - } - } - - private def tryProcessAlterPartition( - alterPartitionRequest: AlterPartitionRequestData, - alterPartitionRequestVersion: Short, - callback: AlterPartitionResponseData => Unit - ): mutable.Map[TopicPartition, Either[Errors, LeaderAndIsr]] = { - val useTopicsIds = alterPartitionRequestVersion > 1 - - // Handle a few short-circuits - if (!isActive) { - callback(new AlterPartitionResponseData().setErrorCode(Errors.NOT_CONTROLLER.code)) - return mutable.Map.empty - } - - val brokerId = alterPartitionRequest.brokerId - val brokerEpoch = alterPartitionRequest.brokerEpoch - val brokerEpochOpt = controllerContext.liveBrokerIdAndEpochs.get(brokerId) - if (brokerEpochOpt.isEmpty) { - info(s"Ignoring AlterPartition due to unknown broker $brokerId") - callback(new AlterPartitionResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code)) - return mutable.Map.empty - } - - if (!brokerEpochOpt.contains(brokerEpoch)) { - info(s"Ignoring AlterPartition due to stale broker epoch $brokerEpoch and local broker epoch $brokerEpochOpt for broker $brokerId") - callback(new AlterPartitionResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code)) - return mutable.Map.empty - } - - val partitionsToAlter = new mutable.HashMap[TopicPartition, LeaderAndIsr]() - val alterPartitionResponse = new AlterPartitionResponseData() - - alterPartitionRequest.topics.forEach { topicReq => - val topicNameOpt = if (useTopicsIds) { - controllerContext.topicName(topicReq.topicId) - } else { - Some(topicReq.topicName) - } - - topicNameOpt match { - case None => - val topicResponse = new AlterPartitionResponseData.TopicData() - .setTopicId(topicReq.topicId) - alterPartitionResponse.topics.add(topicResponse) - topicReq.partitions.forEach { partitionReq => - topicResponse.partitions.add(new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(partitionReq.partitionIndex) - .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code)) - } - - case Some(topicName) => - topicReq.partitions.forEach { partitionReq => - val isr = if (alterPartitionRequestVersion >= 3) { - partitionReq.newIsrWithEpochs.asScala.toList.map(brokerState => Integer.valueOf(brokerState.brokerId())).asJava - } else { - partitionReq.newIsr - } - partitionsToAlter.put( - new TopicPartition(topicName, partitionReq.partitionIndex), - new LeaderAndIsr( - alterPartitionRequest.brokerId, - partitionReq.leaderEpoch, - isr, - LeaderRecoveryState.of(partitionReq.leaderRecoveryState), - partitionReq.partitionEpoch - ) - ) - } - } - } - - val partitionResponses = mutable.HashMap[TopicPartition, Either[Errors, LeaderAndIsr]]() - // Determine which partitions we will accept the new ISR for - val adjustedIsrs = partitionsToAlter.flatMap { case (tp, newLeaderAndIsr) => - controllerContext.partitionLeadershipInfo(tp) match { - case Some(leaderIsrAndControllerEpoch) => - val currentLeaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr - if (newLeaderAndIsr.partitionEpoch > currentLeaderAndIsr.partitionEpoch - || newLeaderAndIsr.leaderEpoch > currentLeaderAndIsr.leaderEpoch) { - // If the partition leader has a higher partition/leader epoch, then it is likely - // that this node is no longer the active controller. 
We return NOT_CONTROLLER in - // this case to give the leader an opportunity to find the new controller. - partitionResponses(tp) = Left(Errors.NOT_CONTROLLER) - None - } else if (newLeaderAndIsr.leaderEpoch < currentLeaderAndIsr.leaderEpoch) { - partitionResponses(tp) = Left(Errors.FENCED_LEADER_EPOCH) - None - } else if (newLeaderAndIsr.equalsAllowStalePartitionEpoch(currentLeaderAndIsr)) { - // If a partition is already in the desired state, just return it - // this check must be done before fencing based on partition epoch to maintain idempotency - partitionResponses(tp) = Right(currentLeaderAndIsr) - None - } else if (newLeaderAndIsr.partitionEpoch < currentLeaderAndIsr.partitionEpoch) { - partitionResponses(tp) = Left(Errors.INVALID_UPDATE_VERSION) - None - } else if (newLeaderAndIsr.leaderRecoveryState == LeaderRecoveryState.RECOVERING && newLeaderAndIsr.isr.size() > 1) { - partitionResponses(tp) = Left(Errors.INVALID_REQUEST) - info( - s"Rejecting AlterPartition from node $brokerId for $tp because leader is recovering and ISR is greater than 1: " + - s"$newLeaderAndIsr" - ) - None - } else if (currentLeaderAndIsr.leaderRecoveryState == LeaderRecoveryState.RECOVERED && - newLeaderAndIsr.leaderRecoveryState == LeaderRecoveryState.RECOVERING) { - - partitionResponses(tp) = Left(Errors.INVALID_REQUEST) - info( - s"Rejecting AlterPartition from node $brokerId for $tp because the leader recovery state cannot change from " + - s"RECOVERED to RECOVERING: $newLeaderAndIsr" - ) - None - } else { - // Pull out replicas being added to ISR and verify they are all online. - // If a replica is not online, reject the update as specified in KIP-841. - val ineligibleReplicas = newLeaderAndIsr.isr.asScala.toSet.map(Int.unbox) -- controllerContext.liveBrokerIds - if (ineligibleReplicas.nonEmpty) { - info(s"Rejecting AlterPartition request from node $brokerId for $tp because " + - s"it specified ineligible replicas $ineligibleReplicas in the new ISR ${newLeaderAndIsr.isr}." 
- ) - - if (alterPartitionRequestVersion > 1) { - partitionResponses(tp) = Left(Errors.INELIGIBLE_REPLICA) - } else { - partitionResponses(tp) = Left(Errors.OPERATION_NOT_ATTEMPTED) - } - None - } else { - Some(tp -> newLeaderAndIsr) - } - } - - case None => - partitionResponses(tp) = Left(Errors.UNKNOWN_TOPIC_OR_PARTITION) - None - } - } - - // Do the updates in ZK - debug(s"Updating ISRs for partitions: ${adjustedIsrs.keySet}.") - val UpdateLeaderAndIsrResult(finishedUpdates, badVersionUpdates) = zkClient.updateLeaderAndIsr( - adjustedIsrs, controllerContext.epoch, controllerContext.epochZkVersion) - - val successfulUpdates = finishedUpdates.flatMap { case (partition, isrOrError) => - isrOrError match { - case Right(updatedIsr) => - debug(s"ISR for partition $partition updated to $updatedIsr.") - partitionResponses(partition) = Right(updatedIsr) - Some(partition -> updatedIsr) - case Left(e) => - error(s"Failed to update ISR for partition $partition", e) - partitionResponses(partition) = Left(Errors.forException(e)) - None - } - } - - badVersionUpdates.foreach { partition => - info(s"Failed to update ISR to ${adjustedIsrs(partition)} for partition $partition, bad ZK version.") - partitionResponses(partition) = Left(Errors.INVALID_UPDATE_VERSION) - } - - // Update our cache and send out metadata updates - updateLeaderAndIsrCache(successfulUpdates.keys.toSeq) - sendUpdateMetadataRequest( - controllerContext.liveOrShuttingDownBrokerIds.toSeq, - partitionsToAlter.keySet - ) - - partitionResponses.groupBy(_._1.topic).foreachEntry { (topicName, partitionResponses) => - // Add each topic part to the response - val topicResponse = if (useTopicsIds) { - new AlterPartitionResponseData.TopicData() - .setTopicId(controllerContext.topicIds.getOrElse(topicName, Uuid.ZERO_UUID)) - } else { - new AlterPartitionResponseData.TopicData() - .setTopicName(topicName) - } - alterPartitionResponse.topics.add(topicResponse) - - partitionResponses.foreachEntry { (tp, errorOrIsr) => - // Add each partition part to the response (new ISR or error) - errorOrIsr match { - case Left(error) => - topicResponse.partitions.add( - new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(tp.partition) - .setErrorCode(error.code)) - case Right(leaderAndIsr) => - /* Setting the LeaderRecoveryState field is always safe because it will always be the same - * as the value set in the request. For version 0, that is always the default RECOVERED - * which is ignored when serializing to version 0. For any other version, the - * LeaderRecoveryState field is supported. 
- */ - topicResponse.partitions.add( - new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(tp.partition) - .setLeaderId(leaderAndIsr.leader) - .setLeaderEpoch(leaderAndIsr.leaderEpoch) - .setIsr(leaderAndIsr.isr) - .setLeaderRecoveryState(leaderAndIsr.leaderRecoveryState.value) - .setPartitionEpoch(leaderAndIsr.partitionEpoch) - ) - } - } - } - - callback(alterPartitionResponse) - - partitionResponses - } - - def allocateProducerIds(allocateProducerIdsRequest: AllocateProducerIdsRequestData, - callback: AllocateProducerIdsResponseData => Unit): Unit = { - - def eventManagerCallback(results: Either[Errors, ProducerIdsBlock]): Unit = { - results match { - case Left(error) => callback.apply(new AllocateProducerIdsResponseData().setErrorCode(error.code)) - case Right(pidBlock) => callback.apply( - new AllocateProducerIdsResponseData() - .setProducerIdStart(pidBlock.firstProducerId()) - .setProducerIdLen(pidBlock.size())) - } - } - eventManager.put(AllocateProducerIds(allocateProducerIdsRequest.brokerId, - allocateProducerIdsRequest.brokerEpoch, eventManagerCallback)) - } - - private def processAllocateProducerIds(brokerId: Int, brokerEpoch: Long, callback: Either[Errors, ProducerIdsBlock] => Unit): Unit = { - // Handle a few short-circuits - if (!isActive) { - callback.apply(Left(Errors.NOT_CONTROLLER)) - return - } - - val brokerEpochOpt = controllerContext.liveBrokerIdAndEpochs.get(brokerId) - if (brokerEpochOpt.isEmpty) { - warn(s"Ignoring AllocateProducerIds due to unknown broker $brokerId") - callback.apply(Left(Errors.BROKER_ID_NOT_REGISTERED)) - return - } - - if (!brokerEpochOpt.contains(brokerEpoch)) { - warn(s"Ignoring AllocateProducerIds due to stale broker epoch $brokerEpoch for broker $brokerId") - callback.apply(Left(Errors.STALE_BROKER_EPOCH)) - return - } - - val maybeNewProducerIdsBlock = try { - Try(ZkProducerIdManager.getNewProducerIdBlock(brokerId, zkClient, this)) - } catch { - case ke: KafkaException => Failure(ke) - } - - maybeNewProducerIdsBlock match { - case Failure(exception) => callback.apply(Left(Errors.forException(exception))) - case Success(newProducerIdBlock) => callback.apply(Right(newProducerIdBlock)) - } - } - - private def processControllerChange(): Unit = { - maybeResign() - } - - private def processReelect(): Unit = { - maybeResign() - elect() - } - - private def processRegisterBrokerAndReelect(): Unit = { - _brokerEpoch = zkClient.registerBroker(brokerInfo) - processReelect() - } - - private def processExpire(): Unit = { - activeControllerId = -1 - onControllerResignation() - } - - - override def process(event: ControllerEvent): Unit = { - try { - event match { - case event: MockEvent => - // Used only in test cases - event.process() - case ShutdownEventThread => - error("Received a ShutdownEventThread event. 
This type of event is supposed to be handle by ControllerEventThread") - case AutoPreferredReplicaLeaderElection => - processAutoPreferredReplicaLeaderElection() - case ReplicaLeaderElection(partitions, electionType, electionTrigger, callback) => - processReplicaLeaderElection(partitions, electionType, electionTrigger, callback) - case UncleanLeaderElectionEnable => - processUncleanLeaderElectionEnable() - case TopicUncleanLeaderElectionEnable(topic) => - processTopicUncleanLeaderElectionEnable(topic) - case ControlledShutdown(id, brokerEpoch, callback) => - processControlledShutdown(id, brokerEpoch, callback) - case LeaderAndIsrResponseReceived(response, brokerId) => - processLeaderAndIsrResponseReceived(response, brokerId) - case UpdateMetadataResponseReceived(response, brokerId) => - processUpdateMetadataResponseReceived(response, brokerId) - case TopicDeletionStopReplicaResponseReceived(replicaId, requestError, partitionErrors) => - processTopicDeletionStopReplicaResponseReceived(replicaId, requestError, partitionErrors) - case BrokerChange => - processBrokerChange() - case BrokerModifications(brokerId) => - processBrokerModification(brokerId) - case ControllerChange => - processControllerChange() - case Reelect => - processReelect() - case RegisterBrokerAndReelect => - processRegisterBrokerAndReelect() - case Expire => - processExpire() - case TopicChange => - processTopicChange() - case LogDirEventNotification => - processLogDirEventNotification() - case PartitionModifications(topic) => - processPartitionModifications(topic) - case TopicDeletion => - processTopicDeletion() - case ApiPartitionReassignment(reassignments, callback) => - processApiPartitionReassignment(reassignments, callback) - case ZkPartitionReassignment => - processZkPartitionReassignment() - case ListPartitionReassignments(partitions, callback) => - processListPartitionReassignments(partitions, callback) - case UpdateFeatures(request, callback) => - processFeatureUpdates(request, callback) - case PartitionReassignmentIsrChange(partition) => - processPartitionReassignmentIsrChange(partition) - case IsrChangeNotification => - processIsrChangeNotification() - case AlterPartitionReceived(alterPartitionRequest, alterPartitionRequestVersion, callback) => - processAlterPartition(alterPartitionRequest, alterPartitionRequestVersion, callback) - case AllocateProducerIds(brokerId, brokerEpoch, callback) => - processAllocateProducerIds(brokerId, brokerEpoch, callback) - case Startup => - processStartup() - } - } catch { - case e: ControllerMovedException => - info(s"Controller moved to another broker when processing $event.", e) - maybeResign() - case e: Throwable => - error(s"Error processing event $event", e) - } finally { - updateMetrics() - } - } - - override def preempt(event: ControllerEvent): Unit = { - event.preempt() - } -} - -class BrokerChangeHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler { - override val path: String = BrokerIdsZNode.path - - override def handleChildChange(): Unit = { - eventManager.put(BrokerChange) - } -} - -class BrokerModificationsHandler(eventManager: ControllerEventManager, brokerId: Int) extends ZNodeChangeHandler { - override val path: String = BrokerIdZNode.path(brokerId) - - override def handleDataChange(): Unit = { - eventManager.put(BrokerModifications(brokerId)) - } -} - -class TopicChangeHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler { - override val path: String = TopicsZNode.path - - override def handleChildChange(): Unit 
= eventManager.put(TopicChange) -} - -class LogDirEventNotificationHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler { - override val path: String = LogDirEventNotificationZNode.path - - override def handleChildChange(): Unit = eventManager.put(LogDirEventNotification) -} - -object LogDirEventNotificationHandler { - val Version: Long = 1L -} - -class PartitionModificationsHandler(eventManager: ControllerEventManager, topic: String) extends ZNodeChangeHandler { - override val path: String = TopicZNode.path(topic) - - override def handleDataChange(): Unit = eventManager.put(PartitionModifications(topic)) -} - -class TopicDeletionHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler { - override val path: String = DeleteTopicsZNode.path - - override def handleChildChange(): Unit = eventManager.put(TopicDeletion) -} - -class PartitionReassignmentHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler { - override val path: String = ReassignPartitionsZNode.path - - // Note that the event is also enqueued when the znode is deleted, but we do it explicitly instead of relying on - // handleDeletion(). This approach is more robust as it doesn't depend on the watcher being re-registered after - // it's consumed during data changes (we ensure re-registration when the znode is deleted). - override def handleCreation(): Unit = eventManager.put(ZkPartitionReassignment) -} - -class PartitionReassignmentIsrChangeHandler(eventManager: ControllerEventManager, partition: TopicPartition) extends ZNodeChangeHandler { - override val path: String = TopicPartitionStateZNode.path(partition) - - override def handleDataChange(): Unit = eventManager.put(PartitionReassignmentIsrChange(partition)) -} - -class IsrChangeNotificationHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler { - override val path: String = IsrChangeNotificationZNode.path - - override def handleChildChange(): Unit = eventManager.put(IsrChangeNotification) -} - -object IsrChangeNotificationHandler { - val Version: Long = 1L -} - -class PreferredReplicaElectionHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler { - override val path: String = PreferredReplicaElectionZNode.path - - override def handleCreation(): Unit = eventManager.put(ReplicaLeaderElection(None, ElectionType.PREFERRED, ZkTriggered)) -} - -class ControllerChangeHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler { - override val path: String = ControllerZNode.path - - override def handleCreation(): Unit = eventManager.put(ControllerChange) - override def handleDeletion(): Unit = eventManager.put(Reelect) - override def handleDataChange(): Unit = eventManager.put(ControllerChange) -} - -case class PartitionAndReplica(topicPartition: TopicPartition, replica: Int) { - def topic: String = topicPartition.topic - def partition: Int = topicPartition.partition - - override def toString: String = { - s"[Topic=$topic,Partition=$partition,Replica=$replica]" - } -} - -case class LeaderIsrAndControllerEpoch(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int) { - override def toString: String = { - val leaderAndIsrInfo = new StringBuilder - leaderAndIsrInfo.append("(Leader:" + leaderAndIsr.leader) - leaderAndIsrInfo.append(",ISR:" + leaderAndIsr.isr.asScala.mkString(",")) - leaderAndIsrInfo.append(",LeaderRecoveryState:" + leaderAndIsr.leaderRecoveryState) - leaderAndIsrInfo.append(",LeaderEpoch:" + leaderAndIsr.leaderEpoch) - leaderAndIsrInfo.append(",ZkVersion:" 
+ leaderAndIsr.partitionEpoch) - leaderAndIsrInfo.append(",ControllerEpoch:" + controllerEpoch + ")") - leaderAndIsrInfo.toString() - } -} - -private[controller] class ControllerStats { - private val metricsGroup = new KafkaMetricsGroup(this.getClass) - - val uncleanLeaderElectionRate: Meter = metricsGroup.newMeter("UncleanLeaderElectionsPerSec", "elections", TimeUnit.SECONDS) - - val rateAndTimeMetrics: Map[ControllerState, Timer] = ControllerState.values.flatMap { state => - state.rateAndTimeMetricName.map { metricName => - state -> metricsGroup.newTimer(metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS) - } - }.toMap - - // For test. - def removeMetric(name: String): Unit = { - metricsGroup.removeMetric(name) - } -} - -sealed trait ControllerEvent { - def state: ControllerState - // preempt() is not executed by `ControllerEventThread` but by the main thread. - def preempt(): Unit -} - -case object ControllerChange extends ControllerEvent { - override def state: ControllerState = ControllerState.ControllerChange - override def preempt(): Unit = {} -} - -case object Reelect extends ControllerEvent { - override def state: ControllerState = ControllerState.ControllerChange - override def preempt(): Unit = {} -} - -case object RegisterBrokerAndReelect extends ControllerEvent { - override def state: ControllerState = ControllerState.ControllerChange - override def preempt(): Unit = {} -} - -case object Expire extends ControllerEvent { - override def state: ControllerState = ControllerState.ControllerChange - override def preempt(): Unit = {} -} - -case object ShutdownEventThread extends ControllerEvent { - override def state: ControllerState = ControllerState.ControllerShutdown - override def preempt(): Unit = {} -} - -case object AutoPreferredReplicaLeaderElection extends ControllerEvent { - override def state: ControllerState = ControllerState.AutoLeaderBalance - override def preempt(): Unit = {} -} - -case object UncleanLeaderElectionEnable extends ControllerEvent { - override def state: ControllerState = ControllerState.UncleanLeaderElectionEnable - override def preempt(): Unit = {} -} - -case class TopicUncleanLeaderElectionEnable(topic: String) extends ControllerEvent { - override def state: ControllerState = ControllerState.TopicUncleanLeaderElectionEnable - override def preempt(): Unit = {} -} - -case class ControlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit) extends ControllerEvent { - override def state: ControllerState = ControllerState.ControlledShutdown - override def preempt(): Unit = controlledShutdownCallback(Failure(new ControllerMovedException("Controller moved to another broker"))) -} - -case class LeaderAndIsrResponseReceived(leaderAndIsrResponse: LeaderAndIsrResponse, brokerId: Int) extends ControllerEvent { - override def state: ControllerState = ControllerState.LeaderAndIsrResponseReceived - override def preempt(): Unit = {} -} - -case class UpdateMetadataResponseReceived(updateMetadataResponse: UpdateMetadataResponse, brokerId: Int) extends ControllerEvent { - override def state: ControllerState = ControllerState.UpdateMetadataResponseReceived - override def preempt(): Unit = {} -} - -case class TopicDeletionStopReplicaResponseReceived(replicaId: Int, - requestError: Errors, - partitionErrors: Map[TopicPartition, Errors]) extends ControllerEvent { - override def state: ControllerState = ControllerState.TopicDeletion - override def preempt(): Unit = {} -} - -case object Startup extends ControllerEvent { - 
override def state: ControllerState = ControllerState.ControllerChange - override def preempt(): Unit = {} -} - -case object BrokerChange extends ControllerEvent { - override def state: ControllerState = ControllerState.BrokerChange - override def preempt(): Unit = {} -} - -case class BrokerModifications(brokerId: Int) extends ControllerEvent { - override def state: ControllerState = ControllerState.BrokerChange - override def preempt(): Unit = {} -} - -case object TopicChange extends ControllerEvent { - override def state: ControllerState = ControllerState.TopicChange - override def preempt(): Unit = {} -} - -case object LogDirEventNotification extends ControllerEvent { - override def state: ControllerState = ControllerState.LogDirChange - override def preempt(): Unit = {} -} - -case class PartitionModifications(topic: String) extends ControllerEvent { - override def state: ControllerState = ControllerState.TopicChange - override def preempt(): Unit = {} -} - -case object TopicDeletion extends ControllerEvent { - override def state: ControllerState = ControllerState.TopicDeletion - override def preempt(): Unit = {} -} - -case object ZkPartitionReassignment extends ControllerEvent { - override def state: ControllerState = ControllerState.AlterPartitionReassignment - override def preempt(): Unit = {} -} - -case class ApiPartitionReassignment(reassignments: Map[TopicPartition, Option[Seq[Int]]], - callback: AlterReassignmentsCallback) extends ControllerEvent { - override def state: ControllerState = ControllerState.AlterPartitionReassignment - override def preempt(): Unit = callback(Right(new ApiError(Errors.NOT_CONTROLLER))) -} - -case class PartitionReassignmentIsrChange(partition: TopicPartition) extends ControllerEvent { - override def state: ControllerState = ControllerState.AlterPartitionReassignment - override def preempt(): Unit = {} -} - -case object IsrChangeNotification extends ControllerEvent { - override def state: ControllerState = ControllerState.IsrChange - override def preempt(): Unit = {} -} - -case class AlterPartitionReceived( - alterPartitionRequest: AlterPartitionRequestData, - alterPartitionRequestVersion: Short, - callback: AlterPartitionResponseData => Unit -) extends ControllerEvent { - override def state: ControllerState = ControllerState.IsrChange - override def preempt(): Unit = {} -} - -case class ReplicaLeaderElection( - partitionsFromAdminClientOpt: Option[Set[TopicPartition]], - electionType: ElectionType, - electionTrigger: ElectionTrigger, - callback: ElectLeadersCallback = _ => {} -) extends ControllerEvent { - override def state: ControllerState = ControllerState.ManualLeaderBalance - - override def preempt(): Unit = callback( - partitionsFromAdminClientOpt.fold(Map.empty[TopicPartition, Either[ApiError, Int]]) { partitions => - partitions.iterator.map(partition => partition -> Left(new ApiError(Errors.NOT_CONTROLLER, null))).toMap - } - ) -} - -/** - * @param partitionsOpt - an Optional set of partitions. 
If not present, all reassigning partitions are to be listed - */ -case class ListPartitionReassignments(partitionsOpt: Option[Set[TopicPartition]], - callback: ListReassignmentsCallback) extends ControllerEvent { - override def state: ControllerState = ControllerState.ListPartitionReassignment - override def preempt(): Unit = callback(Right(new ApiError(Errors.NOT_CONTROLLER, null))) -} - -case class UpdateFeatures(request: UpdateFeaturesRequest, - callback: UpdateFeaturesCallback) extends ControllerEvent { - override def state: ControllerState = ControllerState.UpdateFeatures - override def preempt(): Unit = {} -} - -case class AllocateProducerIds(brokerId: Int, brokerEpoch: Long, callback: Either[Errors, ProducerIdsBlock] => Unit) - extends ControllerEvent { - override def state: ControllerState = ControllerState.Idle - override def preempt(): Unit = {} -} - - -// Used only in test cases -abstract class MockEvent(val state: ControllerState) extends ControllerEvent { - def process(): Unit - def preempt(): Unit -} diff --git a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala b/core/src/main/scala/kafka/controller/PartitionStateMachine.scala deleted file mode 100755 index c0b92b9c638e9..0000000000000 --- a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala +++ /dev/null @@ -1,587 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -package kafka.controller - -import kafka.common.StateChangeFailedException -import kafka.controller.Election._ -import kafka.server.KafkaConfig -import kafka.utils.Logging -import kafka.zk.KafkaZkClient -import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult -import kafka.zk.TopicPartitionStateZNode -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.ControllerMovedException -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.common.MetadataVersion.IBP_3_2_IV0 -import org.apache.zookeeper.KeeperException -import org.apache.zookeeper.KeeperException.Code - -import scala.collection.{Map, Seq, mutable} -import scala.jdk.CollectionConverters._ - -abstract class PartitionStateMachine(controllerContext: ControllerContext) extends Logging { - /** - * Invoked on successful controller election. - */ - def startup(): Unit = { - info("Initializing partition state") - initializePartitionState() - info("Triggering online partition state changes") - triggerOnlinePartitionStateChange() - debug(s"Started partition state machine with initial state -> ${controllerContext.partitionStates}") - } - - /** - * Invoked on controller shutdown. 
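The callback-carrying events above (ControlledShutdown, ApiPartitionReassignment, ReplicaLeaderElection, ListPartitionReassignments) all follow the same preempt() contract: when the event is cleared from the queue before it is processed, the pending callback is completed with an error such as NOT_CONTROLLER instead of being dropped. A sketch of that pattern with a hypothetical event (the name and payload are made up):

    // Hypothetical event, for illustration only: preempt() answers the caller with
    // NOT_CONTROLLER so the client can retry against the new controller.
    case class ExampleQuery(callback: Either[Errors, String] => Unit) extends ControllerEvent {
      override def state: ControllerState = ControllerState.Idle
      override def preempt(): Unit = callback(Left(Errors.NOT_CONTROLLER))
    }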
- */ - def shutdown(): Unit = { - info("Stopped partition state machine") - } - - /** - * This API invokes the OnlinePartition state change on all partitions in either the NewPartition or OfflinePartition - * state. This is called on a successful controller election and on broker changes - */ - def triggerOnlinePartitionStateChange(): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - val partitions = controllerContext.partitionsInStates(Set(OfflinePartition, NewPartition)) - triggerOnlineStateChangeForPartitions(partitions) - } - - def triggerOnlinePartitionStateChange(topic: String): Unit = { - val partitions = controllerContext.partitionsInStates(topic, Set(OfflinePartition, NewPartition)) - triggerOnlineStateChangeForPartitions(partitions) - } - - private def triggerOnlineStateChangeForPartitions(partitions: collection.Set[TopicPartition]): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - // try to move all partitions in NewPartition or OfflinePartition state to OnlinePartition state except partitions - // that belong to topics to be deleted - val partitionsToTrigger = partitions.filter { partition => - !controllerContext.isTopicQueuedUpForDeletion(partition.topic) - }.toSeq - - handleStateChanges(partitionsToTrigger, OnlinePartition, Some(OfflinePartitionLeaderElectionStrategy(false))) - // TODO: If handleStateChanges catches an exception, it is not enough to bail out and log an error. - // It is important to trigger leader election for those partitions. - } - - /** - * Invoked on startup of the partition's state machine to set the initial state for all existing partitions in - * zookeeper - */ - private def initializePartitionState(): Unit = { - for (topicPartition <- controllerContext.allPartitions) { - // check if leader and isr path exists for partition. If not, then it is in NEW state - controllerContext.partitionLeadershipInfo(topicPartition) match { - case Some(currentLeaderIsrAndEpoch) => - // else, check if the leader for partition is alive. If yes, it is in Online state, else it is in Offline state - if (controllerContext.isReplicaOnline(currentLeaderIsrAndEpoch.leaderAndIsr.leader, topicPartition)) - // leader is alive - controllerContext.putPartitionState(topicPartition, OnlinePartition) - else - controllerContext.putPartitionState(topicPartition, OfflinePartition) - case None => - controllerContext.putPartitionState(topicPartition, NewPartition) - } - } - } - - def handleStateChanges( - partitions: Seq[TopicPartition], - targetState: PartitionState - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - handleStateChanges(partitions, targetState, None) - } - - def handleStateChanges( - partitions: Seq[TopicPartition], - targetState: PartitionState, - leaderElectionStrategy: Option[PartitionLeaderElectionStrategy] - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] - -} - -/** - * This class represents the state machine for partitions. It defines the states that a partition can be in, and - * transitions to move the partition to another legal state. The different states that a partition can be in are - - * 1. NonExistentPartition: This state indicates that the partition was either never created or was created and then - * deleted. Valid previous state, if one exists, is OfflinePartition - * 2. NewPartition : After creation, the partition is in the NewPartition state. In this state, the partition should have - * replicas assigned to it, but no leader/isr yet. Valid previous states are NonExistentPartition - * 3. 
OnlinePartition : Once a leader is elected for a partition, it is in the OnlinePartition state. - * Valid previous states are NewPartition/OfflinePartition - * 4. OfflinePartition : If, after successful leader election, the leader for partition dies, then the partition - * moves to the OfflinePartition state. Valid previous states are NewPartition/OnlinePartition - */ -class ZkPartitionStateMachine(config: KafkaConfig, - stateChangeLogger: StateChangeLogger, - controllerContext: ControllerContext, - zkClient: KafkaZkClient, - controllerBrokerRequestBatch: ControllerBrokerRequestBatch) - extends PartitionStateMachine(controllerContext) { - - private val isLeaderRecoverySupported = config.interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0) - - private val controllerId = config.brokerId - this.logIdent = s"[PartitionStateMachine controllerId=$controllerId] " - - /** - * Try to change the state of the given partitions to the given targetState, using the given - * partitionLeaderElectionStrategyOpt if a leader election is required. - * @param partitions The partitions - * @param targetState The state - * @param partitionLeaderElectionStrategyOpt The leader election strategy if a leader election is required. - * @return A map of failed and successful elections when targetState is OnlinePartitions. The keys are the - * topic partitions and the corresponding values are either the exception that was thrown or new - * leader & ISR. - */ - override def handleStateChanges( - partitions: Seq[TopicPartition], - targetState: PartitionState, - partitionLeaderElectionStrategyOpt: Option[PartitionLeaderElectionStrategy] - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - if (partitions.nonEmpty) { - try { - controllerBrokerRequestBatch.newBatch() - val result = doHandleStateChanges( - partitions, - targetState, - partitionLeaderElectionStrategyOpt - ) - controllerBrokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch) - result - } catch { - case e: ControllerMovedException => - error(s"Controller moved to another broker when moving some partitions to $targetState state", e) - throw e - case e: Throwable => - error(s"Error while moving some partitions to $targetState state", e) - partitions.iterator.map(_ -> Left(e)).toMap - } - } else { - Map.empty - } - } - - private def partitionState(partition: TopicPartition): PartitionState = { - controllerContext.partitionState(partition) - } - - /** - * This API exercises the partition's state machine. It ensures that every state transition happens from a legal - * previous state to the target state. 
Valid state transitions are: - * NonExistentPartition -> NewPartition: - * --load assigned replicas from ZK to controller cache - * - * NewPartition -> OnlinePartition - * --assign first live replica as the leader and all live replicas as the isr; write leader and isr to ZK for this partition - * --send LeaderAndIsr request to every live replica and UpdateMetadata request to every live broker - * - * OnlinePartition,OfflinePartition -> OnlinePartition - * --select new leader and isr for this partition and a set of replicas to receive the LeaderAndIsr request, and write leader and isr to ZK - * --for this partition, send LeaderAndIsr request to every receiving replica and UpdateMetadata request to every live broker - * - * NewPartition,OnlinePartition,OfflinePartition -> OfflinePartition - * --nothing other than marking partition state as Offline - * - * OfflinePartition -> NonExistentPartition - * --nothing other than marking the partition state as NonExistentPartition - * @param partitions The partitions for which the state transition is invoked - * @param targetState The end state that the partition should be moved to - * @return A map of failed and successful elections when targetState is OnlinePartitions. The keys are the - * topic partitions and the corresponding values are either the exception that was thrown or new - * leader & ISR. - */ - private def doHandleStateChanges( - partitions: Seq[TopicPartition], - targetState: PartitionState, - partitionLeaderElectionStrategyOpt: Option[PartitionLeaderElectionStrategy] - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - val stateChangeLog = stateChangeLogger.withControllerEpoch(controllerContext.epoch) - val traceEnabled = stateChangeLog.isTraceEnabled - partitions.foreach(partition => controllerContext.putPartitionStateIfNotExists(partition, NonExistentPartition)) - val (validPartitions, invalidPartitions) = controllerContext.checkValidPartitionStateChange(partitions, targetState) - invalidPartitions.foreach(partition => logInvalidTransition(partition, targetState)) - - targetState match { - case NewPartition => - validPartitions.foreach { partition => - stateChangeLog.info(s"Changed partition $partition state from ${partitionState(partition)} to $targetState with " + - s"assigned replicas ${controllerContext.partitionReplicaAssignment(partition).mkString(",")}") - controllerContext.putPartitionState(partition, NewPartition) - } - Map.empty - case OnlinePartition => - val uninitializedPartitions = validPartitions.filter(partition => partitionState(partition) == NewPartition) - val partitionsToElectLeader = validPartitions.filter(partition => partitionState(partition) == OfflinePartition || partitionState(partition) == OnlinePartition) - if (uninitializedPartitions.nonEmpty) { - val successfulInitializations = initializeLeaderAndIsrForPartitions(uninitializedPartitions) - successfulInitializations.foreach { partition => - stateChangeLog.info(s"Changed partition $partition from ${partitionState(partition)} to $targetState with state " + - s"${controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr}") - controllerContext.putPartitionState(partition, OnlinePartition) - } - } - if (partitionsToElectLeader.nonEmpty) { - val electionResults = electLeaderForPartitions( - partitionsToElectLeader, - partitionLeaderElectionStrategyOpt.getOrElse( - throw new IllegalArgumentException("Election strategy is a required field when the target state is OnlinePartition") - ) - ) - - electionResults.foreach { - case (partition, 
Right(leaderAndIsr)) => - stateChangeLog.info( - s"Changed partition $partition from ${partitionState(partition)} to $targetState with state $leaderAndIsr" - ) - controllerContext.putPartitionState(partition, OnlinePartition) - case (_, Left(_)) => // Ignore; no need to update partition state on election error - } - - electionResults - } else { - Map.empty - } - case OfflinePartition | NonExistentPartition => - validPartitions.foreach { partition => - if (traceEnabled) - stateChangeLog.trace(s"Changed partition $partition state from ${partitionState(partition)} to $targetState") - controllerContext.putPartitionState(partition, targetState) - } - Map.empty - } - } - - /** - * Initialize leader and isr partition state in zookeeper. - * @param partitions The partitions that we're trying to initialize. - * @return The partitions that have been successfully initialized. - */ - private def initializeLeaderAndIsrForPartitions(partitions: Seq[TopicPartition]): Seq[TopicPartition] = { - val successfulInitializations = mutable.Buffer.empty[TopicPartition] - val replicasPerPartition = partitions.map(partition => partition -> controllerContext.partitionReplicaAssignment(partition)) - val liveReplicasPerPartition = replicasPerPartition.map { case (partition, replicas) => - val liveReplicasForPartition = replicas.filter(replica => controllerContext.isReplicaOnline(replica, partition)) - partition -> liveReplicasForPartition - } - val (partitionsWithoutLiveReplicas, partitionsWithLiveReplicas) = liveReplicasPerPartition.partition { case (_, liveReplicas) => liveReplicas.isEmpty } - - partitionsWithoutLiveReplicas.foreach { case (partition, _) => - val failMsg = s"Controller $controllerId epoch ${controllerContext.epoch} encountered error during state change of " + - s"partition $partition from New to Online, assigned replicas are " + - s"[${controllerContext.partitionReplicaAssignment(partition).mkString(",")}], live brokers are [${controllerContext.liveBrokerIds}]. No assigned " + - "replica is alive." 
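For orientation, a caller-side sketch of the NewPartition/OfflinePartition -> OnlinePartition path described above. The partition names are made up and `stateMachine` stands in for the controller's ZkPartitionStateMachine instance; an election strategy must be supplied whenever the target state is OnlinePartition:

    import org.apache.kafka.common.TopicPartition

    val partitions = Seq(new TopicPartition("orders", 0), new TopicPartition("orders", 1))
    val results = stateMachine.handleStateChanges(
      partitions,
      OnlinePartition,
      Some(OfflinePartitionLeaderElectionStrategy(allowUnclean = false))
    )
    results.foreach {
      case (tp, Right(leaderAndIsr)) => println(s"$tp now led by broker ${leaderAndIsr.leader}")
      case (tp, Left(error))         => println(s"election failed for $tp: ${error.getMessage}")
    }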
- logFailedStateChange(partition, NewPartition, OnlinePartition, new StateChangeFailedException(failMsg)) - } - val leaderIsrAndControllerEpochs = partitionsWithLiveReplicas.map { case (partition, liveReplicas) => - val leaderAndIsr = new LeaderAndIsr(liveReplicas.head, liveReplicas.toList.map(Integer.valueOf).asJava) - val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch) - partition -> leaderIsrAndControllerEpoch - }.toMap - val createResponses = try { - zkClient.createTopicPartitionStatesRaw(leaderIsrAndControllerEpochs, controllerContext.epochZkVersion) - } catch { - case e: ControllerMovedException => - error("Controller moved to another broker when trying to create the topic partition state znode", e) - throw e - case e: Exception => - partitionsWithLiveReplicas.foreach { case (partition, _) => logFailedStateChange(partition, partitionState(partition), NewPartition, e) } - Seq.empty - } - createResponses.foreach { createResponse => - val code = createResponse.resultCode - val partition = createResponse.ctx.get.asInstanceOf[TopicPartition] - val leaderIsrAndControllerEpoch = leaderIsrAndControllerEpochs(partition) - if (code == Code.OK) { - controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch) - controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(leaderIsrAndControllerEpoch.leaderAndIsr.isr.asScala.map(_.toInt), - partition, leaderIsrAndControllerEpoch, controllerContext.partitionFullReplicaAssignment(partition), isNew = true) - successfulInitializations += partition - } else { - logFailedStateChange(partition, NewPartition, OnlinePartition, code) - } - } - successfulInitializations - } - - /** - * Repeatedly attempt to elect leaders for multiple partitions until there are no more remaining partitions to retry. - * @param partitions The partitions that we're trying to elect leaders for. - * @param partitionLeaderElectionStrategy The election strategy to use. - * @return A map of failed and successful elections. The keys are the topic partitions and the corresponding values are - * either the exception that was thrown or new leader & ISR. - */ - private def electLeaderForPartitions( - partitions: Seq[TopicPartition], - partitionLeaderElectionStrategy: PartitionLeaderElectionStrategy - ): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = { - var remaining = partitions - val finishedElections = mutable.Map.empty[TopicPartition, Either[Throwable, LeaderAndIsr]] - - while (remaining.nonEmpty) { - val (finished, updatesToRetry) = doElectLeaderForPartitions(remaining, partitionLeaderElectionStrategy) - remaining = updatesToRetry - - finished.foreach { - case (partition, Left(e)) => - logFailedStateChange(partition, partitionState(partition), OnlinePartition, e) - case (_, Right(_)) => // Ignore; success so no need to log failed state change - } - - finishedElections ++= finished - - if (remaining.nonEmpty) - logger.info(s"Retrying leader election with strategy $partitionLeaderElectionStrategy for partitions $remaining") - } - - finishedElections.toMap - } - - /** - * Try to elect leaders for multiple partitions. - * Electing a leader for a partition updates partition state in zookeeper. - * - * @param partitions The partitions that we're trying to elect leaders for. - * @param partitionLeaderElectionStrategy The election strategy to use. - * @return A tuple of two values: - * 1. The partitions and the expected leader and isr that successfully had a leader elected. 
And exceptions - * corresponding to failed elections that should not be retried. - * 2. The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts can occur if - * the partition leader updated partition state while the controller attempted to update partition state. - */ - private def doElectLeaderForPartitions( - partitions: Seq[TopicPartition], - partitionLeaderElectionStrategy: PartitionLeaderElectionStrategy - ): (Map[TopicPartition, Either[Exception, LeaderAndIsr]], Seq[TopicPartition]) = { - val getDataResponses = try { - zkClient.getTopicPartitionStatesRaw(partitions) - } catch { - case e: Exception => - return (partitions.iterator.map(_ -> Left(e)).toMap, Seq.empty) - } - val failedElections = mutable.Map.empty[TopicPartition, Either[Exception, LeaderAndIsr]] - val validLeaderAndIsrs = mutable.Buffer.empty[(TopicPartition, LeaderAndIsr)] - - getDataResponses.foreach { getDataResponse => - val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition] - val currState = partitionState(partition) - if (getDataResponse.resultCode == Code.OK) { - TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat) match { - case Some(leaderIsrAndControllerEpoch) => - if (leaderIsrAndControllerEpoch.controllerEpoch > controllerContext.epoch) { - val failMsg = s"Aborted leader election for partition $partition since the LeaderAndIsr path was " + - s"already written by another controller. This probably means that the current controller $controllerId went through " + - s"a soft failure and another controller was elected with epoch ${leaderIsrAndControllerEpoch.controllerEpoch}." - failedElections.put(partition, Left(new StateChangeFailedException(failMsg))) - } else { - validLeaderAndIsrs += partition -> leaderIsrAndControllerEpoch.leaderAndIsr - } - - case None => - val exception = new StateChangeFailedException(s"LeaderAndIsr information doesn't exist for partition $partition in $currState state") - failedElections.put(partition, Left(exception)) - } - - } else if (getDataResponse.resultCode == Code.NONODE) { - val exception = new StateChangeFailedException(s"LeaderAndIsr information doesn't exist for partition $partition in $currState state") - failedElections.put(partition, Left(exception)) - } else { - failedElections.put(partition, Left(getDataResponse.resultException.get)) - } - } - - if (validLeaderAndIsrs.isEmpty) { - return (failedElections.toMap, Seq.empty) - } - - val (partitionsWithoutLeaders, partitionsWithLeaders) = partitionLeaderElectionStrategy match { - case OfflinePartitionLeaderElectionStrategy(allowUnclean) => - val partitionsWithUncleanLeaderElectionState = collectUncleanLeaderElectionState( - validLeaderAndIsrs, - allowUnclean - ) - leaderForOffline( - controllerContext, - isLeaderRecoverySupported, - partitionsWithUncleanLeaderElectionState - ).partition(_.leaderAndIsr.isEmpty) - - case ReassignPartitionLeaderElectionStrategy => - leaderForReassign(controllerContext, validLeaderAndIsrs).partition(_.leaderAndIsr.isEmpty) - case PreferredReplicaPartitionLeaderElectionStrategy => - leaderForPreferredReplica(controllerContext, validLeaderAndIsrs).partition(_.leaderAndIsr.isEmpty) - case ControlledShutdownPartitionLeaderElectionStrategy => - leaderForControlledShutdown(controllerContext, validLeaderAndIsrs).partition(_.leaderAndIsr.isEmpty) - } - partitionsWithoutLeaders.foreach { electionResult => - val partition = electionResult.topicPartition - val failMsg = s"Failed to elect leader for partition $partition under 
strategy $partitionLeaderElectionStrategy" - failedElections.put(partition, Left(new StateChangeFailedException(failMsg))) - } - val recipientsPerPartition = partitionsWithLeaders.map(result => result.topicPartition -> result.liveReplicas).toMap - val adjustedLeaderAndIsrs = partitionsWithLeaders.map(result => result.topicPartition -> result.leaderAndIsr.get).toMap - val UpdateLeaderAndIsrResult(finishedUpdates, updatesToRetry) = zkClient.updateLeaderAndIsr( - adjustedLeaderAndIsrs, controllerContext.epoch, controllerContext.epochZkVersion) - finishedUpdates.foreachEntry { (partition, result) => - result.foreach { leaderAndIsr => - val replicaAssignment = controllerContext.partitionFullReplicaAssignment(partition) - val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch) - controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch) - controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(recipientsPerPartition(partition), partition, - leaderIsrAndControllerEpoch, replicaAssignment, isNew = false) - } - } - - if (isDebugEnabled) { - updatesToRetry.foreach { partition => - debug(s"Controller failed to elect leader for partition $partition. " + - s"Attempted to write state ${adjustedLeaderAndIsrs(partition)}, but failed with bad ZK version. This will be retried.") - } - } - - (finishedUpdates ++ failedElections, updatesToRetry) - } - - /* For the provided set of topic partition and partition sync state it attempts to determine if unclean - * leader election should be performed. Unclean election should be performed if there are no live - * replica which are in sync and unclean leader election is allowed (allowUnclean parameter is true or - * the topic has been configured to allow unclean election). - * - * @param leaderIsrAndControllerEpochs set of partition to determine if unclean leader election should be - * allowed - * @param allowUnclean whether to allow unclean election without having to read the topic configuration - * @return a sequence of three element tuple: - * 1. topic partition - * 2. leader, isr and controller epoc. Some means election should be performed - * 3. 
allow unclean - */ - private def collectUncleanLeaderElectionState( - leaderAndIsrs: Seq[(TopicPartition, LeaderAndIsr)], - allowUnclean: Boolean - ): Seq[(TopicPartition, Option[LeaderAndIsr], Boolean)] = { - val (partitionsWithNoLiveInSyncReplicas, partitionsWithLiveInSyncReplicas) = leaderAndIsrs.partition { - case (partition, leaderAndIsr) => - val liveInSyncReplicas = leaderAndIsr.isr.asScala.filter(controllerContext.isReplicaOnline(_, partition)) - liveInSyncReplicas.isEmpty - } - - val electionForPartitionWithoutLiveReplicas = if (allowUnclean) { - partitionsWithNoLiveInSyncReplicas.map { case (partition, leaderAndIsr) => - (partition, Option(leaderAndIsr), true) - } - } else { - val (logConfigs, failed) = zkClient.getLogConfigs( - partitionsWithNoLiveInSyncReplicas.iterator.map { case (partition, _) => partition.topic }.toSet, - config.originals() - ) - - partitionsWithNoLiveInSyncReplicas.map { case (partition, leaderAndIsr) => - if (failed.contains(partition.topic)) { - logFailedStateChange(partition, partitionState(partition), OnlinePartition, failed(partition.topic)) - (partition, None, false) - } else { - ( - partition, - Option(leaderAndIsr), - logConfigs(partition.topic).uncleanLeaderElectionEnable.booleanValue() - ) - } - } - } - - electionForPartitionWithoutLiveReplicas ++ - partitionsWithLiveInSyncReplicas.map { case (partition, leaderAndIsr) => - (partition, Option(leaderAndIsr), false) - } - } - - private def logInvalidTransition(partition: TopicPartition, targetState: PartitionState): Unit = { - val currState = partitionState(partition) - val e = new IllegalStateException(s"Partition $partition should be in one of " + - s"${targetState.validPreviousStates.mkString(",")} states before moving to $targetState state. Instead it is in " + - s"$currState state") - logFailedStateChange(partition, currState, targetState, e) - } - - private def logFailedStateChange(partition: TopicPartition, currState: PartitionState, targetState: PartitionState, code: Code): Unit = { - logFailedStateChange(partition, currState, targetState, KeeperException.create(code)) - } - - private def logFailedStateChange(partition: TopicPartition, currState: PartitionState, targetState: PartitionState, t: Throwable): Unit = { - stateChangeLogger.withControllerEpoch(controllerContext.epoch) - .error(s"Controller $controllerId epoch ${controllerContext.epoch} failed to change state for partition $partition " + - s"from $currState to $targetState", t) - } -} - -object PartitionLeaderElectionAlgorithms { - def offlinePartitionLeaderElection(assignment: Seq[Int], isr: Seq[Int], liveReplicas: Set[Int], uncleanLeaderElectionEnabled: Boolean, controllerContext: ControllerContext): Option[Int] = { - assignment.find(id => liveReplicas.contains(id) && isr.contains(id)).orElse { - if (uncleanLeaderElectionEnabled) { - val leaderOpt = assignment.find(liveReplicas.contains) - if (leaderOpt.isDefined) - controllerContext.stats.uncleanLeaderElectionRate.mark() - leaderOpt - } else { - None - } - } - } - - def reassignPartitionLeaderElection(reassignment: Seq[Int], isr: Seq[Int], liveReplicas: Set[Int]): Option[Int] = { - reassignment.find(id => liveReplicas.contains(id) && isr.contains(id)) - } - - def preferredReplicaPartitionLeaderElection(assignment: Seq[Int], isr: Seq[Int], liveReplicas: Set[Int]): Option[Int] = { - assignment.headOption.filter(id => liveReplicas.contains(id) && isr.contains(id)) - } - - def controlledShutdownPartitionLeaderElection(assignment: Seq[Int], isr: Seq[Int], liveReplicas: Set[Int], 
shuttingDownBrokers: Set[Int]): Option[Int] = { - assignment.find(id => liveReplicas.contains(id) && isr.contains(id) && !shuttingDownBrokers.contains(id)) - } -} - -sealed trait PartitionLeaderElectionStrategy -final case class OfflinePartitionLeaderElectionStrategy(allowUnclean: Boolean) extends PartitionLeaderElectionStrategy -case object ReassignPartitionLeaderElectionStrategy extends PartitionLeaderElectionStrategy -case object PreferredReplicaPartitionLeaderElectionStrategy extends PartitionLeaderElectionStrategy -case object ControlledShutdownPartitionLeaderElectionStrategy extends PartitionLeaderElectionStrategy - -sealed trait PartitionState { - def state: Byte - def validPreviousStates: Set[PartitionState] -} - -case object NewPartition extends PartitionState { - val state: Byte = 0 - val validPreviousStates: Set[PartitionState] = Set(NonExistentPartition) -} - -case object OnlinePartition extends PartitionState { - val state: Byte = 1 - val validPreviousStates: Set[PartitionState] = Set(NewPartition, OnlinePartition, OfflinePartition) -} - -case object OfflinePartition extends PartitionState { - val state: Byte = 2 - val validPreviousStates: Set[PartitionState] = Set(NewPartition, OnlinePartition, OfflinePartition) -} - -case object NonExistentPartition extends PartitionState { - val state: Byte = 3 - val validPreviousStates: Set[PartitionState] = Set(OfflinePartition) -} diff --git a/core/src/main/scala/kafka/controller/ReplicaStateMachine.scala b/core/src/main/scala/kafka/controller/ReplicaStateMachine.scala deleted file mode 100644 index 406fff2b51bbf..0000000000000 --- a/core/src/main/scala/kafka/controller/ReplicaStateMachine.scala +++ /dev/null @@ -1,494 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -package kafka.controller - -import kafka.common.StateChangeFailedException -import kafka.server.KafkaConfig -import kafka.utils.Logging -import kafka.zk.KafkaZkClient -import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult -import kafka.zk.TopicPartitionStateZNode -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.ControllerMovedException -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.zookeeper.KeeperException.Code - -import java.util.stream.Collectors -import scala.collection.{Seq, mutable} - -abstract class ReplicaStateMachine(controllerContext: ControllerContext) extends Logging { - /** - * Invoked on successful controller election. 
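A small worked example for the pure helpers in PartitionLeaderElectionAlgorithms above (the replica ids are made up). Assignment order is the preference order, and only replicas that are both alive and in the ISR are electable:

    val assignment   = Seq(1, 2, 3)
    val isr          = Seq(2, 3)
    val liveReplicas = Set(2, 3)

    // The preferred leader (head of the assignment) is not alive/in sync, so no election happens.
    PartitionLeaderElectionAlgorithms.preferredReplicaPartitionLeaderElection(assignment, isr, liveReplicas)  // None

    // The first alive, in-sync replica in assignment order is chosen.
    PartitionLeaderElectionAlgorithms.reassignPartitionLeaderElection(assignment, isr, liveReplicas)          // Some(2)

    // With broker 2 shutting down, the next eligible replica is chosen instead.
    PartitionLeaderElectionAlgorithms.controlledShutdownPartitionLeaderElection(
      assignment, isr, liveReplicas, shuttingDownBrokers = Set(2))                                            // Some(3)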
- */ - def startup(): Unit = { - info("Initializing replica state") - initializeReplicaState() - info("Triggering online replica state changes") - val (onlineReplicas, offlineReplicas) = controllerContext.onlineAndOfflineReplicas - handleStateChanges(onlineReplicas.toSeq, OnlineReplica) - info("Triggering offline replica state changes") - handleStateChanges(offlineReplicas.toSeq, OfflineReplica) - debug(s"Started replica state machine with initial state -> ${controllerContext.replicaStates}") - } - - /** - * Invoked on controller shutdown. - */ - def shutdown(): Unit = { - info("Stopped replica state machine") - } - - /** - * Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions - * in zookeeper - */ - private def initializeReplicaState(): Unit = { - controllerContext.allPartitions.foreach { partition => - val replicas = controllerContext.partitionReplicaAssignment(partition) - replicas.foreach { replicaId => - val partitionAndReplica = PartitionAndReplica(partition, replicaId) - if (controllerContext.isReplicaOnline(replicaId, partition)) { - controllerContext.putReplicaState(partitionAndReplica, OnlineReplica) - } else { - // mark replicas on dead brokers as failed for topic deletion, if they belong to a topic to be deleted. - // This is required during controller failover since during controller failover a broker can go down, - // so the replicas on that broker should be moved to ReplicaDeletionIneligible to be on the safer side. - controllerContext.putReplicaState(partitionAndReplica, ReplicaDeletionIneligible) - } - } - } - } - - def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit -} - -/** - * This class represents the state machine for replicas. It defines the states that a replica can be in, and - * transitions to move the replica to another legal state. The different states that a replica can be in are - - * 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a - * replica can only get become follower state change request. Valid previous - * state is NonExistentReplica - * 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this - * state. In this state, it can get either become leader or become follower state change requests. - * Valid previous state are NewReplica, OnlineReplica, OfflineReplica and ReplicaDeletionIneligible - * 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica - * is down. Valid previous state are NewReplica, OnlineReplica, OfflineReplica and ReplicaDeletionIneligible - * 4. ReplicaDeletionStarted: If replica deletion starts, it is moved to this state. Valid previous state is OfflineReplica - * 5. ReplicaDeletionSuccessful: If replica responds with no error code in response to a delete replica request, it is - * moved to this state. Valid previous state is ReplicaDeletionStarted - * 6. ReplicaDeletionIneligible: If replica deletion fails, it is moved to this state. Valid previous states are - * ReplicaDeletionStarted and OfflineReplica - * 7. NonExistentReplica: If a replica is deleted successfully, it is moved to this state. 
Valid previous state is - * ReplicaDeletionSuccessful - */ -class ZkReplicaStateMachine(config: KafkaConfig, - stateChangeLogger: StateChangeLogger, - controllerContext: ControllerContext, - zkClient: KafkaZkClient, - controllerBrokerRequestBatch: ControllerBrokerRequestBatch) - extends ReplicaStateMachine(controllerContext) with Logging { - - private val controllerId = config.brokerId - this.logIdent = s"[ReplicaStateMachine controllerId=$controllerId] " - - override def handleStateChanges(replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit = { - if (replicas.nonEmpty) { - try { - controllerBrokerRequestBatch.newBatch() - replicas.groupBy(_.replica).foreachEntry { (replicaId, replicas) => - doHandleStateChanges(replicaId, replicas, targetState) - } - controllerBrokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch) - } catch { - case e: ControllerMovedException => - error(s"Controller moved to another broker when moving some replicas to $targetState state", e) - throw e - case e: Throwable => error(s"Error while moving some replicas to $targetState state", e) - } - } - } - - /** - * This API exercises the replica's state machine. It ensures that every state transition happens from a legal - * previous state to the target state. Valid state transitions are: - * NonExistentReplica --> NewReplica - * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the - * partition to every live broker - * - * NewReplica -> OnlineReplica - * --add the new replica to the assigned replica list if needed - * - * OnlineReplica,OfflineReplica -> OnlineReplica - * --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the - * partition to every live broker - * - * NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica - * --send StopReplicaRequest to the replica (w/o deletion) - * --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and - * UpdateMetadata request for the partition to every live broker. 
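The seven replica states and their documented valid previous states can be read as a transition table; the sketch below simply restates the class comment above as data (state names follow that comment, and the helper is illustrative rather than part of the deleted machine):

    val validPreviousReplicaStates: Map[String, Set[String]] = Map(
      "NewReplica"                -> Set("NonExistentReplica"),
      "OnlineReplica"             -> Set("NewReplica", "OnlineReplica", "OfflineReplica", "ReplicaDeletionIneligible"),
      "OfflineReplica"            -> Set("NewReplica", "OnlineReplica", "OfflineReplica", "ReplicaDeletionIneligible"),
      "ReplicaDeletionStarted"    -> Set("OfflineReplica"),
      "ReplicaDeletionSuccessful" -> Set("ReplicaDeletionStarted"),
      "ReplicaDeletionIneligible" -> Set("ReplicaDeletionStarted", "OfflineReplica"),
      "NonExistentReplica"        -> Set("ReplicaDeletionSuccessful")
    )

    def isValidReplicaTransition(from: String, to: String): Boolean =
      validPreviousReplicaStates.getOrElse(to, Set.empty).contains(from)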
- * - * OfflineReplica -> ReplicaDeletionStarted - * --send StopReplicaRequest to the replica (with deletion) - * - * ReplicaDeletionStarted -> ReplicaDeletionSuccessful - * -- mark the state of the replica in the state machine - * - * ReplicaDeletionStarted -> ReplicaDeletionIneligible - * -- mark the state of the replica in the state machine - * - * ReplicaDeletionSuccessful -> NonExistentReplica - * -- remove the replica from the in memory partition replica assignment cache - * - * @param replicaId The replica for which the state transition is invoked - * @param replicas The partitions on this replica for which the state transition is invoked - * @param targetState The end state that the replica should be moved to - */ - private def doHandleStateChanges(replicaId: Int, replicas: Seq[PartitionAndReplica], targetState: ReplicaState): Unit = { - val stateLogger = stateChangeLogger.withControllerEpoch(controllerContext.epoch) - val traceEnabled = stateLogger.isTraceEnabled - replicas.foreach(replica => controllerContext.putReplicaStateIfNotExists(replica, NonExistentReplica)) - val (validReplicas, invalidReplicas) = controllerContext.checkValidReplicaStateChange(replicas, targetState) - invalidReplicas.foreach(replica => logInvalidTransition(replica, targetState)) - - targetState match { - case NewReplica => - validReplicas.foreach { replica => - val partition = replica.topicPartition - val currentState = controllerContext.replicaState(replica) - - controllerContext.partitionLeadershipInfo(partition) match { - case Some(leaderIsrAndControllerEpoch) => - if (leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId) { - val exception = new StateChangeFailedException(s"Replica $replicaId for partition $partition cannot be moved to NewReplica state as it is being requested to become leader") - logFailedStateChange(replica, currentState, OfflineReplica, exception) - } else { - controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId), - replica.topicPartition, - leaderIsrAndControllerEpoch, - controllerContext.partitionFullReplicaAssignment(replica.topicPartition), - isNew = true) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, partition, currentState, NewReplica) - controllerContext.putReplicaState(replica, NewReplica) - } - case None => - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, partition, currentState, NewReplica) - controllerContext.putReplicaState(replica, NewReplica) - } - } - case OnlineReplica => - validReplicas.foreach { replica => - val partition = replica.topicPartition - val currentState = controllerContext.replicaState(replica) - - currentState match { - case NewReplica => - val assignment = controllerContext.partitionFullReplicaAssignment(partition) - if (!assignment.replicas.contains(replicaId)) { - error(s"Adding replica ($replicaId) that is not part of the assignment $assignment") - val newAssignment = assignment.copy(replicas = assignment.replicas :+ replicaId) - controllerContext.updatePartitionFullReplicaAssignment(partition, newAssignment) - } - case _ => - controllerContext.partitionLeadershipInfo(partition) match { - case Some(leaderIsrAndControllerEpoch) => - controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(replicaId), - replica.topicPartition, - leaderIsrAndControllerEpoch, - controllerContext.partitionFullReplicaAssignment(partition), isNew = false) - case None => - } - } - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, partition, currentState, OnlineReplica) - 
controllerContext.putReplicaState(replica, OnlineReplica) - } - case OfflineReplica => - validReplicas.foreach { replica => - controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition, deletePartition = false) - } - val (replicasWithLeadershipInfo, replicasWithoutLeadershipInfo) = validReplicas.partition { replica => - controllerContext.partitionLeadershipInfo(replica.topicPartition).isDefined - } - val updatedLeaderIsrAndControllerEpochs = removeReplicasFromIsr(replicaId, replicasWithLeadershipInfo.map(_.topicPartition)) - updatedLeaderIsrAndControllerEpochs.foreachEntry { (partition, leaderIsrAndControllerEpoch) => - stateLogger.info(s"Partition $partition state changed to $leaderIsrAndControllerEpoch after removing replica $replicaId from the ISR as part of transition to $OfflineReplica") - if (!controllerContext.isTopicQueuedUpForDeletion(partition.topic)) { - val recipients = controllerContext.partitionReplicaAssignment(partition).filterNot(_ == replicaId) - controllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(recipients, - partition, - leaderIsrAndControllerEpoch, - controllerContext.partitionFullReplicaAssignment(partition), isNew = false) - } - val replica = PartitionAndReplica(partition, replicaId) - val currentState = controllerContext.replicaState(replica) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, partition, currentState, OfflineReplica) - controllerContext.putReplicaState(replica, OfflineReplica) - } - - replicasWithoutLeadershipInfo.foreach { replica => - val currentState = controllerContext.replicaState(replica) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, OfflineReplica) - controllerBrokerRequestBatch.addUpdateMetadataRequestForBrokers(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(replica.topicPartition)) - controllerContext.putReplicaState(replica, OfflineReplica) - } - case ReplicaDeletionStarted => - validReplicas.foreach { replica => - val currentState = controllerContext.replicaState(replica) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionStarted) - controllerContext.putReplicaState(replica, ReplicaDeletionStarted) - controllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(replicaId), replica.topicPartition, deletePartition = true) - } - case ReplicaDeletionIneligible => - validReplicas.foreach { replica => - val currentState = controllerContext.replicaState(replica) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionIneligible) - controllerContext.putReplicaState(replica, ReplicaDeletionIneligible) - } - case ReplicaDeletionSuccessful => - validReplicas.foreach { replica => - val currentState = controllerContext.replicaState(replica) - if (traceEnabled) - logSuccessfulTransition(stateLogger, replicaId, replica.topicPartition, currentState, ReplicaDeletionSuccessful) - controllerContext.putReplicaState(replica, ReplicaDeletionSuccessful) - } - case NonExistentReplica => - validReplicas.foreach { replica => - val currentState = controllerContext.replicaState(replica) - val newAssignedReplicas = controllerContext - .partitionFullReplicaAssignment(replica.topicPartition) - .removeReplica(replica.replica) - - controllerContext.updatePartitionFullReplicaAssignment(replica.topicPartition, newAssignedReplicas) - if (traceEnabled) - logSuccessfulTransition(stateLogger, 
replicaId, replica.topicPartition, currentState, NonExistentReplica) - controllerContext.removeReplicaState(replica) - } - } - } - - /** - * Repeatedly attempt to remove a replica from the isr of multiple partitions until there are no more remaining partitions - * to retry. - * @param replicaId The replica being removed from isr of multiple partitions - * @param partitions The partitions from which we're trying to remove the replica from isr - * @return The updated LeaderIsrAndControllerEpochs of all partitions for which we successfully removed the replica from isr. - */ - private def removeReplicasFromIsr( - replicaId: Int, - partitions: Seq[TopicPartition] - ): Map[TopicPartition, LeaderIsrAndControllerEpoch] = { - var results = Map.empty[TopicPartition, LeaderIsrAndControllerEpoch] - var remaining = partitions - while (remaining.nonEmpty) { - val (finishedRemoval, removalsToRetry) = doRemoveReplicasFromIsr(replicaId, remaining) - remaining = removalsToRetry - - finishedRemoval.foreach { - case (partition, Left(e)) => - val replica = PartitionAndReplica(partition, replicaId) - val currentState = controllerContext.replicaState(replica) - logFailedStateChange(replica, currentState, OfflineReplica, e) - case (partition, Right(leaderIsrAndEpoch)) => - results += partition -> leaderIsrAndEpoch - } - } - results - } - - /** - * Try to remove a replica from the isr of multiple partitions. - * Removing a replica from isr updates partition state in zookeeper. - * - * @param replicaId The replica being removed from isr of multiple partitions - * @param partitions The partitions from which we're trying to remove the replica from isr - * @return A tuple of two elements: - * 1. The updated Right[LeaderIsrAndControllerEpochs] of all partitions for which we successfully - * removed the replica from isr. Or Left[Exception] corresponding to failed removals that should - * not be retried - * 2. The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts can occur if - * the partition leader updated partition state while the controller attempted to update partition state. 
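The transition rules documented above are enforced against each target state's set of valid predecessor states (the validPreviousStates members of the ReplicaState case objects removed further down in this hunk). The sketch below is a minimal, self-contained illustration of how such a check can partition a batch of replicas into valid and invalid transitions, in the spirit of checkValidReplicaStateChange; every type and name here is a simplified stand-in, not the removed controller code.

// Simplified stand-ins; only a few of the real states are modelled, and all names are illustrative.
sealed trait ReplicaState { def validPreviousStates: Set[ReplicaState] }
case object NewReplica extends ReplicaState { val validPreviousStates: Set[ReplicaState] = Set(NonExistentReplica) }
case object OnlineReplica extends ReplicaState { val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica) }
case object OfflineReplica extends ReplicaState { val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica) }
case object NonExistentReplica extends ReplicaState { val validPreviousStates: Set[ReplicaState] = Set.empty }

final case class Replica(brokerId: Int, topic: String, partition: Int)

object ReplicaTransitionSketch {
  /** Split the requested replicas into those allowed to move to `targetState` and those that are not. */
  def checkValidStateChange(currentStates: Map[Replica, ReplicaState],
                            replicas: Seq[Replica],
                            targetState: ReplicaState): (Seq[Replica], Seq[Replica]) =
    replicas.partition { replica =>
      targetState.validPreviousStates.contains(currentStates.getOrElse(replica, NonExistentReplica))
    }

  /** Valid replicas move to the target state; invalid transitions are only logged and skipped. */
  def handleStateChanges(currentStates: Map[Replica, ReplicaState],
                         replicas: Seq[Replica],
                         targetState: ReplicaState): Map[Replica, ReplicaState] = {
    val (valid, invalid) = checkValidStateChange(currentStates, replicas, targetState)
    invalid.foreach(r => println(s"Invalid transition for $r to $targetState"))
    currentStates ++ valid.map(_ -> targetState)
  }
}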
- */ - private def doRemoveReplicasFromIsr( - replicaId: Int, - partitions: Seq[TopicPartition] - ): (Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]], Seq[TopicPartition]) = { - val (leaderAndIsrs, partitionsWithNoLeaderAndIsrInZk) = getTopicPartitionStatesFromZk(partitions) - val (leaderAndIsrsWithReplica, leaderAndIsrsWithoutReplica) = leaderAndIsrs.partition { case (_, result) => - result.map { leaderAndIsr => - leaderAndIsr.isr.contains(replicaId) - }.getOrElse(false) - } - - val adjustedLeaderAndIsrs: Map[TopicPartition, LeaderAndIsr] = leaderAndIsrsWithReplica.flatMap { - case (partition, result) => - result.toOption.map { leaderAndIsr => - val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NO_LEADER else leaderAndIsr.leader - val adjustedIsr = - if (leaderAndIsr.isr.size == 1) leaderAndIsr.isr - else leaderAndIsr.isr.stream().filter(_ != replicaId).collect(Collectors.toList[Integer]) - partition -> leaderAndIsr.newLeaderAndIsr(newLeader, adjustedIsr) - } - } - - val UpdateLeaderAndIsrResult(finishedPartitions, updatesToRetry) = zkClient.updateLeaderAndIsr( - adjustedLeaderAndIsrs, controllerContext.epoch, controllerContext.epochZkVersion) - - val exceptionsForPartitionsWithNoLeaderAndIsrInZk: Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]] = - partitionsWithNoLeaderAndIsrInZk.iterator.flatMap { partition => - if (!controllerContext.isTopicQueuedUpForDeletion(partition.topic)) { - val exception = new StateChangeFailedException( - s"Failed to change state of replica $replicaId for partition $partition since the leader and isr " + - "path in zookeeper is empty" - ) - Option(partition -> Left(exception)) - } else None - }.toMap - - val leaderIsrAndControllerEpochs: Map[TopicPartition, Either[Exception, LeaderIsrAndControllerEpoch]] = - (leaderAndIsrsWithoutReplica ++ finishedPartitions).map { case (partition, result) => - (partition, result.map { leaderAndIsr => - val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerContext.epoch) - controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch) - leaderIsrAndControllerEpoch - }) - } - - if (isDebugEnabled) { - updatesToRetry.foreach { partition => - debug(s"Controller failed to remove replica $replicaId from ISR of partition $partition. " + - s"Attempted to write state ${adjustedLeaderAndIsrs(partition)}, but failed with bad ZK version. This will be retried.") - } - } - - (leaderIsrAndControllerEpochs ++ exceptionsForPartitionsWithNoLeaderAndIsrInZk, updatesToRetry) - } - - /** - * Gets the partition state from zookeeper - * @param partitions the partitions whose state we want from zookeeper - * @return A tuple of two values: - * 1. The Right(LeaderAndIsrs) of partitions whose state we successfully read from zookeeper. - * The Left(Exception) to failed zookeeper lookups or states whose controller epoch exceeds our current epoch - * 2. The partitions that had no leader and isr state in zookeeper. This happens if the controller - * didn't finish partition initialization. 
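removeReplicasFromIsr and doRemoveReplicasFromIsr above describe a shrink-then-conditionally-write loop: the replica is dropped from each partition's ISR (demoting the leader to no-leader if the removed replica was leading, and never emptying the ISR), the new state is written with a version check, and any partition that loses the version race is re-read and retried. A minimal, self-contained sketch of that pattern follows; the types and the conditional-write function are stand-ins, not the ZooKeeper-backed implementation being removed here.

import scala.annotation.tailrec

object IsrRemovalSketch {
  // Simplified stand-ins for illustration only.
  final case class Partition(topic: String, id: Int)
  final case class LeaderAndIsr(leader: Int, isr: List[Int]) {
    def withoutReplica(replicaId: Int): LeaderAndIsr = {
      val newLeader = if (leader == replicaId) -1 else leader                  // -1 stands for "no leader"
      val newIsr = if (isr.size == 1) isr else isr.filterNot(_ == replicaId)   // never shrink the ISR to empty
      LeaderAndIsr(newLeader, newIsr)
    }
  }

  // Outcome of a conditional (versioned) write: either the committed state or "lost the race, retry".
  sealed trait WriteResult
  final case class Committed(state: LeaderAndIsr) extends WriteResult
  case object VersionConflict extends WriteResult

  /** Retry partitions whose conditional update hit a version conflict until none remain. */
  @tailrec
  def removeReplicaFromIsr(replicaId: Int,
                           readState: Partition => LeaderAndIsr,
                           conditionalWrite: (Partition, LeaderAndIsr) => WriteResult,
                           remaining: Set[Partition],
                           done: Map[Partition, LeaderAndIsr] = Map.empty): Map[Partition, LeaderAndIsr] = {
    if (remaining.isEmpty) done
    else {
      // Re-read before each attempt so a retry sees the state written by whoever won the previous race.
      val attempts = remaining.map(p => p -> conditionalWrite(p, readState(p).withoutReplica(replicaId)))
      val committed = attempts.collect { case (p, Committed(state)) => p -> state }.toMap
      val toRetry = attempts.collect { case (p, VersionConflict) => p }
      removeReplicaFromIsr(replicaId, readState, conditionalWrite, toRetry, done ++ committed)
    }
  }
}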
- */ - private def getTopicPartitionStatesFromZk( - partitions: Seq[TopicPartition] - ): (Map[TopicPartition, Either[Exception, LeaderAndIsr]], Seq[TopicPartition]) = { - val getDataResponses = try { - zkClient.getTopicPartitionStatesRaw(partitions) - } catch { - case e: Exception => - return (partitions.iterator.map(_ -> Left(e)).toMap, Seq.empty) - } - - val partitionsWithNoLeaderAndIsrInZk = mutable.Buffer.empty[TopicPartition] - val result = mutable.Map.empty[TopicPartition, Either[Exception, LeaderAndIsr]] - - getDataResponses.foreach[Unit] { getDataResponse => - val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition] - if (getDataResponse.resultCode == Code.OK) { - TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat) match { - case None => - partitionsWithNoLeaderAndIsrInZk += partition - case Some(leaderIsrAndControllerEpoch) => - if (leaderIsrAndControllerEpoch.controllerEpoch > controllerContext.epoch) { - val exception = new StateChangeFailedException( - "Leader and isr path written by another controller. This probably " + - s"means the current controller with epoch ${controllerContext.epoch} went through a soft failure and " + - s"another controller was elected with epoch ${leaderIsrAndControllerEpoch.controllerEpoch}. Aborting " + - "state change by this controller" - ) - result += (partition -> Left(exception)) - } else { - result += (partition -> Right(leaderIsrAndControllerEpoch.leaderAndIsr)) - } - } - } else if (getDataResponse.resultCode == Code.NONODE) { - partitionsWithNoLeaderAndIsrInZk += partition - } else { - result += (partition -> Left(getDataResponse.resultException.get)) - } - } - - (result.toMap, partitionsWithNoLeaderAndIsrInZk) - } - - private def logSuccessfulTransition(logger: StateChangeLogger, replicaId: Int, partition: TopicPartition, - currState: ReplicaState, targetState: ReplicaState): Unit = { - logger.trace(s"Changed state of replica $replicaId for partition $partition from $currState to $targetState") - } - - private def logInvalidTransition(replica: PartitionAndReplica, targetState: ReplicaState): Unit = { - val currState = controllerContext.replicaState(replica) - val e = new IllegalStateException(s"Replica $replica should be in the ${targetState.validPreviousStates.mkString(",")} " + - s"states before moving to $targetState state. 
Instead it is in $currState state") - logFailedStateChange(replica, currState, targetState, e) - } - - private def logFailedStateChange(replica: PartitionAndReplica, currState: ReplicaState, targetState: ReplicaState, t: Throwable): Unit = { - stateChangeLogger.withControllerEpoch(controllerContext.epoch) - .error(s"Controller $controllerId epoch ${controllerContext.epoch} initiated state change of replica ${replica.replica} " + - s"for partition ${replica.topicPartition} from $currState to $targetState failed", t) - } -} - -sealed trait ReplicaState { - def state: Byte - def validPreviousStates: Set[ReplicaState] -} - -case object NewReplica extends ReplicaState { - val state: Byte = 1 - val validPreviousStates: Set[ReplicaState] = Set(NonExistentReplica) -} - -case object OnlineReplica extends ReplicaState { - val state: Byte = 2 - val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible) -} - -case object OfflineReplica extends ReplicaState { - val state: Byte = 3 - val validPreviousStates: Set[ReplicaState] = Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible) -} - -case object ReplicaDeletionStarted extends ReplicaState { - val state: Byte = 4 - val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica) -} - -case object ReplicaDeletionSuccessful extends ReplicaState { - val state: Byte = 5 - val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionStarted) -} - -case object ReplicaDeletionIneligible extends ReplicaState { - val state: Byte = 6 - val validPreviousStates: Set[ReplicaState] = Set(OfflineReplica, ReplicaDeletionStarted) -} - -case object NonExistentReplica extends ReplicaState { - val state: Byte = 7 - val validPreviousStates: Set[ReplicaState] = Set(ReplicaDeletionSuccessful) -} diff --git a/core/src/main/scala/kafka/controller/TopicDeletionManager.scala b/core/src/main/scala/kafka/controller/TopicDeletionManager.scala deleted file mode 100755 index f36338edaae9a..0000000000000 --- a/core/src/main/scala/kafka/controller/TopicDeletionManager.scala +++ /dev/null @@ -1,357 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.controller - -import kafka.server.KafkaConfig -import kafka.utils.Logging -import kafka.zk.KafkaZkClient -import org.apache.kafka.common.TopicPartition - -import scala.collection.Set -import scala.collection.mutable - -trait DeletionClient { - def deleteTopic(topic: String, epochZkVersion: Int): Unit - def deleteTopicDeletions(topics: Seq[String], epochZkVersion: Int): Unit - def mutePartitionModifications(topic: String): Unit - def sendMetadataUpdate(partitions: Set[TopicPartition]): Unit -} - -class ControllerDeletionClient(controller: KafkaController, zkClient: KafkaZkClient) extends DeletionClient { - override def deleteTopic(topic: String, epochZkVersion: Int): Unit = { - zkClient.deleteTopicZNode(topic, epochZkVersion) - zkClient.deleteTopicConfigs(Seq(topic), epochZkVersion) - zkClient.deleteTopicDeletions(Seq(topic), epochZkVersion) - } - - override def deleteTopicDeletions(topics: Seq[String], epochZkVersion: Int): Unit = { - zkClient.deleteTopicDeletions(topics, epochZkVersion) - } - - override def mutePartitionModifications(topic: String): Unit = { - controller.unregisterPartitionModificationsHandlers(Seq(topic)) - } - - override def sendMetadataUpdate(partitions: Set[TopicPartition]): Unit = { - controller.sendUpdateMetadataRequest(controller.controllerContext.liveOrShuttingDownBrokerIds.toSeq, partitions) - } -} - -/** - * This manages the state machine for topic deletion. - * 1. TopicCommand issues topic deletion by creating a new admin path /admin/delete_topics/ - * 2. The controller listens for child changes on /admin/delete_topic and starts topic deletion for the respective topics - * 3. The controller's ControllerEventThread handles topic deletion. A topic will be ineligible - * for deletion in the following scenarios - - * 3.1 broker hosting one of the replicas for that topic goes down - * 3.2 partition reassignment for partitions of that topic is in progress - * 4. Topic deletion is resumed when - - * 4.1 broker hosting one of the replicas for that topic is started - * 4.2 partition reassignment for partitions of that topic completes - * 5. Every replica for a topic being deleted is in either of the 3 states - - * 5.1 TopicDeletionStarted Replica enters TopicDeletionStarted phase when onPartitionDeletion is invoked. - * This happens when the child change watch for /admin/delete_topics fires on the controller. As part of this state - * change, the controller sends StopReplicaRequests to all replicas. It registers a callback for the - * StopReplicaResponse when deletePartition=true thereby invoking a callback when a response for delete replica - * is received from every replica) - * 5.2 TopicDeletionSuccessful moves replicas from - * TopicDeletionStarted->TopicDeletionSuccessful depending on the error codes in StopReplicaResponse - * 5.3 TopicDeletionFailed moves replicas from - * TopicDeletionStarted->TopicDeletionFailed depending on the error codes in StopReplicaResponse. - * In general, if a broker dies and if it hosted replicas for topics being deleted, the controller marks the - * respective replicas in TopicDeletionFailed state in the onBrokerFailure callback. The reason is that if a - * broker fails before the request is sent and after the replica is in TopicDeletionStarted state, - * it is possible that the replica will mistakenly remain in TopicDeletionStarted state and topic deletion - * will not be retried when the broker comes back up. - * 6. 
A topic is marked successfully deleted only if all replicas are in TopicDeletionSuccessful - * state. Topic deletion teardown mode deletes all topic state from the controllerContext - * as well as from zookeeper. This is the only time the /brokers/topics/ path gets deleted. On the other hand, - * if no replica is in TopicDeletionStarted state and at least one replica is in TopicDeletionFailed state, then - * it marks the topic for deletion retry. - */ -class TopicDeletionManager(config: KafkaConfig, - controllerContext: ControllerContext, - replicaStateMachine: ReplicaStateMachine, - partitionStateMachine: PartitionStateMachine, - client: DeletionClient) extends Logging { - this.logIdent = s"[Topic Deletion Manager ${config.brokerId}] " - val isDeleteTopicEnabled: Boolean = config.deleteTopicEnable - - def init(initialTopicsToBeDeleted: Set[String], initialTopicsIneligibleForDeletion: Set[String]): Unit = { - info(s"Initializing manager with initial deletions: $initialTopicsToBeDeleted, " + - s"initial ineligible deletions: $initialTopicsIneligibleForDeletion") - - if (isDeleteTopicEnabled) { - controllerContext.queueTopicDeletion(initialTopicsToBeDeleted) - controllerContext.topicsIneligibleForDeletion ++= initialTopicsIneligibleForDeletion & controllerContext.topicsToBeDeleted - } else { - // if delete topic is disabled clean the topic entries under /admin/delete_topics - info(s"Removing $initialTopicsToBeDeleted since delete topic is disabled") - client.deleteTopicDeletions(initialTopicsToBeDeleted.toSeq, controllerContext.epochZkVersion) - } - } - - def tryTopicDeletion(): Unit = { - if (isDeleteTopicEnabled) { - resumeDeletions() - } - } - - /** - * Invoked by the child change listener on /admin/delete_topics to queue up the topics for deletion. The topic gets added - * to the topicsToBeDeleted list and only gets removed from the list when the topic deletion has completed successfully - * i.e. all replicas of all partitions of that topic are deleted successfully. - * @param topics Topics that should be deleted - */ - def enqueueTopicsForDeletion(topics: Set[String]): Unit = { - if (isDeleteTopicEnabled) { - controllerContext.queueTopicDeletion(topics) - resumeDeletions() - } - } - - /** - * Invoked when any event that can possibly resume topic deletion occurs. These events include - - * 1. New broker starts up. Any replicas belonging to topics queued up for deletion can be deleted since the broker is up - * 2. Partition reassignment completes. Any partitions belonging to topics queued up for deletion finished reassignment - * @param topics Topics for which deletion can be resumed - */ - def resumeDeletionForTopics(topics: Set[String] = Set.empty): Unit = { - if (isDeleteTopicEnabled) { - val topicsToResumeDeletion = topics & controllerContext.topicsToBeDeleted - if (topicsToResumeDeletion.nonEmpty) { - controllerContext.topicsIneligibleForDeletion --= topicsToResumeDeletion - resumeDeletions() - } - } - } - - /** - * Invoked when a broker that hosts replicas for topics to be deleted goes down. Also invoked when the callback for - * StopReplicaResponse receives an error code for the replicas of a topic to be deleted. As part of this, the replicas - * are moved from ReplicaDeletionStarted to ReplicaDeletionIneligible state. Also, the topic is added to the list of topics - * ineligible for deletion until further notice. 
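The lifecycle in the class comment above reduces to a per-topic decision that resumeDeletions re-evaluates after every broker, reassignment, or StopReplica event: finish the topic when every replica reports successful deletion, wait while delete requests are outstanding, retry when nothing is in flight but some replicas became ineligible, and otherwise (re)start deletion. The sketch below restates that decision over simplified stand-in states; it deliberately ignores topic-level ineligibility and is not the deleted TopicDeletionManager code.

object TopicDeletionDecisionSketch {
  // Simplified stand-ins for the per-replica deletion states tracked by the controller.
  sealed trait ReplicaDeletionState
  case object DeletionStarted extends ReplicaDeletionState
  case object DeletionSuccessful extends ReplicaDeletionState
  case object DeletionIneligible extends ReplicaDeletionState
  case object NotYetDeleted extends ReplicaDeletionState

  sealed trait Decision
  case object CompleteTopicDeletion extends Decision    // tear down all topic state, controller side and metadata store
  case object AwaitOutstandingDeletes extends Decision  // StopReplica(deletePartition = true) responses still pending
  case object RetryIneligibleReplicas extends Decision  // reset failed replicas to offline, then try again
  case object StartDeletion extends Decision            // first pass: offline the replicas, then start deletion

  def decide(replicaStates: Seq[ReplicaDeletionState]): Decision =
    if (replicaStates.nonEmpty && replicaStates.forall(_ == DeletionSuccessful)) CompleteTopicDeletion
    else if (replicaStates.contains(DeletionStarted)) AwaitOutstandingDeletes
    else if (replicaStates.contains(DeletionIneligible)) RetryIneligibleReplicas
    else StartDeletion
}

For example, decide(Seq(DeletionSuccessful, DeletionIneligible)) yields RetryIneligibleReplicas, matching the "no replica started, at least one failed" branch described above.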
- * @param replicas Replicas for which deletion has failed - */ - def failReplicaDeletion(replicas: Set[PartitionAndReplica]): Unit = { - if (isDeleteTopicEnabled) { - val replicasThatFailedToDelete = replicas.filter(r => isTopicQueuedUpForDeletion(r.topic)) - if (replicasThatFailedToDelete.nonEmpty) { - val topics = replicasThatFailedToDelete.map(_.topic) - debug(s"Deletion failed for replicas ${replicasThatFailedToDelete.mkString(",")}. Halting deletion for topics $topics") - replicaStateMachine.handleStateChanges(replicasThatFailedToDelete.toSeq, ReplicaDeletionIneligible) - markTopicIneligibleForDeletion(topics, reason = "replica deletion failure") - resumeDeletions() - } - } - } - - /** - * Halt delete topic if - - * 1. replicas being down - * 2. partition reassignment in progress for some partitions of the topic - * @param topics Topics that should be marked ineligible for deletion. No op if the topic is was not previously queued up for deletion - */ - def markTopicIneligibleForDeletion(topics: Set[String], reason: => String): Unit = { - if (isDeleteTopicEnabled) { - val newTopicsToHaltDeletion = controllerContext.topicsToBeDeleted & topics - controllerContext.topicsIneligibleForDeletion ++= newTopicsToHaltDeletion - if (newTopicsToHaltDeletion.nonEmpty) - info(s"Halted deletion of topics ${newTopicsToHaltDeletion.mkString(",")} due to $reason") - } - } - - private def isTopicIneligibleForDeletion(topic: String): Boolean = { - if (isDeleteTopicEnabled) { - controllerContext.topicsIneligibleForDeletion.contains(topic) - } else - true - } - - private def isTopicDeletionInProgress(topic: String): Boolean = { - if (isDeleteTopicEnabled) { - controllerContext.isAnyReplicaInState(topic, ReplicaDeletionStarted) - } else - false - } - - def isTopicQueuedUpForDeletion(topic: String): Boolean = { - if (isDeleteTopicEnabled) { - controllerContext.isTopicQueuedUpForDeletion(topic) - } else - false - } - - /** - * Invoked by the StopReplicaResponse callback when it receives no error code for a replica of a topic to be deleted. - * As part of this, the replicas are moved from ReplicaDeletionStarted to ReplicaDeletionSuccessful state. Tears down - * the topic if all replicas of a topic have been successfully deleted - * @param replicas Replicas that were successfully deleted by the broker - */ - def completeReplicaDeletion(replicas: Set[PartitionAndReplica]): Unit = { - val successfullyDeletedReplicas = replicas.filter(r => isTopicQueuedUpForDeletion(r.topic)) - debug(s"Deletion successfully completed for replicas ${successfullyDeletedReplicas.mkString(",")}") - replicaStateMachine.handleStateChanges(successfullyDeletedReplicas.toSeq, ReplicaDeletionSuccessful) - resumeDeletions() - } - - /** - * Topic deletion can be retried if - - * 1. Topic deletion is not already complete - * 2. Topic deletion is currently not in progress for that topic - * 3. 
Topic is currently not marked ineligible for deletion - * @param topic Topic - * @return Whether or not deletion can be retried for the topic - */ - private def isTopicEligibleForDeletion(topic: String): Boolean = { - controllerContext.isTopicQueuedUpForDeletion(topic) && - !isTopicDeletionInProgress(topic) && - !isTopicIneligibleForDeletion(topic) - } - - /** - * If the topic is queued for deletion but deletion is not currently under progress, then deletion is retried for that topic - * To ensure a successful retry, reset states for respective replicas from ReplicaDeletionIneligible to OfflineReplica state - * @param topics Topics for which deletion should be retried - */ - private def retryDeletionForIneligibleReplicas(topics: Set[String]): Unit = { - // reset replica states from ReplicaDeletionIneligible to OfflineReplica - val failedReplicas = topics.flatMap(controllerContext.replicasInState(_, ReplicaDeletionIneligible)) - debug(s"Retrying deletion of topics ${topics.mkString(",")} since replicas ${failedReplicas.mkString(",")} were not successfully deleted") - replicaStateMachine.handleStateChanges(failedReplicas.toSeq, OfflineReplica) - } - - private def completeDeleteTopic(topic: String): Unit = { - // deregister partition change listener on the deleted topic. This is to prevent the partition change listener - // firing before the new topic listener when a deleted topic gets auto created - client.mutePartitionModifications(topic) - val replicasForDeletedTopic = controllerContext.replicasInState(topic, ReplicaDeletionSuccessful) - // controller will remove this replica from the state machine as well as its partition assignment cache - replicaStateMachine.handleStateChanges(replicasForDeletedTopic.toSeq, NonExistentReplica) - client.deleteTopic(topic, controllerContext.epochZkVersion) - controllerContext.removeTopic(topic) - } - - /** - * Invoked with the list of topics to be deleted - * It invokes onPartitionDeletion for all partitions of a topic. - * The updateMetadataRequest is also going to set the leader for the topics being deleted to - * [[org.apache.kafka.metadata.LeaderAndIsr#LeaderDuringDelete]]. This lets each broker know that this topic is being deleted and can be - * removed from their caches. - */ - private def onTopicDeletion(topics: Set[String]): Unit = { - val unseenTopicsForDeletion = topics.diff(controllerContext.topicsWithDeletionStarted) - if (unseenTopicsForDeletion.nonEmpty) { - val unseenPartitionsForDeletion = unseenTopicsForDeletion.flatMap(controllerContext.partitionsForTopic) - partitionStateMachine.handleStateChanges(unseenPartitionsForDeletion.toSeq, OfflinePartition) - partitionStateMachine.handleStateChanges(unseenPartitionsForDeletion.toSeq, NonExistentPartition) - // adding of unseenTopicsForDeletion to topics with deletion started must be done after the partition - // state changes to make sure the offlinePartitionCount metric is properly updated - controllerContext.beginTopicDeletion(unseenTopicsForDeletion) - } - - // send update metadata so that brokers stop serving data for topics to be deleted - client.sendMetadataUpdate(topics.flatMap(controllerContext.partitionsForTopic)) - - onPartitionDeletion(topics) - } - - /** - * Invoked by onTopicDeletion with the list of partitions for topics to be deleted - * It does the following - - * 1. Move all dead replicas directly to ReplicaDeletionIneligible state. Also mark the respective topics ineligible - * for deletion if some replicas are dead since it won't complete successfully anyway - * 2. 
Move all replicas for the partitions to OfflineReplica state. This will send StopReplicaRequest to the replicas - * and LeaderAndIsrRequest to the leader with the shrunk ISR. When the leader replica itself is moved to OfflineReplica state, - * it will skip sending the LeaderAndIsrRequest since the leader will be updated to -1 - * 3. Move all replicas to ReplicaDeletionStarted state. This will send StopReplicaRequest with deletePartition=true. And - * will delete all persistent data from all replicas of the respective partitions - */ - private def onPartitionDeletion(topicsToBeDeleted: Set[String]): Unit = { - val allDeadReplicas = mutable.ListBuffer.empty[PartitionAndReplica] - val allReplicasForDeletionRetry = mutable.ListBuffer.empty[PartitionAndReplica] - val allTopicsIneligibleForDeletion = mutable.Set.empty[String] - - topicsToBeDeleted.foreach { topic => - val (aliveReplicas, deadReplicas) = controllerContext.replicasForTopic(topic).partition { r => - controllerContext.isReplicaOnline(r.replica, r.topicPartition) - } - - val successfullyDeletedReplicas = controllerContext.replicasInState(topic, ReplicaDeletionSuccessful) - val replicasForDeletionRetry = aliveReplicas.diff(successfullyDeletedReplicas) - - allDeadReplicas ++= deadReplicas - allReplicasForDeletionRetry ++= replicasForDeletionRetry - - if (deadReplicas.nonEmpty) { - debug(s"Dead Replicas (${deadReplicas.mkString(",")}) found for topic $topic") - allTopicsIneligibleForDeletion += topic - } - } - - // move dead replicas directly to failed state - replicaStateMachine.handleStateChanges(allDeadReplicas, ReplicaDeletionIneligible) - // send stop replica to all followers that are not in the OfflineReplica state so they stop sending fetch requests to the leader - replicaStateMachine.handleStateChanges(allReplicasForDeletionRetry, OfflineReplica) - replicaStateMachine.handleStateChanges(allReplicasForDeletionRetry, ReplicaDeletionStarted) - - if (allTopicsIneligibleForDeletion.nonEmpty) { - markTopicIneligibleForDeletion(allTopicsIneligibleForDeletion, reason = "offline replicas") - } - } - - private def resumeDeletions(): Unit = { - val topicsQueuedForDeletion = Set.empty[String] ++ controllerContext.topicsToBeDeleted - val topicsEligibleForRetry = mutable.Set.empty[String] - val topicsEligibleForDeletion = mutable.Set.empty[String] - - if (topicsQueuedForDeletion.nonEmpty) - info(s"Handling deletion for topics ${topicsQueuedForDeletion.mkString(",")}") - - topicsQueuedForDeletion.foreach { topic => - // if all replicas are marked as deleted successfully, then topic deletion is done - if (controllerContext.areAllReplicasInState(topic, ReplicaDeletionSuccessful)) { - // clear up all state for this topic from controller cache and zookeeper - completeDeleteTopic(topic) - info(s"Deletion of topic $topic successfully completed") - } else if (!controllerContext.isAnyReplicaInState(topic, ReplicaDeletionStarted)) { - // if you come here, then no replica is in TopicDeletionStarted and all replicas are not in - // TopicDeletionSuccessful. That means, that either given topic haven't initiated deletion - // or there is at least one failed replica (which means topic deletion should be retried). - if (controllerContext.isAnyReplicaInState(topic, ReplicaDeletionIneligible)) { - topicsEligibleForRetry += topic - } - } - - // Add topic to the eligible set if it is eligible for deletion. 
- if (isTopicEligibleForDeletion(topic)) { - info(s"Deletion of topic $topic (re)started") - topicsEligibleForDeletion += topic - } - } - - // topic deletion retry will be kicked off - if (topicsEligibleForRetry.nonEmpty) { - retryDeletionForIneligibleReplicas(topicsEligibleForRetry) - } - - // topic deletion will be kicked off - if (topicsEligibleForDeletion.nonEmpty) { - onTopicDeletion(topicsEligibleForDeletion) - } - } -} diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala index 79e0cbe630dd2..211be799a7e89 100644 --- a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala +++ b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala @@ -17,7 +17,7 @@ package kafka.coordinator.group import kafka.cluster.PartitionListener -import kafka.server.{ReplicaManager, defaultError, genericError} +import kafka.server.{AddPartitionsToTxnManager, ReplicaManager} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} @@ -110,7 +110,7 @@ class CoordinatorPartitionWriter( producerEpoch: Short, apiVersion: Short ): CompletableFuture[VerificationGuard] = { - val transactionSupportedOperation = if (apiVersion >= 4) genericError else defaultError + val transactionSupportedOperation = AddPartitionsToTxnManager.txnOffsetCommitRequestVersionToTransactionSupportedOperation(apiVersion) val future = new CompletableFuture[VerificationGuard]() replicaManager.maybeStartTransactionVerificationForPartition( topicPartition = tp, @@ -165,4 +165,25 @@ class CoordinatorPartitionWriter( // Required offset. partitionResult.lastOffset + 1 } + + override def deleteRecords(tp: TopicPartition, deleteBeforeOffset: Long): CompletableFuture[Void] = { + val responseFuture: CompletableFuture[Void] = new CompletableFuture[Void]() + + replicaManager.deleteRecords( + timeout = 30000L, // 30 seconds. 
+ offsetPerPartition = Map(tp -> deleteBeforeOffset), + responseCallback = results => { + val result = results.get(tp) + if (result.isEmpty) { + responseFuture.completeExceptionally(new IllegalStateException(s"Delete status $result should have partition $tp.")) + } else if (result.get.errorCode != Errors.NONE.code) { + responseFuture.completeExceptionally(Errors.forCode(result.get.errorCode).exception) + } else { + responseFuture.complete(null) + } + }, + allowInternalTopicDeletion = true + ) + responseFuture + } } diff --git a/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala b/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala index 8ca956daedf98..c1f071413e800 100644 --- a/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala +++ b/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala @@ -38,6 +38,7 @@ import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, GroupJoinKe import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.storage.internals.log.VerificationGuard +import java.util.concurrent.CompletableFuture import scala.collection.{Map, Seq, Set, immutable, mutable} import scala.math.max @@ -931,7 +932,7 @@ private[group] class GroupCoordinator( offsetTopicPartition, offsetMetadata, newRequestLocal, responseCallback, Some(verificationGuard)) } } - val transactionSupportedOperation = if (apiVersion >= 4) genericError else defaultError + val transactionSupportedOperation = AddPartitionsToTxnManager.txnOffsetCommitRequestVersionToTransactionSupportedOperation(apiVersion) groupManager.replicaManager.maybeStartTransactionVerificationForPartition( topicPartition = offsetTopicPartition, transactionalId, @@ -982,7 +983,7 @@ private[group] class GroupCoordinator( def scheduleHandleTxnCompletion(producerId: Long, offsetsPartitions: Iterable[TopicPartition], - transactionResult: TransactionResult): Unit = { + transactionResult: TransactionResult): CompletableFuture[Void] = { require(offsetsPartitions.forall(_.topic == Topic.GROUP_METADATA_TOPIC_NAME)) val isCommit = transactionResult == TransactionResult.COMMIT groupManager.scheduleHandleTxnCompletion(producerId, offsetsPartitions.map(_.partition).toSet, isCommit) @@ -1138,15 +1139,20 @@ private[group] class GroupCoordinator( } } - def handleDescribeGroup(groupId: String): (Errors, GroupSummary) = { + def handleDescribeGroup(groupId: String, apiVersion: Short): (Errors, Option[String], GroupSummary) = { validateGroupStatus(groupId, ApiKeys.DESCRIBE_GROUPS) match { - case Some(error) => (error, GroupCoordinator.EmptyGroup) + case Some(error) => (error, None, GroupCoordinator.EmptyGroup) case None => groupManager.getGroup(groupId) match { - case None => (Errors.NONE, GroupCoordinator.DeadGroup) + case None => + if (apiVersion >= 6) { + (Errors.GROUP_ID_NOT_FOUND, Some(s"Group $groupId not found."), GroupCoordinator.DeadGroup) + } else { + (Errors.NONE, None, GroupCoordinator.DeadGroup) + } case Some(group) => group.inLock { - (Errors.NONE, group.summary) + (Errors.NONE, None, group.summary) } } } @@ -1378,68 +1384,70 @@ private[group] class GroupCoordinator( info(s"Static member which joins during Stable stage and doesn't affect selectProtocol will not trigger rebalance.") val groupAssignment: Map[String, Array[Byte]] = group.allMemberMetadata.map(member => member.memberId -> member.assignment).toMap groupManager.storeGroup(group, groupAssignment, error => { - if (error != Errors.NONE) { - warn(s"Failed to persist metadata for group 
${group.groupId}: ${error.message}") - - // Failed to persist member.id of the given static member, revert the update of the static member in the group. - group.updateMember(knownStaticMember, oldProtocols, oldRebalanceTimeoutMs, oldSessionTimeoutMs, null) - val oldMember = group.replaceStaticMember(groupInstanceId, newMemberId, oldMemberId) - completeAndScheduleNextHeartbeatExpiration(group, oldMember) - responseCallback(JoinGroupResult( - List.empty, - memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, - generationId = group.generationId, - protocolType = group.protocolType, - protocolName = group.protocolName, - leaderId = currentLeader, - skipAssignment = false, - error = error - )) - } else if (supportSkippingAssignment) { - // Starting from version 9 of the JoinGroup API, static members are able to - // skip running the assignor based on the `SkipAssignment` field. We leverage - // this to tell the leader that it is the leader of the group but by skipping - // running the assignor while the group is in stable state. - // Notes: - // 1) This allows the leader to continue monitoring metadata changes for the - // group. Note that any metadata changes happening while the static leader is - // down won't be noticed. - // 2) The assignors are not idempotent nor free from side effects. This is why - // we skip entirely the assignment step as it could generate a different group - // assignment which would be ignored by the group coordinator because the group - // is the stable state. - val isLeader = group.isLeader(newMemberId) - group.maybeInvokeJoinCallback(member, JoinGroupResult( - members = if (isLeader) { - group.currentMemberMetadata - } else { - List.empty - }, - memberId = newMemberId, - generationId = group.generationId, - protocolType = group.protocolType, - protocolName = group.protocolName, - leaderId = group.leaderOrNull, - skipAssignment = isLeader, - error = Errors.NONE - )) - } else { - // Prior to version 9 of the JoinGroup API, we wanted to avoid current leader - // performing trivial assignment while the group is in stable stage, because - // the new assignment in leader's next sync call won't be broadcast by a stable group. - // This could be guaranteed by always returning the old leader id so that the current - // leader won't assume itself as a leader based on the returned message, since the new - // member.id won't match returned leader id, therefore no assignment will be performed. - group.maybeInvokeJoinCallback(member, JoinGroupResult( - members = List.empty, - memberId = newMemberId, - generationId = group.generationId, - protocolType = group.protocolType, - protocolName = group.protocolName, - leaderId = currentLeader, - skipAssignment = false, - error = Errors.NONE - )) + group.inLock { + if (error != Errors.NONE) { + warn(s"Failed to persist metadata for group ${group.groupId}: ${error.message}") + + // Failed to persist member.id of the given static member, revert the update of the static member in the group. 
+ group.updateMember(knownStaticMember, oldProtocols, oldRebalanceTimeoutMs, oldSessionTimeoutMs, null) + val oldMember = group.replaceStaticMember(groupInstanceId, newMemberId, oldMemberId) + completeAndScheduleNextHeartbeatExpiration(group, oldMember) + responseCallback(JoinGroupResult( + List.empty, + memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = currentLeader, + skipAssignment = false, + error = error + )) + } else if (supportSkippingAssignment) { + // Starting from version 9 of the JoinGroup API, static members are able to + // skip running the assignor based on the `SkipAssignment` field. We leverage + // this to tell the leader that it is the leader of the group but by skipping + // running the assignor while the group is in stable state. + // Notes: + // 1) This allows the leader to continue monitoring metadata changes for the + // group. Note that any metadata changes happening while the static leader is + // down won't be noticed. + // 2) The assignors are not idempotent nor free from side effects. This is why + // we skip entirely the assignment step as it could generate a different group + // assignment which would be ignored by the group coordinator because the group + // is the stable state. + val isLeader = group.isLeader(newMemberId) + group.maybeInvokeJoinCallback(member, JoinGroupResult( + members = if (isLeader) { + group.currentMemberMetadata + } else { + List.empty + }, + memberId = newMemberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = group.leaderOrNull, + skipAssignment = isLeader, + error = Errors.NONE + )) + } else { + // Prior to version 9 of the JoinGroup API, we wanted to avoid current leader + // performing trivial assignment while the group is in stable stage, because + // the new assignment in leader's next sync call won't be broadcast by a stable group. + // This could be guaranteed by always returning the old leader id so that the current + // leader won't assume itself as a leader based on the returned message, since the new + // member.id won't match returned leader id, therefore no assignment will be performed. 
+ group.maybeInvokeJoinCallback(member, JoinGroupResult( + members = List.empty, + memberId = newMemberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = currentLeader, + skipAssignment = false, + error = Errors.NONE + )) + } } }, requestLocal) } else { @@ -1796,8 +1804,7 @@ object GroupCoordinator { groupMaxSize = config.groupCoordinatorConfig.classicGroupMaxSize, groupInitialRebalanceDelayMs = config.groupCoordinatorConfig.classicGroupInitialRebalanceDelayMs) - val groupMetadataManager = new GroupMetadataManager(config.brokerId, config.interBrokerProtocolVersion, - offsetConfig, replicaManager, time, metrics) + val groupMetadataManager = new GroupMetadataManager(config.brokerId, offsetConfig, replicaManager, time, metrics) new GroupCoordinator(config.brokerId, groupConfig, offsetConfig, groupMetadataManager, heartbeatPurgatory, rebalancePurgatory, time, metrics) } diff --git a/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala b/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala index 970d283953e5e..bc775b5f38060 100644 --- a/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala +++ b/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala @@ -249,10 +249,11 @@ private[group] class GroupCoordinatorAdapter( ): CompletableFuture[util.List[DescribeGroupsResponseData.DescribedGroup]] = { def describeGroup(groupId: String): DescribeGroupsResponseData.DescribedGroup = { - val (error, summary) = coordinator.handleDescribeGroup(groupId) + val (error, errorMessage, summary) = coordinator.handleDescribeGroup(groupId, context.apiVersion()) new DescribeGroupsResponseData.DescribedGroup() .setErrorCode(error.code) + .setErrorMessage(errorMessage.orNull) .setGroupId(groupId) .setGroupState(summary.state) .setProtocolType(summary.protocolType) @@ -413,7 +414,6 @@ private[group] class GroupCoordinatorAdapter( partition.committedOffset, partition.committedLeaderEpoch, partition.committedMetadata, - partition.commitTimestamp, expireTimeMs ) } @@ -472,7 +472,6 @@ private[group] class GroupCoordinatorAdapter( partition.committedOffset, partition.committedLeaderEpoch, partition.committedMetadata, - OffsetCommitRequest.DEFAULT_TIMESTAMP, // means that currentTimeMs is used. 
None ) } @@ -500,7 +499,6 @@ private[group] class GroupCoordinatorAdapter( offset: Long, leaderEpoch: Int, metadata: String, - commitTimestamp: Long, expireTimestamp: Option[Long] ): OffsetAndMetadata = { new OffsetAndMetadata( @@ -513,10 +511,7 @@ private[group] class GroupCoordinatorAdapter( case null => OffsetAndMetadata.NO_METADATA case metadata => metadata }, - commitTimestamp match { - case OffsetCommitRequest.DEFAULT_TIMESTAMP => currentTimeMs - case customTimestamp => customTimestamp - }, + currentTimeMs, expireTimestamp match { case Some(timestamp) => OptionalLong.of(timestamp) case None => OptionalLong.empty() @@ -586,12 +581,16 @@ private[group] class GroupCoordinatorAdapter( producerId: Long, partitions: java.lang.Iterable[TopicPartition], transactionResult: TransactionResult - ): Unit = { - coordinator.scheduleHandleTxnCompletion( - producerId, - partitions.asScala, - transactionResult - ) + ): CompletableFuture[Void] = { + try { + coordinator.scheduleHandleTxnCompletion( + producerId, + partitions.asScala, + transactionResult + ) + } catch { + case e: Throwable => FutureUtils.failedFuture(e) + } } override def onPartitionsDeleted( diff --git a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala index 8c30efd81f8f7..5b0e262b1b0aa 100644 --- a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala +++ b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala @@ -21,14 +21,16 @@ import java.nio.ByteBuffer import java.util.{Optional, OptionalInt, OptionalLong} import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.ReentrantLock -import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} import java.util.function.Supplier import com.yammer.metrics.core.Gauge +import kafka.cluster.Partition import kafka.coordinator.group.GroupMetadataManager.maybeConvertOffsetCommitError import kafka.server.ReplicaManager import kafka.utils.CoreUtils.inLock import kafka.utils._ import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.errors.UnsupportedVersionException import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.metrics.{Metrics, Sensor} import org.apache.kafka.common.metrics.stats.{Avg, Max, Meter} @@ -40,9 +42,8 @@ import org.apache.kafka.common.requests.{OffsetCommitRequest, OffsetFetchRespons import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{TopicIdPartition, TopicPartition} import org.apache.kafka.coordinator.group.{OffsetAndMetadata, OffsetConfig} -import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData} -import org.apache.kafka.server.common.{MetadataVersion, RequestLocal} -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_1_IV0, IBP_2_1_IV0, IBP_2_1_IV1, IBP_2_3_IV0} +import org.apache.kafka.coordinator.group.generated.{CoordinatorRecordType, GroupMetadataValue, LegacyOffsetCommitKey, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData} +import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.KafkaScheduler @@ -53,7 +54,6 @@ import scala.collection.mutable.ArrayBuffer import scala.jdk.CollectionConverters._ class 
GroupMetadataManager(brokerId: Int, - interBrokerProtocolVersion: MetadataVersion, config: OffsetConfig, val replicaManager: ReplicaManager, time: Time, @@ -239,79 +239,78 @@ class GroupMetadataManager(brokerId: Int, groupAssignment: Map[String, Array[Byte]], responseCallback: Errors => Unit, requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { - getMagic(partitionFor(group.groupId)) match { - case Some(magicValue) => - // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. - val timestampType = TimestampType.CREATE_TIME - val timestamp = time.milliseconds() - val key = GroupMetadataManager.groupMetadataKey(group.groupId) - val value = GroupMetadataManager.groupMetadataValue(group, groupAssignment, interBrokerProtocolVersion) - - val records = { - val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, compression.`type`(), - Seq(new SimpleRecord(timestamp, key, value)).asJava)) - val builder = MemoryRecords.builder(buffer, magicValue, compression, timestampType, 0L) - builder.append(timestamp, key, value) - builder.build() - } + if (onlinePartition(partitionFor(group.groupId)).isDefined) { + // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. + val timestampType = TimestampType.CREATE_TIME + val timestamp = time.milliseconds() + val key = GroupMetadataManager.groupMetadataKey(group.groupId) + val value = GroupMetadataManager.groupMetadataValue(group, groupAssignment) + + val records = { + val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(RecordBatch.CURRENT_MAGIC_VALUE, compression.`type`(), + Seq(new SimpleRecord(timestamp, key, value)).asJava)) + val builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, timestampType, 0L) + builder.append(timestamp, key, value) + builder.build() + } - val groupMetadataPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) - val groupMetadataRecords = Map(groupMetadataPartition -> records) - val generationId = group.generationId + val groupMetadataPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) + val groupMetadataRecords = Map(groupMetadataPartition -> records) + val generationId = group.generationId - // set the callback function to insert the created group into cache after log append completed - def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { - // the append response should only contain the topics partition - if (responseStatus.size != 1 || !responseStatus.contains(groupMetadataPartition)) - throw new IllegalStateException("Append status %s should only have one partition %s" - .format(responseStatus, groupMetadataPartition)) + // set the callback function to insert the created group into cache after log append completed + def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { + // the append response should only contain the topics partition + if (responseStatus.size != 1 || !responseStatus.contains(groupMetadataPartition)) + throw new IllegalStateException("Append status %s should only have one partition %s" + .format(responseStatus, groupMetadataPartition)) - // construct the error status in the propagated assignment response in the cache - val status = responseStatus(groupMetadataPartition) + // construct the error status in the propagated assignment response in the cache + val 
status = responseStatus(groupMetadataPartition) - val responseError = if (status.error == Errors.NONE) { - Errors.NONE - } else { - debug(s"Metadata from group ${group.groupId} with generation $generationId failed when appending to log " + - s"due to ${status.error.exceptionName}") + val responseError = if (status.error == Errors.NONE) { + Errors.NONE + } else { + debug(s"Metadata from group ${group.groupId} with generation $generationId failed when appending to log " + + s"due to ${status.error.exceptionName}") - // transform the log append error code to the corresponding the commit status error code - status.error match { - case Errors.UNKNOWN_TOPIC_OR_PARTITION - | Errors.NOT_ENOUGH_REPLICAS - | Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND => - Errors.COORDINATOR_NOT_AVAILABLE + // transform the log append error code to the corresponding the commit status error code + status.error match { + case Errors.UNKNOWN_TOPIC_OR_PARTITION + | Errors.NOT_ENOUGH_REPLICAS + | Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND => + Errors.COORDINATOR_NOT_AVAILABLE - case Errors.NOT_LEADER_OR_FOLLOWER - | Errors.KAFKA_STORAGE_ERROR => - Errors.NOT_COORDINATOR + case Errors.NOT_LEADER_OR_FOLLOWER + | Errors.KAFKA_STORAGE_ERROR => + Errors.NOT_COORDINATOR - case Errors.REQUEST_TIMED_OUT => - Errors.REBALANCE_IN_PROGRESS + case Errors.REQUEST_TIMED_OUT => + Errors.REBALANCE_IN_PROGRESS - case Errors.MESSAGE_TOO_LARGE - | Errors.RECORD_LIST_TOO_LARGE - | Errors.INVALID_FETCH_SIZE => + case Errors.MESSAGE_TOO_LARGE + | Errors.RECORD_LIST_TOO_LARGE + | Errors.INVALID_FETCH_SIZE => - error(s"Appending metadata message for group ${group.groupId} generation $generationId failed due to " + - s"${status.error.exceptionName}, returning UNKNOWN error code to the client") + error(s"Appending metadata message for group ${group.groupId} generation $generationId failed due to " + + s"${status.error.exceptionName}, returning UNKNOWN error code to the client") - Errors.UNKNOWN_SERVER_ERROR + Errors.UNKNOWN_SERVER_ERROR - case other => - error(s"Appending metadata message for group ${group.groupId} generation $generationId failed " + - s"due to unexpected error: ${status.error.exceptionName}") + case other => + error(s"Appending metadata message for group ${group.groupId} generation $generationId failed " + + s"due to unexpected error: ${status.error.exceptionName}") - other - } + other } - - responseCallback(responseError) } - appendForGroup(group, groupMetadataRecords, requestLocal, putCacheCallback) - case None => - responseCallback(Errors.NOT_COORDINATOR) + responseCallback(responseError) + } + + appendForGroup(group, groupMetadataRecords, requestLocal, putCacheCallback) + } else { + responseCallback(Errors.NOT_COORDINATOR) } } @@ -350,7 +349,7 @@ class GroupMetadataManager(brokerId: Int, val records = filteredOffsetMetadata.map { case (topicIdPartition, offsetAndMetadata) => val key = GroupMetadataManager.offsetCommitKey(groupId, topicIdPartition.topicPartition) - val value = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, interBrokerProtocolVersion) + val value = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) new SimpleRecord(timestamp, key, value) } val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, compression.`type`(), records.asJava)) @@ -463,8 +462,7 @@ class GroupMetadataManager(brokerId: Int, return } - val magicOpt = getMagic(partitionFor(group.groupId)) - if (magicOpt.isEmpty) { + if (onlinePartition(partitionFor(group.groupId)).isEmpty) { val commitStatus = 
offsetMetadata.map { case (topicIdPartition, _) => (topicIdPartition, Errors.NOT_COORDINATOR) } @@ -473,7 +471,7 @@ class GroupMetadataManager(brokerId: Int, } val isTxnOffsetCommit = producerId != RecordBatch.NO_PRODUCER_ID - val records = generateOffsetRecords(magicOpt.get, isTxnOffsetCommit, group.groupId, offsetTopicPartition, filteredOffsetMetadata, producerId, producerEpoch) + val records = generateOffsetRecords(RecordBatch.CURRENT_MAGIC_VALUE, isTxnOffsetCommit, group.groupId, offsetTopicPartition, filteredOffsetMetadata, producerId, producerEpoch) val putCacheCallback = createPutCacheCallback(isTxnOffsetCommit, group, consumerId, offsetMetadata, filteredOffsetMetadata, responseCallback, producerId, records) val verificationGuards = verificationGuard.map(guard => offsetTopicPartition -> guard).toMap @@ -867,56 +865,55 @@ class GroupMetadataManager(brokerId: Int, (removedOffsets, group.is(Dead), group.generationId) } - val offsetsPartition = partitionFor(groupId) - val appendPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, offsetsPartition) - getMagic(offsetsPartition) match { - case Some(magicValue) => - // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. - val timestampType = TimestampType.CREATE_TIME - val timestamp = time.milliseconds() - - replicaManager.onlinePartition(appendPartition).foreach { partition => - val tombstones = ArrayBuffer.empty[SimpleRecord] - removedOffsets.foreachEntry { (topicPartition, offsetAndMetadata) => - trace(s"Removing expired/deleted offset and metadata for $groupId, $topicPartition: $offsetAndMetadata") - val commitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) - tombstones += new SimpleRecord(timestamp, commitKey, null) - } - trace(s"Marked ${removedOffsets.size} offsets in $appendPartition for deletion.") - - // We avoid writing the tombstone when the generationId is 0, since this group is only using - // Kafka for offset storage. - if (groupIsDead && groupMetadataCache.remove(groupId, group) && generation > 0) { - // Append the tombstone messages to the partition. It is okay if the replicas don't receive these (say, - // if we crash or leaders move) since the new leaders will still expire the consumers with heartbeat and - // retry removing this group. - val groupMetadataKey = GroupMetadataManager.groupMetadataKey(group.groupId) - tombstones += new SimpleRecord(timestamp, groupMetadataKey, null) - trace(s"Group $groupId removed from the metadata cache and marked for deletion in $appendPartition.") - } + val offsetsPartition = partitionFor(groupId) + val appendPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, offsetsPartition) + onlinePartition(offsetsPartition) match { + case Some(partition) => + // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. 
+ val timestampType = TimestampType.CREATE_TIME + val timestamp = time.milliseconds() + + val tombstones = ArrayBuffer.empty[SimpleRecord] + removedOffsets.foreachEntry { (topicPartition, offsetAndMetadata) => + trace(s"Removing expired/deleted offset and metadata for $groupId, $topicPartition: $offsetAndMetadata") + val commitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) + tombstones += new SimpleRecord(timestamp, commitKey, null) + } + trace(s"Marked ${removedOffsets.size} offsets in $appendPartition for deletion.") + + // We avoid writing the tombstone when the generationId is 0, since this group is only using + // Kafka for offset storage. + if (groupIsDead && groupMetadataCache.remove(groupId, group) && generation > 0) { + // Append the tombstone messages to the partition. It is okay if the replicas don't receive these (say, + // if we crash or leaders move) since the new leaders will still expire the consumers with heartbeat and + // retry removing this group. + val groupMetadataKey = GroupMetadataManager.groupMetadataKey(group.groupId) + tombstones += new SimpleRecord(timestamp, groupMetadataKey, null) + trace(s"Group $groupId removed from the metadata cache and marked for deletion in $appendPartition.") + } - if (tombstones.nonEmpty) { - try { - // do not need to require acks since even if the tombstone is lost, - // it will be appended again in the next purge cycle - val records = MemoryRecords.withRecords(magicValue, 0L, compression, timestampType, tombstones.toArray: _*) - partition.appendRecordsToLeader(records, origin = AppendOrigin.COORDINATOR, requiredAcks = 0, - requestLocal = requestLocal) - - offsetsRemoved += removedOffsets.size - trace(s"Successfully appended ${tombstones.size} tombstones to $appendPartition for expired/deleted " + - s"offsets and/or metadata for group $groupId") - } catch { - case t: Throwable => - error(s"Failed to append ${tombstones.size} tombstones to $appendPartition for expired/deleted " + - s"offsets and/or metadata for group $groupId.", t) - // ignore and continue - } + if (tombstones.nonEmpty) { + try { + // do not need to require acks since even if the tombstone is lost, + // it will be appended again in the next purge cycle + val records = MemoryRecords.withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compression, timestampType, tombstones.toArray: _*) + partition.appendRecordsToLeader(records, origin = AppendOrigin.COORDINATOR, requiredAcks = 0, + requestLocal = requestLocal) + + offsetsRemoved += removedOffsets.size + trace(s"Successfully appended ${tombstones.size} tombstones to $appendPartition for expired/deleted " + + s"offsets and/or metadata for group $groupId") + } catch { + case t: Throwable => + error(s"Failed to append ${tombstones.size} tombstones to $appendPartition for expired/deleted " + + s"offsets and/or metadata for group $groupId.", t) + // ignore and continue } } - case None => - info(s"BrokerId $brokerId is no longer a coordinator for the group $groupId. Proceeding cleanup for other alive groups") + + case None => + info(s"BrokerId $brokerId is no longer a coordinator for the group $groupId. Proceeding cleanup for other alive groups") } } @@ -931,9 +928,17 @@ class GroupMetadataManager(brokerId: Int, * more group metadata locks to handle transaction completion, this operation is scheduled on * the scheduler thread to avoid deadlocks. 
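Two changes in this patch follow the same shape: scheduleHandleTxnCompletion now returns a CompletableFuture[Void] that is completed from inside the scheduled task, and the new CoordinatorPartitionWriter.deleteRecords completes a future from ReplicaManager's response callback. The sketch below shows that adapter pattern in isolation, with a plain java.util.concurrent.Executor standing in for the Kafka scheduler; all names are illustrative, not the actual broker wiring.

import java.util.concurrent.{CompletableFuture, Executor, Executors}

object FutureAdapterSketch {
  // Run `task` asynchronously and surface its outcome as a CompletableFuture[Void]:
  // a normal return completes the future, a thrown exception completes it exceptionally.
  def runAsFuture(executor: Executor)(task: () => Unit): CompletableFuture[Void] = {
    val future = new CompletableFuture[Void]()
    executor.execute { () =>
      try {
        task()                       // e.g. handleTxnCompletion(producerId, completedPartitions, isCommit)
        future.complete(null)
      } catch {
        case e: Throwable => future.completeExceptionally(e)
      }
    }
    future
  }

  def main(args: Array[String]): Unit = {
    val executor = Executors.newSingleThreadExecutor()
    val done = runAsFuture(executor)(() => println("transaction markers applied"))
    done.join()                      // callers can now chain on, or wait for, the scheduled work
    executor.shutdown()
  }
}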
*/ - def scheduleHandleTxnCompletion(producerId: Long, completedPartitions: Set[Int], isCommit: Boolean): Unit = { - scheduler.scheduleOnce(s"handleTxnCompletion-$producerId", () => - handleTxnCompletion(producerId, completedPartitions, isCommit)) + def scheduleHandleTxnCompletion(producerId: Long, completedPartitions: Set[Int], isCommit: Boolean): CompletableFuture[Void] = { + val future = new CompletableFuture[Void]() + scheduler.scheduleOnce(s"handleTxnCompletion-$producerId", () => { + try { + handleTxnCompletion(producerId, completedPartitions, isCommit) + future.complete(null) + } catch { + case e: Throwable => future.completeExceptionally(e) + } + }) + future } private[group] def handleTxnCompletion(producerId: Long, completedPartitions: Set[Int], isCommit: Boolean): Unit = { @@ -993,14 +998,8 @@ class GroupMetadataManager(brokerId: Int, // TODO: clear the caches } - /** - * Check if the replica is local and return the message format version - * - * @param partition Partition of GroupMetadataTopic - * @return Some(MessageFormatVersion) if replica is local, None otherwise - */ - private def getMagic(partition: Int): Option[Byte] = - replicaManager.getMagic(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition)) + private def onlinePartition(partition: Int): Option[Partition] = + replicaManager.onlinePartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition)) /** * Add a partition to the owned partition set. @@ -1061,7 +1060,7 @@ object GroupMetadataManager { * @return key for offset commit message */ def offsetCommitKey(groupId: String, topicPartition: TopicPartition): Array[Byte] = { - MessageUtil.toVersionPrefixedBytes(OffsetCommitKey.HIGHEST_SUPPORTED_VERSION, + MessageUtil.toCoordinatorTypePrefixedBytes(CoordinatorRecordType.OFFSET_COMMIT.id(), new OffsetCommitKey() .setGroup(groupId) .setTopic(topicPartition.topic) @@ -1075,7 +1074,7 @@ object GroupMetadataManager { * @return key bytes for group metadata message */ def groupMetadataKey(groupId: String): Array[Byte] = { - MessageUtil.toVersionPrefixedBytes(GroupMetadataKeyData.HIGHEST_SUPPORTED_VERSION, + MessageUtil.toCoordinatorTypePrefixedBytes(CoordinatorRecordType.GROUP_METADATA.id(), new GroupMetadataKeyData() .setGroup(groupId)) } @@ -1084,17 +1083,15 @@ object GroupMetadataManager { * Generates the payload for offset commit message from given offset and metadata * * @param offsetAndMetadata consumer's current offset and metadata - * @param metadataVersion the api version + * @param maxVersion the highest version allowed, we may use a lower version for compatibility reasons + * we serialize with the highest supported non-flexible version until a tagged field is introduced + * or the version is bumped. * @return payload for offset commit message */ - def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata, - metadataVersion: MetadataVersion): Array[Byte] = { + def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata, maxVersion: Short = 3): Array[Byte] = { val version = - if (metadataVersion.isLessThan(IBP_2_1_IV0) || offsetAndMetadata.expireTimestampMs.isPresent) 1.toShort - else if (metadataVersion.isLessThan(IBP_2_1_IV1)) 2.toShort - // Serialize with the highest supported non-flexible version - // until a tagged field is introduced or the version is bumped. 
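Since scheduleHandleTxnCompletion above now returns a CompletableFuture[Void], callers can observe when the deferred handleTxnCompletion work finishes or fails. A sketch of that usage, assuming a caller that mixes in kafka.utils.Logging (the variable names are illustrative):

    groupMetadataManager.scheduleHandleTxnCompletion(producerId, completedPartitions, isCommit = true)
      .whenComplete { (_, error) =>
        if (error != null)
          warn(s"Deferred transaction completion for producer $producerId failed", error)
        else
          trace(s"Deferred transaction completion for producer $producerId applied")
      }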
- else 3.toShort + if (offsetAndMetadata.expireTimestampMs.isPresent) Math.min(1, maxVersion).toShort + else maxVersion MessageUtil.toVersionPrefixedBytes(version, new OffsetCommitValue() .setOffset(offsetAndMetadata.committedOffset) .setMetadata(offsetAndMetadata.metadata) @@ -1111,21 +1108,14 @@ object GroupMetadataManager { * * @param groupMetadata current group metadata * @param assignment the assignment for the rebalancing generation - * @param metadataVersion the api version + * @param version the version to serialize it with, the default is `3`, the highest supported non-flexible version + * until a tagged field is introduced or the version is bumped. The default should always be used + * outside of tests * @return payload for offset commit message */ def groupMetadataValue(groupMetadata: GroupMetadata, assignment: Map[String, Array[Byte]], - metadataVersion: MetadataVersion): Array[Byte] = { - - val version = - if (metadataVersion.isLessThan(IBP_0_10_1_IV0)) 0.toShort - else if (metadataVersion.isLessThan(IBP_2_1_IV0)) 1.toShort - else if (metadataVersion.isLessThan(IBP_2_3_IV0)) 2.toShort - // Serialize with the highest supported non-flexible version - // until a tagged field is introduced or the version is bumped. - else 3.toShort - + version: Short = 3): Array[Byte] = { MessageUtil.toVersionPrefixedBytes(version, new GroupMetadataValue() .setProtocolType(groupMetadata.protocolType.getOrElse("")) .setGeneration(groupMetadata.generationId) @@ -1156,16 +1146,28 @@ object GroupMetadataManager { */ def readMessageKey(buffer: ByteBuffer): BaseKey = { val version = buffer.getShort - if (version >= OffsetCommitKey.LOWEST_SUPPORTED_VERSION && version <= OffsetCommitKey.HIGHEST_SUPPORTED_VERSION) { - // version 0 and 1 refer to offset - val key = new OffsetCommitKey(new ByteBufferAccessor(buffer), version) - OffsetKey(version, GroupTopicPartition(key.group, new TopicPartition(key.topic, key.partition))) - } else if (version >= GroupMetadataKeyData.LOWEST_SUPPORTED_VERSION && version <= GroupMetadataKeyData.HIGHEST_SUPPORTED_VERSION) { - // version 2 refers to group metadata - val key = new GroupMetadataKeyData(new ByteBufferAccessor(buffer), version) - GroupMetadataKey(version, key.group) - } else { - UnknownKey(version) + try { + CoordinatorRecordType.fromId(version) match { + case CoordinatorRecordType.LEGACY_OFFSET_COMMIT => + // version 0 refers to the legacy offset commit. + val key = new LegacyOffsetCommitKey(new ByteBufferAccessor(buffer), 0.toShort) + OffsetKey(version, GroupTopicPartition(key.group, new TopicPartition(key.topic, key.partition))) + + case CoordinatorRecordType.OFFSET_COMMIT => + // version 1 refers to offset commit. + val key = new OffsetCommitKey(new ByteBufferAccessor(buffer), 0.toShort) + OffsetKey(version, GroupTopicPartition(key.group, new TopicPartition(key.topic, key.partition))) + + case CoordinatorRecordType.GROUP_METADATA => + // version 2 refers to group metadata. 
+ val key = new GroupMetadataKeyData(new ByteBufferAccessor(buffer), 0.toShort) + GroupMetadataKey(version, key.group) + + case _ => + UnknownKey(version) + } + } catch { + case _: UnsupportedVersionException => UnknownKey(version) } } diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala index c0126d583dc98..d6ca11add8614 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala @@ -16,8 +16,6 @@ */ package kafka.coordinator.transaction -import java.util.Properties -import java.util.concurrent.atomic.AtomicBoolean import kafka.server.{KafkaConfig, MetadataCache, ReplicaManager} import kafka.utils.Logging import org.apache.kafka.common.TopicPartition @@ -33,6 +31,8 @@ import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.server.common.{RequestLocal, TransactionVersion} import org.apache.kafka.server.util.Scheduler +import java.util.Properties +import java.util.concurrent.atomic.AtomicBoolean import scala.jdk.CollectionConverters._ object TransactionCoordinator { @@ -133,7 +133,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, try { val createdMetadata = new TransactionMetadata(transactionalId = transactionalId, producerId = producerIdManager.generateProducerId(), - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -221,7 +221,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // could be a retry after a valid epoch bump that the producer never received the response for txnMetadata.producerEpoch == RecordBatch.NO_PRODUCER_EPOCH || producerIdAndEpoch.producerId == txnMetadata.producerId || - (producerIdAndEpoch.producerId == txnMetadata.previousProducerId && TransactionMetadata.isEpochExhausted(producerIdAndEpoch.epoch)) + (producerIdAndEpoch.producerId == txnMetadata.prevProducerId && TransactionMetadata.isEpochExhausted(producerIdAndEpoch.epoch)) } if (txnMetadata.pendingTransitionInProgress) { @@ -391,6 +391,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, producerEpoch: Short, partitions: collection.Set[TopicPartition], responseCallback: AddPartitionsCallback, + clientTransactionVersion: TransactionVersion, requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { if (transactionalId == null || transactionalId.isEmpty) { debug(s"Returning ${Errors.INVALID_REQUEST} error code to client for $transactionalId's AddPartitions request") @@ -420,7 +421,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // this is an optimization: if the partitions are already in the metadata reply OK immediately Left(Errors.NONE) } else { - Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions.toSet, time.milliseconds())) + Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions.toSet, time.milliseconds(), clientTransactionVersion)) } } } @@ -476,7 +477,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, private def logInvalidStateTransitionAndReturnError(transactionalId: String, transactionState: TransactionState, transactionResult: TransactionResult) = { - debug(s"TransactionalId: $transactionalId's state is $transactionState, but received transaction " + 
+ warn(s"TransactionalId: $transactionalId's state is $transactionState, but received transaction " + s"marker result to send: $transactionResult") Left(Errors.INVALID_TXN_STATE) } @@ -498,6 +499,232 @@ class TransactionCoordinator(txnConfig: TransactionConfig, requestLocal) } + /** + * Handling the endTxn request under the Transaction Version 1. + * + * @param transactionalId The transaction ID from the endTxn request + * @param producerId The producer ID from the endTxn request + * @param producerEpoch The producer epoch from the endTxn request + * @param txnMarkerResult To commit or abort the transaction + * @param isFromClient Is the request from client + * @param responseCallback The response callback + * @param requestLocal The request local object + */ + private def endTransactionWithTV1(transactionalId: String, + producerId: Long, + producerEpoch: Short, + txnMarkerResult: TransactionResult, + isFromClient: Boolean, + responseCallback: EndTxnCallback, + requestLocal: RequestLocal): Unit = { + var isEpochFence = false + if (transactionalId == null || transactionalId.isEmpty) + responseCallback(Errors.INVALID_REQUEST, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + else { + val preAppendResult: ApiResult[(Int, TxnTransitMetadata)] = txnManager.getTransactionState(transactionalId).flatMap { + case None => + Left(Errors.INVALID_PRODUCER_ID_MAPPING) + + case Some(epochAndTxnMetadata) => + val txnMetadata = epochAndTxnMetadata.transactionMetadata + val coordinatorEpoch = epochAndTxnMetadata.coordinatorEpoch + + txnMetadata.inLock { + if (txnMetadata.producerId != producerId) + Left(Errors.INVALID_PRODUCER_ID_MAPPING) + // Strict equality is enforced on the client side requests, as they shouldn't bump the producer epoch. + else if ((isFromClient && producerEpoch != txnMetadata.producerEpoch) || producerEpoch < txnMetadata.producerEpoch) + Left(Errors.PRODUCER_FENCED) + else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) + Left(Errors.CONCURRENT_TRANSACTIONS) + else txnMetadata.state match { + case Ongoing => + val nextState = if (txnMarkerResult == TransactionResult.COMMIT) + PrepareCommit + else + PrepareAbort + + if (nextState == PrepareAbort && txnMetadata.pendingState.contains(PrepareEpochFence)) { + // We should clear the pending state to make way for the transition to PrepareAbort and also bump + // the epoch in the transaction metadata we are about to append. 
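For orientation on the coordinator-type-prefixed keys introduced earlier in this patch (offsetCommitKey, groupMetadataKey and the reworked readMessageKey), here is a round-trip sketch of how a reader of the __consumer_offsets log would classify a key; topic and group names are illustrative and the case classes are the BaseKey implementations defined in GroupMetadataManager:

    import java.nio.ByteBuffer
    import org.apache.kafka.common.TopicPartition

    val keyBytes = GroupMetadataManager.offsetCommitKey("example-group", new TopicPartition("payments", 0))
    GroupMetadataManager.readMessageKey(ByteBuffer.wrap(keyBytes)) match {
      case OffsetKey(typeId, groupTopicPartition) => println(s"offset commit key (type id $typeId) for $groupTopicPartition")
      case GroupMetadataKey(_, groupId)           => println(s"group metadata key for group $groupId")
      case UnknownKey(typeId)                     => println(s"unrecognised record type id $typeId")
    }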
+ isEpochFence = true + txnMetadata.pendingState = None + txnMetadata.producerEpoch = producerEpoch + txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH + } + + Right(coordinatorEpoch, txnMetadata.prepareAbortOrCommit(nextState, TransactionVersion.fromFeatureLevel(0), RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)) + case CompleteCommit => + if (txnMarkerResult == TransactionResult.COMMIT) + Left(Errors.NONE) + else + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case CompleteAbort => + if (txnMarkerResult == TransactionResult.ABORT) + Left(Errors.NONE) + else + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case PrepareCommit => + if (txnMarkerResult == TransactionResult.COMMIT) + Left(Errors.CONCURRENT_TRANSACTIONS) + else + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case PrepareAbort => + if (txnMarkerResult == TransactionResult.ABORT) + Left(Errors.CONCURRENT_TRANSACTIONS) + else + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case Empty => + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case Dead | PrepareEpochFence => + val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + + s"This is illegal as we should never have transitioned to this state." + fatal(errorMsg) + throw new IllegalStateException(errorMsg) + } + } + } + + preAppendResult match { + case Left(err) => + debug(s"Aborting append of $txnMarkerResult to transaction log with coordinator and returning $err error to client for $transactionalId's EndTransaction request") + responseCallback(err, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + + case Right((coordinatorEpoch, newMetadata)) => + def sendTxnMarkersCallback(error: Errors): Unit = { + if (error == Errors.NONE) { + val preSendResult: ApiResult[(TransactionMetadata, TxnTransitMetadata)] = txnManager.getTransactionState(transactionalId).flatMap { + case None => + val errorMsg = s"The coordinator still owns the transaction partition for $transactionalId, but there is " + + s"no metadata in the cache; this is not expected" + fatal(errorMsg) + throw new IllegalStateException(errorMsg) + + case Some(epochAndMetadata) => + if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { + val txnMetadata = epochAndMetadata.transactionMetadata + txnMetadata.inLock { + if (txnMetadata.producerId != producerId) + Left(Errors.INVALID_PRODUCER_ID_MAPPING) + else if (txnMetadata.producerEpoch != producerEpoch) + Left(Errors.PRODUCER_FENCED) + else if (txnMetadata.pendingTransitionInProgress) + Left(Errors.CONCURRENT_TRANSACTIONS) + else txnMetadata.state match { + case Empty| Ongoing | CompleteCommit | CompleteAbort => + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + case PrepareCommit => + if (txnMarkerResult != TransactionResult.COMMIT) + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + else + Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case PrepareAbort => + if (txnMarkerResult != TransactionResult.ABORT) + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + else + Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case Dead | PrepareEpochFence => + val errorMsg = s"Found transactionalId 
$transactionalId with state ${txnMetadata.state}. " + + s"This is illegal as we should never have transitioned to this state." + fatal(errorMsg) + throw new IllegalStateException(errorMsg) + } + } + } else { + debug(s"The transaction coordinator epoch has changed to ${epochAndMetadata.coordinatorEpoch} after $txnMarkerResult was " + + s"successfully appended to the log for $transactionalId with old epoch $coordinatorEpoch") + Left(Errors.NOT_COORDINATOR) + } + } + + preSendResult match { + case Left(err) => + info(s"Aborting sending of transaction markers after appended $txnMarkerResult to transaction log and returning $err error to client for $transactionalId's EndTransaction request") + responseCallback(err, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + + case Right((txnMetadata, newPreSendMetadata)) => + // we can respond to the client immediately and continue to write the txn markers if + // the log append was successful + responseCallback(Errors.NONE, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + + txnMarkerChannelManager.addTxnMarkersToSend(coordinatorEpoch, txnMarkerResult, txnMetadata, newPreSendMetadata) + } + } else { + info(s"Aborting sending of transaction markers and returning $error error to client for $transactionalId's EndTransaction request of $txnMarkerResult, " + + s"since appending $newMetadata to transaction log with coordinator epoch $coordinatorEpoch failed") + + if (isEpochFence) { + txnManager.getTransactionState(transactionalId).foreach { + case None => + warn(s"The coordinator still owns the transaction partition for $transactionalId, but there is " + + s"no metadata in the cache; this is not expected") + + case Some(epochAndMetadata) => + if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { + // This was attempted epoch fence that failed, so mark this state on the metadata + epochAndMetadata.transactionMetadata.hasFailedEpochFence = true + warn(s"The coordinator failed to write an epoch fence transition for producer $transactionalId to the transaction log " + + s"with error $error. The epoch was increased to ${newMetadata.producerEpoch} but not returned to the client") + } + } + } + + responseCallback(error, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + } + } + + txnManager.appendTransactionToLog(transactionalId, coordinatorEpoch, newMetadata, + sendTxnMarkersCallback, requestLocal = requestLocal) + } + } + } + + + /* + Here is the table to demonstrate the state transition for Empty, CompleteAbort, CompleteCommit in Transaction V2. + Note: + PF = PRODUCER_FENCED + ITS = INVALID_TXN_STATE + NONE = No error and no epoch bump + EB = No error and epoch bump + + Retry => producerEpoch = txnState.ProducerEpoch - 1 + Current => producerEpoch = txnState.ProducerEpoch + + ------------------------------------------------------ + With transaction V1, Retry is not allowed, PRODUCER_FENCED will be returned if the epoch does not match. + Empty does not accept Abort and Commit. + CompleteAbort only accepts Abort. + CompleteCommit only accepts Commit. + For all invalid cases, INVALID_TXN_STATE is returned. + + ------------------------------------------------------ + With transaction V2. 
+ +----------------+-----------------+-----------------+ + | | Abort | Commit | + +----------------+-------+---------+-------+---------+ + | | Retry | Current | Retry | Current | + +----------------+-------+---------+-------+---------+ + | Empty | PF | EB | PF | ITS | + +----------------+-------+---------+-------+---------+ + | CompleteAbort | NONE | EB | ITS | ITS | + +----------------+-------+---------+-------+---------+ + | CompleteCommit | ITS | EB | NONE | ITS | + +----------------+-------+---------+-------+---------+ + */ + + /** + * Handling the endTxn request above the Transaction Version 2. + * + * @param transactionalId The transaction ID from the endTxn request + * @param producerId The producer ID from the endTxn request + * @param producerEpoch The producer epoch from the endTxn request + * @param txnMarkerResult To commit or abort the transaction + * @param isFromClient Is the request from client + * @param clientTransactionVersion The transaction version for the endTxn request + * @param responseCallback The response callback + * @param requestLocal The request local object + */ private def endTransaction(transactionalId: String, producerId: Long, producerEpoch: Short, @@ -506,6 +733,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, clientTransactionVersion: TransactionVersion, responseCallback: EndTxnCallback, requestLocal: RequestLocal): Unit = { + if (!clientTransactionVersion.supportsEpochBump()) { + endTransactionWithTV1(transactionalId, producerId, producerEpoch, txnMarkerResult, isFromClient, responseCallback, requestLocal) + return + } var isEpochFence = false if (transactionalId == null || transactionalId.isEmpty) responseCallback(Errors.INVALID_REQUEST, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) @@ -524,15 +755,16 @@ class TransactionCoordinator(txnConfig: TransactionConfig, producerIdCopy = txnMetadata.producerId producerEpochCopy = txnMetadata.producerEpoch // PrepareEpochFence has slightly different epoch bumping logic so don't include it here. - val currentTxnMetadataIsAtLeastTransactionsV2 = !txnMetadata.pendingState.contains(PrepareEpochFence) && txnMetadata.clientTransactionVersion.supportsEpochBump() - // True if the client used TV_2 and retried a request that had overflowed the epoch, and a new producer ID is stored in the txnMetadata - val retryOnOverflow = currentTxnMetadataIsAtLeastTransactionsV2 && - txnMetadata.previousProducerId == producerId && producerEpoch == Short.MaxValue - 1 && txnMetadata.producerEpoch == 0 - // True if the client used TV_2 and retried an endTxn request, and the bumped producer epoch is stored in the txnMetadata. - val retryOnEpochBump = endTxnEpochBumped(txnMetadata, producerEpoch) + // Note that, it can only happen when the current state is Ongoing. + isEpochFence = txnMetadata.pendingState.contains(PrepareEpochFence) + // True if the client retried a request that had overflowed the epoch, and a new producer ID is stored in the txnMetadata + val retryOnOverflow = !isEpochFence && txnMetadata.prevProducerId == producerId && + producerEpoch == Short.MaxValue - 1 && txnMetadata.producerEpoch == 0 + // True if the client retried an endTxn request, and the bumped producer epoch is stored in the txnMetadata. + val retryOnEpochBump = !isEpochFence && txnMetadata.producerEpoch == producerEpoch + 1 val isValidEpoch = { - if (currentTxnMetadataIsAtLeastTransactionsV2) { + if (!isEpochFence) { // With transactions V2, state + same epoch is not sufficient to determine if a retry transition is valid. 
If the epoch is the // same it actually indicates the next endTransaction call. Instead, we want to check the epoch matches with the epoch in the retry conditions. // Return producer fenced even in the cases where the epoch is higher and could indicate an invalid state transition. @@ -543,20 +775,49 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case PrepareCommit | PrepareAbort => retryOnEpochBump case CompleteCommit | CompleteAbort => - retryOnEpochBump || retryOnOverflow + retryOnEpochBump || retryOnOverflow || producerEpoch == txnMetadata.producerEpoch } } else { - // For transactions V1 strict equality is enforced on the client side requests, as they shouldn't bump the producer epoch without server knowledge. + // If the epoch is going to be fenced, it bumps the epoch differently with TV2. (!isFromClient || producerEpoch == txnMetadata.producerEpoch) && producerEpoch >= txnMetadata.producerEpoch } } + val isRetry = retryOnEpochBump || retryOnOverflow + + def generateTxnTransitMetadataForTxnCompletion(nextState: TransactionState, noPartitionAdded: Boolean): ApiResult[(Int, TxnTransitMetadata)] = { + // Maybe allocate new producer ID if we are bumping epoch and epoch is exhausted + val nextProducerIdOrErrors = + if (!isEpochFence && txnMetadata.isProducerEpochExhausted) { + try { + Right(producerIdManager.generateProducerId()) + } catch { + case e: Exception => Left(Errors.forException(e)) + } + } else { + Right(RecordBatch.NO_PRODUCER_ID) + } + + if (nextState == PrepareAbort && isEpochFence) { + // We should clear the pending state to make way for the transition to PrepareAbort and also bump + // the epoch in the transaction metadata we are about to append. + txnMetadata.pendingState = None + txnMetadata.producerEpoch = producerEpoch + txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH + } + + nextProducerIdOrErrors.flatMap { + nextProducerId => + Right(coordinatorEpoch, txnMetadata.prepareAbortOrCommit(nextState, clientTransactionVersion, nextProducerId.asInstanceOf[Long], time.milliseconds(), noPartitionAdded)) + } + } + if (txnMetadata.producerId != producerId && !retryOnOverflow) Left(Errors.INVALID_PRODUCER_ID_MAPPING) - else if (!isValidEpoch) - Left(Errors.PRODUCER_FENCED) else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) Left(Errors.CONCURRENT_TRANSACTIONS) + else if (!isValidEpoch) + Left(Errors.PRODUCER_FENCED) else txnMetadata.state match { case Ongoing => val nextState = if (txnMarkerResult == TransactionResult.COMMIT) @@ -564,41 +825,30 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else PrepareAbort - // Maybe allocate new producer ID if we are bumping epoch and epoch is exhausted - val nextProducerIdOrErrors = - if (clientTransactionVersion.supportsEpochBump() && !txnMetadata.pendingState.contains(PrepareEpochFence) && txnMetadata.isProducerEpochExhausted) { - try { - Right(producerIdManager.generateProducerId()) - } catch { - case e: Exception => Left(Errors.forException(e)) - } - } else { - Right(RecordBatch.NO_PRODUCER_ID) - } - - if (nextState == PrepareAbort && txnMetadata.pendingState.contains(PrepareEpochFence)) { - // We should clear the pending state to make way for the transition to PrepareAbort and also bump - // the epoch in the transaction metadata we are about to append. 
- isEpochFence = true - txnMetadata.pendingState = None - txnMetadata.producerEpoch = producerEpoch - txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH - } - - nextProducerIdOrErrors.flatMap { - nextProducerId => - Right(coordinatorEpoch, txnMetadata.prepareAbortOrCommit(nextState, clientTransactionVersion, nextProducerId.asInstanceOf[Long], time.milliseconds())) - } + generateTxnTransitMetadataForTxnCompletion(nextState, false) case CompleteCommit => - if (txnMarkerResult == TransactionResult.COMMIT) - Left(Errors.NONE) - else - logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + if (txnMarkerResult == TransactionResult.COMMIT) { + if (isRetry) + Left(Errors.NONE) + else + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + } else { + // Abort. + if (isRetry) + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + else + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + } case CompleteAbort => - if (txnMarkerResult == TransactionResult.ABORT) - Left(Errors.NONE) - else + if (txnMarkerResult == TransactionResult.ABORT) { + if (isRetry) + Left(Errors.NONE) + else + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + } else { + // Commit. logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + } case PrepareCommit => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.CONCURRENT_TRANSACTIONS) @@ -610,7 +860,11 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) case Empty => - logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + if (txnMarkerResult == TransactionResult.ABORT) { + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) + } else { + logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) + } case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." 
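The Empty / CompleteCommit / CompleteAbort handling above implements the Transaction V2 table included earlier in this file. As a reading aid only, the same table can be restated as a small decision function; this is an illustrative re-encoding of the table, not code from the patch:

    sealed trait EndTxnOutcome
    case object ProducerFenced  extends EndTxnOutcome // PF
    case object InvalidTxnState extends EndTxnOutcome // ITS
    case object NoErrorNoBump   extends EndTxnOutcome // NONE
    case object EpochBumpAbort  extends EndTxnOutcome // EB

    // abort: true for Abort, false for Commit; isRetry: producerEpoch == current epoch - 1
    def tv2Outcome(state: String, abort: Boolean, isRetry: Boolean): EndTxnOutcome = (state, abort, isRetry) match {
      case ("Empty",          true,  true)  => ProducerFenced
      case ("Empty",          true,  false) => EpochBumpAbort
      case ("Empty",          false, true)  => ProducerFenced
      case ("Empty",          false, false) => InvalidTxnState
      case ("CompleteAbort",  true,  true)  => NoErrorNoBump
      case ("CompleteAbort",  true,  false) => EpochBumpAbort
      case ("CompleteAbort",  false, _)     => InvalidTxnState
      case ("CompleteCommit", true,  true)  => InvalidTxnState
      case ("CompleteCommit", true,  false) => EpochBumpAbort
      case ("CompleteCommit", false, true)  => NoErrorNoBump
      case ("CompleteCommit", false, false) => InvalidTxnState
      case _                                => InvalidTxnState
    }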
@@ -626,8 +880,8 @@ class TransactionCoordinator(txnConfig: TransactionConfig, if (err == Errors.NONE) { responseCallback(err, producerIdCopy, producerEpochCopy) } else { - debug(s"Aborting append of $txnMarkerResult to transaction log with coordinator and returning $err error to client for $transactionalId's EndTransaction request") - responseCallback(err, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) + debug(s"Aborting append of $txnMarkerResult to transaction log with coordinator and returning $err error to client for $transactionalId's EndTransaction request") + responseCallback(err, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) } case Right((coordinatorEpoch, newMetadata)) => @@ -646,7 +900,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, txnMetadata.inLock { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) - else if (txnMetadata.producerEpoch != producerEpoch && !endTxnEpochBumped(txnMetadata, producerEpoch)) + else if (txnMetadata.producerEpoch != producerEpoch && txnMetadata.producerEpoch != producerEpoch + 1) Left(Errors.PRODUCER_FENCED) else if (txnMetadata.pendingTransitionInProgress) Left(Errors.CONCURRENT_TRANSACTIONS) @@ -720,14 +974,6 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } } - // When a client and server support V2, every endTransaction call bumps the producer epoch. When checking epoch, we want to - // check epoch + 1. Epoch bumps from PrepareEpochFence state are handled separately, so this method should not be used to check that case. - // Returns true if the transaction state epoch is the specified producer epoch + 1 and epoch bump on every transaction is expected. - private def endTxnEpochBumped(txnMetadata: TransactionMetadata, producerEpoch: Short): Boolean = { - !txnMetadata.pendingState.contains(PrepareEpochFence) && txnMetadata.clientTransactionVersion.supportsEpochBump() && - txnMetadata.producerEpoch == producerEpoch + 1 - } - def transactionTopicConfigs: Properties = txnManager.transactionTopicConfigs def partitionFor(transactionalId: String): Int = txnManager.partitionFor(transactionalId) diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala index 2d8a8c5a84044..6db509f504df8 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala @@ -19,7 +19,7 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} -import org.apache.kafka.common.record.{Record, RecordBatch} +import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.TopicPartition import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} import org.apache.kafka.server.common.TransactionVersion @@ -121,7 +121,7 @@ object TransactionLog { val transactionMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = value.producerId, - previousProducerId = value.previousProducerId, + prevProducerId = value.previousProducerId, nextProducerId = value.nextProducerId, producerEpoch = value.producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -143,33 +143,6 @@ object TransactionLog { } else throw new IllegalStateException(s"Unknown version $version from the transaction log message 
value") } } - - /** - * Exposed for printing records using [[kafka.tools.DumpLogSegments]] - */ - def formatRecordKeyAndValue(record: Record): (Option[String], Option[String]) = { - TransactionLog.readTxnRecordKey(record.key) match { - case txnKey: TxnKey => - val keyString = s"transaction_metadata::transactionalId=${txnKey.transactionalId}" - - val valueString = TransactionLog.readTxnRecordValue(txnKey.transactionalId, record.value) match { - case None => "" - - case Some(txnMetadata) => s"producerId:${txnMetadata.producerId}," + - s"producerEpoch:${txnMetadata.producerEpoch}," + - s"state=${txnMetadata.state}," + - s"partitions=${txnMetadata.topicPartitions.mkString("[", ",", "]")}," + - s"txnLastUpdateTimestamp=${txnMetadata.txnLastUpdateTimestamp}," + - s"txnTimeoutMs=${txnMetadata.txnTimeoutMs}" - } - - (Some(keyString), Some(valueString)) - - case unknownKey: UnknownKey => - (Some(s"unknown::version=${unknownKey.version}"), None) - } - } - } sealed trait BaseKey{ diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala index 9eaf19e6e0973..2e71a72420957 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala @@ -62,7 +62,6 @@ object TransactionMarkerChannelManager { config.interBrokerListenerName, config.saslMechanismInterBrokerProtocol, time, - config.saslInterBrokerHandshakeRequestEnable, logContext ) channelBuilder match { @@ -256,9 +255,7 @@ class TransactionMarkerChannelManager( }.filter { case (_, entries) => !entries.isEmpty }.map { case (node, entries) => val markersToSend = entries.asScala.map(_.txnMarkerEntry).asJava val requestCompletionHandler = new TransactionMarkerRequestCompletionHandler(node.id, txnStateManager, this, entries) - val request = new WriteTxnMarkersRequest.Builder( - metadataCache.metadataVersion().writeTxnMarkersRequestVersion(), markersToSend - ) + val request = new WriteTxnMarkersRequest.Builder(markersToSend) new RequestAndCompletionHandler( currentTimeMs, diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala index 36b09ad43198c..99ef4711171da 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala @@ -75,6 +75,7 @@ private[transaction] sealed trait TransactionState { * * transition: received AddPartitionsToTxnRequest => Ongoing * received AddOffsetsToTxnRequest => Ongoing + * received EndTxnRequest with abort and TransactionV2 enabled => PrepareAbort */ private[transaction] case object Empty extends TransactionState { val id: Byte = 0 @@ -112,11 +113,14 @@ private[transaction] case object PrepareCommit extends TransactionState { * Group is preparing to abort * * transition: received acks from all partitions => CompleteAbort + * + * Note, In transaction v2, we allow Empty, CompleteCommit, CompleteAbort to transition to PrepareAbort. because the + * client may not know the txn state on the server side, it needs to send endTxn request when uncertain. 
*/ private[transaction] case object PrepareAbort extends TransactionState { val id: Byte = 3 val name: String = "PrepareAbort" - val validPreviousStates: Set[TransactionState] = Set(Ongoing, PrepareEpochFence) + val validPreviousStates: Set[TransactionState] = Set(Ongoing, PrepareEpochFence, Empty, CompleteCommit, CompleteAbort) } /** @@ -174,7 +178,7 @@ private[transaction] case class TxnTransitMetadata(producerId: Long, lastProducerEpoch: Short, txnTimeoutMs: Int, txnState: TransactionState, - topicPartitions: immutable.Set[TopicPartition], + topicPartitions: mutable.Set[TopicPartition], txnStartTimestamp: Long, txnLastUpdateTimestamp: Long, clientTransactionVersion: TransactionVersion) { @@ -197,7 +201,7 @@ private[transaction] case class TxnTransitMetadata(producerId: Long, /** * * @param producerId producer id - * @param previousProducerId producer id for the last committed transaction with this transactional ID + * @param prevProducerId producer id for the last committed transaction with this transactional ID * @param nextProducerId Latest producer ID sent to the producer for the given transactional ID * @param producerEpoch current epoch of the producer * @param lastProducerEpoch last epoch of the producer @@ -211,13 +215,13 @@ private[transaction] case class TxnTransitMetadata(producerId: Long, @nonthreadsafe private[transaction] class TransactionMetadata(val transactionalId: String, var producerId: Long, - var previousProducerId: Long, + var prevProducerId: Long, var nextProducerId: Long, var producerEpoch: Short, var lastProducerEpoch: Short, var txnTimeoutMs: Int, var state: TransactionState, - val topicPartitions: mutable.Set[TopicPartition], + var topicPartitions: mutable.Set[TopicPartition], @volatile var txnStartTimestamp: Long = -1, @volatile var txnLastUpdateTimestamp: Long, var clientTransactionVersion: TransactionVersion) extends Logging { @@ -250,8 +254,8 @@ private[transaction] class TransactionMetadata(val transactionalId: String, // this is visible for test only def prepareNoTransit(): TxnTransitMetadata = { // do not call transitTo as it will set the pending state, a follow-up call to abort the transaction will set its pending state - TxnTransitMetadata(producerId, previousProducerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, topicPartitions.toSet, - txnStartTimestamp, txnLastUpdateTimestamp, TransactionVersion.TV_0) + TxnTransitMetadata(producerId, prevProducerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, topicPartitions.clone(), + txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) } def prepareFenceProducerEpoch(): TxnTransitMetadata = { @@ -262,8 +266,11 @@ private[transaction] class TransactionMetadata(val transactionalId: String, // This is safe because we never return the epoch to client if we fail to fence the epoch val bumpedEpoch = if (hasFailedEpochFence) producerEpoch else (producerEpoch + 1).toShort - prepareTransitionTo(PrepareEpochFence, producerId, bumpedEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, - topicPartitions.toSet, txnStartTimestamp, txnLastUpdateTimestamp) + prepareTransitionTo( + state = PrepareEpochFence, + producerEpoch = bumpedEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH + ) } def prepareIncrementProducerEpoch(newTxnTimeoutMs: Int, @@ -301,8 +308,15 @@ private[transaction] class TransactionMetadata(val transactionalId: String, } epochBumpResult match { - case Right((nextEpoch, lastEpoch)) => Right(prepareTransitionTo(Empty, producerId, 
nextEpoch, lastEpoch, newTxnTimeoutMs, - immutable.Set.empty[TopicPartition], -1, updateTimestamp)) + case Right((nextEpoch, lastEpoch)) => Right(prepareTransitionTo( + state = Empty, + producerEpoch = nextEpoch, + lastProducerEpoch = lastEpoch, + txnTimeoutMs = newTxnTimeoutMs, + topicPartitions = mutable.Set.empty[TopicPartition], + txnStartTimestamp = -1, + txnLastUpdateTimestamp = updateTimestamp + )) case Left(err) => Left(err) } @@ -315,21 +329,34 @@ private[transaction] class TransactionMetadata(val transactionalId: String, if (hasPendingTransaction) throw new IllegalStateException("Cannot rotate producer ids while a transaction is still pending") - prepareTransitionTo(Empty, newProducerId, 0, if (recordLastEpoch) producerEpoch else RecordBatch.NO_PRODUCER_EPOCH, - newTxnTimeoutMs, immutable.Set.empty[TopicPartition], -1, updateTimestamp) + prepareTransitionTo( + state = Empty, + producerId = newProducerId, + producerEpoch = 0, + lastProducerEpoch = if (recordLastEpoch) producerEpoch else RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = newTxnTimeoutMs, + topicPartitions = mutable.Set.empty[TopicPartition], + txnStartTimestamp = -1, + txnLastUpdateTimestamp = updateTimestamp + ) } - def prepareAddPartitions(addedTopicPartitions: immutable.Set[TopicPartition], updateTimestamp: Long): TxnTransitMetadata = { + def prepareAddPartitions(addedTopicPartitions: immutable.Set[TopicPartition], updateTimestamp: Long, clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { val newTxnStartTimestamp = state match { case Empty | CompleteAbort | CompleteCommit => updateTimestamp case _ => txnStartTimestamp } - prepareTransitionTo(Ongoing, producerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, - (topicPartitions ++ addedTopicPartitions).toSet, newTxnStartTimestamp, updateTimestamp) + prepareTransitionTo( + state = Ongoing, + topicPartitions = (topicPartitions ++ addedTopicPartitions), + txnStartTimestamp = newTxnStartTimestamp, + txnLastUpdateTimestamp = updateTimestamp, + clientTransactionVersion = clientTransactionVersion + ) } - def prepareAbortOrCommit(newState: TransactionState, clientTransactionVersion: TransactionVersion, nextProducerId: Long, updateTimestamp: Long): TxnTransitMetadata = { + def prepareAbortOrCommit(newState: TransactionState, clientTransactionVersion: TransactionVersion, nextProducerId: Long, updateTimestamp: Long, noPartitionAdded: Boolean): TxnTransitMetadata = { val (updatedProducerEpoch, updatedLastProducerEpoch) = if (clientTransactionVersion.supportsEpochBump()) { // We already ensured that we do not overflow here. MAX_SHORT is the highest possible value. ((producerEpoch + 1).toShort, producerEpoch) @@ -337,8 +364,18 @@ private[transaction] class TransactionMetadata(val transactionalId: String, (producerEpoch, lastProducerEpoch) } - prepareTransitionTo(newState, producerId, nextProducerId, updatedProducerEpoch, updatedLastProducerEpoch, txnTimeoutMs, topicPartitions.toSet, - txnStartTimestamp, updateTimestamp, clientTransactionVersion) + // With transaction V2, it is allowed to abort the transaction without adding any partitions. Then, the transaction + // start time is uncertain but it is still required. So we can use the update time as the transaction start time. 
+ val newTxnStartTimestamp = if (noPartitionAdded) updateTimestamp else txnStartTimestamp + prepareTransitionTo( + state = newState, + nextProducerId = nextProducerId, + producerEpoch = updatedProducerEpoch, + lastProducerEpoch = updatedLastProducerEpoch, + txnStartTimestamp = newTxnStartTimestamp, + txnLastUpdateTimestamp = updateTimestamp, + clientTransactionVersion = clientTransactionVersion + ) } def prepareComplete(updateTimestamp: Long): TxnTransitMetadata = { @@ -358,13 +395,22 @@ private[transaction] class TransactionMetadata(val transactionalId: String, } else { (producerId, producerEpoch) } - prepareTransitionTo(newState, updatedProducerId, RecordBatch.NO_PRODUCER_ID, updatedProducerEpoch, lastProducerEpoch, txnTimeoutMs, Set.empty[TopicPartition], - txnStartTimestamp, updateTimestamp, clientTransactionVersion) + + prepareTransitionTo( + state = newState, + producerId = updatedProducerId, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = updatedProducerEpoch, + topicPartitions = mutable.Set.empty[TopicPartition], + txnLastUpdateTimestamp = updateTimestamp + ) } def prepareDead(): TxnTransitMetadata = { - prepareTransitionTo(Dead, producerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, Set.empty[TopicPartition], - txnStartTimestamp, txnLastUpdateTimestamp) + prepareTransitionTo( + state = Dead, + topicPartitions = mutable.Set.empty[TopicPartition] + ) } /** @@ -380,50 +426,39 @@ private[transaction] class TransactionMetadata(val transactionalId: String, } } - private def prepareTransitionTo(updatedState: TransactionState, - updatedProducerId: Long, - updatedEpoch: Short, - updatedLastEpoch: Short, - updatedTxnTimeoutMs: Int, - updatedTopicPartitions: immutable.Set[TopicPartition], - updatedTxnStartTimestamp: Long, - updateTimestamp: Long): TxnTransitMetadata = { - prepareTransitionTo(updatedState, updatedProducerId, RecordBatch.NO_PRODUCER_ID, updatedEpoch, updatedLastEpoch, updatedTxnTimeoutMs, updatedTopicPartitions, updatedTxnStartTimestamp, updateTimestamp, TransactionVersion.TV_0) - } - - private def prepareTransitionTo(updatedState: TransactionState, - updatedProducerId: Long, - nextProducerId: Long, - updatedEpoch: Short, - updatedLastEpoch: Short, - updatedTxnTimeoutMs: Int, - updatedTopicPartitions: immutable.Set[TopicPartition], - updatedTxnStartTimestamp: Long, - updateTimestamp: Long, - clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { + private def prepareTransitionTo(state: TransactionState, + producerId: Long = this.producerId, + nextProducerId: Long = this.nextProducerId, + producerEpoch: Short = this.producerEpoch, + lastProducerEpoch: Short = this.lastProducerEpoch, + txnTimeoutMs: Int = this.txnTimeoutMs, + topicPartitions: mutable.Set[TopicPartition] = this.topicPartitions, + txnStartTimestamp: Long = this.txnStartTimestamp, + txnLastUpdateTimestamp: Long = this.txnLastUpdateTimestamp, + clientTransactionVersion: TransactionVersion = this.clientTransactionVersion): TxnTransitMetadata = { if (pendingState.isDefined) - throw new IllegalStateException(s"Preparing transaction state transition to $updatedState " + + throw new IllegalStateException(s"Preparing transaction state transition to $state " + s"while it already a pending state ${pendingState.get}") - if (updatedProducerId < 0) - throw new IllegalArgumentException(s"Illegal new producer id $updatedProducerId") + if (producerId < 0) + throw new IllegalArgumentException(s"Illegal new producer id $producerId") // The epoch is initialized to NO_PRODUCER_EPOCH when the 
TransactionMetadata // is created for the first time and it could stay like this until transitioning // to Dead. - if (updatedState != Dead && updatedEpoch < 0) - throw new IllegalArgumentException(s"Illegal new producer epoch $updatedEpoch") + if (state != Dead && producerEpoch < 0) + throw new IllegalArgumentException(s"Illegal new producer epoch $producerEpoch") // check that the new state transition is valid and update the pending state if necessary - if (updatedState.validPreviousStates.contains(state)) { - val transitMetadata = TxnTransitMetadata(updatedProducerId, producerId, nextProducerId, updatedEpoch, updatedLastEpoch, updatedTxnTimeoutMs, updatedState, - updatedTopicPartitions, updatedTxnStartTimestamp, updateTimestamp, clientTransactionVersion) - debug(s"TransactionalId $transactionalId prepare transition from $state to $transitMetadata") - pendingState = Some(updatedState) + if (state.validPreviousStates.contains(this.state)) { + val transitMetadata = TxnTransitMetadata(producerId, this.producerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, + topicPartitions, txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) + debug(s"TransactionalId ${this.transactionalId} prepare transition from ${this.state} to $transitMetadata") + pendingState = Some(state) transitMetadata } else { - throw new IllegalStateException(s"Preparing transaction state transition to $updatedState failed since the target state" + - s" $updatedState is not a valid previous state of the current state $state") + throw new IllegalStateException(s"Preparing transaction state transition to $state failed since the target state" + + s" $state is not a valid previous state of the current state ${this.state}") } } @@ -457,12 +492,6 @@ private[transaction] class TransactionMetadata(val transactionalId: String, transitMetadata.txnStartTimestamp != -1) { throwStateTransitionFailure(transitMetadata) - } else { - txnTimeoutMs = transitMetadata.txnTimeoutMs - producerEpoch = transitMetadata.producerEpoch - lastProducerEpoch = transitMetadata.lastProducerEpoch - producerId = transitMetadata.producerId - previousProducerId = transitMetadata.prevProducerId } case Ongoing => // from addPartitions @@ -471,40 +500,26 @@ private[transaction] class TransactionMetadata(val transactionalId: String, txnTimeoutMs != transitMetadata.txnTimeoutMs) { throwStateTransitionFailure(transitMetadata) - } else { - txnStartTimestamp = transitMetadata.txnStartTimestamp - addPartitions(transitMetadata.topicPartitions) } case PrepareAbort | PrepareCommit => // from endTxn + // In V2, we allow state transits from Empty, CompleteCommit and CompleteAbort to PrepareAbort. It is possible + // their updated start time is not equal to the current start time. 
+ val allowedEmptyAbort = toState == PrepareAbort && transitMetadata.clientTransactionVersion.supportsEpochBump() && + (state == Empty || state == CompleteCommit || state == CompleteAbort) + val validTimestamp = txnStartTimestamp == transitMetadata.txnStartTimestamp || allowedEmptyAbort if (!validProducerEpoch(transitMetadata) || - !topicPartitions.toSet.equals(transitMetadata.topicPartitions) || - txnTimeoutMs != transitMetadata.txnTimeoutMs || - txnStartTimestamp != transitMetadata.txnStartTimestamp) { + !topicPartitions.equals(transitMetadata.topicPartitions) || + txnTimeoutMs != transitMetadata.txnTimeoutMs || !validTimestamp) { throwStateTransitionFailure(transitMetadata) - } else if (transitMetadata.clientTransactionVersion.supportsEpochBump()) { - producerEpoch = transitMetadata.producerEpoch - lastProducerEpoch = transitMetadata.lastProducerEpoch - nextProducerId = transitMetadata.nextProducerId } case CompleteAbort | CompleteCommit => // from write markers if (!validProducerEpoch(transitMetadata) || txnTimeoutMs != transitMetadata.txnTimeoutMs || transitMetadata.txnStartTimestamp == -1) { - throwStateTransitionFailure(transitMetadata) - } else { - txnStartTimestamp = transitMetadata.txnStartTimestamp - topicPartitions.clear() - if (transitMetadata.clientTransactionVersion.supportsEpochBump()) { - producerEpoch = transitMetadata.producerEpoch - lastProducerEpoch = transitMetadata.lastProducerEpoch - previousProducerId = transitMetadata.prevProducerId - producerId = transitMetadata.producerId - nextProducerId = transitMetadata.nextProducerId - } } case PrepareEpochFence => @@ -524,8 +539,17 @@ private[transaction] class TransactionMetadata(val transactionalId: String, } debug(s"TransactionalId $transactionalId complete transition from $state to $transitMetadata") - clientTransactionVersion = transitMetadata.clientTransactionVersion + producerId = transitMetadata.producerId + prevProducerId = transitMetadata.prevProducerId + nextProducerId = transitMetadata.nextProducerId + producerEpoch = transitMetadata.producerEpoch + lastProducerEpoch = transitMetadata.lastProducerEpoch + txnTimeoutMs = transitMetadata.txnTimeoutMs + topicPartitions = transitMetadata.topicPartitions + txnStartTimestamp = transitMetadata.txnStartTimestamp txnLastUpdateTimestamp = transitMetadata.txnLastUpdateTimestamp + clientTransactionVersion = transitMetadata.clientTransactionVersion + pendingState = None state = toState } @@ -593,15 +617,17 @@ private[transaction] class TransactionMetadata(val transactionalId: String, "TransactionMetadata(" + s"transactionalId=$transactionalId, " + s"producerId=$producerId, " + - s"previousProducerId=$previousProducerId, " - s"nextProducerId=$nextProducerId, " + s"prevProducerId=$prevProducerId, " + + s"nextProducerId=$nextProducerId, " + s"producerEpoch=$producerEpoch, " + + s"lastProducerEpoch=$lastProducerEpoch, " + s"txnTimeoutMs=$txnTimeoutMs, " + s"state=$state, " + s"pendingState=$pendingState, " + s"topicPartitions=$topicPartitions, " + s"txnStartTimestamp=$txnStartTimestamp, " + - s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp)" + s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp, " + + s"clientTransactionVersion=$clientTransactionVersion)" } override def equals(that: Any): Boolean = that match { @@ -614,13 +640,14 @@ private[transaction] class TransactionMetadata(val transactionalId: String, state.equals(other.state) && topicPartitions.equals(other.topicPartitions) && txnStartTimestamp == other.txnStartTimestamp && - txnLastUpdateTimestamp == 
other.txnLastUpdateTimestamp + txnLastUpdateTimestamp == other.txnLastUpdateTimestamp && + clientTransactionVersion == other.clientTransactionVersion case _ => false } override def hashCode(): Int = { val fields = Seq(transactionalId, producerId, producerEpoch, txnTimeoutMs, state, topicPartitions, - txnStartTimestamp, txnLastUpdateTimestamp) + txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) fields.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b) } } diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala index 747d3f63eb925..a6e7dd30bf062 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala @@ -596,10 +596,9 @@ class TransactionStateManager(brokerId: Int, */ def removeTransactionsForTxnTopicPartition(partitionId: Int, coordinatorEpoch: Int): Unit = { val topicPartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionId) - val partitionAndLeaderEpoch = TransactionPartitionAndLeaderEpoch(partitionId, coordinatorEpoch) inWriteLock(stateLock) { - loadingPartitions.remove(partitionAndLeaderEpoch) + removeLoadingPartitionWithEpoch(partitionId, coordinatorEpoch) transactionMetadataCache.remove(partitionId) match { case Some(txnMetadataCacheEntry) => info(s"Unloaded transaction metadata $txnMetadataCacheEntry for $topicPartition on become-follower transition") @@ -610,6 +609,18 @@ class TransactionStateManager(brokerId: Int, } } + /** + * Remove the loading partition if the epoch is less than the specified epoch. Note: This method must be called under the write state lock. + */ + private def removeLoadingPartitionWithEpoch(partitionId: Int, coordinatorEpoch: Int): Unit = { + loadingPartitions.find(_.txnPartitionId == partitionId).foreach { partitionAndLeaderEpoch => + if (partitionAndLeaderEpoch.coordinatorEpoch < coordinatorEpoch) { + loadingPartitions.remove(partitionAndLeaderEpoch) + info(s"Cancelling load of currently loading partition $partitionAndLeaderEpoch") + } + } + } + private def validateTransactionTopicPartitionCountIsStable(): Unit = { val previouslyDeterminedPartitionCount = transactionTopicPartitionCount val curTransactionTopicPartitionCount = retrieveTransactionTopicPartitionCount() diff --git a/core/src/main/scala/kafka/coordinator/transaction/ZkProducerIdManager.scala b/core/src/main/scala/kafka/coordinator/transaction/ZkProducerIdManager.scala deleted file mode 100644 index 05e6310053610..0000000000000 --- a/core/src/main/scala/kafka/coordinator/transaction/ZkProducerIdManager.scala +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.coordinator.transaction - -import kafka.utils.Logging -import kafka.zk.{KafkaZkClient, ProducerIdBlockZNode} -import org.apache.kafka.common.KafkaException -import org.apache.kafka.coordinator.transaction.ProducerIdManager -import org.apache.kafka.server.common.ProducerIdsBlock - -object ZkProducerIdManager { - def getNewProducerIdBlock(brokerId: Int, zkClient: KafkaZkClient, logger: Logging): ProducerIdsBlock = { - // Get or create the existing PID block from ZK and attempt to update it. We retry in a loop here since other - // brokers may be generating PID blocks during a rolling upgrade - var zkWriteComplete = false - while (!zkWriteComplete) { - // refresh current producerId block from zookeeper again - val (dataOpt, zkVersion) = zkClient.getDataAndVersion(ProducerIdBlockZNode.path) - - // generate the new producerId block - val newProducerIdBlock = dataOpt match { - case Some(data) => - val currProducerIdBlock = ProducerIdBlockZNode.parseProducerIdBlockData(data) - logger.debug(s"Read current producerId block $currProducerIdBlock, Zk path version $zkVersion") - - if (currProducerIdBlock.lastProducerId > Long.MaxValue - ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) { - // we have exhausted all producerIds (wow!), treat it as a fatal error - logger.fatal(s"Exhausted all producerIds as the next block's end producerId is will has exceeded long type limit (current block end producerId is ${currProducerIdBlock.lastProducerId})") - throw new KafkaException("Have exhausted all producerIds.") - } - - new ProducerIdsBlock(brokerId, currProducerIdBlock.nextBlockFirstId(), ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) - case None => - logger.debug(s"There is no producerId block yet (Zk path version $zkVersion), creating the first block") - new ProducerIdsBlock(brokerId, 0L, ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) - } - - val newProducerIdBlockData = ProducerIdBlockZNode.generateProducerIdBlockJson(newProducerIdBlock) - - // try to write the new producerId block into zookeeper - val (succeeded, version) = zkClient.conditionalUpdatePath(ProducerIdBlockZNode.path, newProducerIdBlockData, zkVersion, None) - zkWriteComplete = succeeded - - if (zkWriteComplete) { - logger.info(s"Acquired new producerId block $newProducerIdBlock by writing to Zk with path version $version") - return newProducerIdBlock - } - } - throw new IllegalStateException() - } -} - -class ZkProducerIdManager(brokerId: Int, zkClient: KafkaZkClient) extends ProducerIdManager with Logging { - - this.logIdent = "[ZK ProducerId Manager " + brokerId + "]: " - val RETRY_BACKOFF_MS = 50 - - private var currentProducerIdBlock: ProducerIdsBlock = ProducerIdsBlock.EMPTY - private var nextProducerId: Long = _ - - // grab the first block of producerIds - this synchronized { - allocateNewProducerIdBlock() - nextProducerId = currentProducerIdBlock.firstProducerId - } - - private def allocateNewProducerIdBlock(): Unit = { - this synchronized { - currentProducerIdBlock = ZkProducerIdManager.getNewProducerIdBlock(brokerId, zkClient, this) - } - } - - def generateProducerId(): Long = { - this synchronized { - // grab a new block of producerIds if this block has been exhausted - if (nextProducerId > currentProducerIdBlock.lastProducerId) { - try { - allocateNewProducerIdBlock() - } catch { - case t: Throwable => throw new KafkaException("Failed to acquire a new block of producerIds", t) - } - nextProducerId = 
currentProducerIdBlock.firstProducerId - } - nextProducerId += 1 - nextProducerId - 1 - } - } -} \ No newline at end of file diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala index 4f8d545be608e..c07437e68d599 100644 --- a/core/src/main/scala/kafka/log/LogCleaner.scala +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -21,7 +21,6 @@ import java.io.{File, IOException} import java.nio._ import java.util.Date import java.util.concurrent.TimeUnit -import kafka.common._ import kafka.log.LogCleaner.{CleanerRecopyPercentMetricName, DeadThreadCountMetricName, MaxBufferUtilizationPercentMetricName, MaxCleanTimeMetricName, MaxCompactionDelayMetricsName} import kafka.server.{BrokerReconfigurable, KafkaConfig} import kafka.utils.{Logging, Pool} @@ -35,7 +34,7 @@ import org.apache.kafka.common.utils.{BufferSupplier, Time} import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.ShutdownableThread -import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, LastRecord, LogDirFailureChannel, LogSegment, LogSegmentOffsetOverflowException, OffsetMap, SkimpyOffsetMap, TransactionIndex} +import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, LastRecord, LogCleaningAbortedException, LogDirFailureChannel, LogSegment, LogSegmentOffsetOverflowException, OffsetMap, SkimpyOffsetMap, ThreadShutdownException, TransactionIndex} import org.apache.kafka.storage.internals.utils.Throttler import scala.jdk.CollectionConverters._ @@ -116,12 +115,11 @@ class LogCleaner(initialConfig: CleanerConfig, private[log] val cleaners = mutable.ArrayBuffer[CleanerThread]() /** - * scala 2.12 does not support maxOption so we handle the empty manually. * @param f to compute the result * @return the max value (int value) or 0 if there is no cleaner */ - private def maxOverCleanerThreads(f: CleanerThread => Double): Int = - cleaners.foldLeft(0.0d)((max: Double, thread: CleanerThread) => math.max(max, f(thread))).toInt + private[log] def maxOverCleanerThreads(f: CleanerThread => Double): Int = + cleaners.map(f).maxOption.getOrElse(0.0d).toInt /* a metric to track the maximum utilization of any thread's buffer in the last cleaning */ metricsGroup.newGauge(MaxBufferUtilizationPercentMetricName, @@ -684,7 +682,8 @@ private[log] class Cleaner(val id: Int, try { cleanInto(log.topicPartition, currentSegment.log, cleaned, map, retainLegacyDeletesAndTxnMarkers, log.config.deleteRetentionMs, - log.config.maxMessageSize, transactionMetadata, lastOffsetOfActiveProducers, upperBoundOffsetOfCleaningRound, stats, currentTime = currentTime) + log.config.maxMessageSize, transactionMetadata, lastOffsetOfActiveProducers, + upperBoundOffsetOfCleaningRound, stats, currentTime = currentTime) } catch { case e: LogSegmentOffsetOverflowException => // Split the current segment. 
It's also safest to abort the current cleaning process, so that we retry from @@ -810,7 +809,7 @@ private[log] class Cleaner(val id: Int, sourceRecords.readInto(readBuffer, position) val records = MemoryRecords.readableRecords(readBuffer) throttler.maybeThrottle(records.sizeInBytes) - val result = records.filterTo(topicPartition, logCleanerFilter, writeBuffer, maxLogMessageSize, decompressionBufferSupplier) + val result = records.filterTo(logCleanerFilter, writeBuffer, decompressionBufferSupplier) stats.readMessages(result.messagesRead, result.bytesRead) stats.recopyMessages(result.messagesRetained, result.bytesRetained) @@ -824,7 +823,7 @@ private[log] class Cleaner(val id: Int, val retained = MemoryRecords.readableRecords(outputBuffer) // it's OK not to hold the Log's lock in this case, because this segment is only accessed by other threads // after `Log.replaceSegments` (which acquires the lock) is called - dest.append(result.maxOffset, result.maxTimestamp, result.shallowOffsetOfMaxTimestamp(), retained) + dest.append(result.maxOffset, retained) throttler.maybeThrottle(outputBuffer.limit()) } diff --git a/core/src/main/scala/kafka/log/LogCleanerManager.scala b/core/src/main/scala/kafka/log/LogCleanerManager.scala index 7238eacad9e56..983e87dd4379b 100755 --- a/core/src/main/scala/kafka/log/LogCleanerManager.scala +++ b/core/src/main/scala/kafka/log/LogCleanerManager.scala @@ -21,14 +21,13 @@ import java.lang.{Long => JLong} import java.io.File import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock -import kafka.common.LogCleaningAbortedException import kafka.utils.CoreUtils._ import kafka.utils.{Logging, Pool} import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.utils.Time import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile -import org.apache.kafka.storage.internals.log.LogDirFailureChannel +import org.apache.kafka.storage.internals.log.{LogCleaningAbortedException, LogDirFailureChannel} import org.apache.kafka.server.metrics.KafkaMetricsGroup import java.util.Comparator @@ -533,21 +532,15 @@ private[log] class LogCleanerManager(val logDirs: Seq[File], def maintainUncleanablePartitions(): Unit = { // Remove deleted partitions from uncleanablePartitions inLock(lock) { - // Note: we don't use retain or filterInPlace method in this function because retain is deprecated in - // scala 2.13 while filterInPlace is not available in scala 2.12. - // Remove deleted partitions - uncleanablePartitions.values.foreach { - partitions => - val partitionsToRemove = partitions.filterNot(logs.contains).toList - partitionsToRemove.foreach { partitions.remove } + uncleanablePartitions.values.foreach { partitions => + partitions.filterInPlace(logs.contains) } // Remove entries with empty partition set. 
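With Scala 2.12 support gone, `maintainUncleanablePartitions` can rely on the 2.13 `filterInPlace` for both pruning steps instead of the manual remove loops. A minimal, self-contained sketch of that idiom (illustrative only, not code from this patch):

```scala
import scala.collection.mutable

// filterInPlace mutates the collection, keeping only elements that satisfy the predicate.
val uncleanable = mutable.Set(0, 1, 2, 3)
val stillPresent = Set(1, 3)
uncleanable.filterInPlace(stillPresent.contains) // uncleanable is now Set(1, 3)

// The same works on mutable maps, e.g. dropping log dirs whose partition set became empty.
val byLogDir = mutable.Map("dir-a" -> mutable.Set.empty[Int], "dir-b" -> mutable.Set(1))
byLogDir.filterInPlace { case (_, partitions) => partitions.nonEmpty } // keeps only "dir-b"
```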
- val logDirsToRemove = uncleanablePartitions.filter { - case (_, partitions) => partitions.isEmpty - }.keys.toList - logDirsToRemove.foreach { uncleanablePartitions.remove } + uncleanablePartitions.filterInPlace { + case (_, partitions) => partitions.nonEmpty + } } } diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index a0e683641388f..d42d83fde4d72 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -35,14 +35,12 @@ import scala.jdk.CollectionConverters._ import scala.collection._ import scala.collection.mutable.ArrayBuffer import scala.util.{Failure, Success, Try} -import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.requests.{AbstractControlRequest, LeaderAndIsrRequest} import org.apache.kafka.image.TopicsImage import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, PropertiesUtils} import java.util.{Collections, OptionalLong, Properties} import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.storage.internals.log.LogConfig.MessageFormatVersion import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.{FileLock, Scheduler} import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, RemoteIndexCache} @@ -50,7 +48,6 @@ import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.util -import scala.annotation.nowarn /** * The entry point to the kafka log management subsystem. The log manager is responsible for log creation, retrieval, and cleaning. @@ -81,7 +78,6 @@ class LogManager(logDirs: Seq[File], brokerTopicStats: BrokerTopicStats, logDirFailureChannel: LogDirFailureChannel, time: Time, - val keepPartitionMetadataFile: Boolean, remoteStorageSystemEnable: Boolean, val initialTaskDelayMs: Long) extends Logging { @@ -349,7 +345,6 @@ class LogManager(logDirs: Seq[File], logDirFailureChannel = logDirFailureChannel, lastShutdownClean = hadCleanShutdown, topicId = None, - keepPartitionMetadataFile = keepPartitionMetadataFile, numRemainingSegments = numRemainingSegments, remoteStorageSystemEnable = remoteStorageSystemEnable) @@ -583,27 +578,13 @@ class LogManager(logDirs: Seq[File], } // visible for testing - @nowarn("cat=deprecation") private[log] def fetchTopicConfigOverrides(defaultConfig: LogConfig, topicNames: Set[String]): Map[String, LogConfig] = { val topicConfigOverrides = mutable.Map[String, LogConfig]() val defaultProps = defaultConfig.originals() topicNames.foreach { topicName => - var overrides = configRepository.topicConfig(topicName) + val overrides = configRepository.topicConfig(topicName) // save memory by only including configs for topics with overrides if (!overrides.isEmpty) { - Option(overrides.getProperty(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)).foreach { versionString => - val messageFormatVersion = new MessageFormatVersion(versionString, interBrokerProtocolVersion.version) - if (messageFormatVersion.shouldIgnore) { - val copy = new Properties() - copy.putAll(overrides) - copy.remove(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG) - overrides = copy - - if (messageFormatVersion.shouldWarn) - warn(messageFormatVersion.topicWarningMessage(topicName)) - } - } - val logConfig = LogConfig.fromProps(defaultProps, overrides) topicConfigOverrides(topicName) = logConfig } @@ -967,8 +948,7 @@ 
class LogManager(logDirs: Seq[File], def updateTopicConfig(topic: String, newTopicConfig: Properties, isRemoteLogStorageSystemEnabled: Boolean, - wasRemoteLogEnabled: Boolean, - fromZK: Boolean): Unit = { + wasRemoteLogEnabled: Boolean): Unit = { topicConfigUpdated(topic) val logs = logsByTopic(topic) // Combine the default properties with the overrides in zk to create the new LogConfig @@ -978,10 +958,6 @@ class LogManager(logDirs: Seq[File], // Otherwise we risk someone creating a tiered-topic, disabling Tiered Storage cluster-wide and the check // failing since the logs for the topic are non-existent. LogConfig.validateRemoteStorageOnlyIfSystemEnabled(newLogConfig.values(), isRemoteLogStorageSystemEnabled, true) - // `remote.log.delete.on.disable` and `remote.log.copy.disable` are unsupported in ZK mode - if (fromZK) { - LogConfig.validateNoInvalidRemoteStorageConfigsInZK(newLogConfig.values()) - } LogConfig.validateTurningOffRemoteStorageWithDelete(newLogConfig.values(), wasRemoteLogEnabled, isRemoteLogStorageEnabled) LogConfig.validateRetentionConfigsWhenRemoteCopyDisabled(newLogConfig.values(), isRemoteLogStorageEnabled) if (logs.nonEmpty) { @@ -1091,7 +1067,6 @@ class LogManager(logDirs: Seq[File], brokerTopicStats = brokerTopicStats, logDirFailureChannel = logDirFailureChannel, topicId = topicId, - keepPartitionMetadataFile = keepPartitionMetadataFile, remoteStorageSystemEnable = remoteStorageSystemEnable) if (isFuture) @@ -1569,8 +1544,7 @@ object LogManager { kafkaScheduler: Scheduler, time: Time, brokerTopicStats: BrokerTopicStats, - logDirFailureChannel: LogDirFailureChannel, - keepPartitionMetadataFile: Boolean): LogManager = { + logDirFailureChannel: LogDirFailureChannel): LogManager = { val defaultProps = config.extractLogConfigMap LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) @@ -1595,7 +1569,6 @@ object LogManager { brokerTopicStats = brokerTopicStats, logDirFailureChannel = logDirFailureChannel, time = time, - keepPartitionMetadataFile = keepPartitionMetadataFile, interBrokerProtocolVersion = config.interBrokerProtocolVersion, remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), initialTaskDelayMs = config.logInitialTaskDelayMs) diff --git a/core/src/main/scala/kafka/log/OffsetResultHolder.scala b/core/src/main/scala/kafka/log/OffsetResultHolder.scala deleted file mode 100644 index 64b78c6cee912..0000000000000 --- a/core/src/main/scala/kafka/log/OffsetResultHolder.scala +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.log - -import org.apache.kafka.common.errors.ApiException -import org.apache.kafka.common.record.FileRecords.TimestampAndOffset - -import java.util.concurrent.{CompletableFuture, Future} - -case class OffsetResultHolder(timestampAndOffsetOpt: Option[TimestampAndOffset], - futureHolderOpt: Option[AsyncOffsetReadFutureHolder[Either[Exception, Option[TimestampAndOffset]]]] = None) { - - var maybeOffsetsError: Option[ApiException] = None - var lastFetchableOffset: Option[Long] = None -} - -/** - * A remote log offset read task future holder. It contains two futures: - * 1. JobFuture - Use this future to cancel the running job. - * 2. TaskFuture - Use this future to get the result of the job/computation. - */ -case class AsyncOffsetReadFutureHolder[T](jobFuture: Future[Void], taskFuture: CompletableFuture[T]) { - -} diff --git a/core/src/main/scala/kafka/log/UnifiedLog.scala b/core/src/main/scala/kafka/log/UnifiedLog.scala index f4ef718f94f8c..a3267e5ec8c50 100644 --- a/core/src/main/scala/kafka/log/UnifiedLog.scala +++ b/core/src/main/scala/kafka/log/UnifiedLog.scala @@ -17,7 +17,6 @@ package kafka.log -import kafka.common.{OffsetsOutOfOrderException, UnexpectedAppendOffsetException} import kafka.log.remote.RemoteLogManager import kafka.utils._ import org.apache.kafka.common.errors._ @@ -30,17 +29,16 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_ import org.apache.kafka.common.requests.ProduceResponse.RecordError import org.apache.kafka.common.utils.{PrimitiveRef, Time, Utils} import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition, Uuid} -import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch, RequestLocal} -import org.apache.kafka.server.common.MetadataVersion.IBP_0_10_0_IV0 +import org.apache.kafka.server.common.{OffsetAndEpoch, RequestLocal} import org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.record.BrokerCompressionType -import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffsetException} import org.apache.kafka.server.util.Scheduler import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache import org.apache.kafka.storage.internals.log.LocalLog.SplitSegmentResult -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, BatchMetadata, CompletedTxn, FetchDataInfo, LastRecord, LeaderHwChange, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogValidator, ProducerAppendInfo, ProducerStateManager, ProducerStateManagerConfig, RollParams, SegmentDeletionReason, VerificationGuard, UnifiedLog => JUnifiedLog} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, BatchMetadata, CompletedTxn, FetchDataInfo, LastRecord, LeaderHwChange, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogValidator, OffsetResultHolder, OffsetsOutOfOrderException, ProducerAppendInfo, ProducerStateManager, ProducerStateManagerConfig, RollParams, 
SegmentDeletionReason, VerificationGuard, UnifiedLog => JUnifiedLog} import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats} import java.io.{File, IOException} @@ -50,7 +48,6 @@ import java.util import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, ScheduledFuture} import java.util.stream.Collectors import java.util.{Collections, Optional, OptionalInt, OptionalLong} -import scala.annotation.nowarn import scala.collection.mutable.{ArrayBuffer, ListBuffer} import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ @@ -88,13 +85,6 @@ import scala.jdk.OptionConverters.{RichOption, RichOptional, RichOptionalInt} * @param _topicId optional Uuid to specify the topic ID for the topic if it exists. Should only be specified when * first creating the log through Partition.makeLeader or Partition.makeFollower. When reloading a log, * this field will be populated by reading the topic ID value from partition.metadata if it exists. - * @param keepPartitionMetadataFile boolean flag to indicate whether the partition.metadata file should be kept in the - * log directory. A partition.metadata file is only created when the raft controller is used - * or the ZK controller and this broker's inter-broker protocol version is at least 2.8. - * This file will persist the topic ID on the broker. If inter-broker protocol for a ZK controller - * is downgraded below 2.8, a topic ID may be lost and a new ID generated upon re-upgrade. - * If the inter-broker protocol version on a ZK cluster is below 2.8, partition.metadata - * will be deleted to avoid ID conflicts upon re-upgrade. * @param remoteStorageSystemEnable flag to indicate whether the system level remote log storage is enabled or not. */ @threadsafe @@ -102,10 +92,9 @@ class UnifiedLog(@volatile var logStartOffset: Long, private val localLog: LocalLog, val brokerTopicStats: BrokerTopicStats, val producerIdExpirationCheckIntervalMs: Int, - @volatile var leaderEpochCache: Option[LeaderEpochFileCache], + @volatile var leaderEpochCache: LeaderEpochFileCache, val producerStateManager: ProducerStateManager, @volatile private var _topicId: Option[Uuid], - val keepPartitionMetadataFile: Boolean, val remoteStorageSystemEnable: Boolean = false, @volatile private var logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER) extends Logging with AutoCloseable { @@ -193,40 +182,26 @@ class UnifiedLog(@volatile var logStartOffset: Long, /** * Initialize topic ID information for the log by maintaining the partition metadata file and setting the in-memory _topicId. - * Delete partition metadata file if the version does not support topic IDs. * Set _topicId based on a few scenarios: - * - Recover topic ID if present and topic IDs are supported. Ensure we do not try to assign a provided topicId that is inconsistent + * - Recover topic ID if present. Ensure we do not try to assign a provided topicId that is inconsistent * with the ID on file. - * - If we were provided a topic ID when creating the log, partition metadata files are supported, and one does not yet exist + * - If we were provided a topic ID when creating the log and one does not yet exist * set _topicId and write to the partition metadata file. 
- * - Otherwise set _topicId to None */ private def initializeTopicId(): Unit = { val partMetadataFile = partitionMetadataFile.getOrElse( throw new KafkaException("The partitionMetadataFile should have been initialized")) if (partMetadataFile.exists()) { - if (keepPartitionMetadataFile) { - val fileTopicId = partMetadataFile.read().topicId - if (_topicId.isDefined && !_topicId.contains(fileTopicId)) - throw new InconsistentTopicIdException(s"Tried to assign topic ID $topicId to log for topic partition $topicPartition," + - s"but log already contained topic ID $fileTopicId") - - _topicId = Some(fileTopicId) + val fileTopicId = partMetadataFile.read().topicId + if (_topicId.isDefined && !_topicId.contains(fileTopicId)) + throw new InconsistentTopicIdException(s"Tried to assign topic ID $topicId to log for topic partition $topicPartition," + + s"but log already contained topic ID $fileTopicId") - } else { - try partMetadataFile.delete() - catch { - case e: IOException => - error(s"Error while trying to delete partition metadata file $partMetadataFile", e) - } - } - } else if (keepPartitionMetadataFile) { + _topicId = Some(fileTopicId) + } else { _topicId.foreach(partMetadataFile.record) scheduler.scheduleOnce("flush-metadata-file", () => maybeFlushMetadataFile()) - } else { - // We want to keep the file and the in-memory topic ID in sync. - _topicId = None } } @@ -255,10 +230,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, def updateConfig(newConfig: LogConfig): LogConfig = { val oldConfig = localLog.config localLog.updateConfig(newConfig) - val oldRecordVersion = oldConfig.recordVersion - val newRecordVersion = newConfig.recordVersion - if (newRecordVersion != oldRecordVersion) - initializeLeaderEpochCache() oldConfig } @@ -481,8 +452,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, updateHighWatermark(localLog.logEndOffsetMetadata) } - private def recordVersion: RecordVersion = config.recordVersion - private def initializePartitionMetadata(): Unit = lock synchronized { val partitionMetadata = PartitionMetadataFile.newFile(dir) partitionMetadataFile = Some(new PartitionMetadataFile(partitionMetadata, logDirFailureChannel)) @@ -502,24 +471,22 @@ class UnifiedLog(@volatile var logStartOffset: Long, } case None => - if (keepPartitionMetadataFile) { - _topicId = Some(topicId) - partitionMetadataFile match { - case Some(partMetadataFile) => - if (!partMetadataFile.exists()) { - partMetadataFile.record(topicId) - scheduler.scheduleOnce("flush-metadata-file", () => maybeFlushMetadataFile()) - } - case _ => warn(s"The topic id $topicId will not be persisted to the partition metadata file " + - "since the partition is deleted") - } + _topicId = Some(topicId) + partitionMetadataFile match { + case Some(partMetadataFile) => + if (!partMetadataFile.exists()) { + partMetadataFile.record(topicId) + scheduler.scheduleOnce("flush-metadata-file", () => maybeFlushMetadataFile()) + } + case _ => warn(s"The topic id $topicId will not be persisted to the partition metadata file " + + "since the partition is deleted") } } } - private def initializeLeaderEpochCache(): Unit = lock synchronized { - leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - dir, topicPartition, logDirFailureChannel, recordVersion, logIdent, leaderEpochCache, scheduler) + private def reinitializeLeaderEpochCache(): Unit = lock synchronized { + leaderEpochCache = UnifiedLog.createLeaderEpochCache( + dir, topicPartition, logDirFailureChannel, Option.apply(leaderEpochCache), scheduler) } private def 
updateHighWatermarkWithLogEndOffset(): Unit = { @@ -553,7 +520,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, private def rebuildProducerState(lastOffset: Long, producerStateManager: ProducerStateManager): Unit = lock synchronized { localLog.checkIfMemoryMappedBufferClosed() - JUnifiedLog.rebuildProducerState(producerStateManager, localLog.segments, logStartOffset, lastOffset, recordVersion, time, false, logIdent) + JUnifiedLog.rebuildProducerState(producerStateManager, localLog.segments, logStartOffset, lastOffset, time, false, logIdent) } @threadsafe @@ -603,7 +570,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, * Creation starts the verification process. Otherwise return the sentinel VerificationGuard. */ def maybeStartTransactionVerification(producerId: Long, sequence: Int, epoch: Short): VerificationGuard = lock synchronized { - if (hasOngoingTransaction(producerId)) + if (hasOngoingTransaction(producerId, epoch)) VerificationGuard.SENTINEL else maybeCreateVerificationGuard(producerId, sequence, epoch) @@ -629,10 +596,11 @@ class UnifiedLog(@volatile var logStartOffset: Long, /** * Return true if the given producer ID has a transaction ongoing. + * Note, if the incoming producer epoch is newer than the stored one, the transaction may have finished. */ - def hasOngoingTransaction(producerId: Long): Boolean = lock synchronized { + def hasOngoingTransaction(producerId: Long, producerEpoch: Short): Boolean = lock synchronized { val entry = producerStateManager.activeProducers.get(producerId) - entry != null && entry.currentTxnFirstOffset.isPresent + entry != null && entry.currentTxnFirstOffset.isPresent && entry.producerEpoch() == producerEpoch } /** @@ -680,10 +648,10 @@ class UnifiedLog(@volatile var logStartOffset: Long, if (shouldReinitialize) { // re-initialize leader epoch cache so that LeaderEpochCheckpointFile.checkpoint can correctly reference // the checkpoint file in renamed log directory - initializeLeaderEpochCache() + reinitializeLeaderEpochCache() initializePartitionMetadata() } else { - leaderEpochCache = None + leaderEpochCache.clear() partitionMetadataFile = None } } @@ -706,7 +674,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, * * @param records The records to append * @param origin Declares the origin of the append which affects required validations - * @param interBrokerProtocolVersion Inter-broker message protocol version * @param requestLocal request local instance * @throws KafkaStorageException If the append fails due to an I/O error. * @return Information about the appended messages including the first and last offset. @@ -714,11 +681,22 @@ class UnifiedLog(@volatile var logStartOffset: Long, def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin = AppendOrigin.CLIENT, - interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latestProduction, requestLocal: RequestLocal = RequestLocal.noCaching, verificationGuard: VerificationGuard = VerificationGuard.SENTINEL): LogAppendInfo = { val validateAndAssignOffsets = origin != AppendOrigin.RAFT_LEADER - append(records, origin, interBrokerProtocolVersion, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), verificationGuard, ignoreRecordSize = false) + append(records, origin, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), verificationGuard, ignoreRecordSize = false) + } + + /** + * Even though we always write to disk with record version v2 since Apache Kafka 4.0, older record versions may have + * been persisted to disk before that. 
In order to test such scenarios, we need the ability to append with older + * record versions. This method exists for that purpose and hence it should only be used from test code. + * + * Also see #appendAsLeader. + */ + private[log] def appendAsLeaderWithRecordVersion(records: MemoryRecords, leaderEpoch: Int, recordVersion: RecordVersion): LogAppendInfo = { + append(records, AppendOrigin.CLIENT, true, leaderEpoch, Some(RequestLocal.noCaching), + VerificationGuard.SENTINEL, ignoreRecordSize = false, recordVersion.value) } /** @@ -731,7 +709,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, def appendAsFollower(records: MemoryRecords): LogAppendInfo = { append(records, origin = AppendOrigin.REPLICATION, - interBrokerProtocolVersion = MetadataVersion.latestProduction, validateAndAssignOffsets = false, leaderEpoch = -1, requestLocal = None, @@ -748,7 +725,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, * * @param records The log records to append * @param origin Declares the origin of the append which affects required validations - * @param interBrokerProtocolVersion Inter-broker message protocol version * @param validateAndAssignOffsets Should the log assign offsets to this message set or blindly apply what it is given * @param leaderEpoch The partition's leader epoch which will be applied to messages when offsets are assigned on the leader * @param requestLocal The request local instance if validateAndAssignOffsets is true @@ -760,12 +736,12 @@ class UnifiedLog(@volatile var logStartOffset: Long, */ private def append(records: MemoryRecords, origin: AppendOrigin, - interBrokerProtocolVersion: MetadataVersion, validateAndAssignOffsets: Boolean, leaderEpoch: Int, requestLocal: Option[RequestLocal], verificationGuard: VerificationGuard, - ignoreRecordSize: Boolean): LogAppendInfo = { + ignoreRecordSize: Boolean, + toMagic: Byte = RecordBatch.CURRENT_MAGIC_VALUE): LogAppendInfo = { // We want to ensure the partition metadata file is written to the log dir before any log data is written to disk. // This will ensure that any log data can be recovered with the correct topic ID in the case of failure. 
maybeFlushMetadataFile() @@ -795,13 +771,12 @@ class UnifiedLog(@volatile var logStartOffset: Long, appendInfo.sourceCompression, targetCompression, config.compact, - config.recordVersion.value, + toMagic, config.messageTimestampType, config.messageTimestampBeforeMaxMs, config.messageTimestampAfterMaxMs, leaderEpoch, - origin, - interBrokerProtocolVersion + origin ) validator.validateMessagesAndAssignOffsets(offset, validatorMetricsRecorder, @@ -816,7 +791,6 @@ class UnifiedLog(@volatile var logStartOffset: Long, validRecords = validateAndOffsetAssignResult.validatedRecords appendInfo.setMaxTimestamp(validateAndOffsetAssignResult.maxTimestampMs) - appendInfo.setShallowOffsetOfMaxTimestamp(validateAndOffsetAssignResult.shallowOffsetOfMaxTimestamp) appendInfo.setLastOffset(offset.value - 1) appendInfo.setRecordValidationStats(validateAndOffsetAssignResult.recordValidationStats) if (config.messageTimestampType == TimestampType.LOG_APPEND_TIME) @@ -858,14 +832,14 @@ class UnifiedLog(@volatile var logStartOffset: Long, // update the epoch cache with the epoch stamped onto the message by the leader validRecords.batches.forEach { batch => if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) { - maybeAssignEpochStartOffset(batch.partitionLeaderEpoch, batch.baseOffset) + assignEpochStartOffset(batch.partitionLeaderEpoch, batch.baseOffset) } else { // In partial upgrade scenarios, we may get a temporary regression to the message format. In // order to ensure the safety of leader election, we clear the epoch cache so that we revert // to truncation by high watermark after the next leader election. - leaderEpochCache.filter(_.nonEmpty).foreach { cache => + if (leaderEpochCache.nonEmpty) { warn(s"Clearing leader epoch cache after unexpected append with message format v${batch.magic}") - cache.clearAndFlush() + leaderEpochCache.clearAndFlush() } } } @@ -902,7 +876,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, // will be cleaned up after the log directory is recovered. Note that the end offset of the // ProducerStateManager will not be updated and the last stable offset will not advance // if the append to the transaction index fails. 
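One behavioural nuance in the hunks above: `hasOngoingTransaction` now also compares the incoming producer epoch with the stored one, so an epoch bump is treated as evidence that any previously tracked transaction has finished. A rough sketch of that guard, using a hypothetical stand-in for the producer state entry rather than the real `ProducerStateEntry` API:

```scala
// Hypothetical, simplified stand-in for the broker's per-producer state.
final case class ProducerEntry(producerEpoch: Short, currentTxnFirstOffset: Option[Long])

def hasOngoingTransaction(entry: Option[ProducerEntry], incomingEpoch: Short): Boolean =
  entry.exists(e => e.currentTxnFirstOffset.isDefined && e.producerEpoch == incomingEpoch)

// A batch from a newer epoch therefore sees "no ongoing transaction" and must pass
// verification again instead of trusting state left over from the previous epoch.
```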
- localLog.append(appendInfo.lastOffset, appendInfo.maxTimestamp, appendInfo.shallowOffsetOfMaxTimestamp, validRecords) + localLog.append(appendInfo.lastOffset, validRecords) updateHighWatermarkWithLogEndOffset() // update the producer state @@ -936,23 +910,18 @@ class UnifiedLog(@volatile var logStartOffset: Long, } } - def maybeAssignEpochStartOffset(leaderEpoch: Int, startOffset: Long): Unit = { - leaderEpochCache.foreach { cache => - cache.assign(leaderEpoch, startOffset) - } - } + def assignEpochStartOffset(leaderEpoch: Int, startOffset: Long): Unit = + leaderEpochCache.assign(leaderEpoch, startOffset) - def latestEpoch: Option[Int] = leaderEpochCache.flatMap(_.latestEpoch.toScala) + def latestEpoch: Option[Int] = leaderEpochCache.latestEpoch.toScala def endOffsetForEpoch(leaderEpoch: Int): Option[OffsetAndEpoch] = { - leaderEpochCache.flatMap { cache => - val entry = cache.endOffsetFor(leaderEpoch, logEndOffset) - val (foundEpoch, foundOffset) = (entry.getKey, entry.getValue) - if (foundOffset == UNDEFINED_EPOCH_OFFSET) - None - else - Some(new OffsetAndEpoch(foundOffset, foundEpoch)) - } + val entry = leaderEpochCache.endOffsetFor(leaderEpoch, logEndOffset) + val (foundEpoch, foundOffset) = (entry.getKey, entry.getValue) + if (foundOffset == UNDEFINED_EPOCH_OFFSET) + None + else + Some(new OffsetAndEpoch(foundOffset, foundEpoch)) } private def maybeIncrementFirstUnstableOffset(): Unit = lock synchronized { @@ -1012,7 +981,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, updatedLogStartOffset = true updateLogStartOffset(newLogStartOffset) info(s"Incremented log start offset to $newLogStartOffset due to $reason") - leaderEpochCache.foreach(_.truncateFromStartAsyncFlush(logStartOffset)) + leaderEpochCache.truncateFromStartAsyncFlush(logStartOffset) producerStateManager.onLogStartOffsetIncremented(newLogStartOffset) maybeIncrementFirstUnstableOffset() } @@ -1061,7 +1030,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, // transaction is completed or aborted. We can guarantee the transaction coordinator knows about the transaction given step 1 and that the transaction is still // ongoing. If the transaction is expected to be ongoing, we will not set a VerificationGuard. If the transaction is aborted, hasOngoingTransaction is false and // requestVerificationGuard is the sentinel, so we will throw an error. A subsequent produce request (retry) should create verification state and return to phase 1. - if (batch.isTransactional && !hasOngoingTransaction(batch.producerId) && batchMissingRequiredVerification(batch, requestVerificationGuard)) + if (batch.isTransactional && !hasOngoingTransaction(batch.producerId, batch.producerEpoch()) && batchMissingRequiredVerification(batch, requestVerificationGuard)) throw new InvalidTxnStateException("Record was not part of an ongoing transaction") } @@ -1188,7 +1157,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, else OptionalInt.empty() - new LogAppendInfo(firstOffset, lastOffset, lastLeaderEpochOpt, maxTimestamp, shallowOffsetOfMaxTimestamp, + new LogAppendInfo(firstOffset, lastOffset, lastLeaderEpochOpt, maxTimestamp, RecordBatch.NO_TIMESTAMP, logStartOffset, RecordValidationStats.EMPTY, sourceCompression, validBytesCount, lastOffsetOfFirstBatch, Collections.emptyList[RecordError], LeaderHwChange.NONE) } @@ -1269,71 +1238,46 @@ class UnifiedLog(@volatile var logStartOffset: Long, *

        • All special timestamp offset results are returned immediately irrespective of the remote storage. * */ - @nowarn("cat=deprecation") def fetchOffsetByTimestamp(targetTimestamp: Long, remoteLogManager: Option[RemoteLogManager] = None): OffsetResultHolder = { maybeHandleIOException(s"Error while fetching offset by timestamp for $topicPartition in dir ${dir.getParent}") { debug(s"Searching offset for timestamp $targetTimestamp") - if (config.messageFormatVersion.isLessThan(IBP_0_10_0_IV0) && - targetTimestamp != ListOffsetsRequest.EARLIEST_TIMESTAMP && - targetTimestamp != ListOffsetsRequest.LATEST_TIMESTAMP) - throw new UnsupportedForMessageFormatException(s"Cannot search offsets based on timestamp because message format version " + - s"for partition $topicPartition is ${config.messageFormatVersion} which is earlier than the minimum " + - s"required version $IBP_0_10_0_IV0") - // For the earliest and latest, we do not need to return the timestamp. if (targetTimestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP || (!remoteLogEnabled() && targetTimestamp == ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)) { // The first cached epoch usually corresponds to the log start offset, but we have to verify this since // it may not be true following a message format version bump as the epoch will not be available for // log entries written in the older format. - val earliestEpochEntry = leaderEpochCache.toJava.flatMap(_.earliestEntry()) + val earliestEpochEntry = leaderEpochCache.earliestEntry() val epochOpt = if (earliestEpochEntry.isPresent && earliestEpochEntry.get().startOffset <= logStartOffset) { Optional.of[Integer](earliestEpochEntry.get().epoch) } else Optional.empty[Integer]() - OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logStartOffset, epochOpt))) + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logStartOffset, epochOpt)) } else if (targetTimestamp == ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) { val curLocalLogStartOffset = localLogStartOffset() - val epochResult: Optional[Integer] = - if (leaderEpochCache.isDefined) { - val epochOpt = leaderEpochCache.get.epochForOffset(curLocalLogStartOffset) - if (epochOpt.isPresent) Optional.of(epochOpt.getAsInt) else Optional.empty() - } else { - Optional.empty() - } + val epochResult: Optional[Integer] = { + val epochOpt = leaderEpochCache.epochForOffset(curLocalLogStartOffset) + if (epochOpt.isPresent) Optional.of(epochOpt.getAsInt) else Optional.empty() + } - OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curLocalLogStartOffset, epochResult))) + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curLocalLogStartOffset, epochResult)) } else if (targetTimestamp == ListOffsetsRequest.LATEST_TIMESTAMP) { - val epoch = leaderEpochCache match { - case Some(cache) => - val latestEpoch = cache.latestEpoch() - if (latestEpoch.isPresent) Optional.of[Integer](latestEpoch.getAsInt) else Optional.empty[Integer]() - case None => Optional.empty[Integer]() - } - OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logEndOffset, epoch))) + val latestEpoch = leaderEpochCache.latestEpoch() + val epoch = if (latestEpoch.isPresent) Optional.of[Integer](latestEpoch.getAsInt) else Optional.empty[Integer]() + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logEndOffset, epoch)) } else if (targetTimestamp == ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) { if (remoteLogEnabled()) { val curHighestRemoteOffset = 
highestOffsetInRemoteStorage() - + val epochOpt = leaderEpochCache.epochForOffset(curHighestRemoteOffset) val epochResult: Optional[Integer] = - if (leaderEpochCache.isDefined) { - val epochOpt = leaderEpochCache.get.epochForOffset(curHighestRemoteOffset) - if (epochOpt.isPresent) { - Optional.of(epochOpt.getAsInt) - } else if (curHighestRemoteOffset == -1) { - Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH) - } else { - Optional.empty() - } - } else { - Optional.empty() - } - - OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curHighestRemoteOffset, epochResult))) + if (epochOpt.isPresent) Optional.of(epochOpt.getAsInt) + else if (curHighestRemoteOffset == -1) Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH) + else Optional.empty() + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curHighestRemoteOffset, epochResult)) } else { - OffsetResultHolder(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, -1L, Optional.of(-1)))) + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, -1L, Optional.of(-1))) } } else if (targetTimestamp == ListOffsetsRequest.MAX_TIMESTAMP) { // Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides @@ -1347,22 +1291,20 @@ class UnifiedLog(@volatile var logStartOffset: Long, .find(_.maxTimestamp() == maxTimestampSoFar.timestamp) .flatMap(batch => batch.offsetOfMaxTimestamp().toScala.map(new TimestampAndOffset(batch.maxTimestamp(), _, Optional.of[Integer](batch.partitionLeaderEpoch()).filter(_ >= 0)))) - OffsetResultHolder(timestampAndOffsetOpt) + new OffsetResultHolder(timestampAndOffsetOpt.toJava) } else { // We need to search the first segment whose largest timestamp is >= the target timestamp if there is one. if (remoteLogEnabled() && !isEmpty) { if (remoteLogManager.isEmpty) { throw new KafkaException("RemoteLogManager is empty even though the remote log storage is enabled.") } - if (recordVersion.value < RecordVersion.V2.value) { - throw new KafkaException("Tiered storage is supported only with versions supporting leader epochs, that means RecordVersion must be >= 2.") - } val asyncOffsetReadFutureHolder = remoteLogManager.get.asyncOffsetRead(topicPartition, targetTimestamp, - logStartOffset, leaderEpochCache.get, () => searchOffsetInLocalLog(targetTimestamp, localLogStartOffset())) - OffsetResultHolder(None, Some(asyncOffsetReadFutureHolder)) + logStartOffset, leaderEpochCache, () => searchOffsetInLocalLog(targetTimestamp, localLogStartOffset())) + + new OffsetResultHolder(Optional.empty(), Optional.of(asyncOffsetReadFutureHolder)) } else { - OffsetResultHolder(searchOffsetInLocalLog(targetTimestamp, logStartOffset)) + new OffsetResultHolder(searchOffsetInLocalLog(targetTimestamp, logStartOffset).toJava) } } } @@ -1786,7 +1728,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, lock synchronized { localLog.checkIfMemoryMappedBufferClosed() producerExpireCheck.cancel(true) - leaderEpochCache.foreach(_.clear()) + leaderEpochCache.clear() val deletedSegments = localLog.deleteAllSegments() deleteProducerSnapshots(deletedSegments, asyncDelete = false) localLog.deleteEmptyDir() @@ -1839,7 +1781,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, // and inserted the first start offset entry, but then failed to append any entries // before another leader was elected. 
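The reworked `fetchOffsetByTimestamp` hunks lean on `scala.jdk.OptionConverters` to bridge the Scala call sites and the Java-side `OffsetResultHolder` and `LeaderEpochFileCache` types. A small standalone sketch of those conversions (not taken from the patch):

```scala
import java.util.Optional
import scala.jdk.OptionConverters.{RichOption, RichOptional}

val scalaSide: Option[Long] = Some(42L)
val javaSide: Optional[Long] = scalaSide.toJava // Option -> java.util.Optional
val backAgain: Option[Long] = javaSide.toScala  // Optional -> Option

// Absent values round-trip as expected.
assert(Option.empty[Long].toJava == Optional.empty[Long]())
```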
lock synchronized { - leaderEpochCache.foreach(_.truncateFromEndAsyncFlush(logEndOffset)) + leaderEpochCache.truncateFromEndAsyncFlush(logEndOffset) } false @@ -1852,7 +1794,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, } else { val deletedSegments = localLog.truncateTo(targetOffset) deleteProducerSnapshots(deletedSegments, asyncDelete = true) - leaderEpochCache.foreach(_.truncateFromEndAsyncFlush(targetOffset)) + leaderEpochCache.truncateFromEndAsyncFlush(targetOffset) logStartOffset = math.min(targetOffset, logStartOffset) rebuildProducerState(targetOffset, producerStateManager) if (highWatermark >= localLog.logEndOffset) @@ -1876,7 +1818,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, debug(s"Truncate and start at offset $newOffset, logStartOffset: ${logStartOffsetOpt.getOrElse(newOffset)}") lock synchronized { localLog.truncateFullyAndStartAt(newOffset) - leaderEpochCache.foreach(_.clearAndFlush()) + leaderEpochCache.clearAndFlush() producerStateManager.truncateFullyAndStartAt(newOffset) logStartOffset = logStartOffsetOpt.getOrElse(newOffset) if (remoteLogEnabled()) _localLogStartOffset = newOffset @@ -2022,7 +1964,6 @@ object UnifiedLog extends Logging { logDirFailureChannel: LogDirFailureChannel, lastShutdownClean: Boolean = true, topicId: Option[Uuid], - keepPartitionMetadataFile: Boolean, numRemainingSegments: ConcurrentMap[String, Integer] = new ConcurrentHashMap[String, Integer], remoteStorageSystemEnable: Boolean = false, logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER): UnifiedLog = { @@ -2033,12 +1974,10 @@ object UnifiedLog extends Logging { // The created leaderEpochCache will be truncated by LogLoader if necessary // so it is guaranteed that the epoch entries will be correct even when on-disk // checkpoint was stale (due to async nature of LeaderEpochFileCache#truncateFromStart/End). - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( dir, topicPartition, logDirFailureChannel, - config.recordVersion, - s"[UnifiedLog partition=$topicPartition, dir=${dir.getParent}] ", None, scheduler) val producerStateManager = new ProducerStateManager(topicPartition, dir, @@ -2055,7 +1994,7 @@ object UnifiedLog extends Logging { segments, logStartOffset, recoveryPoint, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, numRemainingSegments, isRemoteLogEnabled, @@ -2069,7 +2008,6 @@ object UnifiedLog extends Logging { leaderEpochCache, producerStateManager, topicId, - keepPartitionMetadataFile, remoteStorageSystemEnable, logOffsetsListener) } @@ -2091,40 +2029,24 @@ object UnifiedLog extends Logging { def parseTopicPartitionName(dir: File): TopicPartition = LocalLog.parseTopicPartitionName(dir) /** - * If the recordVersion is >= RecordVersion.V2, create a new LeaderEpochFileCache instance. - * Loading the epoch entries from the backing checkpoint file or the provided currentCache if not empty. - * Otherwise, the message format is considered incompatible and the existing LeaderEpoch file - * is deleted. + * Create a new LeaderEpochFileCache instance and load the epoch entries from the backing checkpoint file or + * the provided currentCache (if not empty). 
* * @param dir The directory in which the log will reside * @param topicPartition The topic partition * @param logDirFailureChannel The LogDirFailureChannel to asynchronously handle log dir failure - * @param recordVersion The record version - * @param logPrefix The logging prefix * @param currentCache The current LeaderEpochFileCache instance (if any) * @param scheduler The scheduler for executing asynchronous tasks * @return The new LeaderEpochFileCache instance (if created), none otherwise */ - def maybeCreateLeaderEpochCache(dir: File, - topicPartition: TopicPartition, - logDirFailureChannel: LogDirFailureChannel, - recordVersion: RecordVersion, - logPrefix: String, - currentCache: Option[LeaderEpochFileCache], - scheduler: Scheduler): Option[LeaderEpochFileCache] = { + def createLeaderEpochCache(dir: File, + topicPartition: TopicPartition, + logDirFailureChannel: LogDirFailureChannel, + currentCache: Option[LeaderEpochFileCache], + scheduler: Scheduler): LeaderEpochFileCache = { val leaderEpochFile = LeaderEpochCheckpointFile.newFile(dir) - - if (recordVersion.precedes(RecordVersion.V2)) { - if (leaderEpochFile.exists()) { - warn(s"${logPrefix}Deleting non-empty leader epoch cache due to incompatible message format $recordVersion") - } - Files.deleteIfExists(leaderEpochFile.toPath) - None - } else { - val checkpointFile = new LeaderEpochCheckpointFile(leaderEpochFile, logDirFailureChannel) - currentCache.map(_.withCheckpoint(checkpointFile)) - .orElse(Some(new LeaderEpochFileCache(topicPartition, checkpointFile, scheduler))) - } + val checkpointFile = new LeaderEpochCheckpointFile(leaderEpochFile, logDirFailureChannel) + currentCache.map(_.withCheckpoint(checkpointFile)).getOrElse(new LeaderEpochFileCache(topicPartition, checkpointFile, scheduler)) } private[log] def replaceSegments(existingSegments: LogSegments, diff --git a/core/src/main/scala/kafka/network/RequestChannel.scala b/core/src/main/scala/kafka/network/RequestChannel.scala index 44f5e926eb905..aba46474867f8 100644 --- a/core/src/main/scala/kafka/network/RequestChannel.scala +++ b/core/src/main/scala/kafka/network/RequestChannel.scala @@ -48,7 +48,12 @@ object RequestChannel extends Logging { private val ResponseQueueSizeMetric = "ResponseQueueSize" val ProcessorMetricTag = "processor" - private def isRequestLoggingEnabled: Boolean = requestLogger.underlying.isDebugEnabled + /** + * Deprecated protocol apis are logged at info level while the rest are logged at debug level. + * That makes it possible to enable the former without enabling latter. + */ + private def isRequestLoggingEnabled(header: RequestHeader): Boolean = requestLogger.underlying.isDebugEnabled || + (requestLogger.underlying.isInfoEnabled && header.isApiVersionDeprecated()) sealed trait BaseRequest case object ShutdownRequest extends BaseRequest @@ -84,7 +89,7 @@ object RequestChannel extends Logging { // This is constructed on creation of a Request so that the JSON representation is computed before the request is // processed by the api layer. Otherwise, a ProduceRequest can occur without its data (ie. it goes into purgatory). 
val requestLog: Option[JsonNode] = - if (RequestChannel.isRequestLoggingEnabled) Some(RequestConvertToJson.request(loggableRequest)) + if (RequestChannel.isRequestLoggingEnabled(context.header)) Some(RequestConvertToJson.request(loggableRequest)) else None def header: RequestHeader = context.header @@ -128,7 +133,7 @@ object RequestChannel extends Logging { } def responseNode(response: AbstractResponse): Option[JsonNode] = { - if (RequestChannel.isRequestLoggingEnabled) + if (RequestChannel.isRequestLoggingEnabled(context.header)) Some(RequestConvertToJson.response(response, context.apiVersion)) else None @@ -249,14 +254,19 @@ object RequestChannel extends Logging { // the total time spent on authentication, which may be significant for SASL/SSL. recordNetworkThreadTimeCallback.foreach(record => record.accept(networkThreadTimeNanos)) - if (isRequestLoggingEnabled) { + if (isRequestLoggingEnabled(header)) { val desc = RequestConvertToJson.requestDescMetrics(header, requestLog.toJava, response.responseLog.toJava, context, session, isForwarded, totalTimeMs, requestQueueTimeMs, apiLocalTimeMs, apiRemoteTimeMs, apiThrottleTimeMs, responseQueueTimeMs, responseSendTimeMs, temporaryMemoryBytes, messageConversionsTimeMs) - requestLogger.debug("Completed request:" + desc.toString) + val logPrefix = "Completed request: {}" + // log deprecated apis at `info` level to allow them to be selectively enabled + if (header.isApiVersionDeprecated()) + requestLogger.info(logPrefix, desc) + else + requestLogger.debug(logPrefix, desc) } } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index f706a8dff9572..d0a2c4e811d2f 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -33,7 +33,7 @@ import kafka.server.{ApiVersionManager, BrokerReconfigurable, KafkaConfig} import org.apache.kafka.common.message.ApiMessageType.ListenerType import kafka.utils._ import org.apache.kafka.common.config.ConfigException -import org.apache.kafka.common.errors.InvalidRequestException +import org.apache.kafka.common.errors.{InvalidRequestException, UnsupportedVersionException} import org.apache.kafka.common.memory.{MemoryPool, SimpleMemoryPool} import org.apache.kafka.common.metrics._ import org.apache.kafka.common.metrics.stats.{Avg, CumulativeSum, Meter, Rate} @@ -69,13 +69,6 @@ import scala.util.control.ControlThrowable * It is possible to configure multiple data-planes by specifying multiple "," separated endpoints for "listeners" in KafkaConfig. * Acceptor has N Processor threads that each have their own selector and read requests from sockets * M Handler threads that handle requests and produce responses back to the processor threads for writing. - * - control-plane : - * - Handles requests from controller. This is optional and can be configured by specifying "control.plane.listener.name". - * If not configured, the controller requests are handled by the data-plane. - * - The threading model is - * 1 Acceptor thread that handles new connections - * Acceptor has 1 Processor thread that has its own selector and read requests from the socket. - * 1 Handler thread that handles requests and produces responses back to the processor thread for writing. 
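The practical effect of the `RequestChannel` change above: requests that use deprecated protocol API versions are logged at INFO on the request logger while everything else stays at DEBUG, so operators can surface only the deprecated traffic. A condensed sketch of that level selection (assuming an SLF4J-style logger; the real code builds the description via `RequestConvertToJson`):

```scala
import org.slf4j.Logger

// INFO-level config on the request logger then surfaces only deprecated-API requests,
// while DEBUG still captures every completed request.
def logCompletedRequest(logger: Logger, apiVersionDeprecated: Boolean, desc: String): Unit =
  if (apiVersionDeprecated) logger.info("Completed request: {}", desc)
  else logger.debug("Completed request: {}", desc)
```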
*/ class SocketServer( val config: KafkaConfig, @@ -105,10 +98,6 @@ class SocketServer( // data-plane private[network] val dataPlaneAcceptors = new ConcurrentHashMap[EndPoint, DataPlaneAcceptor]() val dataPlaneRequestChannel = new RequestChannel(maxQueuedRequests, DataPlaneAcceptor.MetricPrefix, time, apiVersionManager.newRequestMetrics) - // control-plane - private[network] var controlPlaneAcceptorOpt: Option[ControlPlaneAcceptor] = None - val controlPlaneRequestChannelOpt: Option[RequestChannel] = config.controlPlaneListenerName.map(_ => - new RequestChannel(20, ControlPlaneAcceptor.MetricPrefix, time, apiVersionManager.newRequestMetrics)) private[this] val nextProcessorId: AtomicInteger = new AtomicInteger(0) val connectionQuotas = new ConnectionQuotas(config, time, metrics) @@ -137,17 +126,7 @@ class SocketServer( }.sum / dataPlaneProcessors.size } }) - if (config.requiresZookeeper) { - metricsGroup.newGauge(s"${ControlPlaneAcceptor.MetricPrefix}NetworkProcessorAvgIdlePercent", () => SocketServer.this.synchronized { - val controlPlaneProcessorOpt = controlPlaneAcceptorOpt.map(a => a.processors(0)) - val ioWaitRatioMetricName = controlPlaneProcessorOpt.map { p => - metrics.metricName("io-wait-ratio", MetricsGroup, p.metricTags) - } - ioWaitRatioMetricName.map { metricName => - Option(metrics.metric(metricName)).fold(0.0)(m => Math.min(m.metricValue.asInstanceOf[Double], 1.0)) - }.getOrElse(Double.NaN) - }) - } + metricsGroup.newGauge("MemoryPoolAvailable", () => memoryPool.availableMemory) metricsGroup.newGauge("MemoryPoolUsed", () => memoryPool.size() - memoryPool.availableMemory) metricsGroup.newGauge(s"${DataPlaneAcceptor.MetricPrefix}ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized { @@ -159,17 +138,6 @@ class SocketServer( Option(metrics.metric(metricName)).fold(0.0)(m => m.metricValue.asInstanceOf[Double]) }.sum }) - if (config.requiresZookeeper) { - metricsGroup.newGauge(s"${ControlPlaneAcceptor.MetricPrefix}ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized { - val controlPlaneProcessorOpt = controlPlaneAcceptorOpt.map(a => a.processors(0)) - val expiredConnectionsKilledCountMetricNames = controlPlaneProcessorOpt.map { p => - metrics.metricName("expired-connections-killed-count", MetricsGroup, p.metricTags) - } - expiredConnectionsKilledCountMetricNames.map { metricName => - Option(metrics.metric(metricName)).fold(0.0)(m => m.metricValue.asInstanceOf[Double]) - }.getOrElse(0.0) - }) - } // Create acceptors and processors for the statically configured endpoints when the // SocketServer is constructed. Note that this just opens the ports and creates the data @@ -178,7 +146,6 @@ class SocketServer( if (apiVersionManager.listenerType.equals(ListenerType.CONTROLLER)) { config.controllerListeners.foreach(createDataPlaneAcceptorAndProcessors) } else { - config.controlPlaneListener.foreach(createControlPlaneAcceptorAndProcessor) config.dataPlaneListeners.foreach(createDataPlaneAcceptorAndProcessors) } @@ -232,16 +199,14 @@ class SocketServer( } info("Enabling request processing.") - controlPlaneAcceptorOpt.foreach(chainAcceptorFuture) dataPlaneAcceptors.values().forEach(chainAcceptorFuture) FutureUtils.chainFuture(CompletableFuture.allOf(authorizerFutures.values.toArray: _*), allAuthorizerFuturesComplete) // Construct a future that will be completed when all Acceptors have been successfully started. // Alternately, if any of them fail to start, this future will be completed exceptionally. 
- val allAcceptors = dataPlaneAcceptors.values().asScala.toSeq ++ controlPlaneAcceptorOpt val enableFuture = new CompletableFuture[Void] - FutureUtils.chainFuture(CompletableFuture.allOf(allAcceptors.map(_.startedFuture).toArray: _*), enableFuture) + FutureUtils.chainFuture(CompletableFuture.allOf(dataPlaneAcceptors.values().asScala.toArray.map(_.startedFuture): _*), enableFuture) enableFuture } @@ -251,8 +216,7 @@ class SocketServer( } val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(endpoint.listenerName.configPrefix) connectionQuotas.addListener(config, endpoint.listenerName) - val isPrivilegedListener = controlPlaneRequestChannelOpt.isEmpty && - config.interBrokerListenerName == endpoint.listenerName + val isPrivilegedListener = config.interBrokerListenerName == endpoint.listenerName val dataPlaneAcceptor = createDataPlaneAcceptor(endpoint, isPrivilegedListener, dataPlaneRequestChannel) config.addReconfigurable(dataPlaneAcceptor) dataPlaneAcceptor.configure(parsedConfigs) @@ -260,27 +224,12 @@ class SocketServer( info(s"Created data-plane acceptor and processors for endpoint : ${endpoint.listenerName}") } - private def createControlPlaneAcceptorAndProcessor(endpoint: EndPoint): Unit = synchronized { - if (stopped) { - throw new RuntimeException("Can't create new control plane acceptor and processor: SocketServer is stopped.") - } - connectionQuotas.addListener(config, endpoint.listenerName) - val controlPlaneAcceptor = createControlPlaneAcceptor(endpoint, controlPlaneRequestChannelOpt.get) - controlPlaneAcceptor.addProcessors(1) - controlPlaneAcceptorOpt = Some(controlPlaneAcceptor) - info(s"Created control-plane acceptor and processor for endpoint : ${endpoint.listenerName}") - } - private def endpoints = config.listeners.map(l => l.listenerName -> l).toMap protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { new DataPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, isPrivilegedListener, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager) } - private def createControlPlaneAcceptor(endPoint: EndPoint, requestChannel: RequestChannel): ControlPlaneAcceptor = { - new ControlPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager) - } - /** * Stop processing requests and new connections. 
*/ @@ -289,11 +238,8 @@ class SocketServer( stopped = true info("Stopping socket server request processors") dataPlaneAcceptors.asScala.values.foreach(_.beginShutdown()) - controlPlaneAcceptorOpt.foreach(_.beginShutdown()) dataPlaneAcceptors.asScala.values.foreach(_.close()) - controlPlaneAcceptorOpt.foreach(_.close()) dataPlaneRequestChannel.clear() - controlPlaneRequestChannelOpt.foreach(_.clear()) info("Stopped socket server request processors") } } @@ -309,7 +255,6 @@ class SocketServer( this.synchronized { stopProcessingRequests() dataPlaneRequestChannel.shutdown() - controlPlaneRequestChannelOpt.foreach(_.shutdown()) connectionQuotas.close() } info("Shutdown completed") @@ -321,7 +266,7 @@ class SocketServer( if (acceptor != null) { acceptor.localPort } else { - controlPlaneAcceptorOpt.map(_.localPort).getOrElse(throw new KafkaException("Could not find listenerName : " + listenerName + " in data-plane or control-plane")) + throw new KafkaException("Could not find listenerName : " + listenerName + " in data-plane.") } } catch { case e: Exception => @@ -418,10 +363,7 @@ object SocketServer { val ListenerReconfigurableConfigs: Set[String] = Set(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG) - def closeSocket( - channel: SocketChannel, - logging: Logging - ): Unit = { + def closeSocket(channel: SocketChannel): Unit = { Utils.closeQuietly(channel.socket, "channel socket") Utils.closeQuietly(channel, "channel") } @@ -531,42 +473,6 @@ class DataPlaneAcceptor(socketServer: SocketServer, } } -object ControlPlaneAcceptor { - val ThreadPrefix = "control-plane" - val MetricPrefix = "ControlPlane" -} - -class ControlPlaneAcceptor(socketServer: SocketServer, - endPoint: EndPoint, - config: KafkaConfig, - nodeId: Int, - connectionQuotas: ConnectionQuotas, - time: Time, - requestChannel: RequestChannel, - metrics: Metrics, - credentialProvider: CredentialProvider, - logContext: LogContext, - memoryPool: MemoryPool, - apiVersionManager: ApiVersionManager) - extends Acceptor(socketServer, - endPoint, - config, - nodeId, - connectionQuotas, - time, - true, - requestChannel, - metrics, - credentialProvider, - logContext, - memoryPool, - apiVersionManager) { - - override def metricPrefix(): String = ControlPlaneAcceptor.MetricPrefix - override def threadPrefix(): String = ControlPlaneAcceptor.ThreadPrefix - -} - /** * Thread that accepts and configures new connections. There is one of these per endpoint. 
*/ @@ -716,7 +622,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, // The serverChannel will be null if Acceptor's thread is not started Utils.closeQuietly(serverChannel, "Acceptor serverChannel") Utils.closeQuietly(nioSelector, "Acceptor nioSelector") - throttledSockets.foreach(throttledSocket => closeSocket(throttledSocket.socket, this)) + throttledSockets.foreach(throttledSocket => closeSocket(throttledSocket.socket)) throttledSockets.clear() } @@ -822,7 +728,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, while (throttledSockets.headOption.exists(_.endThrottleTimeMs < timeMs)) { val closingSocket = throttledSockets.dequeue() debug(s"Closing socket from ip ${closingSocket.socket.getRemoteAddress}") - closeSocket(closingSocket.socket, this) + closeSocket(closingSocket.socket) } } @@ -891,6 +797,17 @@ private[kafka] object Processor { val NetworkProcessorMetricTag = "networkProcessor" val ListenerMetricTag = "listener" val ConnectionQueueSize = 20 + + private[network] def parseRequestHeader(apiVersionManager: ApiVersionManager, buffer: ByteBuffer): RequestHeader = { + val header = RequestHeader.parse(buffer) + if (apiVersionManager.isApiEnabled(header.apiKey, header.apiVersion)) { + header + } else if (header.isApiVersionSupported()) { + throw new InvalidRequestException(s"Received request api key ${header.apiKey} with version ${header.apiVersion} which is not enabled") + } else { + throw new UnsupportedVersionException(s"Received request api key ${header.apiKey} with version ${header.apiVersion} which is not supported") + } + } } /** @@ -1106,21 +1023,12 @@ private[kafka] class Processor( } } - private def parseRequestHeader(buffer: ByteBuffer): RequestHeader = { - val header = RequestHeader.parse(buffer) - if (apiVersionManager.isApiEnabled(header.apiKey, header.apiVersion)) { - header - } else { - throw new InvalidRequestException(s"Received request api key ${header.apiKey} with version ${header.apiVersion} which is not enabled") - } - } - private def processCompletedReceives(): Unit = { selector.completedReceives.forEach { receive => try { openOrClosingChannel(receive.source) match { case Some(channel) => - val header = parseRequestHeader(receive.payload) + val header = parseRequestHeader(apiVersionManager, receive.payload) if (header.apiKey == ApiKeys.SASL_HANDSHAKE && channel.maybeBeginServerReauthentication(receive, () => time.nanoseconds())) trace(s"Begin re-authentication: $channel") @@ -1807,7 +1715,7 @@ class ConnectionQuotas(config: KafkaConfig, time: Time, metrics: Metrics) extend if (channel != null) { log.debug(s"Closing connection from ${channel.socket.getRemoteSocketAddress}") dec(listenerName, channel.socket.getInetAddress) - closeSocket(channel, log) + closeSocket(channel) } } diff --git a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala index c1d8b4abc8ed7..cd3f1db2d98ef 100644 --- a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala +++ b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala @@ -28,7 +28,7 @@ import org.apache.kafka.common.errors.InvalidConfigurationException import org.apache.kafka.common.record.{MemoryRecords, Records} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} -import org.apache.kafka.raft.{Isolation, KafkaRaftClient, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, OffsetAndEpoch, OffsetMetadata, ReplicatedLog, ValidOffsetAndEpoch} +import 
org.apache.kafka.raft.{Isolation, KafkaRaftClient, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, OffsetAndEpoch, OffsetMetadata, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.apache.kafka.server.storage.log.FetchIsolation @@ -81,7 +81,7 @@ final class KafkaMetadataLog private ( new LogOffsetMetadata( fetchInfo.fetchOffsetMetadata.messageOffset, - Optional.of(SegmentPosition( + Optional.of(new SegmentPosition( fetchInfo.fetchOffsetMetadata.segmentBaseOffset, fetchInfo.fetchOffsetMetadata.relativePositionInSegment)) ) @@ -155,7 +155,7 @@ final class KafkaMetadataLog private ( val endOffsetMetadata = log.logEndOffsetMetadata new LogOffsetMetadata( endOffsetMetadata.messageOffset, - Optional.of(SegmentPosition( + Optional.of(new SegmentPosition( endOffsetMetadata.segmentBaseOffset, endOffsetMetadata.relativePositionInSegment) ) @@ -197,7 +197,7 @@ final class KafkaMetadataLog private ( } override def initializeLeaderEpoch(epoch: Int): Unit = { - log.maybeAssignEpochStartOffset(epoch, log.logEndOffset) + log.assignEpochStartOffset(epoch, log.logEndOffset) } override def updateHighWatermark(offsetMetadata: LogOffsetMetadata): Unit = { @@ -226,7 +226,7 @@ final class KafkaMetadataLog private ( override def highWatermark: LogOffsetMetadata = { val hwm = log.fetchOffsetSnapshot.highWatermark val segmentPosition: Optional[OffsetMetadata] = if (!hwm.messageOffsetOnly) { - Optional.of(SegmentPosition(hwm.segmentBaseOffset, hwm.relativePositionInSegment)) + Optional.of(new SegmentPosition(hwm.segmentBaseOffset, hwm.relativePositionInSegment)) } else { Optional.empty() } @@ -272,6 +272,25 @@ final class KafkaMetadataLog private ( ) } + /* + Perform a check that the requested snapshot offset is batch aligned via a log read, which + returns the base offset of the batch that contains the requested offset. A snapshot offset + is one greater than the last offset contained in the snapshot, and cannot go past the high + watermark. + + This check is necessary because Raft replication code assumes the snapshot offset is the + start of a batch. If a follower applies a non-batch aligned snapshot at offset (X) and + fetches from this offset, the returned batch will start at offset (X - M), and the + follower will be unable to append it since (X - M) < (X). + */ + val baseOffset = read(snapshotId.offset, Isolation.COMMITTED).startOffsetMetadata.offset + if (snapshotId.offset != baseOffset) { + throw new IllegalArgumentException( + s"Cannot create snapshot at offset (${snapshotId.offset}) because it is not batch aligned. 
" + + s"The batch containing the requested offset has a base offset of ($baseOffset)" + ) + } + createNewSnapshotUnchecked(snapshotId) } @@ -601,8 +620,7 @@ object KafkaMetadataLog extends Logging { producerIdExpirationCheckIntervalMs = Int.MaxValue, logDirFailureChannel = new LogDirFailureChannel(5), lastShutdownClean = false, - topicId = Some(topicId), - keepPartitionMetadataFile = true + topicId = Some(topicId) ) val metadataLog = new KafkaMetadataLog( diff --git a/core/src/main/scala/kafka/raft/RaftManager.scala b/core/src/main/scala/kafka/raft/RaftManager.scala index 80fa1af5894e8..ba4dbcdfab66a 100644 --- a/core/src/main/scala/kafka/raft/RaftManager.scala +++ b/core/src/main/scala/kafka/raft/RaftManager.scala @@ -34,7 +34,6 @@ import org.apache.kafka.common.KafkaException import org.apache.kafka.common.Node import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.Uuid -import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.{ChannelBuilders, ListenerName, NetworkReceive, Selectable, Selector} import org.apache.kafka.common.protocol.ApiMessage @@ -43,9 +42,9 @@ import org.apache.kafka.common.requests.RequestHeader import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.{LogContext, Time, Utils} -import org.apache.kafka.raft.{Endpoints, FileQuorumStateStore, KafkaNetworkChannel, KafkaRaftClient, KafkaRaftClientDriver, LeaderAndEpoch, QuorumConfig, RaftClient, ReplicatedLog} +import org.apache.kafka.raft.{Endpoints, FileQuorumStateStore, KafkaNetworkChannel, KafkaRaftClient, KafkaRaftClientDriver, LeaderAndEpoch, QuorumConfig, RaftClient, ReplicatedLog, TimingWheelExpirationService} import org.apache.kafka.server.ProcessRole -import org.apache.kafka.server.common.Features +import org.apache.kafka.server.common.Feature import org.apache.kafka.server.common.serialization.RecordSerde import org.apache.kafka.server.util.{FileLock, KafkaScheduler} import org.apache.kafka.server.fault.FaultHandler @@ -84,41 +83,6 @@ object KafkaRaftManager { .map(Paths.get(_).toAbsolutePath) .contains(Paths.get(config.metadataLogDir).toAbsolutePath) } - - /** - * Obtain the file lock and delete the metadata log directory completely. - * - * This is only used by ZK brokers that are in pre-migration or hybrid mode of the ZK to KRaft migration. - * The rationale for deleting the metadata log in these cases is that it is safe to do on brokers and it - * makes recovery from a failed migration much easier. See KAFKA-16463. 
- * - * @param config The broker config - */ - def maybeDeleteMetadataLogDir(config: KafkaConfig): Unit = { - // These constraints are enforced in KafkaServer, but repeating them here to guard against future callers - if (config.processRoles.nonEmpty) { - throw new RuntimeException("Not deleting metadata log dir since this node is in KRaft mode.") - } else if (!config.migrationEnabled) { - throw new RuntimeException("Not deleting metadata log dir since migrations are not enabled.") - } else { - val metadataDir = new File(config.metadataLogDir) - val logDirName = UnifiedLog.logDirName(Topic.CLUSTER_METADATA_TOPIC_PARTITION) - val metadataPartitionDir = KafkaRaftManager.createLogDirectory(metadataDir, logDirName) - val deletionLock = if (hasDifferentLogDir(config)) { - Some(KafkaRaftManager.lockDataDir(metadataDir)) - } else { - None - } - - try { - Utils.delete(metadataPartitionDir) - } catch { - case t: Throwable => throw new RuntimeException("Failed to delete metadata log", t) - } finally { - deletionLock.foreach(_.destroy()) - } - } - } } trait RaftManager[T] { @@ -240,7 +204,7 @@ class KafkaRaftManager[T]( clusterId, bootstrapServers, localListeners, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), raftConfig ) } @@ -279,7 +243,6 @@ class KafkaRaftManager[T]( controllerListenerName, config.saslMechanismControllerProtocol, time, - config.saslInterBrokerHandshakeRequestEnable, logContext ) diff --git a/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala b/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala deleted file mode 100644 index 3c330fb6f2ec2..0000000000000 --- a/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.raft - -import java.util.concurrent.CompletableFuture -import org.apache.kafka.common.errors.TimeoutException -import org.apache.kafka.raft.ExpirationService -import org.apache.kafka.server.util.ShutdownableThread -import org.apache.kafka.server.util.timer.{Timer, TimerTask} - -object TimingWheelExpirationService { - private val WorkTimeoutMs: Long = 200L - - private class TimerTaskCompletableFuture[T](delayMs: Long) extends TimerTask(delayMs) { - val future = new CompletableFuture[T] - override def run(): Unit = { - future.completeExceptionally(new TimeoutException( - s"Future failed to be completed before timeout of $delayMs ms was reached")) - } - } -} - -class TimingWheelExpirationService(timer: Timer) extends ExpirationService { - import TimingWheelExpirationService._ - - private val expirationReaper = new ExpiredOperationReaper() - - expirationReaper.start() - - override def failAfter[T](timeoutMs: Long): CompletableFuture[T] = { - val task = new TimerTaskCompletableFuture[T](timeoutMs) - task.future.whenComplete { (_, _) => - task.cancel() - } - timer.add(task) - task.future - } - - private class ExpiredOperationReaper extends ShutdownableThread("raft-expiration-reaper", false) { - - override def doWork(): Unit = { - timer.advanceClock(WorkTimeoutMs) - } - } - - def shutdown(): Unit = { - expirationReaper.shutdown() - } -} diff --git a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala index 061e7b1171dab..be663d19ec808 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala @@ -20,7 +20,7 @@ package kafka.server import com.yammer.metrics.core.Meter import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.CoreUtils.inLock -import kafka.utils.{DelayedItem, Logging, Pool} +import kafka.utils.{Logging, Pool} import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.PartitionStates import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset @@ -30,6 +30,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{FileRecords, MemoryRecords, Records} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.requests._ +import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{ClientIdAndBroker, InvalidRecordException, TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.server.metrics.KafkaMetricsGroup @@ -93,8 +94,6 @@ abstract class AbstractFetcherThread(name: String, protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] - protected val isOffsetForLeaderEpochSupported: Boolean - override def shutdown(): Unit = { initiateShutdown() inLock(partitionMapLock) { @@ -150,7 +149,7 @@ abstract class AbstractFetcherThread(name: String, partitionStates.partitionStateMap.forEach { (tp, state) => if (state.isTruncating) { latestEpoch(tp) match { - case Some(epoch) if isOffsetForLeaderEpochSupported => + case Some(epoch) => partitionsWithEpochs += tp -> new EpochData() .setPartition(tp.partition) .setCurrentLeaderEpoch(state.currentLeaderEpoch) @@ -789,7 +788,7 @@ abstract class AbstractFetcherThread(name: String, Option(partitionStates.stateValue(partition)).foreach { currentFetchState => if 
(!currentFetchState.isDelayed) { partitionStates.updateAndMoveToEnd(partition, PartitionFetchState(currentFetchState.topicId, currentFetchState.fetchOffset, - currentFetchState.lag, currentFetchState.currentLeaderEpoch, Some(new DelayedItem(delay)), + currentFetchState.lag, currentFetchState.currentLeaderEpoch, Some(delay), currentFetchState.state, currentFetchState.lastFetchedEpoch)) } } @@ -945,25 +944,27 @@ object PartitionFetchState { /** * case class to keep partition offset and its state(truncatingLog, delayed) * This represents a partition as being either: - * (1) Truncating its log, for example having recently become a follower - * (2) Delayed, for example due to an error, where we subsequently back off a bit - * (3) ReadyForFetch, the is the active state where the thread is actively fetching data. + * (1) Truncating its log, for example, having recently become a follower + * (2) Delayed, for example, due to an error, where we subsequently back off a bit + * (3) ReadyForFetch, the active state where the thread is actively fetching data. */ case class PartitionFetchState(topicId: Option[Uuid], fetchOffset: Long, lag: Option[Long], currentLeaderEpoch: Int, - delay: Option[DelayedItem], + delay: Option[Long], state: ReplicaState, lastFetchedEpoch: Option[Int]) { + private val dueMs = delay.map(_ + Time.SYSTEM.milliseconds) + def isReadyForFetch: Boolean = state == Fetching && !isDelayed def isReplicaInSync: Boolean = lag.isDefined && lag.get <= 0 def isTruncating: Boolean = state == Truncating && !isDelayed - def isDelayed: Boolean = delay.exists(_.getDelay(TimeUnit.MILLISECONDS) > 0) + def isDelayed: Boolean = dueMs.exists(_ > Time.SYSTEM.milliseconds) override def toString: String = { s"FetchState(topicId=$topicId" + @@ -972,7 +973,7 @@ case class PartitionFetchState(topicId: Option[Uuid], s", lastFetchedEpoch=$lastFetchedEpoch" + s", state=$state" + s", lag=$lag" + - s", delay=${delay.map(_.delayMs).getOrElse(0)}ms" + + s", delay=${delay.getOrElse(0)}ms" + s")" } diff --git a/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala b/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala index 0b680e034b2ac..eab17214c64a8 100644 --- a/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala +++ b/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala @@ -39,17 +39,37 @@ object AddPartitionsToTxnManager { val VerificationFailureRateMetricName = "VerificationFailureRate" val VerificationTimeMsMetricName = "VerificationTimeMs" + + def produceRequestVersionToTransactionSupportedOperation(version: Short): TransactionSupportedOperation = { + if (version > 11) { + addPartition + } else if (version > 10) { + genericErrorSupported + } else { + defaultError + } + } + + def txnOffsetCommitRequestVersionToTransactionSupportedOperation(version: Short): TransactionSupportedOperation = { + if (version > 4) { + addPartition + } else if (version > 3) { + genericErrorSupported + } else { + defaultError + } + } } /** * This is an enum which handles the Partition Response based on the Request Version and the exact operation - * defaultError: This is the default workflow which maps to cases when the Produce Request Version or the Txn_offset_commit request was lower than the first version supporting the new Error Class - * genericError: This maps to the case when the clients are updated to handle the TransactionAbortableException - * addPartition: This is a WIP. 
To be updated as a part of KIP-890 Part 2 + * defaultError: This is the default workflow which maps to cases when the Produce Request Version or the Txn_offset_commit request was lower than the first version supporting the new Error Class + * genericErrorSupported: This maps to the case when the clients are updated to handle the TransactionAbortableException + * addPartition: This allows the partition to be added to the transactions inflight with the Produce and TxnOffsetCommit requests. Plus the behaviors in genericErrorSupported. */ sealed trait TransactionSupportedOperation case object defaultError extends TransactionSupportedOperation -case object genericError extends TransactionSupportedOperation +case object genericErrorSupported extends TransactionSupportedOperation case object addPartition extends TransactionSupportedOperation /* @@ -85,7 +105,7 @@ class AddPartitionsToTxnManager( private val verificationFailureRate = metricsGroup.newMeter(VerificationFailureRateMetricName, "failures", TimeUnit.SECONDS) private val verificationTimeMs = metricsGroup.newHistogram(VerificationTimeMsMetricName) - def verifyTransaction( + def addOrVerifyTransaction( transactionalId: String, producerId: Long, producerEpoch: Short, @@ -108,7 +128,7 @@ class AddPartitionsToTxnManager( .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(producerEpoch) - .setVerifyOnly(true) + .setVerifyOnly(transactionSupportedOperation != addPartition) .setTopics(topicCollection) addTxnData(coordinatorNode.get, transactionData, callback, transactionSupportedOperation) @@ -225,7 +245,8 @@ class AddPartitionsToTxnManager( val code = if (partitionResult.partitionErrorCode == Errors.PRODUCER_FENCED.code) Errors.INVALID_PRODUCER_EPOCH.code - else if (partitionResult.partitionErrorCode() == Errors.TRANSACTION_ABORTABLE.code && transactionDataAndCallbacks.transactionSupportedOperation != genericError) // For backward compatibility with clients. + else if (partitionResult.partitionErrorCode() == Errors.TRANSACTION_ABORTABLE.code + && transactionDataAndCallbacks.transactionSupportedOperation == defaultError) // For backward compatibility with clients. Errors.INVALID_TXN_STATE.code else partitionResult.partitionErrorCode diff --git a/core/src/main/scala/kafka/server/AlterPartitionManager.scala b/core/src/main/scala/kafka/server/AlterPartitionManager.scala index bd754f497292c..fa1c3602beedb 100644 --- a/core/src/main/scala/kafka/server/AlterPartitionManager.scala +++ b/core/src/main/scala/kafka/server/AlterPartitionManager.scala @@ -20,7 +20,6 @@ import java.util import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} import kafka.utils.Logging -import kafka.zk.KafkaZkClient import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.TopicIdPartition import org.apache.kafka.common.TopicPartition @@ -41,9 +40,8 @@ import scala.collection.mutable.ListBuffer import scala.jdk.OptionConverters.RichOptional /** - * Handles updating the ISR by sending AlterPartition requests to the controller (as of 2.7) or by updating ZK directly - * (prior to 2.7). Updating the ISR is an asynchronous operation, so partitions will learn about the result of their - * request through a callback. + * Handles updating the ISR by sending AlterPartition requests to the controller. Updating the ISR is an asynchronous + * operation, so partitions will learn about the result of their request through a callback. 
* * Note that ISR state changes can still be initiated by the controller and sent to the partitions via LeaderAndIsr * requests. @@ -55,22 +53,20 @@ trait AlterPartitionManager { def submit( topicIdPartition: TopicIdPartition, - leaderAndIsr: LeaderAndIsr, - controllerEpoch: Int + leaderAndIsr: LeaderAndIsr ): CompletableFuture[LeaderAndIsr] } case class AlterPartitionItem( topicIdPartition: TopicIdPartition, leaderAndIsr: LeaderAndIsr, - future: CompletableFuture[LeaderAndIsr], - controllerEpoch: Int // controllerEpoch needed for `ZkAlterPartitionManager` + future: CompletableFuture[LeaderAndIsr] ) object AlterPartitionManager { /** - * Factory to AlterPartition based implementation, used when IBP >= 2.7-IV2 + * Factory to AlterPartition based implementation */ def apply( config: KafkaConfig, @@ -100,17 +96,6 @@ object AlterPartitionManager { metadataVersionSupplier = () => metadataCache.metadataVersion() ) } - - /** - * Factory for ZK based implementation, used when IBP < 2.7-IV2 - */ - def apply( - scheduler: Scheduler, - time: Time, - zkClient: KafkaZkClient - ): AlterPartitionManager = { - new ZkAlterPartitionManager(scheduler, time, zkClient) - } } class DefaultAlterPartitionManager( @@ -124,18 +109,11 @@ class DefaultAlterPartitionManager( // Used to allow only one pending ISR update per partition (visible for testing). // Note that we key items by TopicPartition despite using TopicIdPartition while - // submitting changes. We do this to ensure that topics with the same name but - // with a different topic id or no topic id collide here. There are two cases to - // consider: - // 1) When the cluster is upgraded from IBP < 2.8 to IBP >= 2.8, the ZK controller - // assigns topic ids to the partitions. So partitions will start sending updates - // with a topic id while they might still have updates without topic ids in this - // Map. This would break the contract of only allowing one pending ISR update per - // partition. - // 2) When a topic is deleted and re-created, we cannot have two entries in this Map - // especially if we cannot use an AlterPartition request version which supports - // topic ids in the end because the two updates with the same name would be merged - // together. + // submitting changes. This is done to ensure that topics with the same name but + // with a different topic id or no topic id collide here. When a topic is deleted + // and re-created, we cannot have two entries in this Map especially if we cannot + // use an AlterPartition request version which supports topic ids in the end because + // the two updates with the same name would be merged together. 
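The comment above is the contract behind the map declared next: updates are keyed by TopicPartition and only enqueued when putIfAbsent returns null, so at most one ISR update per partition can be pending at a time. A minimal sketch of that idiom, with an illustrative Pending type and String keys rather than Kafka's TopicPartition and AlterPartitionItem:

import java.util.concurrent.ConcurrentHashMap

object OnePendingUpdatePerKeySketch extends App {
  final case class Pending(partition: String, payload: String)

  // Keyed by partition name only: a second submission for the same partition is
  // rejected until the first one has been handled and removed.
  private val unsentUpdates = new ConcurrentHashMap[String, Pending]()

  def submit(item: Pending): Boolean =
    unsentUpdates.putIfAbsent(item.partition, item) == null

  println(submit(Pending("foo-0", "isr=[1,2]")))   // true: enqueued
  println(submit(Pending("foo-0", "isr=[1,2,3]"))) // false: an update is already pending
  unsentUpdates.remove("foo-0")                    // simulate the response being handled
  println(submit(Pending("foo-0", "isr=[1]")))     // true: the slot is free again
}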
private[server] val unsentIsrUpdates: util.Map[TopicPartition, AlterPartitionItem] = new ConcurrentHashMap[TopicPartition, AlterPartitionItem]() // Used to allow only one in-flight request at a time @@ -151,11 +129,10 @@ class DefaultAlterPartitionManager( override def submit( topicIdPartition: TopicIdPartition, - leaderAndIsr: LeaderAndIsr, - controllerEpoch: Int + leaderAndIsr: LeaderAndIsr ): CompletableFuture[LeaderAndIsr] = { val future = new CompletableFuture[LeaderAndIsr]() - val alterPartitionItem = AlterPartitionItem(topicIdPartition, leaderAndIsr, future, controllerEpoch) + val alterPartitionItem = AlterPartitionItem(topicIdPartition, leaderAndIsr, future) val enqueued = unsentIsrUpdates.putIfAbsent(alterPartitionItem.topicIdPartition.topicPartition, alterPartitionItem) == null if (enqueued) { maybePropagateIsrChanges() @@ -241,9 +218,6 @@ class DefaultAlterPartitionManager( * supported by the controller. The final decision is taken when the AlterPartitionRequest * is built in the network client based on the advertised api versions of the controller. * - * We could use version 2 or above if all the pending changes have an topic id defined; - * otherwise we must use version 1 or below. - * * @return A tuple containing the AlterPartitionRequest.Builder and a mapping from * topic id to topic name. This mapping is used in the response handling. */ @@ -257,9 +231,6 @@ class DefaultAlterPartitionManager( // the metadata cache is updated after the partition state so it might not know // yet about a topic id already used here. val topicNamesByIds = mutable.HashMap[Uuid, String]() - // We can use topic ids only if all the pending changed have one defined and - // we use IBP 2.8 or above. - var canUseTopicIds = metadataVersion.isTopicIdsSupported val message = new AlterPartitionRequestData() .setBrokerId(brokerId) @@ -267,7 +238,6 @@ class DefaultAlterPartitionManager( inflightAlterPartitionItems.groupBy(_.topicIdPartition.topic).foreach { case (topicName, items) => val topicId = items.head.topicIdPartition.topicId - canUseTopicIds &= topicId != Uuid.ZERO_UUID topicNamesByIds(topicId) = topicName // Both the topic name and the topic id are set here because at this stage @@ -292,8 +262,7 @@ class DefaultAlterPartitionManager( } } - // If we cannot use topic ids, the builder will ensure that no version higher than 1 is used. 
- (new AlterPartitionRequest.Builder(message, canUseTopicIds), topicNamesByIds) + (new AlterPartitionRequest.Builder(message), topicNamesByIds) } private def handleAlterPartitionResponse( diff --git a/core/src/main/scala/kafka/server/ApiVersionManager.scala b/core/src/main/scala/kafka/server/ApiVersionManager.scala index 588fe99aea10a..972af0414e463 100644 --- a/core/src/main/scala/kafka/server/ApiVersionManager.scala +++ b/core/src/main/scala/kafka/server/ApiVersionManager.scala @@ -150,14 +150,12 @@ class DefaultApiVersionManager( } val apiVersions = if (controllerApiVersions.isDefined) { ApiVersionsResponse.controllerApiVersions( - finalizedFeatures.metadataVersion().highestSupportedRecordVersion, controllerApiVersions.get, listenerType, enableUnstableLastVersion, clientTelemetryEnabled) } else { ApiVersionsResponse.brokerApiVersions( - finalizedFeatures.metadataVersion().highestSupportedRecordVersion, listenerType, enableUnstableLastVersion, clientTelemetryEnabled) diff --git a/core/src/main/scala/kafka/server/AuthHelper.scala b/core/src/main/scala/kafka/server/AuthHelper.scala index 4d21fb4385959..b208f8406bab6 100644 --- a/core/src/main/scala/kafka/server/AuthHelper.scala +++ b/core/src/main/scala/kafka/server/AuthHelper.scala @@ -55,7 +55,7 @@ class AuthHelper(authorizer: Option[Authorizer]) { def authorizeClusterOperation(request: RequestChannel.Request, operation: AclOperation): Unit = { if (!authorize(request.context, operation, CLUSTER, CLUSTER_NAME)) - throw new ClusterAuthorizationException(s"Request $request is not authorized.") + throw new ClusterAuthorizationException(s"Request $request needs $operation permission.") } def authorizedOperations(request: RequestChannel.Request, resource: Resource): Int = { diff --git a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala index 58b3035935cb5..e3abde0bda42e 100644 --- a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala +++ b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala @@ -18,9 +18,7 @@ package kafka.server import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.atomic.AtomicReference import java.util.{Collections, Properties} -import kafka.controller.KafkaController import kafka.coordinator.transaction.TransactionCoordinator import kafka.utils.Logging import org.apache.kafka.clients.ClientResponse @@ -31,7 +29,7 @@ import org.apache.kafka.common.message.CreateTopicsRequestData import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicConfig, CreatableTopicConfigCollection} import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{ApiError, CreateTopicsRequest, RequestContext, RequestHeader} +import org.apache.kafka.common.requests.{CreateTopicsRequest, RequestContext, RequestHeader} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} @@ -49,34 +47,13 @@ trait AutoTopicCreationManager { ): Seq[MetadataResponseTopic] } -object AutoTopicCreationManager { - - def apply( - config: KafkaConfig, - channelManager: Option[NodeToControllerChannelManager], - adminManager: Option[ZkAdminManager], - controller: Option[KafkaController], - groupCoordinator: GroupCoordinator, - 
txnCoordinator: TransactionCoordinator, - shareCoordinator: Option[ShareCoordinator], - ): AutoTopicCreationManager = { - new DefaultAutoTopicCreationManager(config, channelManager, adminManager, - controller, groupCoordinator, txnCoordinator, shareCoordinator) - } -} - class DefaultAutoTopicCreationManager( config: KafkaConfig, - channelManager: Option[NodeToControllerChannelManager], - adminManager: Option[ZkAdminManager], - controller: Option[KafkaController], + channelManager: NodeToControllerChannelManager, groupCoordinator: GroupCoordinator, txnCoordinator: TransactionCoordinator, shareCoordinator: Option[ShareCoordinator] ) extends AutoTopicCreationManager with Logging { - if (controller.isEmpty && channelManager.isEmpty) { - throw new IllegalArgumentException("Must supply a channel manager if not supplying a controller") - } private val inflightTopics = Collections.newSetFromMap(new ConcurrentHashMap[String, java.lang.Boolean]()) @@ -99,65 +76,13 @@ class DefaultAutoTopicCreationManager( val creatableTopicResponses = if (creatableTopics.isEmpty) { Seq.empty - } else if (controller.isEmpty || !controller.get.isActive && channelManager.isDefined) { - sendCreateTopicRequest(creatableTopics, metadataRequestContext) } else { - createTopicsInZk(creatableTopics, controllerMutationQuota) + sendCreateTopicRequest(creatableTopics, metadataRequestContext) } uncreatableTopicResponses ++ creatableTopicResponses } - private def createTopicsInZk( - creatableTopics: Map[String, CreatableTopic], - controllerMutationQuota: ControllerMutationQuota - ): Seq[MetadataResponseTopic] = { - val topicErrors = new AtomicReference[Map[String, ApiError]]() - try { - // Note that we use timeout = 0 since we do not need to wait for metadata propagation - // and we want to get the response error immediately. - adminManager.get.createTopics( - timeout = 0, - validateOnly = false, - creatableTopics, - Map.empty, - controllerMutationQuota, - topicErrors.set - ) - - val creatableTopicResponses = Option(topicErrors.get) match { - case Some(errors) => - errors.toSeq.map { case (topic, apiError) => - val error = apiError.error match { - case Errors.TOPIC_ALREADY_EXISTS | Errors.REQUEST_TIMED_OUT => - // The timeout error is expected because we set timeout=0. This - // nevertheless indicates that the topic metadata was created - // successfully, so we return LEADER_NOT_AVAILABLE. 
- Errors.LEADER_NOT_AVAILABLE - case error => error - } - - new MetadataResponseTopic() - .setErrorCode(error.code) - .setName(topic) - .setIsInternal(Topic.isInternal(topic)) - } - - case None => - creatableTopics.keySet.toSeq.map { topic => - new MetadataResponseTopic() - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - .setName(topic) - .setIsInternal(Topic.isInternal(topic)) - } - } - - creatableTopicResponses - } finally { - clearInflightRequests(creatableTopics) - } - } - private def sendCreateTopicRequest( creatableTopics: Map[String, CreatableTopic], metadataRequestContext: Option[RequestContext] @@ -189,10 +114,6 @@ class DefaultAutoTopicCreationManager( } } - val channelManager = this.channelManager.getOrElse { - throw new IllegalStateException("Channel manager must be defined in order to send CreateTopic requests.") - } - val request = metadataRequestContext.map { context => val requestVersion = channelManager.controllerApiVersions.toScala match { diff --git a/core/src/main/scala/kafka/server/BrokerBlockingSender.scala b/core/src/main/scala/kafka/server/BrokerBlockingSender.scala index ccd1060fabe05..c8405be6c6e6f 100644 --- a/core/src/main/scala/kafka/server/BrokerBlockingSender.scala +++ b/core/src/main/scala/kafka/server/BrokerBlockingSender.scala @@ -60,7 +60,6 @@ class BrokerBlockingSender(sourceBroker: BrokerEndPoint, brokerConfig.interBrokerListenerName, brokerConfig.saslMechanismInterBrokerProtocol, time, - brokerConfig.saslInterBrokerHandshakeRequestEnable, logContext ) val reconfigurableChannelBuilder = channelBuilder match { diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 4a215cc507886..47b89dd1de18f 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -193,7 +193,7 @@ class BrokerServer( info("Starting broker") val clientMetricsReceiverPlugin = new ClientMetricsReceiverPlugin() - config.dynamicConfig.initialize(zkClientOpt = None, Some(clientMetricsReceiverPlugin)) + config.dynamicConfig.initialize(Some(clientMetricsReceiverPlugin)) /* start scheduler */ kafkaScheduler = new KafkaScheduler(config.backgroundThreads) @@ -216,8 +216,7 @@ class BrokerServer( kafkaScheduler, time, brokerTopicStats, - logDirFailureChannel, - keepPartitionMetadataFile = true) + logDirFailureChannel) remoteLogManagerOpt = createRemoteLogManager() @@ -347,7 +346,6 @@ class BrokerServer( alterPartitionManager = alterPartitionManager, brokerTopicStats = brokerTopicStats, isShuttingDown = isShuttingDown, - zkClient = None, threadNamePrefix = None, // The ReplicaManager only runs on the broker, and already includes the ID in thread names. 
delayedRemoteFetchPurgatoryParam = None, brokerEpochSupplier = () => lifecycleManager.brokerEpoch, @@ -385,11 +383,11 @@ class BrokerServer( producerIdManagerSupplier, metrics, metadataCache, Time.SYSTEM) autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, Some(clientToControllerChannelManager), None, None, - groupCoordinator, transactionCoordinator, shareCoordinator) + config, clientToControllerChannelManager, groupCoordinator, + transactionCoordinator, shareCoordinator) dynamicConfigHandlers = Map[String, ConfigHandler]( - ConfigType.TOPIC -> new TopicConfigHandler(replicaManager, config, quotaManagers, None), + ConfigType.TOPIC -> new TopicConfigHandler(replicaManager, config, quotaManagers), ConfigType.BROKER -> new BrokerConfigHandler(config, quotaManagers), ConfigType.CLIENT_METRICS -> new ClientMetricsConfigHandler(clientMetricsManager), ConfigType.GROUP -> new GroupConfigHandler(groupCoordinator)) @@ -424,7 +422,7 @@ class BrokerServer( val fetchSessionCacheShards = (0 until NumFetchSessionCacheShards) .map(shardNum => new FetchSessionCacheShard( config.maxIncrementalFetchSessionCacheSlots / NumFetchSessionCacheShards, - KafkaServer.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS, + KafkaBroker.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS, sessionIdRange, shardNum )) @@ -432,7 +430,7 @@ class BrokerServer( val shareFetchSessionCache : ShareSessionCache = new ShareSessionCache( config.shareGroupConfig.shareGroupMaxGroups * config.groupCoordinatorConfig.shareGroupMaxSize, - KafkaServer.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS) + KafkaBroker.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS) sharePartitionManager = new SharePartitionManager( replicaManager, @@ -447,11 +445,9 @@ class BrokerServer( metrics ) - // Create the request processor objects. - val raftSupport = RaftSupport(forwardingManager, metadataCache) dataPlaneRequestProcessor = new KafkaApis( requestChannel = socketServer.dataPlaneRequestChannel, - metadataSupport = raftSupport, + forwardingManager = forwardingManager, replicaManager = replicaManager, groupCoordinator = groupCoordinator, txnCoordinator = transactionCoordinator, @@ -465,13 +461,13 @@ class BrokerServer( authorizer = authorizer, quotas = quotaManagers, fetchManager = fetchManager, - sharePartitionManager = Some(sharePartitionManager), + sharePartitionManager = sharePartitionManager, brokerTopicStats = brokerTopicStats, clusterId = clusterId, time = time, tokenManager = tokenManager, apiVersionManager = apiVersionManager, - clientMetricsManager = Some(clientMetricsManager)) + clientMetricsManager = clientMetricsManager) dataPlaneRequestHandlerPool = new KafkaRequestHandlerPool(config.nodeId, socketServer.dataPlaneRequestChannel, dataPlaneRequestProcessor, time, diff --git a/core/src/main/scala/kafka/server/ClientQuotaManager.scala b/core/src/main/scala/kafka/server/ClientQuotaManager.scala index 82c0532161b19..f621d0dbcf437 100644 --- a/core/src/main/scala/kafka/server/ClientQuotaManager.scala +++ b/core/src/main/scala/kafka/server/ClientQuotaManager.scala @@ -216,7 +216,7 @@ class ClientQuotaManager(private val config: ClientQuotaManagerConfig, /** * Records that a user/clientId accumulated or would like to accumulate the provided amount at the - * the specified time, returns throttle time in milliseconds. + * specified time, returns throttle time in milliseconds. 
* * @param session The session from which the user is extracted * @param clientId The client id diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index d8cf895677d3e..eabe12d0d942a 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -19,27 +19,21 @@ package kafka.server import java.net.{InetAddress, UnknownHostException} import java.util.{Collections, Properties} -import kafka.controller.KafkaController import kafka.log.UnifiedLog import kafka.network.ConnectionQuotas import kafka.server.QuotaFactory.QuotaManagers import kafka.utils.Logging -import org.apache.kafka.server.config.{QuotaConfig, ReplicationConfigs, ZooKeeperInternals} -import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.server.config.{QuotaConfig, ZooKeeperInternals} import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.metrics.Quota._ import org.apache.kafka.common.utils.Sanitizer import org.apache.kafka.coordinator.group.GroupCoordinator -import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.ClientMetricsManager import org.apache.kafka.server.common.StopPartition import org.apache.kafka.storage.internals.log.{LogStartOffsetIncrementReason, ThrottledReplicaListValidator} -import org.apache.kafka.storage.internals.log.LogConfig.MessageFormatVersion -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ import scala.collection.Seq -import scala.util.Try /** * The ConfigHandler is used to process broker configuration change notifications. @@ -54,26 +48,18 @@ trait ConfigHandler { */ class TopicConfigHandler(private val replicaManager: ReplicaManager, kafkaConfig: KafkaConfig, - val quotas: QuotaManagers, - kafkaController: Option[KafkaController]) extends ConfigHandler with Logging { + val quotas: QuotaManagers) extends ConfigHandler with Logging { private def updateLogConfig(topic: String, topicConfig: Properties): Unit = { val logManager = replicaManager.logManager - // Validate the configurations. 
- val configNamesToExclude = excludedConfigs(topic, topicConfig) - val props = new Properties() - topicConfig.asScala.foreachEntry { (key, value) => - if (!configNamesToExclude.contains(key)) props.put(key, value) - } val logs = logManager.logsByTopic(topic) val wasRemoteLogEnabled = logs.exists(_.remoteLogEnabled()) val wasCopyDisabled = logs.exists(_.config.remoteLogCopyDisable()) - // kafkaController is only defined in Zookeeper's mode - logManager.updateTopicConfig(topic, props, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), - wasRemoteLogEnabled, kafkaController.isDefined) + logManager.updateTopicConfig(topic, topicConfig, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), + wasRemoteLogEnabled) maybeUpdateRemoteLogComponents(topic, logs, wasRemoteLogEnabled, wasCopyDisabled) } @@ -139,10 +125,6 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, } updateThrottledList(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, quotas.leader) updateThrottledList(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG, quotas.follower) - - if (Try(topicConfig.getProperty(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG).toBoolean).getOrElse(false)) { - kafkaController.foreach(_.enableTopicUncleanLeaderElection(topic)) - } } def parseThrottledPartitions(topicConfig: Properties, brokerId: Int, prop: String): Seq[Int] = { @@ -158,24 +140,6 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, .map(_ (0).toInt).toSeq //convert to list of partition ids } } - - @nowarn("cat=deprecation") - private def excludedConfigs(topic: String, topicConfig: Properties): Set[String] = { - // Verify message format version - Option(topicConfig.getProperty(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)).flatMap { versionString => - val messageFormatVersion = new MessageFormatVersion(versionString, kafkaConfig.interBrokerProtocolVersion.version) - if (messageFormatVersion.shouldIgnore) { - if (messageFormatVersion.shouldWarn) - warn(messageFormatVersion.topicWarningMessage(topic)) - Some(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG) - } else if (kafkaConfig.interBrokerProtocolVersion.isLessThan(messageFormatVersion.messageFormatVersion)) { - warn(s"Topic configuration ${TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG} is ignored for `$topic` because `$versionString` " + - s"is higher than what is allowed by the inter-broker protocol version `${kafkaConfig.interBrokerProtocolVersionString}`") - Some(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG) - } else - None - }.toSet - } } @@ -214,37 +178,6 @@ class QuotaConfigHandler(private val quotaManagers: QuotaManagers) { } } -/** - * The ClientIdConfigHandler will process clientId config changes in ZK. - * The callback provides the clientId and the full properties set read from ZK. - */ -class ClientIdConfigHandler(private val quotaManagers: QuotaManagers) extends QuotaConfigHandler(quotaManagers) with ConfigHandler { - - def processConfigChanges(sanitizedClientId: String, clientConfig: Properties): Unit = { - updateQuotaConfig(None, Some(sanitizedClientId), clientConfig) - } -} - -/** - * The UserConfigHandler will process <user> and <user, client-id> quota changes in ZK. - * The callback provides the node name containing sanitized user principal, sanitized client-id if this is - a <user, client-id> update and the full properties set read from ZK.
- */ -class UserConfigHandler(private val quotaManagers: QuotaManagers, val credentialProvider: CredentialProvider) extends QuotaConfigHandler(quotaManagers) with ConfigHandler { - - def processConfigChanges(quotaEntityPath: String, config: Properties): Unit = { - // Entity path is <user> or <user>/clients/<client> - val entities = quotaEntityPath.split("/") - if (entities.length != 1 && entities.length != 3) - throw new IllegalArgumentException("Invalid quota entity path: " + quotaEntityPath) - val sanitizedUser = entities(0) - val sanitizedClientId = if (entities.length == 3) Some(entities(2)) else None - updateQuotaConfig(Some(sanitizedUser), sanitizedClientId, config) - if (sanitizedClientId.isEmpty && sanitizedUser != ZooKeeperInternals.DEFAULT_STRING) - credentialProvider.updateCredentials(Sanitizer.desanitize(sanitizedUser), config) - } -} - class IpConfigHandler(private val connectionQuotas: ConnectionQuotas) extends ConfigHandler with Logging { def processConfigChanges(ip: String, config: Properties): Unit = { diff --git a/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala b/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala index 1a644e30d9adc..cc3a889221767 100644 --- a/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala +++ b/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala @@ -194,7 +194,7 @@ class ControllerMutationQuotaManager(private val config: ClientQuotaManagerConfi /** * Records that a user/clientId accumulated or would like to accumulate the provided amount at the - * the specified time, returns throttle time in milliseconds. The quota is strict meaning that it + * specified time, returns throttle time in milliseconds. The quota is strict meaning that it * does not accept any mutations once the quota is exhausted until it gets back to the defined rate. * * @param session The session from which the user is extracted @@ -264,7 +264,7 @@ class ControllerMutationQuotaManager(private val config: ClientQuotaManagerConfi /** * Returns a ControllerMutationQuota based on `strictSinceVersion`. It returns a strict * quota if the version is equal to or above of the `strictSinceVersion`, a permissive - * quota if the version is below, and a unbounded quota if the quota is disabled. + quota if the version is below, and an unbounded quota if the quota is disabled. * * When the quota is strictly enforced. Any operation above the quota is not allowed * and rejected with a THROTTLING_QUOTA_EXCEEDED error.
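The strictSinceVersion rule documented above reduces to a three-way choice: an unbounded quota when the feature is disabled, a strict quota at or above the threshold request version, and a permissive quota below it. A small sketch of that decision with illustrative names rather than the actual ControllerMutationQuota classes:

object QuotaModeSketch extends App {
  sealed trait QuotaMode
  case object Unbounded extends QuotaMode  // quota disabled: never throttles or rejects
  case object Strict extends QuotaMode     // rejects mutations with THROTTLING_QUOTA_EXCEEDED once exhausted
  case object Permissive extends QuotaMode // records usage and throttles, but never rejects

  def quotaFor(requestVersion: Int, strictSinceVersion: Int, quotaEnabled: Boolean): QuotaMode =
    if (!quotaEnabled) Unbounded
    else if (requestVersion >= strictSinceVersion) Strict
    else Permissive

  println(quotaFor(requestVersion = 3, strictSinceVersion = 3, quotaEnabled = true))  // Strict
  println(quotaFor(requestVersion = 2, strictSinceVersion = 3, quotaEnabled = true))  // Permissive
  println(quotaFor(requestVersion = 5, strictSinceVersion = 3, quotaEnabled = false)) // Unbounded
}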
diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala b/core/src/main/scala/kafka/server/ControllerServer.scala index 90deff7ed8607..76ffffe53b38d 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -26,6 +26,7 @@ import kafka.server.metadata.{AclPublisher, ClientQuotaMetadataManager, Delegati import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{LogContext, Utils} @@ -123,7 +124,7 @@ class ControllerServer( try { this.logIdent = logContext.logPrefix() info("Starting controller") - config.dynamicConfig.initialize(zkClientOpt = None, clientMetricsReceiverPluginOpt = None) + config.dynamicConfig.initialize(clientMetricsReceiverPluginOpt = None) maybeChangeStatus(STARTING, STARTED) @@ -155,6 +156,11 @@ class ControllerServer( () => featuresPublisher.features() ) + // metrics will be set to null when closing a controller, so we should recreate it for testing + if (sharedServer.metrics == null){ + sharedServer.metrics = new Metrics() + } + tokenCache = new DelegationTokenCache(ScramMechanism.mechanismNames) credentialProvider = new CredentialProvider(ScramMechanism.mechanismNames, tokenCache) socketServer = new SocketServer(config, @@ -194,7 +200,7 @@ class ControllerServer( startupDeadline, time) val controllerNodes = QuorumConfig.voterConnectionsToNodes(voterConnections) val quorumFeatures = new QuorumFeatures(config.nodeId, - QuorumFeatures.defaultFeatureMap(config.unstableFeatureVersionsEnabled), + QuorumFeatures.defaultSupportedFeatureMap(config.unstableFeatureVersionsEnabled), controllerNodes.asScala.map(node => Integer.valueOf(node.id())).asJava) val delegationTokenKeyString = { @@ -243,7 +249,8 @@ class ControllerServer( setDelegationTokenExpiryCheckIntervalMs(config.delegationTokenExpiryCheckIntervalMs). setUncleanLeaderElectionCheckIntervalMs(config.uncleanLeaderElectionCheckIntervalMs). setInterBrokerListenerName(config.interBrokerListenerName.value()). - setEligibleLeaderReplicasEnabled(config.elrEnabled) + setControllerPerformanceSamplePeriodMs(config.controllerPerformanceSamplePeriodMs). + setControllerPerformanceAlwaysLogThresholdMs(config.controllerPerformanceAlwaysLogThresholdMs) } controller = controllerBuilder.build() @@ -293,7 +300,7 @@ class ControllerServer( clusterId, time, s"controller-${config.nodeId}-", - QuorumFeatures.defaultFeatureMap(config.unstableFeatureVersionsEnabled), + QuorumFeatures.defaultSupportedFeatureMap(config.unstableFeatureVersionsEnabled), incarnationId, listenerInfo) diff --git a/core/src/main/scala/kafka/server/DelayedCreatePartitions.scala b/core/src/main/scala/kafka/server/DelayedCreatePartitions.scala deleted file mode 100644 index 5f204a24ff114..0000000000000 --- a/core/src/main/scala/kafka/server/DelayedCreatePartitions.scala +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import kafka.utils.Logging -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.ApiError -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.purgatory.DelayedOperation - -import scala.collection._ - -/** - * The create metadata maintained by the delayed create topic or create partitions operations. - */ -case class CreatePartitionsMetadata(topic: String, partitions: Set[Int], error: ApiError) - -object CreatePartitionsMetadata { - def apply(topic: String, partitions: Set[Int]): CreatePartitionsMetadata = { - CreatePartitionsMetadata(topic, partitions, ApiError.NONE) - } - - def apply(topic: String, error: Errors): CreatePartitionsMetadata = { - CreatePartitionsMetadata(topic, Set.empty, new ApiError(error, null)) - } - - def apply(topic: String, throwable: Throwable): CreatePartitionsMetadata = { - CreatePartitionsMetadata(topic, Set.empty, ApiError.fromThrowable(throwable)) - } -} - -/** - * A delayed create topic or create partitions operation that is stored in the topic purgatory. - */ -class DelayedCreatePartitions(delayMs: Long, - createMetadata: Seq[CreatePartitionsMetadata], - adminManager: ZkAdminManager, - responseCallback: Map[String, ApiError] => Unit) - extends DelayedOperation(delayMs) with Logging { - - /** - * The operation can be completed if all of the topics that do not have an error exist and every partition has a - * leader in the controller. 
- * See KafkaController.onNewTopicCreation - */ - override def tryComplete() : Boolean = { - trace(s"Trying to complete operation for $createMetadata") - - val leaderlessPartitionCount = createMetadata.filter(_.error.isSuccess).foldLeft(0) { case (topicCounter, metadata) => - topicCounter + missingLeaderCount(metadata.topic, metadata.partitions) - } - - if (leaderlessPartitionCount == 0) { - trace("All partitions have a leader, completing the delayed operation") - forceComplete() - } else { - trace(s"$leaderlessPartitionCount partitions do not have a leader, not completing the delayed operation") - false - } - } - - /** - * Check for partitions that are still missing a leader, update their error code and call the responseCallback - */ - override def onComplete(): Unit = { - trace(s"Completing operation for $createMetadata") - val results = createMetadata.map { metadata => - // ignore topics that already have errors - if (metadata.error.isSuccess && missingLeaderCount(metadata.topic, metadata.partitions) > 0) - (metadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null)) - else - (metadata.topic, metadata.error) - }.toMap - responseCallback(results) - } - - override def onExpiration(): Unit = {} - - private def missingLeaderCount(topic: String, partitions: Set[Int]): Int = { - partitions.foldLeft(0) { case (counter, partition) => - if (isMissingLeader(topic, partition)) counter + 1 else counter - } - } - - private def isMissingLeader(topic: String, partition: Int): Boolean = { - val partitionInfo = adminManager.metadataCache.getPartitionInfo(topic, partition) - partitionInfo.forall(_.leader == LeaderAndIsr.NO_LEADER) - } -} diff --git a/core/src/main/scala/kafka/server/DelayedDeleteTopics.scala b/core/src/main/scala/kafka/server/DelayedDeleteTopics.scala deleted file mode 100644 index 4ec4698aecbbe..0000000000000 --- a/core/src/main/scala/kafka/server/DelayedDeleteTopics.scala +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import kafka.utils.Logging -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.server.purgatory.DelayedOperation - -import scala.collection._ - -/** - * The delete metadata maintained by the delayed delete operation - */ -case class DeleteTopicMetadata(topic: String, error: Errors) - -object DeleteTopicMetadata { - def apply(topic: String, throwable: Throwable): DeleteTopicMetadata = { - DeleteTopicMetadata(topic, Errors.forException(throwable)) - } -} - -/** - * A delayed delete topics operation that can be created by the admin manager and watched - * in the topic purgatory - */ -class DelayedDeleteTopics(delayMs: Long, - deleteMetadata: Seq[DeleteTopicMetadata], - adminManager: ZkAdminManager, - responseCallback: Map[String, Errors] => Unit) - extends DelayedOperation(delayMs) with Logging { - - /** - * The operation can be completed if all of the topics not in error have been removed - */ - override def tryComplete() : Boolean = { - trace(s"Trying to complete operation for $deleteMetadata") - - // Ignore topics that already have errors - val existingTopics = deleteMetadata.count { metadata => metadata.error == Errors.NONE && topicExists(metadata.topic) } - - if (existingTopics == 0) { - trace("All topics have been deleted or have errors, completing the delayed operation") - forceComplete() - } else { - trace(s"$existingTopics topics still exist, not completing the delayed operation") - false - } - } - - /** - * Check for partitions that still exist, update their error code and call the responseCallback - */ - override def onComplete(): Unit = { - trace(s"Completing operation for $deleteMetadata") - val results = deleteMetadata.map { metadata => - // ignore topics that already have errors - if (metadata.error == Errors.NONE && topicExists(metadata.topic)) - (metadata.topic, Errors.REQUEST_TIMED_OUT) - else - (metadata.topic, metadata.error) - }.toMap - responseCallback(results) - } - - override def onExpiration(): Unit = { } - - private def topicExists(topic: String): Boolean = { - adminManager.metadataCache.contains(topic) - } -} diff --git a/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala b/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala index b75a42522553e..f2bb8c37d85a6 100644 --- a/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala +++ b/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala @@ -25,7 +25,9 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.ListOffsetsResponse import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.purgatory.DelayedOperation +import org.apache.kafka.storage.internals.log.OffsetResultHolder.FileRecordsOrError +import java.util.Optional import java.util.concurrent.TimeUnit import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ @@ -40,7 +42,7 @@ class DelayedRemoteListOffsets(delayMs: Long, // If there is a task to track, then build the response as REQUEST_TIMED_OUT by default. 
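The DelayedRemoteListOffsets hunks below switch the futureHolderOpt accessors from scala.Option's isDefined/foreach to java.util.Optional's isPresent/ifPresent. A minimal, illustrative-only sketch of the equivalences relied on (not part of the patch):

    import java.util.Optional

    // Illustrative only: scala.Option calls on the removed lines map to these
    // java.util.Optional calls on the added lines.
    object OptionToOptionalSketch {
      def main(args: Array[String]): Unit = {
        val scalaOpt: Option[String] = Some("holder")
        val javaOpt: Optional[String] = Optional.of("holder")

        // isDefined -> isPresent: both report whether a value exists.
        assert(scalaOpt.isDefined && javaOpt.isPresent)

        // foreach -> ifPresent: both run a side effect only when a value exists.
        scalaOpt.foreach(v => println(s"scala.Option: $v"))
        javaOpt.ifPresent(v => println(s"java.util.Optional: $v"))
      }
    }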
statusByPartition.foreachEntry { (topicPartition, status) => status.completed = status.futureHolderOpt.isEmpty - if (status.futureHolderOpt.isDefined) { + if (status.futureHolderOpt.isPresent) { status.responseOpt = Some(buildErrorResponse(Errors.REQUEST_TIMED_OUT, topicPartition.partition())) } trace(s"Initial partition status for $topicPartition is $status") @@ -53,7 +55,7 @@ class DelayedRemoteListOffsets(delayMs: Long, statusByPartition.foreachEntry { (topicPartition, status) => if (!status.completed) { debug(s"Expiring list offset request for partition $topicPartition with status $status") - status.futureHolderOpt.foreach(futureHolder => futureHolder.jobFuture.cancel(true)) + status.futureHolderOpt.ifPresent(futureHolder => futureHolder.jobFuture.cancel(true)) DelayedRemoteListOffsetsMetrics.recordExpiration(topicPartition) } } @@ -86,26 +88,26 @@ class DelayedRemoteListOffsets(delayMs: Long, replicaManager.getPartitionOrException(partition) } catch { case e: ApiException => - status.futureHolderOpt.foreach { futureHolder => + status.futureHolderOpt.ifPresent { futureHolder => futureHolder.jobFuture.cancel(false) - futureHolder.taskFuture.complete(Left(e)) + futureHolder.taskFuture.complete(new FileRecordsOrError(Optional.of(e), Optional.empty())) } } - status.futureHolderOpt.foreach { futureHolder => + status.futureHolderOpt.ifPresent { futureHolder => if (futureHolder.taskFuture.isDone) { - val response = futureHolder.taskFuture.get() match { - case Left(e) => - buildErrorResponse(Errors.forException(e), partition.partition()) - - case Right(None) => + val taskFuture = futureHolder.taskFuture.get() + val response = { + if (taskFuture.hasException) { + buildErrorResponse(Errors.forException(taskFuture.exception().get()), partition.partition()) + } else if (!taskFuture.hasTimestampAndOffset) { val error = status.maybeOffsetsError .map(e => if (version >= 5) Errors.forException(e) else Errors.LEADER_NOT_AVAILABLE) .getOrElse(Errors.NONE) buildErrorResponse(error, partition.partition()) - - case Right(Some(found)) => + } else { var partitionResponse = buildErrorResponse(Errors.NONE, partition.partition()) + val found = taskFuture.timestampAndOffset().get() if (status.lastFetchableOffset.isDefined && found.offset >= status.lastFetchableOffset.get) { if (status.maybeOffsetsError.isDefined) { val error = if (version >= 5) Errors.forException(status.maybeOffsetsError.get) else Errors.LEADER_NOT_AVAILABLE @@ -123,6 +125,7 @@ class DelayedRemoteListOffsets(delayMs: Long, } } partitionResponse + } } status.responseOpt = Some(response) status.completed = true diff --git a/core/src/main/scala/kafka/server/DelegationTokenManagerZk.scala b/core/src/main/scala/kafka/server/DelegationTokenManagerZk.scala deleted file mode 100644 index 4b7b8080637a0..0000000000000 --- a/core/src/main/scala/kafka/server/DelegationTokenManagerZk.scala +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import java.nio.ByteBuffer -import java.nio.charset.StandardCharsets -import java.util.Base64 - -import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener} -import kafka.utils.{CoreUtils, Json} -import kafka.zk.{DelegationTokenChangeNotificationSequenceZNode, DelegationTokenChangeNotificationZNode, DelegationTokensZNode} -import kafka.zk.KafkaZkClient -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.security.auth.KafkaPrincipal -import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache -import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation} -import org.apache.kafka.common.utils.{Sanitizer, SecurityUtils, Time} - -import scala.jdk.CollectionConverters._ -import scala.collection.mutable - -/* - * Object used to encode and decode TokenInformation to Zk - */ -object DelegationTokenManagerZk { - private val OwnerKey ="owner" - private val TokenRequesterKey = "tokenRequester" - private val RenewersKey = "renewers" - private val IssueTimestampKey = "issueTimestamp" - private val MaxTimestampKey = "maxTimestamp" - private val ExpiryTimestampKey = "expiryTimestamp" - private val TokenIdKey = "tokenId" - private val VersionKey = "version" - private val CurrentVersion = 3 - - def toJsonCompatibleMap(tokenInfo: TokenInformation): Map[String, Any] = { - val tokenInfoMap = mutable.Map[String, Any]() - tokenInfoMap(VersionKey) = CurrentVersion - tokenInfoMap(OwnerKey) = Sanitizer.sanitize(tokenInfo.ownerAsString) - tokenInfoMap(TokenRequesterKey) = Sanitizer.sanitize(tokenInfo.tokenRequester.toString) - tokenInfoMap(RenewersKey) = tokenInfo.renewersAsString.asScala.map(e => Sanitizer.sanitize(e)).asJava - tokenInfoMap(IssueTimestampKey) = tokenInfo.issueTimestamp - tokenInfoMap(MaxTimestampKey) = tokenInfo.maxTimestamp - tokenInfoMap(ExpiryTimestampKey) = tokenInfo.expiryTimestamp - tokenInfoMap(TokenIdKey) = tokenInfo.tokenId() - tokenInfoMap.toMap - } - - def fromBytes(bytes: Array[Byte]): Option[TokenInformation] = { - if (bytes == null || bytes.isEmpty) - return None - - Json.parseBytes(bytes) match { - case Some(js) => - val mainJs = js.asJsonObject - val version = mainJs(VersionKey).to[Int] - require(version > 0 && version <= CurrentVersion) - val owner = SecurityUtils.parseKafkaPrincipal(Sanitizer.desanitize(mainJs(OwnerKey).to[String])) - var tokenRequester = owner - if (version >= 3) - tokenRequester = SecurityUtils.parseKafkaPrincipal(Sanitizer.desanitize(mainJs(TokenRequesterKey).to[String])) - val renewerStr = mainJs(RenewersKey).to[Seq[String]] - val renewers = renewerStr.map(Sanitizer.desanitize).map(SecurityUtils.parseKafkaPrincipal) - val issueTimestamp = mainJs(IssueTimestampKey).to[Long] - val expiryTimestamp = mainJs(ExpiryTimestampKey).to[Long] - val maxTimestamp = mainJs(MaxTimestampKey).to[Long] - val tokenId = mainJs(TokenIdKey).to[String] - - val tokenInfo = new TokenInformation(tokenId, owner, tokenRequester, renewers.asJava, - issueTimestamp, maxTimestamp, expiryTimestamp) - - Some(tokenInfo) - case None => - None - } - 
} -} - -/* - * Cache for Delegation Tokens when using Zk for metadata. - * This includes other Zk specific handling of Delegation Tokens. - */ -class DelegationTokenManagerZk(config: KafkaConfig, - tokenCache: DelegationTokenCache, - time: Time, - val zkClient: KafkaZkClient) - extends DelegationTokenManager(config, tokenCache, time) { - - import DelegationTokenManager._ - - private var tokenChangeListener: ZkNodeChangeNotificationListener = _ - - override def startup(): Unit = { - if (config.tokenAuthEnabled) { - zkClient.createDelegationTokenPaths() - loadCache() - tokenChangeListener = new ZkNodeChangeNotificationListener(zkClient, DelegationTokenChangeNotificationZNode.path, DelegationTokenChangeNotificationSequenceZNode.SequenceNumberPrefix, TokenChangedNotificationHandler) - tokenChangeListener.init() - } - } - - private def loadCache(): Unit = { - lock.synchronized { - val tokens = zkClient.getChildren(DelegationTokensZNode.path) - info(s"Loading the token cache. Total token count: ${tokens.size}") - for (tokenId <- tokens) { - try { - getTokenFromZk(tokenId) match { - case Some(token) => updateCache(token) - case None => - } - } catch { - case ex: Throwable => error(s"Error while getting Token for tokenId: $tokenId", ex) - } - } - } - } - - private def getTokenFromZk(tokenId: String): Option[DelegationToken] = { - zkClient.getDelegationTokenInfo(tokenId) match { - case Some(tokenInformation) => { - val hmac = createHmac(tokenId, secretKey) - Some(new DelegationToken(tokenInformation, hmac)) - } - case None => - None - } - } - - override def shutdown(): Unit = { - if (config.tokenAuthEnabled) { - if (tokenChangeListener != null) tokenChangeListener.close() - } - } - - /** - * @param token - */ - override def updateToken(token: DelegationToken): Unit = { - zkClient.setOrCreateDelegationToken(token) - updateCache(token) - zkClient.createTokenChangeNotification(token.tokenInfo.tokenId()) - } - - /** - * - * @param owner - * @param renewers - * @param maxLifeTimeMs - * @param responseCallback - */ - override def createToken(owner: KafkaPrincipal, - tokenRequester: KafkaPrincipal, - renewers: List[KafkaPrincipal], - maxLifeTimeMs: Long, - responseCallback: CreateResponseCallback): Unit = { - - if (!config.tokenAuthEnabled) { - responseCallback(CreateTokenResult(owner, tokenRequester, -1, -1, -1, "", Array[Byte](), Errors.DELEGATION_TOKEN_AUTH_DISABLED)) - } else { - lock.synchronized { - val tokenId = CoreUtils.generateUuidAsBase64() - - val issueTimeStamp = time.milliseconds - val maxLifeTime = if (maxLifeTimeMs <= 0) tokenMaxLifetime else Math.min(maxLifeTimeMs, tokenMaxLifetime) - val maxLifeTimeStamp = issueTimeStamp + maxLifeTime - val expiryTimeStamp = Math.min(maxLifeTimeStamp, issueTimeStamp + defaultTokenRenewTime) - - val tokenInfo = new TokenInformation(tokenId, owner, tokenRequester, renewers.asJava, issueTimeStamp, maxLifeTimeStamp, expiryTimeStamp) - - val hmac = createHmac(tokenId, secretKey) - val token = new DelegationToken(tokenInfo, hmac) - updateToken(token) - info(s"Created a delegation token: $tokenId for owner: $owner") - responseCallback(CreateTokenResult(owner, tokenRequester, issueTimeStamp, expiryTimeStamp, maxLifeTimeStamp, tokenId, hmac, Errors.NONE)) - } - } - } - - /** - * - * @param hmac - * @return - */ - private def getToken(hmac: ByteBuffer): Option[DelegationToken] = { - try { - val byteArray = new Array[Byte](hmac.remaining) - hmac.get(byteArray) - val base64Pwd = Base64.getEncoder.encodeToString(byteArray) - val tokenInfo = 
tokenCache.tokenForHmac(base64Pwd) - if (tokenInfo == null) None else Some(new DelegationToken(tokenInfo, byteArray)) - } catch { - case e: Exception => - error("Exception while getting token for hmac", e) - None - } - } - - /** - * - * @param principal - * @param tokenInfo - * @return - */ - private def allowedToRenew(principal: KafkaPrincipal, tokenInfo: TokenInformation): Boolean = { - if (principal.equals(tokenInfo.owner) || tokenInfo.renewers.asScala.toList.contains(principal)) true else false - } - - /** - * - * @param principal - * @param hmac - * @param renewLifeTimeMs - * @param renewResponseCallback - */ - override def renewToken(principal: KafkaPrincipal, - hmac: ByteBuffer, - renewLifeTimeMs: Long, - renewCallback: RenewResponseCallback): Unit = { - - if (!config.tokenAuthEnabled) { - renewCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, -1) - } else { - lock.synchronized { - getToken(hmac) match { - case Some(token) => { - val now = time.milliseconds - val tokenInfo = token.tokenInfo - - if (!allowedToRenew(principal, tokenInfo)) { - renewCallback(Errors.DELEGATION_TOKEN_OWNER_MISMATCH, -1) - } else if (tokenInfo.maxTimestamp < now || tokenInfo.expiryTimestamp < now) { - renewCallback(Errors.DELEGATION_TOKEN_EXPIRED, -1) - } else { - val renewLifeTime = if (renewLifeTimeMs < 0) defaultTokenRenewTime else renewLifeTimeMs - val renewTimeStamp = now + renewLifeTime - val expiryTimeStamp = Math.min(tokenInfo.maxTimestamp, renewTimeStamp) - tokenInfo.setExpiryTimestamp(expiryTimeStamp) - - updateToken(token) - info(s"Delegation token renewed for token: ${tokenInfo.tokenId} for owner: ${tokenInfo.owner}") - renewCallback(Errors.NONE, expiryTimeStamp) - } - } - case None => renewCallback(Errors.DELEGATION_TOKEN_NOT_FOUND, -1) - } - } - } - } - - /** - * - * @param principal - * @param hmac - * @param expireLifeTimeMs - * @param expireResponseCallback - */ - override def expireToken(principal: KafkaPrincipal, - hmac: ByteBuffer, - expireLifeTimeMs: Long, - expireResponseCallback: ExpireResponseCallback): Unit = { - - if (!config.tokenAuthEnabled) { - expireResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, -1) - } else { - lock.synchronized { - getToken(hmac) match { - case Some(token) => { - val tokenInfo = token.tokenInfo - val now = time.milliseconds - - if (!allowedToRenew(principal, tokenInfo)) { - expireResponseCallback(Errors.DELEGATION_TOKEN_OWNER_MISMATCH, -1) - } else if (expireLifeTimeMs < 0) { //expire immediately - removeToken(tokenInfo.tokenId) - info(s"Token expired for token: ${tokenInfo.tokenId} for owner: ${tokenInfo.owner}") - expireResponseCallback(Errors.NONE, now) - } else if (tokenInfo.maxTimestamp < now || tokenInfo.expiryTimestamp < now) { - expireResponseCallback(Errors.DELEGATION_TOKEN_EXPIRED, -1) - } else { - //set expiry time stamp - val expiryTimeStamp = Math.min(tokenInfo.maxTimestamp, now + expireLifeTimeMs) - tokenInfo.setExpiryTimestamp(expiryTimeStamp) - - updateToken(token) - info(s"Updated expiry time for token: ${tokenInfo.tokenId} for owner: ${tokenInfo.owner}") - expireResponseCallback(Errors.NONE, expiryTimeStamp) - } - } - case None => expireResponseCallback(Errors.DELEGATION_TOKEN_NOT_FOUND, -1) - } - } - } - } - - /** - * - * @param tokenId - */ - override def removeToken(tokenId: String): Unit = { - zkClient.deleteDelegationToken(tokenId) - removeCache(tokenId) - zkClient.createTokenChangeNotification(tokenId) - } - - /** - * - * @return - */ - override def expireTokens(): Unit = { - lock.synchronized { - for (tokenInfo <- 
getAllTokenInformation) { - val now = time.milliseconds - if (tokenInfo.maxTimestamp < now || tokenInfo.expiryTimestamp < now) { - info(s"Delegation token expired for token: ${tokenInfo.tokenId} for owner: ${tokenInfo.owner}") - removeToken(tokenInfo.tokenId) - } - } - } - } - - private object TokenChangedNotificationHandler extends NotificationHandler { - override def processNotification(tokenIdBytes: Array[Byte]): Unit = { - lock.synchronized { - val tokenId = new String(tokenIdBytes, StandardCharsets.UTF_8) - info(s"Processing Token Notification for tokenId: $tokenId") - getTokenFromZk(tokenId) match { - case Some(token) => updateCache(token) - case None => removeCache(tokenId) - } - } - } - } -} - diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala index 9f69a44c919ff..32febffb546e8 100755 --- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala +++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala @@ -26,10 +26,9 @@ import kafka.log.{LogCleaner, LogManager} import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.server.DynamicBrokerConfig._ import kafka.utils.{CoreUtils, Logging} -import kafka.zk.{AdminZkClient, KafkaZkClient} import org.apache.kafka.common.Reconfigurable import org.apache.kafka.common.config.internals.BrokerSecurityConfigs -import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, SaslConfigs, SslConfigs, TopicConfig} +import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, SaslConfigs, SslConfigs} import org.apache.kafka.common.metrics.{Metrics, MetricsReporter} import org.apache.kafka.common.config.types.Password import org.apache.kafka.common.network.{ListenerName, ListenerReconfigurable} @@ -39,7 +38,7 @@ import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.PasswordEncoder import org.apache.kafka.server.ProcessRole -import org.apache.kafka.server.config.{ConfigType, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms, ZooKeeperInternals} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.{ClientMetricsReceiverPlugin, MetricConfigs} import org.apache.kafka.server.telemetry.ClientTelemetry @@ -58,8 +57,6 @@ import scala.jdk.CollectionConverters._ * * The order of precedence for broker configs is: *
- *   <li>DYNAMIC_BROKER_CONFIG: stored in ZK at /configs/brokers/{brokerId}
- *   <li>DYNAMIC_DEFAULT_BROKER_CONFIG: stored in ZK at /configs/brokers/<default>
 *   <li>STATIC_BROKER_CONFIG: properties that broker is started up with, typically from server.properties file
 *   <li>DEFAULT_CONFIG: Default configs defined in KafkaConfig
 * </ol>
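The precedence list above decides which source wins when the same property is defined at several levels. A minimal sketch of that layering, assuming each level is available as a java.util.Properties (the helper and object names are hypothetical, not broker code):

    import java.util.Properties

    // Hypothetical helper: overlay layers from lowest to highest precedence, so the
    // per-broker dynamic config (applied last) overrides every other source.
    object ConfigPrecedenceSketch {
      def effectiveConfig(defaults: Properties,
                          staticProps: Properties,
                          dynamicDefault: Properties,
                          dynamicPerBroker: Properties): Properties = {
        val effective = new Properties()
        Seq(defaults, staticProps, dynamicDefault, dynamicPerBroker)
          .foreach(layer => effective.putAll(layer))
        effective
      }

      def main(args: Array[String]): Unit = {
        val defaults = new Properties(); defaults.put("log.retention.hours", "168")
        val staticProps = new Properties(); staticProps.put("log.retention.hours", "72")
        val dynamicDefault = new Properties()
        val dynamicPerBroker = new Properties(); dynamicPerBroker.put("log.retention.hours", "24")
        // Prints 24: the per-broker dynamic value shadows the static and default values.
        println(effectiveConfig(defaults, staticProps, dynamicDefault, dynamicPerBroker)
          .getProperty("log.retention.hours"))
      }
    }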
          @@ -213,23 +210,11 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging private val lock = new ReentrantReadWriteLock private var metricsReceiverPluginOpt: Option[ClientMetricsReceiverPlugin] = _ private var currentConfig: KafkaConfig = _ - private val dynamicConfigPasswordEncoder = if (kafkaConfig.processRoles.isEmpty) { - maybeCreatePasswordEncoder(kafkaConfig.passwordEncoderSecret) - } else { - Some(PasswordEncoder.NOOP) - } + private val dynamicConfigPasswordEncoder = Some(PasswordEncoder.NOOP) - private[server] def initialize(zkClientOpt: Option[KafkaZkClient], clientMetricsReceiverPluginOpt: Option[ClientMetricsReceiverPlugin]): Unit = { + private[server] def initialize(clientMetricsReceiverPluginOpt: Option[ClientMetricsReceiverPlugin]): Unit = { currentConfig = new KafkaConfig(kafkaConfig.props, false) metricsReceiverPluginOpt = clientMetricsReceiverPluginOpt - - zkClientOpt.foreach { zkClient => - val adminZkClient = new AdminZkClient(zkClient) - updateDefaultConfig(adminZkClient.fetchEntityConfig(ConfigType.BROKER, ZooKeeperInternals.DEFAULT_STRING), false) - val props = adminZkClient.fetchEntityConfig(ConfigType.BROKER, kafkaConfig.brokerId.toString) - val brokerConfig = maybeReEncodePasswords(props, adminZkClient) - updateBrokerConfig(kafkaConfig.brokerId, brokerConfig) - } } /** @@ -373,16 +358,6 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging }) } - private def maybeCreatePasswordEncoder(secret: Option[Password]): Option[PasswordEncoder] = { - secret.map { secret => - PasswordEncoder.encrypting(secret, - kafkaConfig.passwordEncoderKeyFactoryAlgorithm, - kafkaConfig.passwordEncoderCipherAlgorithm, - kafkaConfig.passwordEncoderKeyLength, - kafkaConfig.passwordEncoderIterations) - } - } - private def passwordEncoder: PasswordEncoder = { dynamicConfigPasswordEncoder.getOrElse(throw new ConfigException("Password encoder secret not configured")) } @@ -441,32 +416,6 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging props } - // If the secret has changed, password.encoder.old.secret contains the old secret that was used - // to encode the configs in ZK. Decode passwords using the old secret and update ZK with values - // encoded using the current secret. Ignore any errors during decoding since old secret may not - // have been removed during broker restart. - private def maybeReEncodePasswords(persistentProps: Properties, adminZkClient: AdminZkClient): Properties = { - val props = persistentProps.clone().asInstanceOf[Properties] - if (props.asScala.keySet.exists(isPasswordConfig)) { - maybeCreatePasswordEncoder(kafkaConfig.passwordEncoderOldSecret).foreach { passwordDecoder => - persistentProps.asScala.foreachEntry { (configName, value) => - if (isPasswordConfig(configName) && value != null) { - val decoded = try { - Some(passwordDecoder.decode(value).value) - } catch { - case _: Exception => - debug(s"Dynamic password config $configName could not be decoded using old secret, new secret will be used.") - None - } - decoded.foreach(value => props.put(configName, passwordEncoder.encode(new Password(value)))) - } - } - adminZkClient.changeBrokerConfig(Some(kafkaConfig.brokerId), props) - } - } - props - } - /** * Validate the provided configs `propsOverride` and return the full Kafka configs with * the configured defaults and these overrides. @@ -662,24 +611,13 @@ trait BrokerReconfigurable { } object DynamicLogConfig { - /** - * The log configurations that are non-reconfigurable. 
This set contains the names you - * would use when setting a dynamic configuration on a topic, which are different than the - * corresponding broker configuration names. - * - * For now, message.format.version is not reconfigurable, since we need to check that - * the version is supported on all brokers in the cluster. - */ - val NonReconfigrableLogConfigs: Set[String] = Set(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG) - /** * The broker configurations pertaining to logs that are reconfigurable. This set contains * the names you would use when setting a static or dynamic broker configuration (not topic * configuration). */ val ReconfigurableConfigs: Set[String] = - ServerTopicConfigSynonyms.TOPIC_CONFIG_SYNONYMS.asScala. - filterNot(s => NonReconfigrableLogConfigs.contains(s._1)).values.toSet + ServerTopicConfigSynonyms.TOPIC_CONFIG_SYNONYMS.asScala.values.toSet } class DynamicLogConfig(logManager: LogManager, server: KafkaBroker) extends BrokerReconfigurable with Logging { @@ -742,27 +680,11 @@ class DynamicLogConfig(logManager: LogManager, server: KafkaBroker) extends Brok } override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { - val originalLogConfig = logManager.currentDefaultConfig - val originalUncleanLeaderElectionEnable = originalLogConfig.uncleanLeaderElectionEnable val newBrokerDefaults = new util.HashMap[String, Object](newConfig.extractLogConfigMap) - val originalLogConfigMap = originalLogConfig.originals() - DynamicLogConfig.NonReconfigrableLogConfigs.foreach(k => { - Option(originalLogConfigMap.get(k)) match { - case None => newBrokerDefaults.remove(k) - case Some(v) => newBrokerDefaults.put(k, v) - } - }) logManager.reconfigureDefaultLogConfig(new LogConfig(newBrokerDefaults)) updateLogsConfig(newBrokerDefaults.asScala) - - if (logManager.currentDefaultConfig.uncleanLeaderElectionEnable && !originalUncleanLeaderElectionEnable) { - server match { - case kafkaServer: KafkaServer => kafkaServer.kafkaController.enableDefaultUncleanLeaderElection() - case _ => - } - } } } @@ -959,7 +881,6 @@ object DynamicListenerConfig { */ val ReconfigurableConfigs = Set( // Listener configs - SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, SocketServerConfigs.LISTENERS_CONFIG, SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, @@ -1045,40 +966,16 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi DynamicListenerConfig.ReconfigurableConfigs } - private def listenerRegistrationsAltered( - oldAdvertisedListeners: Map[ListenerName, EndPoint], - newAdvertisedListeners: Map[ListenerName, EndPoint] - ): Boolean = { - if (oldAdvertisedListeners.size != newAdvertisedListeners.size) return true - oldAdvertisedListeners.foreachEntry { - case (oldListenerName, oldEndpoint) => - newAdvertisedListeners.get(oldListenerName) match { - case None => return true - case Some(newEndpoint) => if (!newEndpoint.equals(oldEndpoint)) { - return true - } - } - } - false - } - - private def verifyListenerRegistrationAlterationSupported(): Unit = { - if (!server.config.requiresZookeeper) { - throw new ConfigException("Advertised listeners cannot be altered when using a " + - "Raft-based metadata quorum.") - } - } - def validateReconfiguration(newConfig: KafkaConfig): Unit = { val oldConfig = server.config - val newListeners = listenersToMap(newConfig.listeners) - val newAdvertisedListeners = listenersToMap(newConfig.effectiveAdvertisedBrokerListeners) - val oldListeners = listenersToMap(oldConfig.listeners) - if 
(!newAdvertisedListeners.keySet.subsetOf(newListeners.keySet)) - throw new ConfigException(s"Advertised listeners '$newAdvertisedListeners' must be a subset of listeners '$newListeners'") - if (!newListeners.keySet.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet)) + val newListeners = newConfig.listeners.map(_.listenerName).toSet + val oldAdvertisedListeners = oldConfig.effectiveAdvertisedBrokerListeners.map(_.listenerName).toSet + val oldListeners = oldConfig.listeners.map(_.listenerName).toSet + if (!oldAdvertisedListeners.subsetOf(newListeners)) + throw new ConfigException(s"Advertised listeners '$oldAdvertisedListeners' must be a subset of listeners '$newListeners'") + if (!newListeners.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet)) throw new ConfigException(s"Listeners '$newListeners' must be subset of listener map '${newConfig.effectiveListenerSecurityProtocolMap}'") - newListeners.keySet.intersect(oldListeners.keySet).foreach { listenerName => + newListeners.intersect(oldListeners).foreach { listenerName => def immutableListenerConfigs(kafkaConfig: KafkaConfig, prefix: String): Map[String, AnyRef] = { kafkaConfig.originalsWithPrefix(prefix, true).asScala.filter { case (key, _) => // skip the reconfigurable configs @@ -1091,15 +988,6 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi if (oldConfig.effectiveListenerSecurityProtocolMap(listenerName) != newConfig.effectiveListenerSecurityProtocolMap(listenerName)) throw new ConfigException(s"Security protocol cannot be updated for existing listener $listenerName") } - if (!newAdvertisedListeners.contains(newConfig.interBrokerListenerName)) - throw new ConfigException(s"Advertised listener must be specified for inter-broker listener ${newConfig.interBrokerListenerName}") - - // Currently, we do not support adding or removing listeners when in KRaft mode. - // However, we support changing other listener configurations (max connections, etc.) 
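The reworked validateReconfiguration above boils down to two subset checks over listener names: no currently advertised listener may disappear, and every listener needs an entry in the security protocol map. A standalone, illustrative-only restatement of those rules:

    // Illustrative only: the same subset rules enforced in validateReconfiguration,
    // expressed over plain listener-name sets.
    object ListenerValidationSketch {
      def validate(oldAdvertised: Set[String],
                   newListeners: Set[String],
                   securityProtocolListeners: Set[String]): Unit = {
        require(oldAdvertised.subsetOf(newListeners),
          s"Advertised listeners $oldAdvertised must be a subset of listeners $newListeners")
        require(newListeners.subsetOf(securityProtocolListeners),
          s"Listeners $newListeners must be a subset of the security protocol map keys $securityProtocolListeners")
      }

      def main(args: Array[String]): Unit = {
        // Passes: every previously advertised listener survives and all listeners are mapped.
        validate(Set("PLAINTEXT"), Set("PLAINTEXT", "SSL"), Set("PLAINTEXT", "SSL"))
        // Would throw: "SSL" has no security protocol mapping.
        // validate(Set("PLAINTEXT"), Set("PLAINTEXT", "SSL"), Set("PLAINTEXT"))
      }
    }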
- if (listenerRegistrationsAltered(listenersToMap(oldConfig.effectiveAdvertisedBrokerListeners), - listenersToMap(newConfig.effectiveAdvertisedBrokerListeners))) { - verifyListenerRegistrationAlterationSupported() - } } def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { @@ -1114,14 +1002,6 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi if (listenersRemoved.nonEmpty) server.socketServer.removeListeners(listenersRemoved) if (listenersAdded.nonEmpty) server.socketServer.addListeners(listenersAdded) } - if (listenerRegistrationsAltered(listenersToMap(oldConfig.effectiveAdvertisedBrokerListeners), - listenersToMap(newConfig.effectiveAdvertisedBrokerListeners))) { - verifyListenerRegistrationAlterationSupported() - server match { - case kafkaServer: KafkaServer => kafkaServer.kafkaController.updateBrokerInfo(kafkaServer.createBrokerInfo) - case _ => throw new RuntimeException("Unable to handle non-kafkaServer") - } - } } private def listenersToMap(listeners: Seq[EndPoint]): Map[ListenerName, EndPoint] = @@ -1167,6 +1047,22 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w throw new ConfigException(s"$errorMsg, value should be at least 1") } } + + if (RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP.equals(k) || + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP.equals(k) || + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP.equals(k)) { + val newValue = v.asInstanceOf[Int] + val oldValue = server.config.getInt(k) + if (newValue != oldValue) { + val errorMsg = s"Dynamic thread count update validation failed for $k=$v" + if (newValue <= 0) + throw new ConfigException(s"$errorMsg, value should be at least 1") + if (newValue < oldValue / 2) + throw new ConfigException(s"$errorMsg, value should be at least half the current value $oldValue") + if (newValue > oldValue * 2) + throw new ConfigException(s"$errorMsg, value should not be greater than double the current value $oldValue") + } + } } } @@ -1176,29 +1072,40 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w def isChangedLongValue(k : String): Boolean = oldLongValue(k) != newLongValue(k) - val remoteLogManager = server.remoteLogManagerOpt - if (remoteLogManager.nonEmpty) { + if (server.remoteLogManagerOpt.nonEmpty) { + val remoteLogManager = server.remoteLogManagerOpt.get if (isChangedLongValue(RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP)) { val oldValue = oldLongValue(RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP) val newValue = newLongValue(RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP) - remoteLogManager.get.resizeCacheSize(newValue) + remoteLogManager.resizeCacheSize(newValue) info(s"Dynamic remote log manager config: ${RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP} updated, " + s"old value: $oldValue, new value: $newValue") } if (isChangedLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP)) { val oldValue = oldLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP) val newValue = newLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP) - remoteLogManager.get.updateCopyQuota(newValue) + remoteLogManager.updateCopyQuota(newValue) info(s"Dynamic remote log manager config: ${RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP} updated, " + s"old value: 
$oldValue, new value: $newValue") } if (isChangedLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP)) { val oldValue = oldLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP) val newValue = newLongValue(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP) - remoteLogManager.get.updateFetchQuota(newValue) + remoteLogManager.updateFetchQuota(newValue) info(s"Dynamic remote log manager config: ${RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP} updated, " + s"old value: $oldValue, new value: $newValue") } + + val newRLMConfig = newConfig.remoteLogManagerConfig + val oldRLMConfig = oldConfig.remoteLogManagerConfig + if (newRLMConfig.remoteLogManagerCopierThreadPoolSize() != oldRLMConfig.remoteLogManagerCopierThreadPoolSize()) + remoteLogManager.resizeCopierThreadPool(newRLMConfig.remoteLogManagerCopierThreadPoolSize()) + + if (newRLMConfig.remoteLogManagerExpirationThreadPoolSize() != oldRLMConfig.remoteLogManagerExpirationThreadPoolSize()) + remoteLogManager.resizeExpirationThreadPool(newRLMConfig.remoteLogManagerExpirationThreadPoolSize()) + + if (newRLMConfig.remoteLogReaderThreads() != oldRLMConfig.remoteLogReaderThreads()) + remoteLogManager.resizeReaderThreadPool(newRLMConfig.remoteLogReaderThreads()) } } @@ -1219,6 +1126,9 @@ object DynamicRemoteLogConfig { RemoteLogManagerConfig.REMOTE_FETCH_MAX_WAIT_MS_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP, - RemoteLogManagerConfig.REMOTE_LIST_OFFSETS_REQUEST_TIMEOUT_MS_PROP + RemoteLogManagerConfig.REMOTE_LIST_OFFSETS_REQUEST_TIMEOUT_MS_PROP, + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, + RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP ) } diff --git a/core/src/main/scala/kafka/server/FetchSession.scala b/core/src/main/scala/kafka/server/FetchSession.scala index e8772a02e91e4..773958cd4318b 100644 --- a/core/src/main/scala/kafka/server/FetchSession.scala +++ b/core/src/main/scala/kafka/server/FetchSession.scala @@ -19,7 +19,7 @@ package kafka.server import com.typesafe.scalalogging.Logger import kafka.utils.Logging -import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.{Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.FetchMetadata.{FINAL_EPOCH, INITIAL_EPOCH, INVALID_SESSION_ID} @@ -326,7 +326,7 @@ trait FetchContext extends Logging { * Updates the fetch context with new partition information. Generates response data. * The response data may require subsequent down-conversion. */ - def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse + def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP, nodeEndpoints: util.List[Node]): FetchResponse def partitionsToLogString(partitions: util.Collection[TopicIdPartition]): String = FetchSession.partitionsToLogString(partitions, isTraceEnabled) @@ -334,8 +334,8 @@ trait FetchContext extends Logging { /** * Return an empty throttled response due to quota violation. 
*/ - def getThrottledResponse(throttleTimeMs: Int): FetchResponse = - FetchResponse.of(Errors.NONE, throttleTimeMs, INVALID_SESSION_ID, new FetchSession.RESP_MAP) + def getThrottledResponse(throttleTimeMs: Int, nodeEndpoints: util.List[Node]): FetchResponse = + FetchResponse.of(Errors.NONE, throttleTimeMs, INVALID_SESSION_ID, new FetchSession.RESP_MAP, nodeEndpoints) } /** @@ -352,9 +352,9 @@ class SessionErrorContext(val error: Errors, } // Because of the fetch session error, we don't know what partitions were supposed to be in this request. - override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = { + override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP, nodeEndpoints: util.List[Node]): FetchResponse = { debug(s"Session error fetch context returning $error") - FetchResponse.of(error, 0, INVALID_SESSION_ID, new FetchSession.RESP_MAP) + FetchResponse.of(error, 0, INVALID_SESSION_ID, new FetchSession.RESP_MAP, nodeEndpoints) } } @@ -382,9 +382,9 @@ class SessionlessFetchContext(val fetchData: util.Map[TopicIdPartition, FetchReq FetchResponse.sizeOf(versionId, updates.entrySet.iterator) } - override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = { + override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP, nodeEndpoints: util.List[Node]): FetchResponse = { debug(s"Sessionless fetch context returning ${partitionsToLogString(updates.keySet)}") - FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, updates) + FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, updates, nodeEndpoints) } } @@ -430,7 +430,7 @@ class FullFetchContext(private val time: Time, FetchResponse.sizeOf(versionId, updates.entrySet.iterator) } - override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = { + override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP, nodeEndpoints: util.List[Node]): FetchResponse = { def createNewSession: FetchSession.CACHE_MAP = { val cachedPartitions = new FetchSession.CACHE_MAP(updates.size) updates.forEach { (part, respData) => @@ -444,7 +444,7 @@ class FullFetchContext(private val time: Time, updates.size, usesTopicIds, () => createNewSession) debug(s"Full fetch context with session id $responseSessionId returning " + s"${partitionsToLogString(updates.keySet)}") - FetchResponse.of(Errors.NONE, 0, responseSessionId, updates) + FetchResponse.of(Errors.NONE, 0, responseSessionId, updates, nodeEndpoints) } } @@ -533,7 +533,7 @@ class IncrementalFetchContext(private val time: Time, } } - override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = { + override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP, nodeEndpoints: util.List[Node]): FetchResponse = { session.synchronized { // Check to make sure that the session epoch didn't change in between // creating this fetch context and generating this response. @@ -541,7 +541,7 @@ class IncrementalFetchContext(private val time: Time, if (session.epoch != expectedEpoch) { info(s"Incremental fetch session ${session.id} expected epoch $expectedEpoch, but " + s"got ${session.epoch}. Possible duplicate request.") - FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, session.id, new FetchSession.RESP_MAP) + FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, session.id, new FetchSession.RESP_MAP, nodeEndpoints) } else { // Iterate over the update list using PartitionIterator. 
This will prune updates which don't need to be sent val partitionIter = new PartitionIterator(updates.entrySet.iterator, true) @@ -550,12 +550,12 @@ class IncrementalFetchContext(private val time: Time, } debug(s"Incremental fetch context with session id ${session.id} returning " + s"${partitionsToLogString(updates.keySet)}") - FetchResponse.of(Errors.NONE, 0, session.id, updates) + FetchResponse.of(Errors.NONE, 0, session.id, updates, nodeEndpoints) } } } - override def getThrottledResponse(throttleTimeMs: Int): FetchResponse = { + override def getThrottledResponse(throttleTimeMs: Int, nodeEndpoints: util.List[Node]): FetchResponse = { session.synchronized { // Check to make sure that the session epoch didn't change in between // creating this fetch context and generating this response. @@ -563,9 +563,9 @@ class IncrementalFetchContext(private val time: Time, if (session.epoch != expectedEpoch) { info(s"Incremental fetch session ${session.id} expected epoch $expectedEpoch, but " + s"got ${session.epoch}. Possible duplicate request.") - FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, throttleTimeMs, session.id, new FetchSession.RESP_MAP) + FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, throttleTimeMs, session.id, new FetchSession.RESP_MAP, nodeEndpoints) } else { - FetchResponse.of(Errors.NONE, throttleTimeMs, session.id, new FetchSession.RESP_MAP) + FetchResponse.of(Errors.NONE, throttleTimeMs, session.id, new FetchSession.RESP_MAP, nodeEndpoints) } } } diff --git a/core/src/main/scala/kafka/server/FinalizedFeatureChangeListener.scala b/core/src/main/scala/kafka/server/FinalizedFeatureChangeListener.scala deleted file mode 100644 index 1668118d63e1e..0000000000000 --- a/core/src/main/scala/kafka/server/FinalizedFeatureChangeListener.scala +++ /dev/null @@ -1,265 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import kafka.server.metadata.{FeatureCacheUpdateException, ZkMetadataCache} - -import java.util.concurrent.{CountDownLatch, LinkedBlockingQueue, TimeUnit} -import kafka.utils.Logging -import kafka.zk.{FeatureZNode, FeatureZNodeStatus, KafkaZkClient, ZkVersion} -import kafka.zookeeper.{StateChangeHandler, ZNodeChangeHandler} -import org.apache.kafka.common.internals.FatalExitError -import org.apache.kafka.server.util.ShutdownableThread - -import scala.concurrent.TimeoutException - -/** - * Listens to changes in the ZK feature node, via the ZK client. Whenever a change notification - * is received from ZK, the feature cache in FinalizedFeatureCache is asynchronously updated - * to the latest features read from ZK. The cache updates are serialized through a single - * notification processor thread. 
- * - * This updates the features cached in ZkMetadataCache - * - * @param finalizedFeatureCache the finalized feature cache - * @param zkClient the Zookeeper client - */ -class FinalizedFeatureChangeListener(private val finalizedFeatureCache: ZkMetadataCache, - private val zkClient: KafkaZkClient) extends Logging { - - /** - * Helper class used to update the FinalizedFeatureCache. - * - * @param featureZkNodePath the path to the ZK feature node to be read - * @param maybeNotifyOnce an optional latch that can be used to notify the caller when an - * updateOrThrow() operation is over - */ - private class FeatureCacheUpdater(featureZkNodePath: String, maybeNotifyOnce: Option[CountDownLatch]) { - - def this(featureZkNodePath: String) = this(featureZkNodePath, Option.empty) - - /** - * Updates the feature cache in FinalizedFeatureCache with the latest features read from the - * ZK node in featureZkNodePath. If the cache update is not successful, then, a suitable - * exception is raised. - * - * NOTE: if a notifier was provided in the constructor, then, this method can be invoked exactly - * once successfully. A subsequent invocation will raise an exception. - * - * @throws IllegalStateException, if a non-empty notifier was provided in the constructor, and - * this method is called again after a successful previous invocation. - * @throws FeatureCacheUpdateException, if there was an error in updating the - * FinalizedFeatureCache. - */ - def updateLatestOrThrow(): Unit = { - maybeNotifyOnce.foreach(notifier => { - if (notifier.getCount != 1) { - throw new IllegalStateException( - "Can not notify after updateLatestOrThrow was called more than once successfully.") - } - }) - - debug(s"Reading feature ZK node at path: $featureZkNodePath") - val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(featureZkNodePath) - - // There are 4 cases: - // - // (empty dataBytes, valid version) => The empty dataBytes will fail FeatureZNode deserialization. - // FeatureZNode, when present in ZK, can not have empty contents. - // (non-empty dataBytes, valid version) => This is a valid case, and should pass FeatureZNode deserialization - // if dataBytes contains valid data. - // (empty dataBytes, unknown version) => This is a valid case, and this can happen if the FeatureZNode - // does not exist in ZK. - // (non-empty dataBytes, unknown version) => This case is impossible, since, KafkaZkClient.getDataAndVersion - // API ensures that unknown version is returned only when the - // ZK node is absent. Therefore dataBytes should be empty in such - // a case. 
- if (version == ZkVersion.UnknownVersion) { - info(s"Feature ZK node at path: $featureZkNodePath does not exist") - finalizedFeatureCache.clearFeatures() - } else { - var maybeFeatureZNode: Option[FeatureZNode] = Option.empty - try { - maybeFeatureZNode = Some(FeatureZNode.decode(mayBeFeatureZNodeBytes.get)) - } catch { - case e: IllegalArgumentException => { - error(s"Unable to deserialize feature ZK node at path: $featureZkNodePath", e) - finalizedFeatureCache.clearFeatures() - } - } - maybeFeatureZNode.foreach(featureZNode => { - featureZNode.status match { - case FeatureZNodeStatus.Disabled => { - info(s"Feature ZK node at path: $featureZkNodePath is in disabled status.") - finalizedFeatureCache.clearFeatures() - } - case FeatureZNodeStatus.Enabled => { - finalizedFeatureCache.updateFeaturesOrThrow(featureZNode.features.toMap, version) - } - case _ => throw new IllegalStateException(s"Unexpected FeatureZNodeStatus found in $featureZNode") - } - }) - } - - maybeNotifyOnce.foreach(notifier => notifier.countDown()) - } - - /** - * Waits until at least a single updateLatestOrThrow completes successfully. This method returns - * immediately if an updateLatestOrThrow call had already completed successfully. - * - * @param waitTimeMs the timeout for the wait operation - * - * @throws TimeoutException if the wait can not be completed in waitTimeMs - * milli seconds - */ - def awaitUpdateOrThrow(waitTimeMs: Long): Unit = { - maybeNotifyOnce.foreach(notifier => { - if (!notifier.await(waitTimeMs, TimeUnit.MILLISECONDS)) { - throw new TimeoutException( - s"Timed out after waiting for ${waitTimeMs}ms for FeatureCache to be updated.") - } - }) - } - } - - /** - * A shutdownable thread to process feature node change notifications that are populated into the - * queue. If any change notification can not be processed successfully (unless it is due to an - * interrupt), the thread treats it as a fatal event and triggers Broker exit. - * - * @param name name of the thread - */ - private class ChangeNotificationProcessorThread(name: String) extends ShutdownableThread(name) with Logging { - - this.logIdent = logPrefix - - override def doWork(): Unit = { - try { - queue.take.updateLatestOrThrow() - } catch { - case ie: InterruptedException => - // While the queue is empty and this thread is blocking on taking an item from the queue, - // a concurrent call to FinalizedFeatureChangeListener.close() could interrupt the thread - // and cause an InterruptedException to be raised from queue.take(). In such a case, it is - // safe to ignore the exception if the thread is being shutdown. We raise the exception - // here again, because, it is ignored by ShutdownableThread if it is shutting down. - throw ie - case cacheUpdateException: FeatureCacheUpdateException => - error("Failed to process feature ZK node change event. The broker will eventually exit.", cacheUpdateException) - throw new FatalExitError(1) - case e: Exception => - // do not exit for exceptions unrelated to cache change processing (e.g. ZK session expiration) - warn("Unexpected exception in feature ZK node change event processing; will continue processing.", e) - } - } - } - - // Feature ZK node change handler. 
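The FinalizedFeatureChangeListener being deleted here serializes feature-cache updates by pushing tasks onto a queue that a single processor thread drains (see the ChangeNotificationProcessorThread above and the LinkedBlockingQueue declared below). A stripped-down, illustrative-only sketch of that serialization pattern (names are hypothetical, not the removed classes):

    import java.util.concurrent.LinkedBlockingQueue

    // Illustrative only: a single consumer thread drains queued update tasks, so the
    // updates themselves are applied strictly one at a time.
    object SingleConsumerQueueSketch {
      private val queue = new LinkedBlockingQueue[Runnable]()

      private val worker: Runnable = () => {
        while (!Thread.currentThread().isInterrupted) {
          try queue.take().run()
          catch { case _: InterruptedException => Thread.currentThread().interrupt() }
        }
      }

      private val processor = new Thread(worker, "update-processor-sketch")
      processor.setDaemon(true)

      def main(args: Array[String]): Unit = {
        processor.start()
        queue.add(() => println("applied update 1"))
        queue.add(() => println("applied update 2"))
        Thread.sleep(100) // let the processor drain the queue before the JVM exits
      }
    }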
- private object FeatureZNodeChangeHandler extends ZNodeChangeHandler { - override val path: String = FeatureZNode.path - - override def handleCreation(): Unit = { - info(s"Feature ZK node created at path: $path") - queue.add(new FeatureCacheUpdater(path)) - } - - override def handleDataChange(): Unit = { - info(s"Feature ZK node updated at path: $path") - queue.add(new FeatureCacheUpdater(path)) - } - - override def handleDeletion(): Unit = { - warn(s"Feature ZK node deleted at path: $path") - // This event may happen, rarely (ex: ZK corruption or operational error). - // In such a case, we prefer to just log a warning and treat the case as if the node is absent, - // and populate the FinalizedFeatureCache with empty finalized features. - queue.add(new FeatureCacheUpdater(path)) - } - } - - object ZkStateChangeHandler extends StateChangeHandler { - val path: String = FeatureZNode.path - - override val name: String = path - - override def afterInitializingSession(): Unit = { - queue.add(new FeatureCacheUpdater(path)) - } - } - - private val queue = new LinkedBlockingQueue[FeatureCacheUpdater] - - private val thread = new ChangeNotificationProcessorThread("feature-zk-node-event-process-thread") - - /** - * This method initializes the feature ZK node change listener. Optionally, it also ensures to - * update the FinalizedFeatureCache once with the latest contents of the feature ZK node - * (if the node exists). This step helps ensure that feature incompatibilities (if any) in brokers - * are conveniently detected before the initOrThrow() method returns to the caller. If feature - * incompatibilities are detected, this method will throw an Exception to the caller, and the Broker - * will exit eventually. - * - * @param waitOnceForCacheUpdateMs # of milli seconds to wait for feature cache to be updated once. - * (should be > 0) - * - * @throws Exception if feature incompatibility check could not be finished in a timely manner - */ - def initOrThrow(waitOnceForCacheUpdateMs: Long): Unit = { - if (waitOnceForCacheUpdateMs <= 0) { - throw new IllegalArgumentException( - s"Expected waitOnceForCacheUpdateMs > 0, but provided: $waitOnceForCacheUpdateMs") - } - - thread.start() - zkClient.registerStateChangeHandler(ZkStateChangeHandler) - zkClient.registerZNodeChangeHandlerAndCheckExistence(FeatureZNodeChangeHandler) - val ensureCacheUpdateOnce = new FeatureCacheUpdater( - FeatureZNodeChangeHandler.path, Some(new CountDownLatch(1))) - queue.add(ensureCacheUpdateOnce) - try { - ensureCacheUpdateOnce.awaitUpdateOrThrow(waitOnceForCacheUpdateMs) - } catch { - case e: Exception => { - close() - throw e - } - } - } - - /** - * Closes the feature ZK node change listener by unregistering the listener from ZK client, - * clearing the queue and shutting down the ChangeNotificationProcessorThread. - */ - def close(): Unit = { - zkClient.unregisterStateChangeHandler(ZkStateChangeHandler.name) - zkClient.unregisterZNodeChangeHandler(FeatureZNodeChangeHandler.path) - queue.clear() - thread.shutdown() - } - - // For testing only. - def isListenerInitiated: Boolean = { - thread.isRunning && thread.isAlive - } - - // For testing only. 
- def isListenerDead: Boolean = { - !thread.isRunning && !thread.isAlive - } -} diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 21942229d8723..3f5d7bb8a7971 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -17,33 +17,23 @@ package kafka.server -import kafka.controller.ReplicaAssignment import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator} import kafka.network.RequestChannel import kafka.server.QuotaFactory.{QuotaManagers, UNBOUNDED_QUOTA} import kafka.server.handlers.DescribeTopicPartitionsRequestHandler import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache} import kafka.server.share.SharePartitionManager -import kafka.utils.{CoreUtils, Logging} +import kafka.utils.Logging import org.apache.kafka.admin.AdminUtils import org.apache.kafka.clients.CommonClientConfigs -import org.apache.kafka.clients.admin.AlterConfigOp.OpType -import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry, EndpointType} +import org.apache.kafka.clients.admin.EndpointType import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.acl.AclOperation._ -import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, SHARE_GROUP_STATE_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME, isInternal} import org.apache.kafka.common.internals.{FatalExitError, Topic} import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.{AddPartitionsToTxnResult, AddPartitionsToTxnResultCollection} -import org.apache.kafka.common.message.AlterConfigsResponseData.AlterConfigsResourceResponse -import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.{ReassignablePartitionResponse, ReassignableTopicResponse} -import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult -import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic -import org.apache.kafka.common.message.CreateTopicsResponseData.{CreatableTopicResult, CreatableTopicResultCollection} import org.apache.kafka.common.message.DeleteRecordsResponseData.{DeleteRecordsPartitionResult, DeleteRecordsTopicResult} -import org.apache.kafka.common.message.DeleteTopicsResponseData.{DeletableTopicResult, DeletableTopicResultCollection} -import org.apache.kafka.common.message.ElectLeadersResponseData.{PartitionResult, ReplicaElectionResult} import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} @@ -52,7 +42,7 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetFor import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult, OffsetForLeaderTopicResultCollection} import org.apache.kafka.common.message._ import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.{ListenerName, NetworkSend, Send} +import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors} import org.apache.kafka.common.record._ import org.apache.kafka.common.replica.ClientMetadata @@ -71,10 +61,7 @@ import 
org.apache.kafka.coordinator.group.{Group, GroupCoordinator} import org.apache.kafka.coordinator.share.ShareCoordinator import org.apache.kafka.server.ClientMetricsManager import org.apache.kafka.server.authorizer._ -import org.apache.kafka.server.common.{GroupVersion, MetadataVersion, RequestLocal, TransactionVersion} -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_11_0_IV0, IBP_2_3_IV0} -import org.apache.kafka.server.purgatory.TopicPartitionOperationKey -import org.apache.kafka.server.record.BrokerCompressionType +import org.apache.kafka.server.common.{GroupVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.share.context.ShareFetchContext import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch @@ -82,24 +69,21 @@ import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPa import org.apache.kafka.storage.internals.log.AppendOrigin import org.apache.kafka.storage.log.metrics.BrokerTopicStats -import java.lang.{Long => JLong} -import java.nio.ByteBuffer import java.time.Duration import java.util import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} -import java.util.{Collections, Optional, OptionalInt} +import java.util.{Collections, Optional} import scala.annotation.nowarn import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.util.{Failure, Success, Try} /** * Logic to handle the various Kafka requests */ class KafkaApis(val requestChannel: RequestChannel, - val metadataSupport: MetadataSupport, + val forwardingManager: ForwardingManager, val replicaManager: ReplicaManager, val groupCoordinator: GroupCoordinator, val txnCoordinator: TransactionCoordinator, @@ -113,13 +97,13 @@ class KafkaApis(val requestChannel: RequestChannel, val authorizer: Option[Authorizer], val quotas: QuotaManagers, val fetchManager: FetchManager, - val sharePartitionManager: Option[SharePartitionManager], + val sharePartitionManager: SharePartitionManager, brokerTopicStats: BrokerTopicStats, val clusterId: String, time: Time, val tokenManager: DelegationTokenManager, val apiVersionManager: ApiVersionManager, - val clientMetricsManager: Option[ClientMetricsManager] + val clientMetricsManager: ClientMetricsManager ) extends ApiRequestHandler with Logging { type FetchResponseStats = Map[TopicPartition, RecordValidationStats] @@ -140,21 +124,15 @@ class KafkaApis(val requestChannel: RequestChannel, info("Shutdown complete.") } - private def isForwardingEnabled(request: RequestChannel.Request): Boolean = { - metadataSupport.forwardingManager.isDefined && request.context.principalSerde.isPresent - } - - private def maybeForwardToController( - request: RequestChannel.Request, - handler: RequestChannel.Request => Unit - ): Unit = { + private def forwardToController(request: RequestChannel.Request): Unit = { def responseCallback(responseOpt: Option[AbstractResponse]): Unit = { responseOpt match { case Some(response) => requestHelper.sendForwardedResponse(request, response) case None => handleInvalidVersionsDuringForwarding(request) } } - metadataSupport.maybeForward(request, handler, responseCallback) + + forwardingManager.forwardRequest(request, responseCallback) } private def handleInvalidVersionsDuringForwarding(request: RequestChannel.Request): Unit = { @@ -164,16 +142,6 @@ class KafkaApis(val 
requestChannel: RequestChannel, requestChannel.closeConnection(request, Collections.emptyMap()) } - private def forwardToControllerOrFail( - request: RequestChannel.Request - ): Unit = { - def errorHandler(request: RequestChannel.Request): Unit = { - throw new IllegalStateException(s"Unable to forward $request to the controller") - } - - maybeForwardToController(request, errorHandler) - } - /** * Top-level method that handles all requests and multiplexes to the right api */ @@ -199,10 +167,6 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.FETCH => handleFetchRequest(request) case ApiKeys.LIST_OFFSETS => handleListOffsetRequest(request) case ApiKeys.METADATA => handleTopicMetadataRequest(request) - case ApiKeys.LEADER_AND_ISR => handleLeaderAndIsrRequest(request) - case ApiKeys.STOP_REPLICA => handleStopReplicaRequest(request) - case ApiKeys.UPDATE_METADATA => handleUpdateMetadataRequest(request, requestLocal) - case ApiKeys.CONTROLLED_SHUTDOWN => handleControlledShutdownRequest(request) case ApiKeys.OFFSET_COMMIT => handleOffsetCommitRequest(request, requestLocal).exceptionally(handleError) case ApiKeys.OFFSET_FETCH => handleOffsetFetchRequest(request).exceptionally(handleError) case ApiKeys.FIND_COORDINATOR => handleFindCoordinatorRequest(request) @@ -214,8 +178,8 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.LIST_GROUPS => handleListGroupsRequest(request).exceptionally(handleError) case ApiKeys.SASL_HANDSHAKE => handleSaslHandshakeRequest(request) case ApiKeys.API_VERSIONS => handleApiVersionsRequest(request) - case ApiKeys.CREATE_TOPICS => maybeForwardToController(request, handleCreateTopicsRequest) - case ApiKeys.DELETE_TOPICS => maybeForwardToController(request, handleDeleteTopicsRequest) + case ApiKeys.CREATE_TOPICS => forwardToController(request) + case ApiKeys.DELETE_TOPICS => forwardToController(request) case ApiKeys.DELETE_RECORDS => handleDeleteRecordsRequest(request) case ApiKeys.INIT_PRODUCER_ID => handleInitProducerIdRequest(request, requestLocal) case ApiKeys.OFFSET_FOR_LEADER_EPOCH => handleOffsetForLeaderEpochRequest(request) @@ -225,14 +189,14 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.WRITE_TXN_MARKERS => handleWriteTxnMarkersRequest(request, requestLocal) case ApiKeys.TXN_OFFSET_COMMIT => handleTxnOffsetCommitRequest(request, requestLocal).exceptionally(handleError) case ApiKeys.DESCRIBE_ACLS => handleDescribeAcls(request) - case ApiKeys.CREATE_ACLS => maybeForwardToController(request, handleCreateAcls) - case ApiKeys.DELETE_ACLS => maybeForwardToController(request, handleDeleteAcls) + case ApiKeys.CREATE_ACLS => forwardToController(request) + case ApiKeys.DELETE_ACLS => forwardToController(request) case ApiKeys.ALTER_CONFIGS => handleAlterConfigsRequest(request) case ApiKeys.DESCRIBE_CONFIGS => handleDescribeConfigsRequest(request) case ApiKeys.ALTER_REPLICA_LOG_DIRS => handleAlterReplicaLogDirsRequest(request) case ApiKeys.DESCRIBE_LOG_DIRS => handleDescribeLogDirsRequest(request) case ApiKeys.SASL_AUTHENTICATE => handleSaslAuthenticateRequest(request) - case ApiKeys.CREATE_PARTITIONS => maybeForwardToController(request, handleCreatePartitionsRequest) + case ApiKeys.CREATE_PARTITIONS => forwardToController(request) // Create, renew and expire DelegationTokens must first validate that the connection // itself is not authenticated with a delegation token before maybeForwardToController. 
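For reference, a minimal self-contained sketch (not part of this patch) of the unconditional forwarding shape introduced above, where every controller-only API key is handed straight to the forwarding manager; Request, Response and ForwardingManager below are hypothetical stand-ins for the broker's RequestChannel.Request, AbstractResponse and forwarding machinery:

object ForwardingSketch {
  trait Request
  trait Response

  trait ForwardingManager {
    // Invokes the callback with Some(response) from the controller, or None when the
    // controller cannot handle this request version.
    def forwardRequest(request: Request, onResponse: Option[Response] => Unit): Unit
  }

  // There is no longer a local ZK-backed handler to fall back to: the request is
  // always forwarded, and the callback either relays the controller's response or
  // reports the invalid-version case.
  def forwardToController(
    forwardingManager: ForwardingManager,
    request: Request,
    sendForwardedResponse: (Request, Response) => Unit,
    handleInvalidVersions: Request => Unit
  ): Unit = {
    forwardingManager.forwardRequest(request, {
      case Some(response) => sendForwardedResponse(request, response)
      case None => handleInvalidVersions(request)
    })
  }
}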
case ApiKeys.CREATE_DELEGATION_TOKEN => handleCreateTokenRequest(request) @@ -240,33 +204,30 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.EXPIRE_DELEGATION_TOKEN => handleExpireTokenRequest(request) case ApiKeys.DESCRIBE_DELEGATION_TOKEN => handleDescribeTokensRequest(request) case ApiKeys.DELETE_GROUPS => handleDeleteGroupsRequest(request, requestLocal).exceptionally(handleError) - case ApiKeys.ELECT_LEADERS => maybeForwardToController(request, handleElectLeaders) + case ApiKeys.ELECT_LEADERS => forwardToController(request) case ApiKeys.INCREMENTAL_ALTER_CONFIGS => handleIncrementalAlterConfigsRequest(request) - case ApiKeys.ALTER_PARTITION_REASSIGNMENTS => maybeForwardToController(request, handleAlterPartitionReassignmentsRequest) - case ApiKeys.LIST_PARTITION_REASSIGNMENTS => maybeForwardToController(request, handleListPartitionReassignmentsRequest) + case ApiKeys.ALTER_PARTITION_REASSIGNMENTS => forwardToController(request) + case ApiKeys.LIST_PARTITION_REASSIGNMENTS => forwardToController(request) case ApiKeys.OFFSET_DELETE => handleOffsetDeleteRequest(request, requestLocal).exceptionally(handleError) case ApiKeys.DESCRIBE_CLIENT_QUOTAS => handleDescribeClientQuotasRequest(request) - case ApiKeys.ALTER_CLIENT_QUOTAS => maybeForwardToController(request, handleAlterClientQuotasRequest) + case ApiKeys.ALTER_CLIENT_QUOTAS => forwardToController(request) case ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS => handleDescribeUserScramCredentialsRequest(request) - case ApiKeys.ALTER_USER_SCRAM_CREDENTIALS => maybeForwardToController(request, handleAlterUserScramCredentialsRequest) - case ApiKeys.ALTER_PARTITION => handleAlterPartitionRequest(request) - case ApiKeys.UPDATE_FEATURES => maybeForwardToController(request, handleUpdateFeatures) - case ApiKeys.ENVELOPE => handleEnvelope(request, requestLocal) + case ApiKeys.ALTER_USER_SCRAM_CREDENTIALS => forwardToController(request) + case ApiKeys.UPDATE_FEATURES => forwardToController(request) case ApiKeys.DESCRIBE_CLUSTER => handleDescribeCluster(request) case ApiKeys.DESCRIBE_PRODUCERS => handleDescribeProducersRequest(request) - case ApiKeys.UNREGISTER_BROKER => forwardToControllerOrFail(request) + case ApiKeys.UNREGISTER_BROKER => forwardToController(request) case ApiKeys.DESCRIBE_TRANSACTIONS => handleDescribeTransactionsRequest(request) case ApiKeys.LIST_TRANSACTIONS => handleListTransactionsRequest(request) - case ApiKeys.ALLOCATE_PRODUCER_IDS => handleAllocateProducerIdsRequest(request) - case ApiKeys.DESCRIBE_QUORUM => forwardToControllerOrFail(request) + case ApiKeys.DESCRIBE_QUORUM => forwardToController(request) case ApiKeys.CONSUMER_GROUP_HEARTBEAT => handleConsumerGroupHeartbeat(request).exceptionally(handleError) case ApiKeys.CONSUMER_GROUP_DESCRIBE => handleConsumerGroupDescribe(request).exceptionally(handleError) case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => handleDescribeTopicPartitionsRequest(request) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) case ApiKeys.LIST_CLIENT_METRICS_RESOURCES => handleListClientMetricsResources(request) - case ApiKeys.ADD_RAFT_VOTER => forwardToControllerOrFail(request) - case ApiKeys.REMOVE_RAFT_VOTER => forwardToControllerOrFail(request) + case ApiKeys.ADD_RAFT_VOTER => forwardToController(request) + case ApiKeys.REMOVE_RAFT_VOTER => forwardToController(request) case ApiKeys.SHARE_GROUP_HEARTBEAT => handleShareGroupHeartbeat(request).exceptionally(handleError) case 
ApiKeys.SHARE_GROUP_DESCRIBE => handleShareGroupDescribe(request).exceptionally(handleError) case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request) @@ -276,6 +237,7 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.WRITE_SHARE_GROUP_STATE => handleWriteShareGroupStateRequest(request) case ApiKeys.DELETE_SHARE_GROUP_STATE => handleDeleteShareGroupStateRequest(request) case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => handleReadShareGroupStateSummaryRequest(request) + case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => handleDescribeShareGroupOffsetsRequest(request) case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}") } } catch { @@ -297,156 +259,6 @@ class KafkaApis(val requestChannel: RequestChannel, replicaManager.tryCompleteActions() } - def handleLeaderAndIsrRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - // ensureTopicExists is only for client facing requests - // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they - // stop serving data to clients for the topic being deleted - val correlationId = request.header.correlationId - val leaderAndIsrRequest = request.body[LeaderAndIsrRequest] - - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - if (zkSupport.isBrokerEpochStale(leaderAndIsrRequest.brokerEpoch, leaderAndIsrRequest.isKRaftController)) { - // When the broker restarts very quickly, it is possible for this broker to receive request intended - // for its previous generation so the broker should skip the stale request. - info(s"Received LeaderAndIsr request with broker epoch ${leaderAndIsrRequest.brokerEpoch} " + - s"smaller than the current broker epoch ${zkSupport.controller.brokerEpoch} from " + - s"controller ${leaderAndIsrRequest.controllerId} with epoch ${leaderAndIsrRequest.controllerEpoch}.") - requestHelper.sendResponseExemptThrottle(request, leaderAndIsrRequest.getErrorResponse(0, Errors.STALE_BROKER_EPOCH.exception)) - } else { - val response = replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest, - RequestHandlerHelper.onLeadershipChange(groupCoordinator, txnCoordinator, _, _)) - requestHelper.sendResponseExemptThrottle(request, response) - } - } - - def handleStopReplicaRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - // ensureTopicExists is only for client facing requests - // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they - // stop serving data to clients for the topic being deleted - val stopReplicaRequest = request.body[StopReplicaRequest] - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - if (zkSupport.isBrokerEpochStale(stopReplicaRequest.brokerEpoch, stopReplicaRequest.isKRaftController)) { - // When the broker restarts very quickly, it is possible for this broker to receive request intended - // for its previous generation so the broker should skip the stale request. 
- info(s"Received StopReplica request with broker epoch ${stopReplicaRequest.brokerEpoch} " + - s"smaller than the current broker epoch ${zkSupport.controller.brokerEpoch} from " + - s"controller ${stopReplicaRequest.controllerId} with epoch ${stopReplicaRequest.controllerEpoch}.") - requestHelper.sendResponseExemptThrottle(request, new StopReplicaResponse( - new StopReplicaResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code))) - } else { - val partitionStates = stopReplicaRequest.partitionStates().asScala - val (result, error) = replicaManager.stopReplicas( - request.context.correlationId, - stopReplicaRequest.controllerId, - stopReplicaRequest.controllerEpoch, - partitionStates) - // Clear the coordinator caches in case we were the leader. In the case of a reassignment, we - // cannot rely on the LeaderAndIsr API for this since it is only sent to active replicas. - result.foreachEntry { (topicPartition, error) => - if (error == Errors.NONE) { - val partitionState = partitionStates(topicPartition) - if (topicPartition.topic == GROUP_METADATA_TOPIC_NAME - && partitionState.deletePartition) { - val leaderEpoch = if (partitionState.leaderEpoch >= 0) - OptionalInt.of(partitionState.leaderEpoch) - else - OptionalInt.empty - groupCoordinator.onResignation(topicPartition.partition, leaderEpoch) - } else if (topicPartition.topic == TRANSACTION_STATE_TOPIC_NAME - && partitionState.deletePartition) { - val leaderEpoch = if (partitionState.leaderEpoch >= 0) - Some(partitionState.leaderEpoch) - else - None - txnCoordinator.onResignation(topicPartition.partition, coordinatorEpoch = leaderEpoch) - } - } - } - - def toStopReplicaPartition(tp: TopicPartition, error: Errors) = - new StopReplicaResponseData.StopReplicaPartitionError() - .setTopicName(tp.topic) - .setPartitionIndex(tp.partition) - .setErrorCode(error.code) - - requestHelper.sendResponseExemptThrottle(request, new StopReplicaResponse(new StopReplicaResponseData() - .setErrorCode(error.code) - .setPartitionErrors(result.map { - case (tp, error) => toStopReplicaPartition(tp, error) - }.toBuffer.asJava))) - } - - CoreUtils.swallow(replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads(), this) - } - - def handleUpdateMetadataRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - val correlationId = request.header.correlationId - val updateMetadataRequest = request.body[UpdateMetadataRequest] - - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - if (zkSupport.isBrokerEpochStale(updateMetadataRequest.brokerEpoch, updateMetadataRequest.isKRaftController)) { - // When the broker restarts very quickly, it is possible for this broker to receive request intended - // for its previous generation so the broker should skip the stale request. 
- info(s"Received UpdateMetadata request with broker epoch ${updateMetadataRequest.brokerEpoch} " + - s"smaller than the current broker epoch ${zkSupport.controller.brokerEpoch} from " + - s"controller ${updateMetadataRequest.controllerId} with epoch ${updateMetadataRequest.controllerEpoch}.") - requestHelper.sendResponseExemptThrottle(request, - new UpdateMetadataResponse(new UpdateMetadataResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code))) - } else { - val deletedPartitions = replicaManager.maybeUpdateMetadataCache(correlationId, updateMetadataRequest) - if (deletedPartitions.nonEmpty) { - groupCoordinator.onPartitionsDeleted(deletedPartitions.asJava, requestLocal.bufferSupplier) - } - - if (zkSupport.adminManager.hasDelayedTopicOperations) { - updateMetadataRequest.partitionStates.forEach { partitionState => - zkSupport.adminManager.tryCompleteDelayedTopicOperations(partitionState.topicName) - } - } - - quotas.clientQuotaCallback.ifPresent { callback => - if (callback.updateClusterMetadata(metadataCache.getClusterMetadata(clusterId, request.context.listenerName))) { - quotas.fetch.updateQuotaMetricConfigs() - quotas.produce.updateQuotaMetricConfigs() - quotas.request.updateQuotaMetricConfigs() - quotas.controllerMutation.updateQuotaMetricConfigs() - } - } - if (replicaManager.hasDelayedElectionOperations) { - updateMetadataRequest.partitionStates.forEach { partitionState => - val tp = new TopicPartition(partitionState.topicName, partitionState.partitionIndex) - replicaManager.tryCompleteElection(new TopicPartitionOperationKey(tp)) - } - } - requestHelper.sendResponseExemptThrottle(request, new UpdateMetadataResponse( - new UpdateMetadataResponseData().setErrorCode(Errors.NONE.code))) - } - } - - def handleControlledShutdownRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - // ensureTopicExists is only for client facing requests - // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they - // stop serving data to clients for the topic being deleted - val controlledShutdownRequest = request.body[ControlledShutdownRequest] - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - - def controlledShutdownCallback(controlledShutdownResult: Try[Set[TopicPartition]]): Unit = { - val response = controlledShutdownResult match { - case Success(partitionsRemaining) => - ControlledShutdownResponse.prepareResponse(Errors.NONE, partitionsRemaining.asJava) - - case Failure(throwable) => - controlledShutdownRequest.getErrorResponse(throwable) - } - requestHelper.sendResponseExemptThrottle(request, response) - } - zkSupport.controller.controlledShutdown(controlledShutdownRequest.data.brokerId, controlledShutdownRequest.data.brokerEpoch, controlledShutdownCallback) - } - /** * Handle an offset commit request */ @@ -460,12 +272,6 @@ class KafkaApis(val requestChannel: RequestChannel, if (!authHelper.authorize(request.context, READ, GROUP, offsetCommitRequest.data.groupId)) { requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) - } else if (offsetCommitRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) { - // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic - // until we are sure that all brokers support it. 
If static group being loaded by an older coordinator, it will discard - // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. - requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) } else { val authorizedTopics = authHelper.filterByAuthorized( request.context, @@ -508,14 +314,6 @@ class KafkaApis(val requestChannel: RequestChannel, if (authorizedTopicsRequest.isEmpty) { requestHelper.sendMaybeThrottle(request, responseBuilder.build()) CompletableFuture.completedFuture(()) - } else if (request.header.apiVersion == 0) { - // For version 0, always store offsets in ZK. - commitOffsetsToZookeeper( - request, - offsetCommitRequest, - authorizedTopicsRequest, - responseBuilder - ) } else { // For version > 0, store offsets in Coordinator. commitOffsetsToCoordinator( @@ -529,41 +327,6 @@ class KafkaApis(val requestChannel: RequestChannel, } } - private def commitOffsetsToZookeeper( - request: RequestChannel.Request, - offsetCommitRequest: OffsetCommitRequest, - authorizedTopicsRequest: mutable.ArrayBuffer[OffsetCommitRequestData.OffsetCommitRequestTopic], - responseBuilder: OffsetCommitResponse.Builder - ): CompletableFuture[Unit] = { - val zkSupport = metadataSupport.requireZkOrThrow( - KafkaApis.unsupported("Version 0 offset commit requests")) - - authorizedTopicsRequest.foreach { topic => - topic.partitions.forEach { partition => - val error = try { - if (partition.committedMetadata != null && partition.committedMetadata.length > config.groupCoordinatorConfig.offsetMetadataMaxSize) { - Errors.OFFSET_METADATA_TOO_LARGE - } else { - zkSupport.zkClient.setOrCreateConsumerOffset( - offsetCommitRequest.data.groupId, - new TopicPartition(topic.name, partition.partitionIndex), - partition.committedOffset - ) - Errors.NONE - } - } catch { - case e: Throwable => - Errors.forException(e) - } - - responseBuilder.addPartition(topic.name, partition.partitionIndex, error) - } - } - - requestHelper.sendMaybeThrottle(request, responseBuilder.build()) - CompletableFuture.completedFuture[Unit](()) - } - private def commitOffsetsToCoordinator( request: RequestChannel.Request, offsetCommitRequest: OffsetCommitRequest, @@ -741,7 +504,7 @@ class KafkaApis(val requestChannel: RequestChannel, sendResponseCallback(Map.empty) else { val internalTopicsAllowed = request.header.clientId == AdminUtils.ADMIN_CLIENT_ID - val transactionSupportedOperation = if (request.header.apiVersion > 10) genericError else defaultError + val transactionSupportedOperation = AddPartitionsToTxnManager.produceRequestVersionToTransactionSupportedOperation(request.header.apiVersion()) // call the replica manager to append messages to the replicas replicaManager.handleProduceAppend( timeout = produceRequest.timeout.toLong, @@ -834,81 +597,6 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def maybeConvertFetchedData(tp: TopicIdPartition, - partitionData: FetchResponseData.PartitionData): FetchResponseData.PartitionData = { - // We will never return a logConfig when the topic is unresolved and the name is null. This is ok since we won't have any records to convert. - val logConfig = replicaManager.getLogConfig(tp.topicPartition) - - if (logConfig.exists(_.compressionType == BrokerCompressionType.ZSTD) && versionId < 10) { - trace(s"Fetching messages is disabled for ZStandard compressed partition $tp. 
Sending unsupported version response to $clientId.") - FetchResponse.partitionResponse(tp, Errors.UNSUPPORTED_COMPRESSION_TYPE) - } else { - // Down-conversion of fetched records is needed when the on-disk magic value is greater than what is - // supported by the fetch request version. - // If the inter-broker protocol version is `3.0` or higher, the log config message format version is - // always `3.0` (i.e. magic value is `v2`). As a result, we always go through the down-conversion - // path if the fetch version is 3 or lower (in rare cases the down-conversion may not be needed, but - // it's not worth optimizing for them). - // If the inter-broker protocol version is lower than `3.0`, we rely on the log config message format - // version as a proxy for the on-disk magic value to maintain the long-standing behavior originally - // introduced in Kafka 0.10.0. An important implication is that it's unsafe to downgrade the message - // format version after a single message has been produced (the broker would return the message(s) - // without down-conversion irrespective of the fetch version). - val unconvertedRecords = FetchResponse.recordsOrFail(partitionData) - val downConvertMagic = - logConfig.map(_.recordVersion.value).flatMap { magic => - if (magic > RecordBatch.MAGIC_VALUE_V0 && versionId <= 1) - Some(RecordBatch.MAGIC_VALUE_V0) - else if (magic > RecordBatch.MAGIC_VALUE_V1 && versionId <= 3) - Some(RecordBatch.MAGIC_VALUE_V1) - else - None - } - - downConvertMagic match { - case Some(magic) => - // For fetch requests from clients, check if down-conversion is disabled for the particular partition - if (!fetchRequest.isFromFollower && !logConfig.forall(_.messageDownConversionEnable)) { - trace(s"Conversion to message format ${downConvertMagic.get} is disabled for partition $tp. Sending unsupported version response to $clientId.") - FetchResponse.partitionResponse(tp, Errors.UNSUPPORTED_VERSION) - } else { - try { - trace(s"Down converting records from partition $tp to message format version $magic for fetch request from $clientId") - // Because down-conversion is extremely memory intensive, we want to try and delay the down-conversion as much - // as possible. With KIP-283, we have the ability to lazily down-convert in a chunked manner. The lazy, chunked - // down-conversion always guarantees that at least one batch of messages is down-converted and sent out to the - // client. 
- new FetchResponseData.PartitionData() - .setPartitionIndex(tp.partition) - .setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code) - .setHighWatermark(partitionData.highWatermark) - .setLastStableOffset(partitionData.lastStableOffset) - .setLogStartOffset(partitionData.logStartOffset) - .setAbortedTransactions(partitionData.abortedTransactions) - .setRecords(new LazyDownConversionRecords(tp.topicPartition, unconvertedRecords, magic, fetchContext.getFetchOffset(tp).get, time)) - .setPreferredReadReplica(partitionData.preferredReadReplica()) - } catch { - case e: UnsupportedCompressionTypeException => - trace("Received unsupported compression type error during down-conversion", e) - FetchResponse.partitionResponse(tp, Errors.UNSUPPORTED_COMPRESSION_TYPE) - } - } - case None => - new FetchResponseData.PartitionData() - .setPartitionIndex(tp.partition) - .setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code) - .setHighWatermark(partitionData.highWatermark) - .setLastStableOffset(partitionData.lastStableOffset) - .setLogStartOffset(partitionData.logStartOffset) - .setAbortedTransactions(partitionData.abortedTransactions) - .setRecords(unconvertedRecords) - .setPreferredReadReplica(partitionData.preferredReadReplica) - .setDivergingEpoch(partitionData.divergingEpoch) - .setCurrentLeader(partitionData.currentLeader()) - } - } - } - // the callback for process a fetch response, invoked before throttling def processResponseCallback(responsePartitionData: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { val partitions = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] @@ -947,25 +635,9 @@ class KafkaApis(val requestChannel: RequestChannel, } erroneous.foreach { case (tp, data) => partitions.put(tp, data) } - def createResponse(throttleTimeMs: Int, unconvertedFetchResponse: FetchResponse): FetchResponse = { - // Down-convert messages for each partition if required - val convertedData = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] - unconvertedFetchResponse.data().responses().forEach { topicResponse => - topicResponse.partitions().forEach { unconvertedPartitionData => - val tp = new TopicIdPartition(topicResponse.topicId, new TopicPartition(topicResponse.topic, unconvertedPartitionData.partitionIndex())) - val error = Errors.forCode(unconvertedPartitionData.errorCode) - if (error != Errors.NONE) - debug(s"Fetch request with correlation id ${request.header.correlationId} from client $clientId " + - s"on partition $tp failed due to ${error.exceptionName}") - convertedData.put(tp, maybeConvertFetchedData(tp, unconvertedPartitionData)) - } - } - - // Prepare fetch response from converted data - val response = - FetchResponse.of(unconvertedFetchResponse.error, throttleTimeMs, unconvertedFetchResponse.sessionId, convertedData, nodeEndpoints.values.toList.asJava) + def recordBytesOutMetric(fetchResponse: FetchResponse): Unit = { // record the bytes out metrics only when the response is being sent - response.data.responses.forEach { topicResponse => + fetchResponse.data.responses.forEach { topicResponse => topicResponse.partitions.forEach { data => // If the topic name was not known, we will have no bytes out. if (topicResponse.topic != null) { @@ -974,23 +646,19 @@ class KafkaApis(val requestChannel: RequestChannel, } } } - response } if (fetchRequest.isFromFollower) { // We've already evaluated against the quota and are good to go. Just need to record it now. 
- val unconvertedFetchResponse = fetchContext.updateAndGenerateResponseData(partitions) - val responseSize = KafkaApis.sizeOfThrottledPartitions(versionId, unconvertedFetchResponse, quotas.leader) + val fetchResponse = fetchContext.updateAndGenerateResponseData(partitions, Seq.empty.asJava) + val responseSize = KafkaApis.sizeOfThrottledPartitions(versionId, fetchResponse, quotas.leader) quotas.leader.record(responseSize) - val responsePartitionsSize = unconvertedFetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum() + val responsePartitionsSize = fetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum() trace(s"Sending Fetch response with partitions.size=$responsePartitionsSize, " + - s"metadata=${unconvertedFetchResponse.sessionId}") - requestHelper.sendResponseExemptThrottle(request, createResponse(0, unconvertedFetchResponse), onFetchComplete(request)) + s"metadata=${fetchResponse.sessionId}") + recordBytesOutMetric(fetchResponse) + requestHelper.sendResponseExemptThrottle(request, fetchResponse) } else { - // Fetch size used to determine throttle time is calculated before any down conversions. - // This may be slightly different from the actual response size. But since down conversions - // result in data being loaded into memory, we should do this only when we are not going to throttle. - // // Record both bandwidth and request quota-specific values and throttle by muting the channel if any of the // quotas have been violated. If both quotas have been violated, use the max throttle time between the two // quotas. When throttled, we unrecord the recorded bandwidth quota value. @@ -1000,7 +668,7 @@ class KafkaApis(val requestChannel: RequestChannel, val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs) val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs) - val unconvertedFetchResponse = if (maxThrottleTimeMs > 0) { + val fetchResponse = if (maxThrottleTimeMs > 0) { request.apiThrottleTimeMs = maxThrottleTimeMs // Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value // from the fetch quota because we are going to return an empty response. @@ -1011,18 +679,19 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.throttle(quotas.request, request, requestThrottleTimeMs) } // If throttling is required, return an empty response. - fetchContext.getThrottledResponse(maxThrottleTimeMs) + fetchContext.getThrottledResponse(maxThrottleTimeMs, nodeEndpoints.values.toSeq.asJava) } else { // Get the actual response. This will update the fetch context. - val unconvertedFetchResponse = fetchContext.updateAndGenerateResponseData(partitions) - val responsePartitionsSize = unconvertedFetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum() + val fetchResponse = fetchContext.updateAndGenerateResponseData(partitions, nodeEndpoints.values.toSeq.asJava) + val responsePartitionsSize = fetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum() trace(s"Sending Fetch response with partitions.size=$responsePartitionsSize, " + - s"metadata=${unconvertedFetchResponse.sessionId}") - unconvertedFetchResponse + s"metadata=${fetchResponse.sessionId}") + fetchResponse } + recordBytesOutMetric(fetchResponse) // Send the response immediately. 
- requestChannel.sendResponse(request, createResponse(maxThrottleTimeMs, unconvertedFetchResponse), onFetchComplete(request)) + requestChannel.sendResponse(request, fetchResponse, None) } } @@ -1077,86 +746,6 @@ class KafkaApis(val requestChannel: RequestChannel, if (fetchRequest.isFromFollower) quotas.leader else UNBOUNDED_QUOTA def handleListOffsetRequest(request: RequestChannel.Request): Unit = { - val version = request.header.apiVersion - - def sendResponseCallback(response: List[ListOffsetsTopicResponse]): Unit = { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new ListOffsetsResponse(new ListOffsetsResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setTopics(response.asJava))) - } - - if (version == 0) - sendResponseCallback(handleListOffsetRequestV0(request)) - else - handleListOffsetRequestV1AndAbove(request, sendResponseCallback) - } - - private def handleListOffsetRequestV0(request : RequestChannel.Request) : List[ListOffsetsTopicResponse] = { - val correlationId = request.header.correlationId - val clientId = request.header.clientId - val offsetRequest = request.body[ListOffsetsRequest] - - val (authorizedRequestInfo, unauthorizedRequestInfo) = authHelper.partitionSeqByAuthorized(request.context, - DESCRIBE, TOPIC, offsetRequest.topics.asScala.toSeq)(_.name) - - val unauthorizedResponseStatus = unauthorizedRequestInfo.map(topic => - new ListOffsetsTopicResponse() - .setName(topic.name) - .setPartitions(topic.partitions.asScala.map(partition => - new ListOffsetsPartitionResponse() - .setPartitionIndex(partition.partitionIndex) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)).asJava) - ) - - val responseTopics = authorizedRequestInfo.map { topic => - val responsePartitions = topic.partitions.asScala.map { partition => - if (partition.timestamp() < ListOffsetsRequest.EARLIEST_TIMESTAMP) { - // Negative timestamps are reserved for some functions. - // For v0 requests, negative timestamps only support LATEST_TIMESTAMP (-1) and EARLIEST_TIMESTAMP (-2). 
- new ListOffsetsPartitionResponse() - .setPartitionIndex(partition.partitionIndex) - .setErrorCode(Errors.UNSUPPORTED_VERSION.code) - } else { - val topicPartition = new TopicPartition(topic.name, partition.partitionIndex) - - try { - val offsets = replicaManager.legacyFetchOffsetsForTimestamp( - topicPartition = topicPartition, - timestamp = partition.timestamp, - maxNumOffsets = partition.maxNumOffsets, - isFromConsumer = offsetRequest.replicaId == ListOffsetsRequest.CONSUMER_REPLICA_ID, - fetchOnlyFromLeader = offsetRequest.replicaId != ListOffsetsRequest.DEBUGGING_REPLICA_ID) - new ListOffsetsPartitionResponse() - .setPartitionIndex(partition.partitionIndex) - .setErrorCode(Errors.NONE.code) - .setOldStyleOffsets(offsets.map(JLong.valueOf).asJava) - } catch { - // NOTE: UnknownTopicOrPartitionException and NotLeaderOrFollowerException are special cases since these error messages - // are typically transient and there is no value in logging the entire stack trace for the same - case e @ (_ : UnknownTopicOrPartitionException | - _ : NotLeaderOrFollowerException | - _ : KafkaStorageException) => - debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format( - correlationId, clientId, topicPartition, e.getMessage)) - new ListOffsetsPartitionResponse() - .setPartitionIndex(partition.partitionIndex) - .setErrorCode(Errors.forException(e).code) - case e: Throwable => - error("Error while responding to offset request", e) - new ListOffsetsPartitionResponse() - .setPartitionIndex(partition.partitionIndex) - .setErrorCode(Errors.forException(e).code) - } - } - } - new ListOffsetsTopicResponse().setName(topic.name).setPartitions(responsePartitions.asJava) - } - (responseTopics ++ unauthorizedResponseStatus).toList - } - - private def handleListOffsetRequestV1AndAbove(request : RequestChannel.Request, - responseCallback: List[ListOffsetsTopicResponse] => Unit): Unit = { val correlationId = request.header.correlationId val clientId = request.header.clientId val offsetRequest = request.body[ListOffsetsRequest] @@ -1180,17 +769,20 @@ class KafkaApis(val requestChannel: RequestChannel, buildErrorResponse(Errors.TOPIC_AUTHORIZATION_FAILED, partition)).asJava) ) - def sendV1ResponseCallback(response: List[ListOffsetsTopicResponse]): Unit = { + def sendResponseCallback(response: Seq[ListOffsetsTopicResponse]): Unit = { val mergedResponses = response ++ unauthorizedResponseStatus - responseCallback(mergedResponses) + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + new ListOffsetsResponse(new ListOffsetsResponseData() + .setThrottleTimeMs(requestThrottleMs) + .setTopics(mergedResponses.asJava))) } if (authorizedRequestInfo.isEmpty) { - sendV1ResponseCallback(List.empty) + sendResponseCallback(Seq.empty) } else { replicaManager.fetchOffset(authorizedRequestInfo, offsetRequest.duplicatePartitions().asScala, offsetRequest.isolationLevel(), offsetRequest.replicaId(), clientId, correlationId, version, - buildErrorResponse, sendV1ResponseCallback, offsetRequest.timeoutMs()) + buildErrorResponse, sendResponseCallback, offsetRequest.timeoutMs()) } } @@ -1360,12 +952,7 @@ class KafkaApis(val requestChannel: RequestChannel, trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(completeTopicMetadata.mkString(","), brokers.mkString(","), request.header.correlationId, request.header.clientId)) - val controllerId = { - metadataCache.getControllerId.flatMap { - case ZkCachedControllerId(id) => Some(id) - case 
KRaftCachedControllerId(_) => metadataCache.getRandomAliveBrokerId - } - } + val controllerId = metadataCache.getRandomAliveBrokerId requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => MetadataResponse.prepareResponse( @@ -1401,61 +988,6 @@ class KafkaApis(val requestChannel: RequestChannel, * Handle an offset fetch request */ def handleOffsetFetchRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val version = request.header.apiVersion - if (version == 0) { - handleOffsetFetchRequestFromZookeeper(request) - } else { - handleOffsetFetchRequestFromCoordinator(request) - } - } - - private def handleOffsetFetchRequestFromZookeeper(request: RequestChannel.Request): CompletableFuture[Unit] = { - val header = request.header - val offsetFetchRequest = request.body[OffsetFetchRequest] - - def createResponse(requestThrottleMs: Int): AbstractResponse = { - val offsetFetchResponse = - // reject the request if not authorized to the group - if (!authHelper.authorize(request.context, DESCRIBE, GROUP, offsetFetchRequest.groupId)) - offsetFetchRequest.getErrorResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED) - else { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.unsupported("Version 0 offset fetch requests")) - val (authorizedPartitions, unauthorizedPartitions) = partitionByAuthorized( - offsetFetchRequest.partitions.asScala, request.context) - - // version 0 reads offsets from ZK - val authorizedPartitionData = authorizedPartitions.map { topicPartition => - try { - if (!metadataCache.contains(topicPartition)) - (topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION) - else { - val payloadOpt = zkSupport.zkClient.getConsumerOffset(offsetFetchRequest.groupId, topicPartition) - payloadOpt match { - case Some(payload) => - (topicPartition, new OffsetFetchResponse.PartitionData(payload, - Optional.empty(), OffsetFetchResponse.NO_METADATA, Errors.NONE)) - case None => - (topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION) - } - } - } catch { - case e: Throwable => - (topicPartition, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, - Optional.empty(), OffsetFetchResponse.NO_METADATA, Errors.forException(e))) - } - }.toMap - - val unauthorizedPartitionData = unauthorizedPartitions.map(_ -> OffsetFetchResponse.UNAUTHORIZED_PARTITION).toMap - new OffsetFetchResponse(requestThrottleMs, Errors.NONE, (authorizedPartitionData ++ unauthorizedPartitionData).asJava) - } - trace(s"Sending offset fetch response $offsetFetchResponse for correlation id ${header.correlationId} to client ${header.clientId}.") - offsetFetchResponse - } - requestHelper.sendResponseMaybeThrottle(request, createResponse) - CompletableFuture.completedFuture[Unit](()) - } - - private def handleOffsetFetchRequestFromCoordinator(request: RequestChannel.Request): CompletableFuture[Unit] = { val offsetFetchRequest = request.body[OffsetFetchRequest] val groups = offsetFetchRequest.groups() val requireStable = offsetFetchRequest.requireStable() @@ -1566,13 +1098,6 @@ class KafkaApis(val requestChannel: RequestChannel, } } - private def partitionByAuthorized( - seq: Seq[TopicPartition], - context: RequestContext - ): (Seq[TopicPartition], Seq[TopicPartition]) = { - authHelper.partitionSeqByAuthorized(context, DESCRIBE, TOPIC, seq)(_.topic) - } - def handleFindCoordinatorRequest(request: RequestChannel.Request): Unit = { val version = request.header.apiVersion if (version < 4) { @@ -1771,13 +1296,7 @@ class KafkaApis(val requestChannel: RequestChannel, ): 
CompletableFuture[Unit] = { val joinGroupRequest = request.body[JoinGroupRequest] - if (joinGroupRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) { - // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic - // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard - // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. - requestHelper.sendMaybeThrottle(request, joinGroupRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } else if (!authHelper.authorize(request.context, READ, GROUP, joinGroupRequest.data.groupId)) { + if (!authHelper.authorize(request.context, READ, GROUP, joinGroupRequest.data.groupId)) { requestHelper.sendMaybeThrottle(request, joinGroupRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) } else { @@ -1801,13 +1320,7 @@ class KafkaApis(val requestChannel: RequestChannel, ): CompletableFuture[Unit] = { val syncGroupRequest = request.body[SyncGroupRequest] - if (syncGroupRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) { - // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic - // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard - // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. - requestHelper.sendMaybeThrottle(request, syncGroupRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } else if (!syncGroupRequest.areMandatoryProtocolTypeAndNamePresent()) { + if (!syncGroupRequest.areMandatoryProtocolTypeAndNamePresent()) { // Starting from version 5, ProtocolType and ProtocolName fields are mandatory. requestHelper.sendMaybeThrottle(request, syncGroupRequest.getErrorResponse(Errors.INCONSISTENT_GROUP_PROTOCOL.exception)) CompletableFuture.completedFuture[Unit](()) @@ -1870,13 +1383,7 @@ class KafkaApis(val requestChannel: RequestChannel, def handleHeartbeatRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val heartbeatRequest = request.body[HeartbeatRequest] - if (heartbeatRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) { - // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic - // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard - // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. 
- requestHelper.sendMaybeThrottle(request, heartbeatRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } else if (!authHelper.authorize(request.context, READ, GROUP, heartbeatRequest.data.groupId)) { + if (!authHelper.authorize(request.context, READ, GROUP, heartbeatRequest.data.groupId)) { requestHelper.sendMaybeThrottle(request, heartbeatRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) } else { @@ -1945,245 +1452,6 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, createResponseCallback) } - def handleCreateTopicsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 6) - - def sendResponseCallback(results: CreatableTopicResultCollection): Unit = { - val responseData = new CreateTopicsResponseData() - .setTopics(results) - val response = new CreateTopicsResponse(responseData) - trace(s"Sending create topics response $responseData for correlation id " + - s"${request.header.correlationId} to client ${request.header.clientId}.") - requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, response) - } - - val createTopicsRequest = request.body[CreateTopicsRequest] - val results = new CreatableTopicResultCollection(createTopicsRequest.data.topics.size) - if (!zkSupport.controller.isActive) { - createTopicsRequest.data.topics.forEach { topic => - results.add(new CreatableTopicResult().setName(topic.name) - .setErrorCode(Errors.NOT_CONTROLLER.code)) - } - sendResponseCallback(results) - } else { - createTopicsRequest.data.topics.forEach { topic => - results.add(new CreatableTopicResult().setName(topic.name)) - } - val hasClusterAuthorization = authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME, - logIfDenied = false) - - val allowedTopicNames = { - val topicNames = createTopicsRequest - .data - .topics - .asScala - .map(_.name) - .toSet - - topicNames.diff(Set(Topic.CLUSTER_METADATA_TOPIC_NAME)) - } - - val authorizedTopics = if (hasClusterAuthorization) { - allowedTopicNames - } else { - authHelper.filterByAuthorized(request.context, CREATE, TOPIC, allowedTopicNames)(identity) - } - val authorizedForDescribeConfigs = authHelper.filterByAuthorized( - request.context, - DESCRIBE_CONFIGS, - TOPIC, - allowedTopicNames, - logIfDenied = false - )(identity).map(name => name -> results.find(name)).toMap - - results.forEach { topic => - if (topic.name() == Topic.CLUSTER_METADATA_TOPIC_NAME) { - topic.setErrorCode(Errors.INVALID_REQUEST.code) - topic.setErrorMessage(s"Creation of internal topic ${Topic.CLUSTER_METADATA_TOPIC_NAME} is prohibited.") - } else if (results.findAll(topic.name).size > 1) { - topic.setErrorCode(Errors.INVALID_REQUEST.code) - topic.setErrorMessage("Found multiple entries for this topic.") - } else if (!authorizedTopics.contains(topic.name)) { - topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - topic.setErrorMessage("Authorization failed.") - } - if (!authorizedForDescribeConfigs.contains(topic.name) && topic.name() != Topic.CLUSTER_METADATA_TOPIC_NAME) { - topic.setTopicConfigErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - } - } - val toCreate = mutable.Map[String, CreatableTopic]() - createTopicsRequest.data.topics.forEach { topic => - if 
(results.find(topic.name).errorCode == Errors.NONE.code) { - toCreate += topic.name -> topic - } - } - def handleCreateTopicsResults(errors: Map[String, ApiError]): Unit = { - errors.foreach { case (topicName, error) => - val result = results.find(topicName) - result.setErrorCode(error.error.code) - .setErrorMessage(error.message) - // Reset any configs in the response if Create failed - if (error != ApiError.NONE) { - result.setConfigs(List.empty.asJava) - .setNumPartitions(-1) - .setReplicationFactor(-1) - .setTopicConfigErrorCode(Errors.NONE.code) - } - } - sendResponseCallback(results) - } - zkSupport.adminManager.createTopics( - createTopicsRequest.data.timeoutMs, - createTopicsRequest.data.validateOnly, - toCreate, - authorizedForDescribeConfigs, - controllerMutationQuota, - handleCreateTopicsResults) - } - } - - def handleCreatePartitionsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val createPartitionsRequest = request.body[CreatePartitionsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 3) - - def sendResponseCallback(results: Map[String, ApiError]): Unit = { - val createPartitionsResults = results.map { - case (topic, error) => new CreatePartitionsTopicResult() - .setName(topic) - .setErrorCode(error.error.code) - .setErrorMessage(error.message) - }.toSeq - val response = new CreatePartitionsResponse(new CreatePartitionsResponseData() - .setResults(createPartitionsResults.asJava)) - trace(s"Sending create partitions response $response for correlation id ${request.header.correlationId} to " + - s"client ${request.header.clientId}.") - requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, response) - } - - if (!zkSupport.controller.isActive) { - val result = createPartitionsRequest.data.topics.asScala.map { topic => - (topic.name, new ApiError(Errors.NOT_CONTROLLER, null)) - }.toMap - sendResponseCallback(result) - } else { - // Special handling to add duplicate topics to the response - val topics = createPartitionsRequest.data.topics.asScala.toSeq - val dupes = topics.groupBy(_.name) - .filter { _._2.size > 1 } - .keySet - val notDuped = topics.filterNot(topic => dupes.contains(topic.name)) - val (authorized, unauthorized) = authHelper.partitionSeqByAuthorized(request.context, ALTER, TOPIC, - notDuped)(_.name) - - val (queuedForDeletion, valid) = authorized.partition { topic => - zkSupport.controller.isTopicQueuedForDeletion(topic.name) - } - - val errors = dupes.map(_ -> new ApiError(Errors.INVALID_REQUEST, "Duplicate topic in request.")) ++ - unauthorized.map(_.name -> new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "The topic authorization is failed.")) ++ - queuedForDeletion.map(_.name -> new ApiError(Errors.INVALID_TOPIC_EXCEPTION, "The topic is queued for deletion.")) - - zkSupport.adminManager.createPartitions( - createPartitionsRequest.data.timeoutMs, - valid, - createPartitionsRequest.data.validateOnly, - controllerMutationQuota, - result => sendResponseCallback(result ++ errors)) - } - } - - def handleDeleteTopicsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 5) - - def sendResponseCallback(results: DeletableTopicResultCollection): Unit = { - val responseData = new 
DeleteTopicsResponseData() - .setResponses(results) - val response = new DeleteTopicsResponse(responseData) - trace(s"Sending delete topics response $response for correlation id ${request.header.correlationId} to client ${request.header.clientId}.") - requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, response) - } - - val deleteTopicRequest = request.body[DeleteTopicsRequest] - val results = new DeletableTopicResultCollection(deleteTopicRequest.numberOfTopics()) - val toDelete = mutable.Set[String]() - if (!zkSupport.controller.isActive) { - deleteTopicRequest.topics().forEach { topic => - results.add(new DeletableTopicResult() - .setName(topic.name()) - .setTopicId(topic.topicId()) - .setErrorCode(Errors.NOT_CONTROLLER.code)) - } - sendResponseCallback(results) - } else if (!config.deleteTopicEnable) { - val error = if (request.context.apiVersion < 3) Errors.INVALID_REQUEST else Errors.TOPIC_DELETION_DISABLED - deleteTopicRequest.topics().forEach { topic => - results.add(new DeletableTopicResult() - .setName(topic.name()) - .setTopicId(topic.topicId()) - .setErrorCode(error.code)) - } - sendResponseCallback(results) - } else { - val topicIdsFromRequest = deleteTopicRequest.topicIds().asScala.filter(topicId => topicId != Uuid.ZERO_UUID).toSet - deleteTopicRequest.topics().forEach { topic => - if (topic.name() != null && topic.topicId() != Uuid.ZERO_UUID) - throw new InvalidRequestException("Topic name and topic ID can not both be specified.") - val name = if (topic.topicId() == Uuid.ZERO_UUID) topic.name() - else zkSupport.controller.controllerContext.topicName(topic.topicId).orNull - results.add(new DeletableTopicResult() - .setName(name) - .setTopicId(topic.topicId())) - } - val authorizedDescribeTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, - results.asScala.filter(result => result.name() != null))(_.name) - val authorizedDeleteTopics = authHelper.filterByAuthorized(request.context, DELETE, TOPIC, - results.asScala.filter(result => result.name() != null))(_.name) - results.forEach { topic => - val unresolvedTopicId = topic.topicId() != Uuid.ZERO_UUID && topic.name() == null - if (unresolvedTopicId) { - topic.setErrorCode(Errors.UNKNOWN_TOPIC_ID.code) - } else if (topicIdsFromRequest.contains(topic.topicId) && !authorizedDescribeTopics.contains(topic.name)) { - - // Because the client does not have Describe permission, the name should - // not be returned in the response. Note, however, that we do not consider - // the topicId itself to be sensitive, so there is no reason to obscure - // this case with `UNKNOWN_TOPIC_ID`. 
- topic.setName(null) - topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - } else if (!authorizedDeleteTopics.contains(topic.name)) { - topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - } else if (!metadataCache.contains(topic.name)) { - topic.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - } else { - toDelete += topic.name - } - } - // If no authorized topics return immediately - if (toDelete.isEmpty) - sendResponseCallback(results) - else { - def handleDeleteTopicsResults(errors: Map[String, Errors]): Unit = { - errors.foreach { - case (topicName, error) => - results.find(topicName) - .setErrorCode(error.code) - } - sendResponseCallback(results) - } - - zkSupport.adminManager.deleteTopics( - deleteTopicRequest.data.timeoutMs, - toDelete, - controllerMutationQuota, - handleDeleteTopicsResults - ) - } - } - } - def handleDeleteRecordsRequest(request: RequestChannel.Request): Unit = { val deleteRecordsRequest = request.body[DeleteRecordsRequest] @@ -2300,7 +1568,6 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleEndTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(IBP_0_11_0_IV0) val endTxnRequest = request.body[EndTxnRequest] val transactionalId = endTxnRequest.data.transactionalId @@ -2328,14 +1595,11 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, createResponse) } - // If the request is greater than version 4, we know the client supports transaction version 2. - val clientTransactionVersion = if (endTxnRequest.version() > 4) TransactionVersion.TV_2 else TransactionVersion.TV_0 - txnCoordinator.handleEndTransaction(endTxnRequest.data.transactionalId, endTxnRequest.data.producerId, endTxnRequest.data.producerEpoch, endTxnRequest.result(), - clientTransactionVersion, + TransactionVersion.transactionVersionForEndTxn(endTxnRequest), sendResponseCallback, requestLocal) } else @@ -2347,7 +1611,6 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleWriteTxnMarkersRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(IBP_0_11_0_IV0) // We are checking for AlterCluster permissions first. If it is not present, we are authorizing cluster operation // The latter will throw an exception if it is denied. 
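The removed inline check above ("If the request is greater than version 4, we know the client supports transaction version 2") is now delegated to TransactionVersion.transactionVersionForEndTxn. A small standalone sketch of the mapping that helper presumably performs, using local stand-in values rather than the real org.apache.kafka.server.common.TransactionVersion:

object TransactionVersionSketch {
  sealed trait TransactionVersion
  case object TV_0 extends TransactionVersion
  case object TV_2 extends TransactionVersion

  // EndTxn request versions above 4 imply a client that understands transaction
  // version 2; older request versions fall back to transaction version 0.
  def transactionVersionForEndTxn(endTxnRequestVersion: Short): TransactionVersion =
    if (endTxnRequestVersion > 4) TV_2 else TV_0
}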
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME, logIfDenied = false)) { @@ -2379,28 +1642,41 @@ class KafkaApis(val requestChannel: RequestChannel, trace(s"End transaction marker append for producer id $producerId completed with status: $currentErrors") updateErrors(producerId, currentErrors) - if (!groupCoordinator.isNewGroupCoordinator) { - val successfulOffsetsPartitions = currentErrors.asScala.filter { case (topicPartition, error) => - topicPartition.topic == GROUP_METADATA_TOPIC_NAME && error == Errors.NONE - }.keys - - if (successfulOffsetsPartitions.nonEmpty) { - // as soon as the end transaction marker has been written for a transactional offset commit, - // call to the group coordinator to materialize the offsets into the cache - try { - groupCoordinator.onTransactionCompleted(producerId, successfulOffsetsPartitions.asJava, result) - } catch { - case e: Exception => - error(s"Received an exception while trying to update the offsets cache on transaction marker append", e) - val updatedErrors = new ConcurrentHashMap[TopicPartition, Errors]() - successfulOffsetsPartitions.foreach(updatedErrors.put(_, Errors.UNKNOWN_SERVER_ERROR)) - updateErrors(producerId, updatedErrors) - } + def maybeSendResponse(): Unit = { + if (numAppends.decrementAndGet() == 0) { + requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) } } - if (numAppends.decrementAndGet() == 0) - requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) + // The new group coordinator uses GroupCoordinator#completeTransaction so we do + // not need to call GroupCoordinator#onTransactionCompleted here. + if (config.isNewGroupCoordinatorEnabled) { + maybeSendResponse() + return + } + + val successfulOffsetsPartitions = currentErrors.asScala.filter { case (topicPartition, error) => + topicPartition.topic == GROUP_METADATA_TOPIC_NAME && error == Errors.NONE + }.keys + + // If no end transaction marker has been written to a __consumer_offsets partition, we do not + // need to call GroupCoordinator#onTransactionCompleted. + if (successfulOffsetsPartitions.isEmpty) { + maybeSendResponse() + return + } + + // Otherwise, we call GroupCoordinator#onTransactionCompleted to materialize the offsets + // into the cache and we wait until the materialization is completed.
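The rewritten callback above only sends the WriteTxnMarkers response after every per-producer append has reported back, via the shared numAppends countdown inside maybeSendResponse (including the path that first waits for the asynchronous group-coordinator callback). A tiny standalone sketch of that completion-counting pattern; PendingMarkerAppends is a hypothetical name, not a class in this patch:

import java.util.concurrent.atomic.AtomicInteger

// One instance per WriteTxnMarkers request. markCompleted() is invoked once per
// producer-id append (possibly from an asynchronous callback), and the response
// is emitted exactly once, when the last append completes.
final class PendingMarkerAppends(totalAppends: Int)(sendResponse: () => Unit) {
  private val remaining = new AtomicInteger(totalAppends)

  def markCompleted(): Unit = {
    if (remaining.decrementAndGet() == 0) {
      sendResponse()
    }
  }
}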
+ groupCoordinator.onTransactionCompleted(producerId, successfulOffsetsPartitions.asJava, result).whenComplete { (_, exception) => + if (exception != null) { + error(s"Received an exception while trying to update the offsets cache on transaction marker append", exception) + val updatedErrors = new ConcurrentHashMap[TopicPartition, Errors]() + successfulOffsetsPartitions.foreach(updatedErrors.put(_, Errors.UNKNOWN_SERVER_ERROR)) + updateErrors(producerId, updatedErrors) + } + maybeSendResponse() + } } // TODO: The current append API makes doing separate writes per producerId a little easier, but it would @@ -2414,12 +1690,9 @@ class KafkaApis(val requestChannel: RequestChannel, val currentErrors = new ConcurrentHashMap[TopicPartition, Errors]() marker.partitions.forEach { partition => - replicaManager.getMagic(partition) match { - case Some(magic) => - if (magic < RecordBatch.MAGIC_VALUE_V2) - currentErrors.put(partition, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT) - else - partitionsWithCompatibleMessageFormat += partition + replicaManager.onlinePartition(partition) match { + case Some(_) => + partitionsWithCompatibleMessageFormat += partition case None => currentErrors.put(partition, Errors.UNKNOWN_TOPIC_OR_PARTITION) } @@ -2510,13 +1783,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) } - def ensureInterBrokerVersion(version: MetadataVersion): Unit = { - if (metadataCache.metadataVersion().isLessThan(version)) - throw new UnsupportedVersionException(s"metadata.version: ${metadataCache.metadataVersion()} is less than the required version: ${version}") - } - def handleAddPartitionsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(IBP_0_11_0_IV0) val addPartitionsToTxnRequest = if (request.context.apiVersion() < 4) request.body[AddPartitionsToTxnRequest].normalizeRequest() @@ -2614,6 +1881,7 @@ class KafkaApis(val requestChannel: RequestChannel, transaction.producerEpoch, authorizedPartitions, sendResponseCallback, + TransactionVersion.transactionVersionForAddPartitionsToTxn(addPartitionsToTxnRequest), requestLocal) } else { txnCoordinator.handleVerifyPartitionsInTransaction(transactionalId, @@ -2628,7 +1896,6 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleAddOffsetsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(IBP_0_11_0_IV0) val addOffsetsToTxnRequest = request.body[AddOffsetsToTxnRequest] val transactionalId = addOffsetsToTxnRequest.data.transactionalId val groupId = addOffsetsToTxnRequest.data.groupId @@ -2673,6 +1940,7 @@ class KafkaApis(val requestChannel: RequestChannel, addOffsetsToTxnRequest.data.producerEpoch, Set(offsetTopicPartition), sendResponseCallback, + TransactionVersion.TV_0, // This request will always come from the client not using TV 2. 
requestLocal) } } @@ -2681,7 +1949,6 @@ class KafkaApis(val requestChannel: RequestChannel, request: RequestChannel.Request, requestLocal: RequestLocal ): CompletableFuture[Unit] = { - ensureInterBrokerVersion(IBP_0_11_0_IV0) val txnOffsetCommitRequest = request.body[TxnOffsetCommitRequest] def sendResponse(response: TxnOffsetCommitResponse): Unit = { @@ -2783,16 +2050,6 @@ class KafkaApis(val requestChannel: RequestChannel, aclApis.handleDescribeAcls(request) } - def handleCreateAcls(request: RequestChannel.Request): Unit = { - metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - aclApis.handleCreateAcls(request) - } - - def handleDeleteAcls(request: RequestChannel.Request): Unit = { - metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - aclApis.handleDeleteAcls(request) - } - def handleOffsetForLeaderEpochRequest(request: RequestChannel.Request): Unit = { val offsetForLeaderEpoch = request.body[OffsetsForLeaderEpochRequest] val topics = offsetForLeaderEpoch.data.topics.asScala.toSeq @@ -2845,158 +2102,19 @@ class KafkaApis(val requestChannel: RequestChannel, } if (remaining.resources().isEmpty) { sendResponse(Some(new AlterConfigsResponseData())) - } else if ((!request.isForwarded) && metadataSupport.canForward()) { - metadataSupport.forwardingManager.get.forwardRequest(request, + } else { + forwardingManager.forwardRequest(request, new AlterConfigsRequest(remaining, request.header.apiVersion()), response => sendResponse(response.map(_.data()))) - } else { - sendResponse(Some(processLegacyAlterConfigsRequest(request, remaining))) } } - def processLegacyAlterConfigsRequest( - originalRequest: RequestChannel.Request, - data: AlterConfigsRequestData - ): AlterConfigsResponseData = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(originalRequest)) - val alterConfigsRequest = new AlterConfigsRequest(data, originalRequest.header.apiVersion()) - val (authorizedResources, unauthorizedResources) = alterConfigsRequest.configs.asScala.toMap.partition { case (resource, _) => - resource.`type` match { - case ConfigResource.Type.BROKER_LOGGER => - throw new InvalidRequestException(s"AlterConfigs is deprecated and does not support the resource type ${ConfigResource.Type.BROKER_LOGGER}") - case ConfigResource.Type.BROKER | ConfigResource.Type.CLIENT_METRICS => - authHelper.authorize(originalRequest.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME) - case ConfigResource.Type.TOPIC => - authHelper.authorize(originalRequest.context, ALTER_CONFIGS, TOPIC, resource.name) - case rt => throw new InvalidRequestException(s"Unexpected resource type $rt") - } - } - val authorizedResult = zkSupport.adminManager.alterConfigs(authorizedResources, alterConfigsRequest.validateOnly) - val unauthorizedResult = unauthorizedResources.keys.map { resource => - resource -> configsAuthorizationApiError(resource) - } - val response = new AlterConfigsResponseData() - (authorizedResult ++ unauthorizedResult).foreach { case (resource, error) => - response.responses().add(new AlterConfigsResourceResponse() - .setErrorCode(error.error.code) - .setErrorMessage(error.message) - .setResourceName(resource.name) - .setResourceType(resource.`type`.id)) - } - response - } - - def handleAlterPartitionReassignmentsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - authHelper.authorizeClusterOperation(request, ALTER) - val alterPartitionReassignmentsRequest = 
request.body[AlterPartitionReassignmentsRequest] - - def sendResponseCallback(result: Either[Map[TopicPartition, ApiError], ApiError]): Unit = { - val responseData = result match { - case Right(topLevelError) => - new AlterPartitionReassignmentsResponseData().setErrorMessage(topLevelError.message).setErrorCode(topLevelError.error.code) - - case Left(assignments) => - val topicResponses = assignments.groupBy(_._1.topic).map { - case (topic, reassignmentsByTp) => - val partitionResponses = reassignmentsByTp.map { - case (topicPartition, error) => - new ReassignablePartitionResponse().setPartitionIndex(topicPartition.partition) - .setErrorCode(error.error.code).setErrorMessage(error.message) - } - new ReassignableTopicResponse().setName(topic).setPartitions(partitionResponses.toList.asJava) - } - new AlterPartitionReassignmentsResponseData().setResponses(topicResponses.toList.asJava) - } - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new AlterPartitionReassignmentsResponse(responseData.setThrottleTimeMs(requestThrottleMs)) - ) - } - - val reassignments = alterPartitionReassignmentsRequest.data.topics.asScala.flatMap { - reassignableTopic => reassignableTopic.partitions.asScala.map { - reassignablePartition => - val tp = new TopicPartition(reassignableTopic.name, reassignablePartition.partitionIndex) - if (reassignablePartition.replicas == null) - tp -> None // revert call - else - tp -> Some(reassignablePartition.replicas.asScala.map(_.toInt)) - } - }.toMap - - zkSupport.controller.alterPartitionReassignments(reassignments, sendResponseCallback) - } - - def handleListPartitionReassignmentsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - authHelper.authorizeClusterOperation(request, DESCRIBE) - val listPartitionReassignmentsRequest = request.body[ListPartitionReassignmentsRequest] - - def sendResponseCallback(result: Either[Map[TopicPartition, ReplicaAssignment], ApiError]): Unit = { - val responseData = result match { - case Right(error) => new ListPartitionReassignmentsResponseData().setErrorMessage(error.message).setErrorCode(error.error.code) - - case Left(assignments) => - val topicReassignments = assignments.groupBy(_._1.topic).map { - case (topic, reassignmentsByTp) => - val partitionReassignments = reassignmentsByTp.map { - case (topicPartition, assignment) => - new ListPartitionReassignmentsResponseData.OngoingPartitionReassignment() - .setPartitionIndex(topicPartition.partition) - .setAddingReplicas(assignment.addingReplicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]]) - .setRemovingReplicas(assignment.removingReplicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]]) - .setReplicas(assignment.replicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]]) - }.toList - - new ListPartitionReassignmentsResponseData.OngoingTopicReassignment().setName(topic) - .setPartitions(partitionReassignments.asJava) - }.toList - - new ListPartitionReassignmentsResponseData().setTopics(topicReassignments.asJava) - } - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new ListPartitionReassignmentsResponse(responseData.setThrottleTimeMs(requestThrottleMs)) - ) - } - - val partitionsOpt = Option(listPartitionReassignmentsRequest.data.topics).map { topics => - topics.iterator().asScala.flatMap { topic => - topic.partitionIndexes.iterator().asScala.map { partitionIndex => - new TopicPartition(topic.name(), 
partitionIndex) - } - }.toSet - } - - zkSupport.controller.listPartitionReassignments(partitionsOpt, sendResponseCallback) - } - - private def configsAuthorizationApiError(resource: ConfigResource): ApiError = { - val error = resource.`type` match { - case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER => Errors.CLUSTER_AUTHORIZATION_FAILED - case ConfigResource.Type.TOPIC => Errors.TOPIC_AUTHORIZATION_FAILED - case ConfigResource.Type.GROUP => Errors.GROUP_AUTHORIZATION_FAILED - case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.name}") - } - new ApiError(error, null) - } - def handleIncrementalAlterConfigsRequest(request: RequestChannel.Request): Unit = { val original = request.body[IncrementalAlterConfigsRequest] val preprocessingResponses = configManager.preprocess(original.data(), (rType, rName) => authHelper.authorize(request.context, ALTER_CONFIGS, rType, rName)) val remaining = ConfigAdminManager.copyWithoutPreprocessed(original.data(), preprocessingResponses) - // Before deciding whether to forward or handle locally, a ZK broker needs to check if - // the active controller is ZK or KRaft. If the controller is KRaft, we need to forward. - // If the controller is ZK, we need to process the request locally. - val isKRaftController = metadataSupport match { - case ZkSupport(_, _, _, _, metadataCache, _) => - metadataCache.getControllerId.exists(_.isInstanceOf[KRaftCachedControllerId]) - case RaftSupport(_, _) => true - } - def sendResponse(secondPart: Option[ApiMessage]): Unit = { secondPart match { case Some(result: IncrementalAlterConfigsResponseData) => @@ -3009,49 +2127,13 @@ class KafkaApis(val requestChannel: RequestChannel, } } - // Forwarding has not happened yet, so handle both ZK and KRaft cases here if (remaining.resources().isEmpty) { sendResponse(Some(new IncrementalAlterConfigsResponseData())) - } else if ((!request.isForwarded) && metadataSupport.canForward() && isKRaftController) { - metadataSupport.forwardingManager.get.forwardRequest(request, + } else { + forwardingManager.forwardRequest(request, new IncrementalAlterConfigsRequest(remaining, request.header.apiVersion()), response => sendResponse(response.map(_.data()))) - } else { - sendResponse(Some(processIncrementalAlterConfigsRequest(request, remaining))) - } - } - - def processIncrementalAlterConfigsRequest( - originalRequest: RequestChannel.Request, - data: IncrementalAlterConfigsRequestData - ): IncrementalAlterConfigsResponseData = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(originalRequest)) - val configs = data.resources.iterator.asScala.map { alterConfigResource => - val configResource = new ConfigResource(ConfigResource.Type.forId(alterConfigResource.resourceType), - alterConfigResource.resourceName) - configResource -> alterConfigResource.configs.iterator.asScala.map { - alterConfig => new AlterConfigOp(new ConfigEntry(alterConfig.name, alterConfig.value), - OpType.forId(alterConfig.configOperation)) - }.toBuffer - }.toMap - - val (authorizedResources, unauthorizedResources) = configs.partition { case (resource, _) => - resource.`type` match { - case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER | ConfigResource.Type.CLIENT_METRICS => - authHelper.authorize(originalRequest.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME) - case ConfigResource.Type.TOPIC => - authHelper.authorize(originalRequest.context, ALTER_CONFIGS, TOPIC, resource.name) - case ConfigResource.Type.GROUP => - 
authHelper.authorize(originalRequest.context, ALTER_CONFIGS, GROUP, resource.name) - case rt => throw new InvalidRequestException(s"Unexpected resource type $rt") - } } - - val authorizedResult = zkSupport.adminManager.incrementalAlterConfigs(authorizedResources, data.validateOnly) - val unauthorizedResult = unauthorizedResources.keys.map { resource => - resource -> configsAuthorizationApiError(resource) - } - new IncrementalAlterConfigsResponse(0, (authorizedResult ++ unauthorizedResult).asJava).data() } def handleDescribeConfigsRequest(request: RequestChannel.Request): Unit = { @@ -3131,93 +2213,7 @@ class KafkaApis(val requestChannel: RequestChannel, CreateDelegationTokenResponse.prepareResponse(request.context.requestVersion, requestThrottleMs, Errors.INVALID_PRINCIPAL_TYPE, owner, requester)) } else { - maybeForwardToController(request, handleCreateTokenRequestZk) - } - } - - def handleCreateTokenRequestZk(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - - val createTokenRequest = request.body[CreateDelegationTokenRequest] - - // the callback for sending a create token response - def sendResponseCallback(createResult: CreateTokenResult): Unit = { - trace(s"Sending create token response for correlation id ${request.header.correlationId} " + - s"to client ${request.header.clientId}.") - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - CreateDelegationTokenResponse.prepareResponse(request.context.requestVersion, requestThrottleMs, createResult.error, createResult.owner, - createResult.tokenRequester, createResult.issueTimestamp, createResult.expiryTimestamp, createResult.maxTimestamp, createResult.tokenId, - ByteBuffer.wrap(createResult.hmac))) - } - - val ownerPrincipalName = createTokenRequest.data.ownerPrincipalName - val owner = if (ownerPrincipalName == null || ownerPrincipalName.isEmpty) { - request.context.principal - } else { - new KafkaPrincipal(createTokenRequest.data.ownerPrincipalType, ownerPrincipalName) - } - val requester = request.context.principal - val renewerList = createTokenRequest.data.renewers.asScala.toList.map(entry => - new KafkaPrincipal(entry.principalType, entry.principalName)) - - // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - CreateDelegationTokenResponse.prepareResponse(request.context.requestVersion, requestThrottleMs, - Errors.NOT_CONTROLLER, owner, requester)) - } else { - tokenManager.createToken( - owner, - requester, - renewerList, - createTokenRequest.data.maxLifetimeMs, - sendResponseCallback) - } - } - - def handleRenewTokenRequest(request: RequestChannel.Request): Unit = { - if (!allowTokenRequests(request)) { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new RenewDelegationTokenResponse( - new RenewDelegationTokenResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) - .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) - } else { - maybeForwardToController(request, handleRenewTokenRequestZk) - } - } - - def handleRenewTokenRequestZk(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - - val renewTokenRequest = request.body[RenewDelegationTokenRequest] - - // the callback 
for sending a renew token response - def sendResponseCallback(error: Errors, expiryTimestamp: Long): Unit = { - trace("Sending renew token response for correlation id %d to client %s." - .format(request.header.correlationId, request.header.clientId)) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new RenewDelegationTokenResponse( - new RenewDelegationTokenResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setErrorCode(error.code) - .setExpiryTimestampMs(expiryTimestamp))) - } - // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new RenewDelegationTokenResponse( - new RenewDelegationTokenResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setErrorCode(Errors.NOT_CONTROLLER.code))) - } else { - tokenManager.renewToken( - request.context.principal, - ByteBuffer.wrap(renewTokenRequest.data.hmac), - renewTokenRequest.data.renewPeriodMs, - sendResponseCallback - ) + forwardToController(request) } } @@ -3230,40 +2226,20 @@ class KafkaApis(val requestChannel: RequestChannel, .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) } else { - maybeForwardToController(request, handleExpireTokenRequestZk) + forwardToController(request) } } - def handleExpireTokenRequestZk(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - - val expireTokenRequest = request.body[ExpireDelegationTokenRequest] - - // the callback for sending a expire token response - def sendResponseCallback(error: Errors, expiryTimestamp: Long): Unit = { - trace("Sending expire token response for correlation id %d to client %s." 
- .format(request.header.correlationId, request.header.clientId)) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new ExpireDelegationTokenResponse( - new ExpireDelegationTokenResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setErrorCode(error.code) - .setExpiryTimestampMs(expiryTimestamp))) - } - // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { + def handleRenewTokenRequest(request: RequestChannel.Request): Unit = { + if (!allowTokenRequests(request)) { requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new ExpireDelegationTokenResponse( - new ExpireDelegationTokenResponseData() + new RenewDelegationTokenResponse( + new RenewDelegationTokenResponseData() .setThrottleTimeMs(requestThrottleMs) - .setErrorCode(Errors.NOT_CONTROLLER.code))) + .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) + .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) } else { - tokenManager.expireToken( - request.context.principal, - expireTokenRequest.hmac(), - expireTokenRequest.expiryTimePeriod(), - sendResponseCallback - ) + forwardToController(request) } } @@ -3314,77 +2290,6 @@ class KafkaApis(val requestChannel: RequestChannel, true } - def handleElectLeaders(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val electionRequest = request.body[ElectLeadersRequest] - - def sendResponseCallback( - error: ApiError - )( - results: Map[TopicPartition, ApiError] - ): Unit = { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { - val adjustedResults = if (electionRequest.data.topicPartitions == null) { - /* When performing elections across all of the partitions we should only return - * partitions for which there was an election or resulted in an error. In other - * words, partitions that didn't need election because they ready have the correct - * leader are not returned to the client. 
- */ - results.filter { case (_, error) => - error.error != Errors.ELECTION_NOT_NEEDED - } - } else results - - val electionResults = new util.ArrayList[ReplicaElectionResult]() - adjustedResults - .groupBy { case (tp, _) => tp.topic } - .foreachEntry { (topic, ps) => - val electionResult = new ReplicaElectionResult() - - electionResult.setTopic(topic) - ps.foreachEntry { (topicPartition, error) => - val partitionResult = new PartitionResult() - partitionResult.setPartitionId(topicPartition.partition) - partitionResult.setErrorCode(error.error.code) - partitionResult.setErrorMessage(error.message) - electionResult.partitionResult.add(partitionResult) - } - - electionResults.add(electionResult) - } - - new ElectLeadersResponse( - requestThrottleMs, - error.error.code, - electionResults, - electionRequest.version - ) - }) - } - - if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) { - val error = new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, null) - val partitionErrors: Map[TopicPartition, ApiError] = - electionRequest.topicPartitions.asScala.iterator.map(partition => partition -> error).toMap - - sendResponseCallback(error)(partitionErrors) - } else { - val partitions = if (electionRequest.data.topicPartitions == null) { - metadataCache.getAllTopics().flatMap(metadataCache.getTopicPartitions) - } else { - electionRequest.topicPartitions.asScala - } - - replicaManager.electLeaders( - zkSupport.controller, - partitions, - electionRequest.electionType, - sendResponseCallback(ApiError.NONE), - electionRequest.data.timeoutMs - ) - } - } - def handleOffsetDeleteRequest( request: RequestChannel.Request, requestLocal: RequestLocal @@ -3458,70 +2363,11 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => describeClientQuotasRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - metadataSupport match { - case ZkSupport(adminManager, controller, zkClient, forwardingManager, metadataCache, _) => - val result = adminManager.describeClientQuotas(describeClientQuotasRequest.filter) - - val entriesData = result.iterator.map { case (quotaEntity, quotaValues) => - val entityData = quotaEntity.entries.asScala.iterator.map { case (entityType, entityName) => - new DescribeClientQuotasResponseData.EntityData() - .setEntityType(entityType) - .setEntityName(entityName) - }.toBuffer - - val valueData = quotaValues.iterator.map { case (key, value) => - new DescribeClientQuotasResponseData.ValueData() - .setKey(key) - .setValue(value) - }.toBuffer - - new DescribeClientQuotasResponseData.EntryData() - .setEntity(entityData.asJava) - .setValues(valueData.asJava) - }.toBuffer - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setEntries(entriesData.asJava))) - case RaftSupport(_, metadataCache) => - val result = metadataCache.describeClientQuotas(describeClientQuotasRequest.data()) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { - result.setThrottleTimeMs(requestThrottleMs) - new DescribeClientQuotasResponse(result) - }) - } - } - } - - def handleAlterClientQuotasRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val alterClientQuotasRequest = request.body[AlterClientQuotasRequest] - - if 
(authHelper.authorize(request.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME)) { - val result = zkSupport.adminManager.alterClientQuotas(alterClientQuotasRequest.entries.asScala, - alterClientQuotasRequest.validateOnly) - - val entriesData = result.iterator.map { case (quotaEntity, apiError) => - val entityData = quotaEntity.entries.asScala.iterator.map { case (key, value) => - new AlterClientQuotasResponseData.EntityData() - .setEntityType(key) - .setEntityName(value) - }.toBuffer - - new AlterClientQuotasResponseData.EntryData() - .setErrorCode(apiError.error.code) - .setErrorMessage(apiError.message) - .setEntity(entityData.asJava) - }.toBuffer - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new AlterClientQuotasResponse(new AlterClientQuotasResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setEntries(entriesData.asJava))) - } else { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - alterClientQuotasRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) + val result = metadataCache.describeClientQuotas(describeClientQuotasRequest.data()) + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { + result.setThrottleTimeMs(requestThrottleMs) + new DescribeClientQuotasResponse(result) + }) } } @@ -3532,82 +2378,9 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => describeUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - metadataSupport match { - case ZkSupport(adminManager, controller, zkClient, forwardingManager, metadataCache, _) => - val result = adminManager.describeUserScramCredentials( - Option(describeUserScramCredentialsRequest.data.users).map(_.asScala.map(_.name).toList)) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new DescribeUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs))) - case RaftSupport(_, metadataCache) => - val result = metadataCache.describeScramCredentials(describeUserScramCredentialsRequest.data()) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new DescribeUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs))) - } - } - } - - def handleAlterUserScramCredentialsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val alterUserScramCredentialsRequest = request.body[AlterUserScramCredentialsRequest] - - if (!zkSupport.controller.isActive) { - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - alterUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.NOT_CONTROLLER.exception)) - } else if (authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) { - val result = zkSupport.adminManager.alterUserScramCredentials( - alterUserScramCredentialsRequest.data.upsertions().asScala, alterUserScramCredentialsRequest.data.deletions().asScala) - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new AlterUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs))) - } else { + val result = metadataCache.describeScramCredentials(describeUserScramCredentialsRequest.data()) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - alterUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) 
- } - } - - def handleAlterPartitionRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - val alterPartitionRequest = request.body[AlterPartitionRequest] - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - - if (!zkSupport.controller.isActive) - requestHelper.sendResponseExemptThrottle(request, alterPartitionRequest.getErrorResponse( - AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.NOT_CONTROLLER.exception)) - else - zkSupport.controller.alterPartitions(alterPartitionRequest.data, request.context.apiVersion, alterPartitionResp => - requestHelper.sendResponseExemptThrottle(request, new AlterPartitionResponse(alterPartitionResp))) - } - - def handleUpdateFeatures(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request)) - val updateFeaturesRequest = request.body[UpdateFeaturesRequest] - - def sendResponseCallback(errors: Either[ApiError, Map[String, ApiError]]): Unit = { - def createResponse(throttleTimeMs: Int): UpdateFeaturesResponse = { - errors match { - case Left(topLevelError) => - UpdateFeaturesResponse.createWithErrors( - topLevelError, - Collections.emptySet(), - throttleTimeMs) - case Right(featureUpdateErrors) => - // This response is not correct, but since this is ZK specific code it will be removed in 4.0 - UpdateFeaturesResponse.createWithErrors( - ApiError.NONE, - featureUpdateErrors.asJava.keySet(), - throttleTimeMs) - } - } - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => createResponse(requestThrottleMs)) - } - - if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) { - sendResponseCallback(Left(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED))) - } else if (!zkSupport.controller.isActive) { - sendResponseCallback(Left(new ApiError(Errors.NOT_CONTROLLER))) - } else if (!config.isFeatureVersioningSupported) { - sendResponseCallback(Left(new ApiError(Errors.INVALID_REQUEST, "Feature versioning system is disabled."))) - } else { - zkSupport.controller.updateFeatures(updateFeaturesRequest, sendResponseCallback) + new DescribeUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs))) } } @@ -3618,59 +2391,27 @@ class KafkaApis(val requestChannel: RequestChannel, clusterId, () => { val brokers = new DescribeClusterResponseData.DescribeClusterBrokerCollection() - metadataCache.getAliveBrokerNodes(request.context.listenerName).foreach { node => + val describeClusterRequest = request.body[DescribeClusterRequest] + metadataCache.getBrokerNodes(request.context.listenerName).foreach { node => + if (!node.isFenced || describeClusterRequest.data().includeFencedBrokers()) { brokers.add(new DescribeClusterResponseData.DescribeClusterBroker(). setBrokerId(node.id). setHost(node.host). setPort(node.port). - setRack(node.rack)) + setRack(node.rack). 
+ setIsFenced(node.isFenced)) + } } brokers }, () => { - metadataCache.getControllerId match { - case Some(value) => - value match { - case ZkCachedControllerId (id) => id - case KRaftCachedControllerId (_) => metadataCache.getRandomAliveBrokerId.getOrElse(- 1) - } - case None => -1 - } + metadataCache.getRandomAliveBrokerId.getOrElse(-1) } ) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeClusterResponse(response.setThrottleTimeMs(requestThrottleMs))) } - def handleEnvelope(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - - // If forwarding is not yet enabled or this request has been received on an invalid endpoint, - // then we treat the request as unparsable and close the connection. - if (!isForwardingEnabled(request)) { - info(s"Closing connection ${request.context.connectionId} because it sent an `Envelope` " + - "request even though forwarding has not been enabled") - requestChannel.closeConnection(request, Collections.emptyMap()) - return - } else if (!request.context.fromPrivilegedListener) { - info(s"Closing connection ${request.context.connectionId} from listener ${request.context.listenerName} " + - s"because it sent an `Envelope` request, which is only accepted on the inter-broker listener " + - s"${config.interBrokerListenerName}.") - requestChannel.closeConnection(request, Collections.emptyMap()) - return - } else if (!authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME)) { - requestHelper.sendErrorResponseMaybeThrottle(request, new ClusterAuthorizationException( - s"Principal ${request.context.principal} does not have required CLUSTER_ACTION for envelope")) - return - } else if (!zkSupport.controller.isActive) { - requestHelper.sendErrorResponseMaybeThrottle(request, new NotControllerException( - s"Broker $brokerId is not the active controller")) - return - } - - EnvelopeUtils.handleEnvelopeRequest(request, requestChannel.metrics, handle(_, requestLocal)) - } - def handleDescribeProducersRequest(request: RequestChannel.Request): Unit = { val describeProducersRequest = request.body[DescribeProducersRequest] @@ -3775,22 +2516,6 @@ class KafkaApis(val requestChannel: RequestChannel, new ListTransactionsResponse(response.setThrottleTimeMs(requestThrottleMs))) } - def handleAllocateProducerIdsRequest(request: RequestChannel.Request): Unit = { - val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request)) - authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - - val allocateProducerIdsRequest = request.body[AllocateProducerIdsRequest] - - if (!zkSupport.controller.isActive) - requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => - allocateProducerIdsRequest.getErrorResponse(throttleTimeMs, Errors.NOT_CONTROLLER.exception)) - else - zkSupport.controller.allocateProducerIds(allocateProducerIdsRequest.data, producerIdsResponse => - requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => - new AllocateProducerIdsResponse(producerIdsResponse.setThrottleTimeMs(throttleTimeMs))) - ) - } - private def groupVersion(): GroupVersion = { GroupVersion.fromFeatureLevel(metadataCache.features.finalizedFeatures.getOrDefault(GroupVersion.FEATURE_NAME, 0.toShort)) } @@ -3885,35 +2610,21 @@ class KafkaApis(val requestChannel: RequestChannel, def handleGetTelemetrySubscriptionsRequest(request: RequestChannel.Request): Unit = { val subscriptionRequest = 
request.body[GetTelemetrySubscriptionsRequest] - - clientMetricsManager match { - case Some(metricsManager) => - try { - requestHelper.sendMaybeThrottle(request, metricsManager.processGetTelemetrySubscriptionRequest(subscriptionRequest, request.context)) - } catch { - case _: Exception => - requestHelper.sendMaybeThrottle(request, subscriptionRequest.getErrorResponse(Errors.INVALID_REQUEST.exception)) - } - case None => - info("Received get telemetry client request for zookeeper based cluster") - requestHelper.sendMaybeThrottle(request, subscriptionRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + try { + requestHelper.sendMaybeThrottle(request, clientMetricsManager.processGetTelemetrySubscriptionRequest(subscriptionRequest, request.context)) + } catch { + case _: Exception => + requestHelper.sendMaybeThrottle(request, subscriptionRequest.getErrorResponse(Errors.INVALID_REQUEST.exception)) } } - def handlePushTelemetryRequest(request: RequestChannel.Request): Unit = { + private def handlePushTelemetryRequest(request: RequestChannel.Request): Unit = { val pushTelemetryRequest = request.body[PushTelemetryRequest] - - clientMetricsManager match { - case Some(metricsManager) => - try { - requestHelper.sendMaybeThrottle(request, metricsManager.processPushTelemetryRequest(pushTelemetryRequest, request.context)) - } catch { - case _: Exception => - requestHelper.sendMaybeThrottle(request, pushTelemetryRequest.getErrorResponse(Errors.INVALID_REQUEST.exception)) - } - case None => - info("Received push telemetry client request for zookeeper based cluster") - requestHelper.sendMaybeThrottle(request, pushTelemetryRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + try { + requestHelper.sendMaybeThrottle(request, clientMetricsManager.processPushTelemetryRequest(pushTelemetryRequest, request.context)) + } catch { + case _: Exception => + requestHelper.sendMaybeThrottle(request, pushTelemetryRequest.getErrorResponse(Errors.INVALID_REQUEST.exception)) } } @@ -3923,18 +2634,10 @@ class KafkaApis(val requestChannel: RequestChannel, if (!authHelper.authorize(request.context, DESCRIBE_CONFIGS, CLUSTER, CLUSTER_NAME)) { requestHelper.sendMaybeThrottle(request, listClientMetricsResourcesRequest.getErrorResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - clientMetricsManager match { - case Some(metricsManager) => - val data = new ListClientMetricsResourcesResponseData().setClientMetricsResources( - metricsManager.listClientMetricsResources.asScala.map( - name => new ClientMetricsResource().setName(name)).toList.asJava) - requestHelper.sendMaybeThrottle(request, new ListClientMetricsResourcesResponse(data)) - case None => - // This should never happen as ZK based cluster calls should get rejected earlier itself, - // but we should handle it gracefully. 
- info("Received list client metrics resources request for zookeeper based cluster") - requestHelper.sendMaybeThrottle(request, listClientMetricsResourcesRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - } + val data = new ListClientMetricsResourcesResponseData().setClientMetricsResources( + clientMetricsManager.listClientMetricsResources.stream.map( + name => new ClientMetricsResource().setName(name)).toList) + requestHelper.sendMaybeThrottle(request, new ListClientMetricsResourcesResponse(data)) } } @@ -4026,14 +2729,6 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) return } - val sharePartitionManagerInstance: SharePartitionManager = sharePartitionManager match { - case Some(manager) => manager - case None => - // The API is not supported when the SharePartitionManager is not defined on the broker - info("Received share fetch request for zookeeper based cluster") - requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return - } val groupId = shareFetchRequest.data.groupId @@ -4063,7 +2758,7 @@ class KafkaApis(val requestChannel: RequestChannel, try { // Creating the shareFetchContext for Share Session Handling. if context creation fails, the request is failed directly here. - shareFetchContext = sharePartitionManagerInstance.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent) + shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent) } catch { case e: Exception => requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) @@ -4102,7 +2797,7 @@ class KafkaApis(val requestChannel: RequestChannel, acknowledgeResult = handleAcknowledgements( acknowledgementDataFromRequest, erroneous, - sharePartitionManagerInstance, + sharePartitionManager, authorizedTopics, groupId, memberId, @@ -4115,7 +2810,7 @@ class KafkaApis(val requestChannel: RequestChannel, handleFetchFromShareFetchRequest( request, erroneousAndValidPartitionData, - sharePartitionManagerInstance, + sharePartitionManager, authorizedTopics ) @@ -4182,7 +2877,7 @@ class KafkaApis(val requestChannel: RequestChannel, } if (shareSessionEpoch == ShareRequestMetadata.FINAL_EPOCH) { - sharePartitionManagerInstance.releaseSession(groupId, memberId). + sharePartitionManager.releaseSession(groupId, memberId). 
whenComplete((releaseAcquiredRecordsData, throwable) => if (throwable != null) { error(s"Releasing share session close with correlation from client ${request.header.clientId} " + @@ -4201,7 +2896,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (exception != null) { requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception)) } else { - requestChannel.sendResponse(request, result, onFetchComplete(request)) + requestChannel.sendResponse(request, result, None) } } } @@ -4269,6 +2964,7 @@ class KafkaApis(val requestChannel: RequestChannel, groupId, shareFetchRequest.data.memberId, params, + shareFetchRequest.data.batchSize, interestedWithMaxBytes ).thenApply{ result => val combinedResult = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] @@ -4346,15 +3042,6 @@ class KafkaApis(val requestChannel: RequestChannel, return } - val sharePartitionManagerInstance: SharePartitionManager = sharePartitionManager match { - case Some(manager) => manager - case None => - // The API is not supported when the SharePartitionManager is not defined on the broker - info("Received share acknowledge request for zookeeper based cluster") - requestHelper.sendMaybeThrottle(request, - shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return - } val groupId = shareAcknowledgeRequest.data.groupId // Share Acknowledge needs permission to perform READ action on the named group resource (groupId) @@ -4370,7 +3057,7 @@ class KafkaApis(val requestChannel: RequestChannel, try { // Updating the cache for Share Session Handling - sharePartitionManagerInstance.acknowledgeSessionUpdate(groupId, newReqMetadata) + sharePartitionManager.acknowledgeSessionUpdate(groupId, newReqMetadata) } catch { case e: Exception => requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) @@ -4399,13 +3086,13 @@ class KafkaApis(val requestChannel: RequestChannel, val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() val acknowledgementDataFromRequest = getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicIdNames, erroneous) - handleAcknowledgements(acknowledgementDataFromRequest, erroneous, sharePartitionManagerInstance, authorizedTopics, groupId, memberId) + handleAcknowledgements(acknowledgementDataFromRequest, erroneous, sharePartitionManager, authorizedTopics, groupId, memberId) .handle[Unit] {(result, exception) => if (exception != null) { requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception)) } else { if (shareSessionEpoch == ShareRequestMetadata.FINAL_EPOCH) { - sharePartitionManagerInstance.releaseSession(groupId, memberId). + sharePartitionManager.releaseSession(groupId, memberId). 
whenComplete{ (releaseAcquiredRecordsData, throwable) => if (throwable != null) { debug(s"Releasing share session close with correlation from client ${request.header.clientId} " + @@ -4478,8 +3165,28 @@ class KafkaApis(val requestChannel: RequestChannel, def handleReadShareGroupStateSummaryRequest(request: RequestChannel.Request): Unit = { val readShareGroupStateSummaryRequest = request.body[ReadShareGroupStateSummaryRequest] - // TODO: Implement the ReadShareGroupStateSummaryRequest handling - requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) + + shareCoordinator match { + case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + readShareGroupStateSummaryRequest.getErrorResponse(requestThrottleMs, + new ApiException("Share coordinator is not enabled."))) + CompletableFuture.completedFuture[Unit](()) + case Some(coordinator) => coordinator.readStateSummary(request.context, readShareGroupStateSummaryRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse(response)) + } + } + } + } + + def handleDescribeShareGroupOffsetsRequest(request: RequestChannel.Request): Unit = { + val describeShareGroupOffsetsRequest = request.body[DescribeShareGroupOffsetsRequest] + // TODO: Implement the DescribeShareGroupOffsetsRequest handling + requestHelper.sendMaybeThrottle(request, describeShareGroupOffsetsRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) CompletableFuture.completedFuture[Unit](()) } @@ -4728,22 +3435,6 @@ class KafkaApis(val requestChannel: RequestChannel, .setCurrentLeader(partitionData.currentLeader) } - private def onFetchComplete(request: RequestChannel.Request): Option[Send => Unit] = { - def updateConversionStats(send: Send): Unit = { - send match { - case send: MultiRecordsSend if send.recordConversionStats != null => - send.recordConversionStats.asScala.toMap.foreach { - case (tp, stats) => updateRecordConversionStats(request, tp, stats) - } - case send: NetworkSend => - updateConversionStats(send.send()) - case _ => - } - } - - Some(updateConversionStats) - } - private def isShareGroupProtocolEnabled: Boolean = { groupCoordinator.isNewGroupCoordinator && config.shareGroupConfig.isShareGroupEnabled } @@ -4783,18 +3474,4 @@ object KafkaApis { FetchResponse.sizeOf(versionId, responseData.entrySet .iterator.asScala.filter(element => element.getKey.topicPartition.topic != null && quota.isThrottled(element.getKey.topicPartition)).asJava) } - - // visible for testing - private[server] def shouldNeverReceive(request: RequestChannel.Request): Exception = { - new UnsupportedVersionException(s"Should never receive when using a Raft-based metadata quorum: ${request.header.apiKey()}") - } - - // visible for testing - private[server] def shouldAlwaysForward(request: RequestChannel.Request): Exception = { - new UnsupportedVersionException(s"Should always be forwarded to the Active Controller when using a Raft-based metadata quorum: ${request.header.apiKey}") - } - - private def unsupported(text: String): Exception = { - new UnsupportedVersionException(s"Unsupported when using a Raft-based metadata quorum: $text") - } } diff --git a/core/src/main/scala/kafka/server/KafkaBroker.scala 
b/core/src/main/scala/kafka/server/KafkaBroker.scala index 22d111b896ff8..f2e71b68fa15f 100644 --- a/core/src/main/scala/kafka/server/KafkaBroker.scala +++ b/core/src/main/scala/kafka/server/KafkaBroker.scala @@ -34,6 +34,7 @@ import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.NodeToControllerChannelManager import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.server.util.Scheduler +import org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.time.Duration @@ -68,6 +69,8 @@ object KafkaBroker { * you do change it, be sure to make it match that regex or the system tests will fail. */ val STARTED_MESSAGE = "Kafka Server started" + + val MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS: Long = 120000 } trait KafkaBroker extends Logging { @@ -83,6 +86,7 @@ trait KafkaBroker extends Logging { def dataPlaneRequestProcessor: KafkaApis def kafkaScheduler: Scheduler def kafkaYammerMetrics: KafkaYammerMetrics + def logDirFailureChannel: LogDirFailureChannel def logManager: LogManager def remoteLogManagerOpt: Option[RemoteLogManager] def metrics: Metrics diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index fec938758c43f..e243d40bbb2ff 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -24,36 +24,32 @@ import kafka.cluster.EndPoint import kafka.utils.{CoreUtils, Logging} import kafka.utils.Implicits._ import org.apache.kafka.common.Reconfigurable -import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, SaslConfigs, TopicConfig} +import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, TopicConfig} import org.apache.kafka.common.config.ConfigDef.ConfigKey import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.config.types.Password import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.record.{CompressionType, TimestampType} +import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.security.auth.KafkaPrincipalSerde import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} +import org.apache.kafka.coordinator.share.ShareCoordinatorConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.security.authorizer.AuthorizerUtils -import org.apache.kafka.security.PasswordEncoderConfigs import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.common.MetadataVersion._ -import org.apache.kafka.server.config.{AbstractKafkaConfig, DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ShareCoordinatorConfig, ZkConfigs} +import org.apache.kafka.server.config.{AbstractKafkaConfig, DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, 
ReplicationConfigs, ServerConfigs, ServerLogConfigs, ZkConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs import org.apache.kafka.server.util.Csv import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} -import org.apache.kafka.storage.internals.log.LogConfig.MessageFormatVersion -import org.apache.zookeeper.client.ZKClientConfig -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ import scala.collection.{Map, Seq} import scala.jdk.OptionConverters.RichOptional @@ -65,30 +61,6 @@ object KafkaConfig { DynamicBrokerConfig.dynamicConfigUpdateModes)) } - private[kafka] def zooKeeperClientProperty(clientConfig: ZKClientConfig, kafkaPropName: String): Option[String] = { - Option(clientConfig.getProperty(ZkConfigs.ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP.get(kafkaPropName))) - } - - private[kafka] def setZooKeeperClientProperty(clientConfig: ZKClientConfig, kafkaPropName: String, kafkaPropValue: Any): Unit = { - clientConfig.setProperty(ZkConfigs.ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP.get(kafkaPropName), - kafkaPropName match { - case ZkConfigs.ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG => (kafkaPropValue.toString.toUpperCase == "HTTPS").toString - case ZkConfigs.ZK_SSL_ENABLED_PROTOCOLS_CONFIG | ZkConfigs.ZK_SSL_CIPHER_SUITES_CONFIG => kafkaPropValue match { - case list: java.util.List[_] => list.asScala.mkString(",") - case _ => kafkaPropValue.toString - } - case _ => kafkaPropValue.toString - }) - } - - // For ZooKeeper TLS client authentication to be enabled the client must (at a minimum) configure itself as using TLS - // with both a client connection socket and a key store location explicitly set. - private[kafka] def zkTlsClientAuthEnabled(zkClientConfig: ZKClientConfig): Boolean = { - zooKeeperClientProperty(zkClientConfig, ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG).contains("true") && - zooKeeperClientProperty(zkClientConfig, ZkConfigs.ZK_CLIENT_CNXN_SOCKET_CONFIG).isDefined && - zooKeeperClientProperty(zkClientConfig, ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG).isDefined - } - val configDef = AbstractKafkaConfig.CONFIG_DEF def configNames: Seq[String] = configDef.names.asScala.toBuffer.sorted @@ -195,11 +167,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) this.currentConfig = newConfig } - // The following captures any system properties impacting ZooKeeper TLS configuration - // and defines the default values this instance will use if no explicit config is given. - // We make it part of each instance rather than the object to facilitate testing. 
- private val zkClientConfigViaSystemProperties = new ZKClientConfig() - override def originals: util.Map[String, AnyRef] = if (this eq currentConfig) super.originals else currentConfig.originals override def values: util.Map[String, _] = @@ -253,78 +220,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) private val _quotaConfig = new QuotaConfig(this) def quotaConfig: QuotaConfig = _quotaConfig - - private def zkBooleanConfigOrSystemPropertyWithDefaultValue(propKey: String): Boolean = { - // Use the system property if it exists and the Kafka config value was defaulted rather than actually provided - // Need to translate any system property value from true/false (String) to true/false (Boolean) - val actuallyProvided = originals.containsKey(propKey) - if (actuallyProvided) getBoolean(propKey) else { - val sysPropValue = KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey) - sysPropValue match { - case Some("true") => true - case Some(_) => false - case _ => getBoolean(propKey) // not specified so use the default value - } - } - } - - private def zkStringConfigOrSystemPropertyWithDefaultValue(propKey: String): String = { - // Use the system property if it exists and the Kafka config value was defaulted rather than actually provided - val actuallyProvided = originals.containsKey(propKey) - if (actuallyProvided) getString(propKey) else { - KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey) match { - case Some(v) => v - case _ => getString(propKey) // not specified so use the default value - } - } - } - - private def zkOptionalStringConfigOrSystemProperty(propKey: String): Option[String] = { - Option(getString(propKey)).orElse { - KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey) - } - } - private def zkPasswordConfigOrSystemProperty(propKey: String): Option[Password] = { - Option(getPassword(propKey)).orElse { - KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey).map(new Password(_)) - } - } - private def zkListConfigOrSystemProperty(propKey: String): Option[util.List[String]] = { - Option(getList(propKey)).orElse { - KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, propKey).map { sysProp => - sysProp.split("\\s*,\\s*").toBuffer.asJava - } - } - } - - val zkSslClientEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG) - val zkClientCnxnSocketClassName = zkOptionalStringConfigOrSystemProperty(ZkConfigs.ZK_CLIENT_CNXN_SOCKET_CONFIG) - val zkSslKeyStoreLocation = zkOptionalStringConfigOrSystemProperty(ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG) - val zkSslKeyStorePassword = zkPasswordConfigOrSystemProperty(ZkConfigs.ZK_SSL_KEY_STORE_PASSWORD_CONFIG) - val zkSslKeyStoreType = zkOptionalStringConfigOrSystemProperty(ZkConfigs.ZK_SSL_KEY_STORE_TYPE_CONFIG) - val zkSslTrustStoreLocation = zkOptionalStringConfigOrSystemProperty(ZkConfigs.ZK_SSL_TRUST_STORE_LOCATION_CONFIG) - val zkSslTrustStorePassword = zkPasswordConfigOrSystemProperty(ZkConfigs.ZK_SSL_TRUST_STORE_PASSWORD_CONFIG) - val zkSslTrustStoreType = zkOptionalStringConfigOrSystemProperty(ZkConfigs.ZK_SSL_TRUST_STORE_TYPE_CONFIG) - val ZkSslProtocol = zkStringConfigOrSystemPropertyWithDefaultValue(ZkConfigs.ZK_SSL_PROTOCOL_CONFIG) - val ZkSslEnabledProtocols = zkListConfigOrSystemProperty(ZkConfigs.ZK_SSL_ENABLED_PROTOCOLS_CONFIG) - val ZkSslCipherSuites = zkListConfigOrSystemProperty(ZkConfigs.ZK_SSL_CIPHER_SUITES_CONFIG) - val 
ZkSslEndpointIdentificationAlgorithm = { - // Use the system property if it exists and the Kafka config value was defaulted rather than actually provided - // Need to translate any system property value from true/false to HTTPS/ - val kafkaProp = ZkConfigs.ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG - val actuallyProvided = originals.containsKey(kafkaProp) - if (actuallyProvided) - getString(kafkaProp) - else { - KafkaConfig.zooKeeperClientProperty(zkClientConfigViaSystemProperties, kafkaProp) match { - case Some("true") => "HTTPS" - case Some(_) => "" - case None => getString(kafkaProp) // not specified so use the default value - } - } - } - val ZkSslCrlEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(ZkConfigs.ZK_SSL_CRL_ENABLE_CONFIG) - val ZkSslOcspEnable = zkBooleanConfigOrSystemPropertyWithDefaultValue(ZkConfigs.ZK_SSL_OCSP_ENABLE_CONFIG) /** ********* General Configuration ***********/ val brokerIdGenerationEnable: Boolean = getBoolean(ServerConfigs.BROKER_ID_GENERATION_ENABLE_CONFIG) val maxReservedBrokerId: Int = getInt(ServerConfigs.RESERVED_BROKER_MAX_ID_CONFIG) @@ -333,15 +228,12 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) val initialRegistrationTimeoutMs: Int = getInt(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG) val brokerHeartbeatIntervalMs: Int = getInt(KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG) val brokerSessionTimeoutMs: Int = getInt(KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG) + val controllerPerformanceSamplePeriodMs: Long = getLong(KRaftConfigs.CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS) + val controllerPerformanceAlwaysLogThresholdMs: Long = getLong(KRaftConfigs.CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS) def requiresZookeeper: Boolean = processRoles.isEmpty def usesSelfManagedQuorum: Boolean = processRoles.nonEmpty - val migrationEnabled: Boolean = getBoolean(KRaftConfigs.MIGRATION_ENABLED_CONFIG) - val migrationMetadataMinBatchSize: Int = getInt(KRaftConfigs.MIGRATION_METADATA_MIN_BATCH_SIZE_CONFIG) - - val elrEnabled: Boolean = getBoolean(KRaftConfigs.ELR_ENABLED_CONFIG) - private def parseProcessRoles(): Set[ProcessRole] = { val roles = getList(KRaftConfigs.PROCESS_ROLES_CONFIG).asScala.map { case "broker" => ProcessRole.BrokerRole @@ -480,51 +372,11 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def logPreAllocateEnable: java.lang.Boolean = getBoolean(ServerLogConfigs.LOG_PRE_ALLOCATE_CONFIG) def logInitialTaskDelayMs: java.lang.Long = Option(getLong(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG)).getOrElse(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT) - // We keep the user-provided String as `MetadataVersion.fromVersionString` can choose a slightly different version (eg if `0.10.0` - // is passed, `0.10.0-IV0` may be picked) - @nowarn("cat=deprecation") - private val logMessageFormatVersionString = getString(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG) - - /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */ - @deprecated("3.0") - lazy val logMessageFormatVersion = - if (LogConfig.shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion)) - MetadataVersion.fromVersionString(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_DEFAULT) - else MetadataVersion.fromVersionString(logMessageFormatVersionString) - def logMessageTimestampType = TimestampType.forName(getString(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG)) - /* See `TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG` for details */ - @deprecated("3.6") - def 
logMessageTimestampDifferenceMaxMs: Long = getLong(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG) - - // In the transition period before logMessageTimestampDifferenceMaxMs is removed, to maintain backward compatibility, - // we are using its value if logMessageTimestampBeforeMaxMs default value hasn't changed. - // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details - @nowarn("cat=deprecation") - def logMessageTimestampBeforeMaxMs: Long = { - val messageTimestampBeforeMaxMs: Long = getLong(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG) - if (messageTimestampBeforeMaxMs != ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DEFAULT) { - messageTimestampBeforeMaxMs - } else { - logMessageTimestampDifferenceMaxMs - } - } - - // In the transition period before logMessageTimestampDifferenceMaxMs is removed, to maintain backward compatibility, - // we are using its value if logMessageTimestampAfterMaxMs default value hasn't changed. - // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details - @nowarn("cat=deprecation") - def logMessageTimestampAfterMaxMs: Long = { - val messageTimestampAfterMaxMs: Long = getLong(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG) - if (messageTimestampAfterMaxMs != Long.MaxValue) { - messageTimestampAfterMaxMs - } else { - logMessageTimestampDifferenceMaxMs - } - } + def logMessageTimestampBeforeMaxMs: Long = getLong(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG) - def logMessageDownConversionEnable: Boolean = getBoolean(ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG) + def logMessageTimestampAfterMaxMs: Long = getLong(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG) def logDirFailureTimeoutMs: Long = getLong(ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_CONFIG) @@ -574,13 +426,8 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } /** ********* Controlled shutdown configuration ***********/ - val controlledShutdownMaxRetries = getInt(ServerConfigs.CONTROLLED_SHUTDOWN_MAX_RETRIES_CONFIG) - val controlledShutdownRetryBackoffMs = getLong(ServerConfigs.CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_CONFIG) val controlledShutdownEnable = getBoolean(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG) - /** ********* Feature configuration ***********/ - def isFeatureVersioningSupported = interBrokerProtocolVersion.isFeatureVersioningSupported - /** New group coordinator configs */ val isNewGroupCoordinatorEnabled = getBoolean(GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG) val groupCoordinatorRebalanceProtocols = { @@ -589,14 +436,12 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) if (!protocols.contains(GroupType.CLASSIC)) { throw new ConfigException(s"Disabling the '${GroupType.CLASSIC}' protocol is not supported.") } - if (protocols.contains(GroupType.CONSUMER)) { - if (processRoles.isEmpty || !isNewGroupCoordinatorEnabled) { - warn(s"The new '${GroupType.CONSUMER}' rebalance protocol is only supported in KRaft cluster with the new group coordinator.") - } + if (protocols.contains(GroupType.CONSUMER) && !isNewGroupCoordinatorEnabled) { + warn(s"The new '${GroupType.CONSUMER}' rebalance protocol is only supported with the new group coordinator.") } if (protocols.contains(GroupType.SHARE)) { - if (processRoles.isEmpty || !isNewGroupCoordinatorEnabled) { - warn(s"The new '${GroupType.SHARE}' rebalance protocol is only supported in KRaft cluster with the new group coordinator.") + if 
(!isNewGroupCoordinatorEnabled) { + warn(s"The new '${GroupType.SHARE}' rebalance protocol is only supported with the new group coordinator.") } warn(s"Share groups and the new '${GroupType.SHARE}' rebalance protocol are enabled. " + "This is part of the early access of KIP-932 and MUST NOT be used in production.") @@ -626,10 +471,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1 def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2 - def controlPlaneListenerName = getControlPlaneListenerNameAndSecurityProtocol.map { case (listenerName, _) => listenerName } - def controlPlaneSecurityProtocol = getControlPlaneListenerNameAndSecurityProtocol.map { case (_, securityProtocol) => securityProtocol } def saslMechanismInterBrokerProtocol = getString(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG) - val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion.isSaslInterBrokerHandshakeRequestEnabled /** ********* DelegationToken Configuration **************/ val delegationTokenSecretKey = getPassword(DelegationTokenManagerConfigs.DELEGATION_TOKEN_SECRET_KEY_CONFIG) @@ -638,14 +480,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) val delegationTokenExpiryTimeMs = getLong(DelegationTokenManagerConfigs.DELEGATION_TOKEN_EXPIRY_TIME_MS_CONFIG) val delegationTokenExpiryCheckIntervalMs = getLong(DelegationTokenManagerConfigs.DELEGATION_TOKEN_EXPIRY_CHECK_INTERVAL_MS_CONFIG) - /** ********* Password encryption configuration for dynamic configs *********/ - def passwordEncoderSecret = Option(getPassword(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG)) - def passwordEncoderOldSecret = Option(getPassword(PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_CONFIG)) - def passwordEncoderCipherAlgorithm = getString(PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_CONFIG) - def passwordEncoderKeyFactoryAlgorithm = getString(PasswordEncoderConfigs.PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_CONFIG) - def passwordEncoderKeyLength = getInt(PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_CONFIG) - def passwordEncoderIterations = getInt(PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_CONFIG) - /** ********* Fetch Configuration **************/ val maxIncrementalFetchSessionCacheSlots = getInt(ServerConfigs.MAX_INCREMENTAL_FETCH_SESSION_CACHE_SLOTS_CONFIG) val fetchMaxBytes = getInt(ServerConfigs.FETCH_MAX_BYTES_CONFIG) @@ -712,16 +546,9 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def saslMechanismControllerProtocol: String = getString(KRaftConfigs.SASL_MECHANISM_CONTROLLER_PROTOCOL_CONFIG) - def controlPlaneListener: Option[EndPoint] = { - controlPlaneListenerName.map { listenerName => - listeners.filter(endpoint => endpoint.listenerName.value() == listenerName.value()).head - } - } - def dataPlaneListeners: Seq[EndPoint] = { listeners.filterNot { listener => val name = listener.listenerName.value() - name.equals(getString(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG)) || controllerListenerNames.contains(name) } } @@ -770,19 +597,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } } - private def getControlPlaneListenerNameAndSecurityProtocol: Option[(ListenerName, SecurityProtocol)] = { - Option(getString(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG)) match { - case Some(name) => - val listenerName = ListenerName.normalised(name) - val 
securityProtocol = effectiveListenerSecurityProtocolMap.getOrElse(listenerName, - throw new ConfigException(s"Listener with ${listenerName.value} defined in " + - s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG}.")) - Some(listenerName, securityProtocol) - - case None => None - } - } - private def getSecurityProtocol(protocolName: String, configName: String): SecurityProtocol = { try SecurityProtocol.forName(protocolName) catch { @@ -816,41 +630,12 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } } - // Topic IDs are used with all self-managed quorum clusters and ZK cluster with IBP greater than or equal to 2.8 - def usesTopicId: Boolean = - usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported - validateValues() - @nowarn("cat=deprecation") private def validateValues(): Unit = { if (nodeId != brokerId) { throw new ConfigException(s"You must set `${KRaftConfigs.NODE_ID_CONFIG}` to the same value as `${ServerConfigs.BROKER_ID_CONFIG}`.") } - if (requiresZookeeper) { - if (zkConnect == null) { - throw new ConfigException(s"Missing required configuration `${ZkConfigs.ZK_CONNECT_CONFIG}` which has no default value.") - } - if (brokerIdGenerationEnable) { - if (migrationEnabled) { - require(brokerId >= 0, "broker.id generation is incompatible with ZooKeeper migration. Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).") - } - require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be greater than or equal to -1 and not greater than reserved.broker.max.id") - } else { - require(brokerId >= 0, "broker.id must be greater than or equal to 0") - } - } else { - // KRaft-based metadata quorum - if (nodeId < 0) { - throw new ConfigException(s"Missing configuration `${KRaftConfigs.NODE_ID_CONFIG}` which is required " + - s"when `process.roles` is defined (i.e. 
when running in KRaft mode).") - } - if (migrationEnabled) { - if (zkConnect == null) { - throw new ConfigException(s"If using `${KRaftConfigs.MIGRATION_ENABLED_CONFIG}` in KRaft mode, `${ZkConfigs.ZK_CONNECT_CONFIG}` must also be set.") - } - } - } require(logRollTimeMillis >= 1, "log.roll.ms must be greater than or equal to 1") require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be greater than or equal to 0") require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, greater than or equal to 1") @@ -874,19 +659,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) ) } } - def validateQuorumVotersAndQuorumBootstrapServerForMigration(): Unit = { - if (voterIds.isEmpty && quorumConfig.bootstrapServers.isEmpty) { - throw new ConfigException( - s"""If using ${KRaftConfigs.MIGRATION_ENABLED_CONFIG}, either ${QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG} must - |contain the set of bootstrap controllers or ${QuorumConfig.QUORUM_VOTERS_CONFIG} must contain a parseable - |set of controllers.""".stripMargin.replace("\n", " ") - ) - } - } - def validateControlPlaneListenerEmptyForKRaft(): Unit = { - require(controlPlaneListenerName.isEmpty, - s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} is not supported in KRaft mode.") - } + def validateControllerQuorumVotersMustContainNodeIdForKRaftController(): Unit = { require(voterIds.isEmpty || voterIds.contains(nodeId), s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains the 'controller' role, the node id $nodeId must be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}") @@ -908,7 +681,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) if (processRoles == Set(ProcessRole.BrokerRole)) { // KRaft broker-only validateQuorumVotersAndQuorumBootstrapServerForKRaft() - validateControlPlaneListenerEmptyForKRaft() // nodeId must not appear in controller.quorum.voters require(!voterIds.contains(nodeId), s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains just the 'broker' role, the node id $nodeId must not be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}") @@ -933,42 +705,31 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } else if (processRoles == Set(ProcessRole.ControllerRole)) { // KRaft controller-only validateQuorumVotersAndQuorumBootstrapServerForKRaft() - validateControlPlaneListenerEmptyForKRaft() // listeners should only contain listeners also enumerated in the controller listener require( effectiveAdvertisedControllerListeners.size == listeners.size, s"The ${SocketServerConfigs.LISTENERS_CONFIG} config must only contain KRaft controller listeners from ${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} when ${KRaftConfigs.PROCESS_ROLES_CONFIG}=controller" ) + // controller.listener.names must not contain inter.broker.listener.name when inter.broker.listener.name is explicitly set + if (Option(getString(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)).isDefined) { + require( + !controllerListenerNames.contains(interBrokerListenerName.value()), + s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must not contain an explicitly set ${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} configuration value when ${KRaftConfigs.PROCESS_ROLES_CONFIG}=controller'" + ) + } validateControllerQuorumVotersMustContainNodeIdForKRaftController() validateAdvertisedControllerListenersNonEmptyForKRaftController() 
validateControllerListenerNamesMustAppearInListenersForKRaftController() } else if (isKRaftCombinedMode) { // KRaft combined broker and controller validateQuorumVotersAndQuorumBootstrapServerForKRaft() - validateControlPlaneListenerEmptyForKRaft() validateControllerQuorumVotersMustContainNodeIdForKRaftController() validateAdvertisedControllerListenersNonEmptyForKRaftController() validateControllerListenerNamesMustAppearInListenersForKRaftController() } else { - // ZK-based - if (migrationEnabled) { - require(brokerId >= 0, - "broker.id generation is incompatible with ZooKeeper migration. Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).") - validateQuorumVotersAndQuorumBootstrapServerForMigration() - require(controllerListenerNames.nonEmpty, - s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must not be empty when running in ZooKeeper migration mode: ${controllerListenerNames.asJava}") - require(interBrokerProtocolVersion.isMigrationSupported, s"Cannot enable ZooKeeper migration without setting " + - s"'${ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG}' to 3.4 or higher") - if (logDirs.size > 1) { - require(interBrokerProtocolVersion.isDirectoryAssignmentSupported, - s"Cannot enable ZooKeeper migration with multiple log directories (aka JBOD) without setting " + - s"'${ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG}' to ${MetadataVersion.IBP_3_7_IV2} or higher") - } - } else { - // controller listener names must be empty when not in KRaft mode - require(controllerListenerNames.isEmpty, - s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must be empty when not running in KRaft mode: ${controllerListenerNames.asJava}") - } + // controller listener names must be empty when not in KRaft mode + require(controllerListenerNames.isEmpty, + s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must be empty when not running in KRaft mode: ${controllerListenerNames.asJava}") } val listenerNames = listeners.map(_.listenerName).toSet @@ -993,34 +754,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) s"${SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG} cannot use the nonroutable meta-address 0.0.0.0. "+ s"Use a routable IP address.") - // validate control.plane.listener.name config - if (controlPlaneListenerName.isDefined) { - require(advertisedBrokerListenerNames.contains(controlPlaneListenerName.get), - s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} must be a listener name defined in ${SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG}. " + - s"The valid options based on currently configured listeners are ${advertisedBrokerListenerNames.map(_.value).mkString(",")}") - // controlPlaneListenerName should be different from interBrokerListenerName - require(!controlPlaneListenerName.get.value().equals(interBrokerListenerName.value()), - s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG}, when defined, should have a different value from the inter broker listener name. 
" + - s"Currently they both have the value ${controlPlaneListenerName.get}") - } - - val messageFormatVersion = new MessageFormatVersion(logMessageFormatVersionString, interBrokerProtocolVersionString) - if (messageFormatVersion.shouldWarn) - warn(createBrokerWarningMessage) - - val recordVersion = logMessageFormatVersion.highestSupportedRecordVersion - require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= recordVersion.value, - s"log.message.format.version $logMessageFormatVersionString can only be used when inter.broker.protocol.version " + - s"is set to version ${MetadataVersion.minSupportedFor(recordVersion).shortVersion} or higher") - - if (groupCoordinatorConfig.offsetTopicCompressionType == CompressionType.ZSTD) - require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= IBP_2_1_IV0.highestSupportedRecordVersion().value, - "offsets.topic.compression.codec zstd can only be used when inter.broker.protocol.version " + - s"is set to version ${IBP_2_1_IV0.shortVersion} or higher") - val interBrokerUsesSasl = interBrokerSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT || interBrokerSecurityProtocol == SecurityProtocol.SASL_SSL - require(!interBrokerUsesSasl || saslInterBrokerHandshakeRequestEnable || saslMechanismInterBrokerProtocol == SaslConfigs.GSSAPI_MECHANISM, - s"Only GSSAPI mechanism is supported for inter-broker communication with SASL when inter.broker.protocol.version is set to $interBrokerProtocolVersionString") require(!interBrokerUsesSasl || saslEnabledMechanisms(interBrokerListenerName).contains(saslMechanismInterBrokerProtocol), s"${BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG} must be included in ${BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG} when SASL is used for inter-broker communication") require(queuedMaxBytes <= 0 || queuedMaxBytes >= socketRequestMaxBytes, @@ -1062,7 +796,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) * Copy the subset of properties that are relevant to Logs. The individual properties * are listed here since the names are slightly different in each Config class... 
*/ - @nowarn("cat=deprecation") def extractLogConfigMap: java.util.Map[String, Object] = { val logProps = new java.util.HashMap[String, Object]() logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, logSegmentBytes) @@ -1088,21 +821,11 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) logProps.put(TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG, zstdCompressionLevel) logProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, uncleanLeaderElectionEnable) logProps.put(TopicConfig.PREALLOCATE_CONFIG, logPreAllocateEnable) - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, logMessageFormatVersion.version) logProps.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, logMessageTimestampType.name) - logProps.put(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, logMessageTimestampDifferenceMaxMs: java.lang.Long) logProps.put(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, logMessageTimestampBeforeMaxMs: java.lang.Long) logProps.put(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, logMessageTimestampAfterMaxMs: java.lang.Long) - logProps.put(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, logMessageDownConversionEnable: java.lang.Boolean) logProps.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, remoteLogManagerConfig.logLocalRetentionMs: java.lang.Long) logProps.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, remoteLogManagerConfig.logLocalRetentionBytes: java.lang.Long) logProps } - - @nowarn("cat=deprecation") - private def createBrokerWarningMessage: String = { - s"Broker configuration ${ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG} with value $logMessageFormatVersionString is ignored " + - s"because the inter-broker protocol version `$interBrokerProtocolVersionString` is greater or equal than 3.0. " + - "This configuration is deprecated and it will be removed in Apache Kafka 4.0." 
- } } diff --git a/core/src/main/scala/kafka/server/KafkaRaftServer.scala b/core/src/main/scala/kafka/server/KafkaRaftServer.scala index 4a676a2765e88..5642c8aaa99ce 100644 --- a/core/src/main/scala/kafka/server/KafkaRaftServer.scala +++ b/core/src/main/scala/kafka/server/KafkaRaftServer.scala @@ -19,8 +19,7 @@ package kafka.server import java.io.File import java.util.concurrent.CompletableFuture import kafka.log.UnifiedLog -import kafka.metrics.KafkaMetricsReporter -import kafka.utils.{CoreUtils, Logging, Mx4jLoader, VerifiableProperties} +import kafka.utils.{CoreUtils, Logging, Mx4jLoader} import org.apache.kafka.common.config.{ConfigDef, ConfigResource} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.utils.{AppInfoParser, Time} @@ -32,7 +31,6 @@ import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsem import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.{ProcessRole, ServerSocketFactory} import org.apache.kafka.server.config.ServerTopicConfigSynonyms -import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.storage.internals.log.LogConfig import org.slf4j.Logger @@ -53,8 +51,6 @@ class KafkaRaftServer( ) extends Server with Logging { this.logIdent = s"[KafkaRaftServer nodeId=${config.nodeId}] " - KafkaMetricsReporter.startReporters(VerifiableProperties(config.originals)) - KafkaYammerMetrics.INSTANCE.configure(config.originals) private val (metaPropsEnsemble, bootstrapMetadata) = KafkaRaftServer.initializeLogDirs(config, this.logger.underlying, this.logIdent) diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala deleted file mode 100755 index 0cb6ee48726d8..0000000000000 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ /dev/null @@ -1,1156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import kafka.cluster.{Broker, EndPoint} -import kafka.common.GenerateBrokerIdException -import kafka.controller.KafkaController -import kafka.coordinator.group.GroupCoordinatorAdapter -import kafka.coordinator.transaction.{TransactionCoordinator, ZkProducerIdManager} -import kafka.log.LogManager -import kafka.log.remote.RemoteLogManager -import kafka.metrics.KafkaMetricsReporter -import kafka.network.{ControlPlaneAcceptor, DataPlaneAcceptor, RequestChannel, SocketServer} -import kafka.raft.KafkaRaftManager -import kafka.server.metadata.{OffsetTrackingListener, ZkConfigRepository, ZkMetadataCache} -import kafka.utils._ -import kafka.zk.{AdminZkClient, BrokerInfo, KafkaZkClient} -import org.apache.kafka.clients.{ApiVersions, ManualMetadataUpdater, MetadataRecoveryStrategy, NetworkClient, NetworkClientUtils} -import org.apache.kafka.common.config.ConfigException -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.ApiMessageType.ListenerType -import org.apache.kafka.common.message.BrokerRegistrationRequestData.{Listener, ListenerCollection} -import org.apache.kafka.common.message.ControlledShutdownRequestData -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network._ -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.{ControlledShutdownRequest, ControlledShutdownResponse} -import org.apache.kafka.common.security.scram.internals.ScramMechanism -import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache -import org.apache.kafka.common.security.{JaasContext, JaasUtils} -import org.apache.kafka.common.utils.{AppInfoParser, LogContext, Time, Utils} -import org.apache.kafka.common.{Endpoint, Node, TopicPartition} -import org.apache.kafka.coordinator.group.GroupCoordinator -import org.apache.kafka.coordinator.transaction.ProducerIdManager -import org.apache.kafka.image.loader.metrics.MetadataLoaderMetrics -import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble.VerificationFlag -import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble.VerificationFlag.REQUIRE_V0 -import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble} -import org.apache.kafka.metadata.{BrokerState, MetadataRecordSerde, VersionRange} -import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.raft.Endpoints -import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.authorizer.Authorizer -import org.apache.kafka.server.common.MetadataVersion._ -import org.apache.kafka.server.common.{ApiMessageAndVersion, MetadataVersion, NodeToControllerChannelManager} -import org.apache.kafka.server.config.{ConfigType, ZkConfigs} -import org.apache.kafka.server.fault.LoggingFaultHandler -import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig -import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.kafka.server.util.KafkaScheduler -import org.apache.kafka.storage.internals.log.LogDirFailureChannel -import org.apache.kafka.storage.log.metrics.BrokerTopicStats -import org.apache.zookeeper.client.ZKClientConfig - -import java.io.{File, IOException} -import java.net.{InetAddress, SocketTimeoutException} -import java.nio.file.{Files, Paths} -import java.time.Duration -import java.util -import java.util.concurrent._ -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import java.util.{Optional, OptionalInt, 
OptionalLong} -import scala.collection.{Map, Seq} -import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption - -object KafkaServer { - def zkClientConfigFromKafkaConfig(config: KafkaConfig, forceZkSslClientEnable: Boolean = false): ZKClientConfig = { - val clientConfig = new ZKClientConfig - if (config.zkSslClientEnable || forceZkSslClientEnable) { - KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG, "true") - config.zkClientCnxnSocketClassName.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_CLIENT_CNXN_SOCKET_CONFIG, _)) - config.zkSslKeyStoreLocation.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG, _)) - config.zkSslKeyStorePassword.foreach(x => KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_KEY_STORE_PASSWORD_CONFIG, x.value)) - config.zkSslKeyStoreType.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_KEY_STORE_TYPE_CONFIG, _)) - config.zkSslTrustStoreLocation.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_TRUST_STORE_LOCATION_CONFIG, _)) - config.zkSslTrustStorePassword.foreach(x => KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_TRUST_STORE_PASSWORD_CONFIG, x.value)) - config.zkSslTrustStoreType.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_TRUST_STORE_TYPE_CONFIG, _)) - KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_PROTOCOL_CONFIG, config.ZkSslProtocol) - config.ZkSslEnabledProtocols.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_ENABLED_PROTOCOLS_CONFIG, _)) - config.ZkSslCipherSuites.foreach(KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_CIPHER_SUITES_CONFIG, _)) - KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, config.ZkSslEndpointIdentificationAlgorithm) - KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_CRL_ENABLE_CONFIG, config.ZkSslCrlEnable.toString) - KafkaConfig.setZooKeeperClientProperty(clientConfig, ZkConfigs.ZK_SSL_OCSP_ENABLE_CONFIG, config.ZkSslOcspEnable.toString) - } - // The zk sasl is enabled by default so it can produce false error when broker does not intend to use SASL. - if (!JaasUtils.isZkSaslEnabled) clientConfig.setProperty(JaasUtils.ZK_SASL_CLIENT, "false") - clientConfig - } - - val MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS: Long = 120000 -} - -/** - * Represents the lifecycle of a single Kafka broker. Handles all functionality required - * to start up and shutdown a single Kafka node. 
- */ -class KafkaServer( - val config: KafkaConfig, - time: Time = Time.SYSTEM, - threadNamePrefix: Option[String] = None, - enableForwarding: Boolean = false -) extends KafkaBroker with Server { - - private val startupComplete = new AtomicBoolean(false) - private val isShuttingDown = new AtomicBoolean(false) - private val isStartingUp = new AtomicBoolean(false) - - @volatile private var _brokerState: BrokerState = BrokerState.NOT_RUNNING - private var shutdownLatch = new CountDownLatch(1) - private var logContext: LogContext = _ - - private val kafkaMetricsReporters: Seq[KafkaMetricsReporter] = - KafkaMetricsReporter.startReporters(VerifiableProperties(config.originals)) - var kafkaYammerMetrics: KafkaYammerMetrics = _ - var metrics: Metrics = _ - - @volatile var dataPlaneRequestProcessor: KafkaApis = _ - private var controlPlaneRequestProcessor: KafkaApis = _ - - var authorizer: Option[Authorizer] = None - @volatile var socketServer: SocketServer = _ - var dataPlaneRequestHandlerPool: KafkaRequestHandlerPool = _ - private var controlPlaneRequestHandlerPool: KafkaRequestHandlerPool = _ - - var logDirFailureChannel: LogDirFailureChannel = _ - @volatile private var _logManager: LogManager = _ - var remoteLogManagerOpt: Option[RemoteLogManager] = None - - @volatile private var _replicaManager: ReplicaManager = _ - var adminManager: ZkAdminManager = _ - var tokenManager: DelegationTokenManager = _ - - var dynamicConfigHandlers: Map[String, ConfigHandler] = _ - private var dynamicConfigManager: ZkConfigManager = _ - var credentialProvider: CredentialProvider = _ - var tokenCache: DelegationTokenCache = _ - - @volatile var groupCoordinator: GroupCoordinator = _ - - var transactionCoordinator: TransactionCoordinator = _ - - @volatile private var _kafkaController: KafkaController = _ - - var forwardingManager: Option[ForwardingManager] = None - - var autoTopicCreationManager: AutoTopicCreationManager = _ - - var clientToControllerChannelManager: NodeToControllerChannelManager = _ - - var alterPartitionManager: AlterPartitionManager = _ - - var kafkaScheduler: KafkaScheduler = _ - - @volatile var metadataCache: ZkMetadataCache = _ - - @volatile var quorumControllerNodeProvider: RaftControllerNodeProvider = _ - - var quotaManagers: QuotaFactory.QuotaManagers = _ - - val zkClientConfig: ZKClientConfig = KafkaServer.zkClientConfigFromKafkaConfig(config) - private var _zkClient: KafkaZkClient = _ - private var configRepository: ZkConfigRepository = _ - - val correlationId: AtomicInteger = new AtomicInteger(0) - - private var _clusterId: String = _ - @volatile private var _brokerTopicStats: BrokerTopicStats = _ - - private var _featureChangeListener: FinalizedFeatureChangeListener = _ - - val brokerFeatures: BrokerFeatures = BrokerFeatures.createEmpty() - - override def brokerState: BrokerState = _brokerState - - def clusterId: String = _clusterId - - // Visible for testing - private[kafka] def zkClient = _zkClient - - override def brokerTopicStats: BrokerTopicStats = _brokerTopicStats - - private[kafka] def featureChangeListener = _featureChangeListener - - override def replicaManager: ReplicaManager = _replicaManager - - override def logManager: LogManager = _logManager - - @volatile def kafkaController: KafkaController = _kafkaController - - var lifecycleManager: BrokerLifecycleManager = _ - private var raftManager: KafkaRaftManager[ApiMessageAndVersion] = _ - - @volatile var brokerEpochManager: ZkBrokerEpochManager = _ - - def brokerEpochSupplier(): Long = 
Option(brokerEpochManager).map(_.get()).getOrElse(-1) - - /** - * Start up API for bringing up a single instance of the Kafka server. - * Instantiates the LogManager, the SocketServer and the request handlers - KafkaRequestHandlers - */ - override def startup(): Unit = { - try { - info("starting") - - if (isShuttingDown.get) - throw new IllegalStateException("Kafka server is still shutting down, cannot re-start!") - - if (startupComplete.get) - return - - val canStartup = isStartingUp.compareAndSet(false, true) - if (canStartup) { - _brokerState = BrokerState.STARTING - - /* setup zookeeper */ - initZkClient(time) - configRepository = new ZkConfigRepository(new AdminZkClient(zkClient)) - - /* Get or create cluster_id */ - _clusterId = getOrGenerateClusterId(zkClient) - info(s"Cluster ID = $clusterId") - - /* load metadata */ - val initialMetaPropsEnsemble = { - val loader = new MetaPropertiesEnsemble.Loader() - loader.addLogDirs(config.logDirs.asJava) - if (config.migrationEnabled) { - loader.addMetadataLogDir(config.metadataLogDir) - } - loader.load() - } - - val verificationId = if (config.brokerId < 0) { - OptionalInt.empty() - } else { - OptionalInt.of(config.brokerId) - } - val verificationFlags = if (config.migrationEnabled) { - util.EnumSet.noneOf(classOf[VerificationFlag]) - } else { - util.EnumSet.of(REQUIRE_V0) - } - initialMetaPropsEnsemble.verify(Optional.of(_clusterId), verificationId, verificationFlags) - - /* generate brokerId */ - config.brokerId = getOrGenerateBrokerId(initialMetaPropsEnsemble) - logContext = new LogContext(s"[KafkaServer id=${config.brokerId}] ") - this.logIdent = logContext.logPrefix - - // initialize dynamic broker configs from ZooKeeper. Any updates made after this will be - // applied after ZkConfigManager starts. - config.dynamicConfig.initialize(Some(zkClient), clientMetricsReceiverPluginOpt = None) - - /* start scheduler */ - kafkaScheduler = new KafkaScheduler(config.backgroundThreads) - kafkaScheduler.startup() - - /* create and configure metrics */ - kafkaYammerMetrics = KafkaYammerMetrics.INSTANCE - kafkaYammerMetrics.configure(config.originals) - metrics = Server.initializeMetrics(config, time, clusterId) - createCurrentControllerIdMetric() - - /* register broker metrics */ - _brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) - - quotaManagers = QuotaFactory.instantiate(config, metrics, time, threadNamePrefix.getOrElse("")) - KafkaBroker.notifyClusterListeners(clusterId, kafkaMetricsReporters ++ metrics.reporters.asScala) - - logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size) - - // Make sure all storage directories have meta.properties files. - val metaPropsEnsemble = { - val copier = new MetaPropertiesEnsemble.Copier(initialMetaPropsEnsemble) - initialMetaPropsEnsemble.nonFailedDirectoryProps().forEachRemaining(e => { - val logDir = e.getKey - val builder = new MetaProperties.Builder(e.getValue). - setClusterId(_clusterId). 
- setNodeId(config.brokerId) - if (!builder.directoryId().isPresent) { - if (config.migrationEnabled) { - builder.setDirectoryId(copier.generateValidDirectoryId()) - } - } - copier.setLogDirProps(logDir, builder.build()) - }) - copier.emptyLogDirs().clear() - copier.setPreWriteHandler((logDir, _, _) => { - info(s"Rewriting $logDir${File.separator}meta.properties") - Files.createDirectories(Paths.get(logDir)) - }) - copier.setWriteErrorHandler((logDir, e) => { - logDirFailureChannel.maybeAddOfflineLogDir(logDir, s"Error while writing meta.properties to $logDir", e) - }) - copier.writeLogDirChanges() - copier.copy() - } - metaPropsEnsemble.verify(Optional.of(_clusterId), OptionalInt.of(config.brokerId), verificationFlags) - - /* start log manager */ - _logManager = LogManager( - config, - metaPropsEnsemble.errorLogDirs().asScala.toSeq, - configRepository, - kafkaScheduler, - time, - brokerTopicStats, - logDirFailureChannel, - config.usesTopicId) - _brokerState = BrokerState.RECOVERY - logManager.startup(zkClient.getAllTopicsInCluster()) - - remoteLogManagerOpt = createRemoteLogManager() - - metadataCache = MetadataCache.zkMetadataCache( - config.brokerId, - config.interBrokerProtocolVersion, - brokerFeatures, - config.migrationEnabled) - val controllerNodeProvider = new MetadataCacheControllerNodeProvider(metadataCache, config, - () => Option(quorumControllerNodeProvider).map(_.getControllerInfo())) - - /* initialize feature change listener */ - _featureChangeListener = new FinalizedFeatureChangeListener(metadataCache, _zkClient) - if (config.isFeatureVersioningSupported) { - _featureChangeListener.initOrThrow(config.zkConnectionTimeoutMs) - } - - // Enable delegation token cache for all SCRAM mechanisms to simplify dynamic update. - // This keeps the cache up-to-date if new SCRAM mechanisms are enabled dynamically. - tokenCache = new DelegationTokenCache(ScramMechanism.mechanismNames) - credentialProvider = new CredentialProvider(ScramMechanism.mechanismNames, tokenCache) - - clientToControllerChannelManager = new NodeToControllerChannelManagerImpl( - controllerNodeProvider = controllerNodeProvider, - time = time, - metrics = metrics, - config = config, - channelName = "forwarding", - s"zk-broker-${config.nodeId}-", - retryTimeoutMs = config.requestTimeoutMs.longValue - ) - clientToControllerChannelManager.start() - - /* start forwarding manager */ - var autoTopicCreationChannel = Option.empty[NodeToControllerChannelManager] - if (enableForwarding) { - this.forwardingManager = Some(ForwardingManager(clientToControllerChannelManager, metrics)) - autoTopicCreationChannel = Some(clientToControllerChannelManager) - } - - val apiVersionManager = ApiVersionManager( - ListenerType.ZK_BROKER, - config, - forwardingManager, - brokerFeatures, - metadataCache, - None - ) - - // Create and start the socket server acceptor threads so that the bound port is known. - // Delay starting processors until the end of the initialization sequence to ensure - // that credentials have been loaded before processing authentications. - // - // Note that we allow the use of KRaft mode controller APIs when forwarding is enabled - // so that the Envelope request is exposed. This is only used in testing currently. 
- socketServer = new SocketServer(config, metrics, time, credentialProvider, apiVersionManager) - - // Start alter partition manager based on the IBP version - alterPartitionManager = if (config.interBrokerProtocolVersion.isAlterPartitionSupported) { - AlterPartitionManager( - config = config, - metadataCache = metadataCache, - scheduler = kafkaScheduler, - controllerNodeProvider, - time = time, - metrics = metrics, - s"zk-broker-${config.nodeId}-", - brokerEpochSupplier = brokerEpochSupplier - ) - } else { - AlterPartitionManager(kafkaScheduler, time, zkClient) - } - alterPartitionManager.start() - - // Start replica manager - _replicaManager = createReplicaManager(isShuttingDown) - replicaManager.startup() - - val brokerInfo = createBrokerInfo - val brokerEpoch = zkClient.registerBroker(brokerInfo) - - /* start token manager */ - tokenManager = new DelegationTokenManagerZk(config, tokenCache, time , zkClient) - tokenManager.startup() - - /* start kafka controller */ - _kafkaController = new KafkaController(config, zkClient, time, metrics, brokerInfo, brokerEpoch, tokenManager, brokerFeatures, metadataCache, threadNamePrefix) - kafkaController.startup() - - if (config.migrationEnabled) { - logger.info("Starting up additional components for ZooKeeper migration") - lifecycleManager = new BrokerLifecycleManager(config, - time, - s"zk-broker-${config.nodeId}-", - isZkBroker = true, - logManager.directoryIdsSet) - - // For ZK brokers in migration mode, always delete the metadata partition on startup. - logger.info(s"Deleting local metadata log from ${config.metadataLogDir} since this is a ZK broker in migration mode.") - KafkaRaftManager.maybeDeleteMetadataLogDir(config) - logger.info("Successfully deleted local metadata log. It will be re-created.") - - // If the ZK broker is in migration mode, start up a RaftManager to learn about the new KRaft controller - val quorumVoters = QuorumConfig.parseVoterConnections(config.quorumConfig.voters) - raftManager = new KafkaRaftManager[ApiMessageAndVersion]( - metaPropsEnsemble.clusterId().get(), - config, - // metadata log dir and directory.id must exist because migration is enabled - metaPropsEnsemble.logDirProps.get(metaPropsEnsemble.metadataLogDir.get).directoryId.get, - new MetadataRecordSerde, - KafkaRaftServer.MetadataPartition, - KafkaRaftServer.MetadataTopicId, - time, - metrics, - threadNamePrefix, - CompletableFuture.completedFuture(quorumVoters), - QuorumConfig.parseBootstrapServers(config.quorumConfig.bootstrapServers), - // Endpoint information is only needed for KRaft controllers (voters). ZK brokers - // (observers) can never be KRaft controllers - Endpoints.empty(), - fatalFaultHandler = new LoggingFaultHandler("raftManager", () => shutdown()) - ) - quorumControllerNodeProvider = RaftControllerNodeProvider(raftManager, config) - val brokerToQuorumChannelManager = new NodeToControllerChannelManagerImpl( - controllerNodeProvider = quorumControllerNodeProvider, - time = time, - metrics = metrics, - config = config, - channelName = "quorum", - s"zk-broker-${config.nodeId}-", - retryTimeoutMs = config.requestTimeoutMs.longValue - ) - - val listener = new OffsetTrackingListener() - raftManager.register(listener) - raftManager.startup() - - val networkListeners = new ListenerCollection() - config.effectiveAdvertisedBrokerListeners.foreach { ep => - networkListeners.add(new Listener(). - setHost(if (Utils.isBlank(ep.host)) InetAddress.getLocalHost.getCanonicalHostName else ep.host). - setName(ep.listenerName.value()). 
- setPort(if (ep.port == 0) socketServer.boundPort(ep.listenerName) else ep.port). - setSecurityProtocol(ep.securityProtocol.id)) - } - - val features = BrokerFeatures.createDefaultFeatureMap(BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled)).asScala - - // Even though ZK brokers don't use "metadata.version" feature, we need to overwrite it with our IBP as part of registration - // so the KRaft controller can verify that all brokers are on the same IBP before starting the migration. - val featuresRemapped = features + (MetadataVersion.FEATURE_NAME -> - VersionRange.of(config.interBrokerProtocolVersion.featureLevel(), config.interBrokerProtocolVersion.featureLevel())) - - lifecycleManager.start( - () => listener.highestOffset, - brokerToQuorumChannelManager, - clusterId, - networkListeners, - featuresRemapped.asJava, - OptionalLong.empty() - ) - logger.debug("Start RaftManager") - } - - // Used by ZK brokers during a KRaft migration. When talking to a KRaft controller, we need to use the epoch - // from BrokerLifecycleManager rather than ZK (via KafkaController) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, kafkaController, Option(lifecycleManager)) - - adminManager = new ZkAdminManager(config, metrics, metadataCache, zkClient) - - /* start group coordinator */ - // Hardcode Time.SYSTEM for now as some Streams tests fail otherwise, it would be good to fix the underlying issue - groupCoordinator = GroupCoordinatorAdapter( - config, - replicaManager, - Time.SYSTEM, - metrics - ) - groupCoordinator.startup(() => zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.groupCoordinatorConfig.offsetsTopicPartitions)) - - /* create producer ids manager */ - val producerIdManager = if (config.interBrokerProtocolVersion.isAllocateProducerIdsSupported) { - ProducerIdManager.rpc( - config.brokerId, - time, - () => brokerEpochSupplier(), - clientToControllerChannelManager - ) - } else { - new ZkProducerIdManager(config.brokerId, zkClient) - } - /* start transaction coordinator, with a separate background thread scheduler for transaction expiration and log loading */ - // Hardcode Time.SYSTEM for now as some Streams tests fail otherwise, it would be good to fix the underlying issue - transactionCoordinator = TransactionCoordinator(config, replicaManager, new KafkaScheduler(1, true, "transaction-log-manager-"), - () => producerIdManager, metrics, metadataCache, Time.SYSTEM) - transactionCoordinator.startup( - () => zkClient.getTopicPartitionCount(Topic.TRANSACTION_STATE_TOPIC_NAME).getOrElse(config.transactionLogConfig.transactionTopicPartitions)) - - /* start auto topic creation manager */ - this.autoTopicCreationManager = AutoTopicCreationManager( - config, - autoTopicCreationChannel, - Some(adminManager), - Some(kafkaController), - groupCoordinator, - transactionCoordinator, - None - ) - - /* Get the authorizer and initialize it if one is specified.*/ - authorizer = config.createNewAuthorizer() - authorizer.foreach(_.configure(config.originals)) - val authorizerFutures: Map[Endpoint, CompletableFuture[Void]] = authorizer match { - case Some(authZ) => - authZ.start(brokerInfo.broker.toServerInfo(clusterId, config)).asScala.map { case (ep, cs) => - ep -> cs.toCompletableFuture - } - case None => - brokerInfo.broker.endPoints.map { ep => - ep.toJava -> CompletableFuture.completedFuture[Void](null) - }.toMap - } - - // The FetchSessionCache is divided into config.numIoThreads shards, each responsible - // for Math.max(1, shardNum * 
sessionIdRange) <= sessionId < (shardNum + 1) * sessionIdRange - val sessionIdRange = Int.MaxValue / NumFetchSessionCacheShards - val fetchSessionCacheShards = (0 until NumFetchSessionCacheShards) - .map(shardNum => new FetchSessionCacheShard( - config.maxIncrementalFetchSessionCacheSlots / NumFetchSessionCacheShards, - KafkaServer.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS, - sessionIdRange, - shardNum - )) - val fetchManager = new FetchManager(Time.SYSTEM, new FetchSessionCache(fetchSessionCacheShards)) - - // Start RemoteLogManager before broker start serving the requests. - remoteLogManagerOpt.foreach { rlm => - val listenerName = config.remoteLogManagerConfig.remoteLogMetadataManagerListenerName() - if (listenerName != null) { - brokerInfo.broker.endPoints - .find(e => e.listenerName.equals(ListenerName.normalised(listenerName))) - .orElse(throw new ConfigException(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, - listenerName, "Should be set as a listener name within valid broker listener name list: " - + brokerInfo.broker.endPoints.map(_.listenerName).mkString(","))) - .foreach(e => rlm.onEndPointCreated(e.toJava)) - } - rlm.startup() - } - - /* start processing requests */ - val zkSupport = ZkSupport(adminManager, kafkaController, zkClient, forwardingManager, metadataCache, brokerEpochManager) - - def createKafkaApis(requestChannel: RequestChannel): KafkaApis = new KafkaApis( - requestChannel = requestChannel, - metadataSupport = zkSupport, - replicaManager = replicaManager, - groupCoordinator = groupCoordinator, - txnCoordinator = transactionCoordinator, - shareCoordinator = None, //share coord only supported in kraft mode - autoTopicCreationManager = autoTopicCreationManager, - brokerId = config.brokerId, - config = config, - configRepository = configRepository, - metadataCache = metadataCache, - metrics = metrics, - authorizer = authorizer, - quotas = quotaManagers, - fetchManager = fetchManager, - sharePartitionManager = None, - brokerTopicStats = brokerTopicStats, - clusterId = clusterId, - time = time, - tokenManager = tokenManager, - apiVersionManager = apiVersionManager, - clientMetricsManager = None) - - dataPlaneRequestProcessor = createKafkaApis(socketServer.dataPlaneRequestChannel) - - dataPlaneRequestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.dataPlaneRequestChannel, dataPlaneRequestProcessor, time, - config.numIoThreads, s"${DataPlaneAcceptor.MetricPrefix}RequestHandlerAvgIdlePercent", DataPlaneAcceptor.ThreadPrefix) - - socketServer.controlPlaneRequestChannelOpt.foreach { controlPlaneRequestChannel => - controlPlaneRequestProcessor = createKafkaApis(controlPlaneRequestChannel) - controlPlaneRequestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.controlPlaneRequestChannelOpt.get, controlPlaneRequestProcessor, time, - 1, s"${ControlPlaneAcceptor.MetricPrefix}RequestHandlerAvgIdlePercent", ControlPlaneAcceptor.ThreadPrefix) - } - - Mx4jLoader.maybeLoad() - - /* Add all reconfigurables for config change notification before starting config handlers */ - config.dynamicConfig.addReconfigurables(this) - Option(logManager.cleaner).foreach(config.dynamicConfig.addBrokerReconfigurable) - - /* start dynamic config manager */ - dynamicConfigHandlers = Map[String, ConfigHandler](ConfigType.TOPIC -> new TopicConfigHandler(replicaManager, config, quotaManagers, Some(kafkaController)), - ConfigType.CLIENT -> new ClientIdConfigHandler(quotaManagers), - ConfigType.USER -> new UserConfigHandler(quotaManagers, 
credentialProvider), - ConfigType.BROKER -> new BrokerConfigHandler(config, quotaManagers), - ConfigType.IP -> new IpConfigHandler(socketServer.connectionQuotas)) - - // Create the config manager. start listening to notifications - dynamicConfigManager = new ZkConfigManager(zkClient, dynamicConfigHandlers) - dynamicConfigManager.startup() - - if (config.migrationEnabled && lifecycleManager != null) { - lifecycleManager.initialCatchUpFuture.whenComplete { case (_, t) => - if (t != null) { - fatal("Encountered an exception when waiting to catch up with KRaft metadata log", t) - shutdown() - } else { - info("Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker") - lifecycleManager.setReadyToUnfence() - } - } - } - - val enableRequestProcessingFuture = socketServer.enableRequestProcessing(authorizerFutures) - // Block here until all the authorizer futures are complete - try { - info("Start processing authorizer futures") - CompletableFuture.allOf(authorizerFutures.values.toSeq: _*).join() - info("End processing authorizer futures") - } catch { - case t: Throwable => throw new RuntimeException("Received a fatal error while " + - "waiting for all of the authorizer futures to be completed.", t) - } - // Wait for all the SocketServer ports to be open, and the Acceptors to be started. - try { - info("Start processing enable request processing future") - enableRequestProcessingFuture.join() - info("End processing enable request processing future") - } catch { - case t: Throwable => throw new RuntimeException("Received a fatal error while " + - "waiting for the SocketServer Acceptors to be started.", t) - } - - _brokerState = BrokerState.RUNNING - shutdownLatch = new CountDownLatch(1) - startupComplete.set(true) - isStartingUp.set(false) - AppInfoParser.registerAppInfo(Server.MetricsPrefix, config.brokerId.toString, metrics, time.milliseconds()) - info("started") - } - } - catch { - case e: Throwable => - fatal("Fatal error during KafkaServer startup. Prepare to shutdown", e) - isStartingUp.set(false) - shutdown() - throw e - } - } - - private def createCurrentControllerIdMetric(): Unit = { - KafkaYammerMetrics.defaultRegistry().newGauge(MetadataLoaderMetrics.CURRENT_CONTROLLER_ID, - () => getCurrentControllerIdFromOldController()) - } - - /** - * Get the current controller ID from the old controller code. - * This is the most up-to-date controller ID we can get when in ZK mode. 
- */ - def getCurrentControllerIdFromOldController(): Int = { - Option(_kafkaController) match { - case None => -1 - case Some(controller) => controller.activeControllerId - } - } - - private def unregisterCurrentControllerIdMetric(): Unit = { - KafkaYammerMetrics.defaultRegistry().removeMetric(MetadataLoaderMetrics.CURRENT_CONTROLLER_ID) - } - - protected def createRemoteLogManager(): Option[RemoteLogManager] = { - if (config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) { - Some(new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.head, clusterId, time, - (tp: TopicPartition) => logManager.getLog(tp).toJava, - (tp: TopicPartition, remoteLogStartOffset: java.lang.Long) => { - logManager.getLog(tp).foreach { log => - log.updateLogStartOffsetFromRemoteTier(remoteLogStartOffset) - } - }, - brokerTopicStats, metrics)) - } else { - None - } - } - - protected def createReplicaManager(isShuttingDown: AtomicBoolean): ReplicaManager = { - val addPartitionsLogContext = new LogContext(s"[AddPartitionsToTxnManager broker=${config.brokerId}]") - val addPartitionsToTxnNetworkClient = NetworkUtils.buildNetworkClient("AddPartitionsManager", config, metrics, time, addPartitionsLogContext) - val addPartitionsToTxnManager = new AddPartitionsToTxnManager( - config, - addPartitionsToTxnNetworkClient, - metadataCache, - // The transaction coordinator is not created at this point so we must - // use a lambda here. - transactionalId => transactionCoordinator.partitionFor(transactionalId), - time - ) - - new ReplicaManager( - metrics = metrics, - config = config, - time = time, - scheduler = kafkaScheduler, - logManager = logManager, - remoteLogManager = remoteLogManagerOpt, - quotaManagers = quotaManagers, - metadataCache = metadataCache, - logDirFailureChannel = logDirFailureChannel, - alterPartitionManager = alterPartitionManager, - brokerTopicStats = brokerTopicStats, - isShuttingDown = isShuttingDown, - zkClient = Some(zkClient), - delayedRemoteFetchPurgatoryParam = None, - threadNamePrefix = threadNamePrefix, - brokerEpochSupplier = brokerEpochSupplier, - addPartitionsToTxnManager = Some(addPartitionsToTxnManager)) - } - - private def initZkClient(time: Time): Unit = { - info(s"Connecting to zookeeper on ${config.zkConnect}") - _zkClient = KafkaZkClient.createZkClient("Kafka server", time, config, zkClientConfig) - _zkClient.createTopLevelPaths() - } - - private def getOrGenerateClusterId(zkClient: KafkaZkClient): String = { - zkClient.getClusterId.getOrElse(zkClient.createOrGetClusterId(CoreUtils.generateUuidAsBase64())) - } - - def createBrokerInfo: BrokerInfo = { - val endPoints = config.effectiveAdvertisedBrokerListeners.map(e => s"${e.host}:${e.port}") - zkClient.getAllBrokersInCluster.filter(_.id != config.brokerId).foreach { broker => - val commonEndPoints = broker.endPoints.map(e => s"${e.host}:${e.port}").intersect(endPoints) - require(commonEndPoints.isEmpty, s"Configured end points ${commonEndPoints.mkString(",")} in" + - s" advertised listeners are already registered by broker ${broker.id}") - } - - val listeners = config.effectiveAdvertisedBrokerListeners.map { endpoint => - if (endpoint.port == 0) - endpoint.copy(port = socketServer.boundPort(endpoint.listenerName)) - else - endpoint - } - - val updatedEndpoints = listeners.map(endpoint => - if (Utils.isBlank(endpoint.host)) - endpoint.copy(host = InetAddress.getLocalHost.getCanonicalHostName) - else - endpoint - ) - - val jmxPort = System.getProperty("com.sun.management.jmxremote.port", "-1").toInt - - 
BrokerInfo( - Broker(config.brokerId, updatedEndpoints, config.rack, brokerFeatures.supportedFeatures), - config.interBrokerProtocolVersion, - jmxPort) - } - - /** - * Performs controlled shutdown - */ - private def controlledShutdown(): Unit = { - val socketTimeoutMs = config.controllerSocketTimeoutMs - - def doControlledShutdown(retries: Int): Boolean = { - if (config.requiresZookeeper && - metadataCache.getControllerId.exists(_.isInstanceOf[KRaftCachedControllerId])) { - info("ZkBroker currently has a KRaft controller. Controlled shutdown will be handled " + - "through broker lifecycle manager") - return true - } - val metadataUpdater = new ManualMetadataUpdater() - val networkClient = { - val channelBuilder = ChannelBuilders.clientChannelBuilder( - config.interBrokerSecurityProtocol, - JaasContext.Type.SERVER, - config, - config.interBrokerListenerName, - config.saslMechanismInterBrokerProtocol, - time, - config.saslInterBrokerHandshakeRequestEnable, - logContext) - val selector = new Selector( - NetworkReceive.UNLIMITED, - config.connectionsMaxIdleMs, - metrics, - time, - "kafka-server-controlled-shutdown", - Map.empty.asJava, - false, - channelBuilder, - logContext - ) - new NetworkClient( - selector, - metadataUpdater, - config.brokerId.toString, - 1, - 0, - 0, - Selectable.USE_DEFAULT_BUFFER_SIZE, - Selectable.USE_DEFAULT_BUFFER_SIZE, - config.requestTimeoutMs, - config.connectionSetupTimeoutMs, - config.connectionSetupTimeoutMaxMs, - time, - false, - new ApiVersions, - logContext, - MetadataRecoveryStrategy.NONE) - } - - var shutdownSucceeded: Boolean = false - - try { - - var remainingRetries = retries - var prevController: Node = null - var ioException = false - - while (!shutdownSucceeded && remainingRetries > 0) { - remainingRetries = remainingRetries - 1 - - // 1. Find the controller and establish a connection to it. - // If the controller id or the broker registration are missing, we sleep and retry (if there are remaining retries) - metadataCache.getControllerId match { - case Some(controllerId: ZkCachedControllerId) => - metadataCache.getAliveBrokerNode(controllerId.id, config.interBrokerListenerName) match { - case Some(broker) => - // if this is the first attempt, if the controller has changed or if an exception was thrown in a previous - // attempt, connect to the most recent controller - if (ioException || broker != prevController) { - - ioException = false - - if (prevController != null) - networkClient.close(prevController.idString) - - prevController = broker - metadataUpdater.setNodes(Seq(prevController).asJava) - } - case None => - info(s"Broker registration for controller $controllerId is not available in the metadata cache") - } - case Some(_: KRaftCachedControllerId) | None => - info("No zk controller present in the metadata cache") - } - - // 2. 
issue a controlled shutdown to the controller - if (prevController != null) { - try { - - if (!NetworkClientUtils.awaitReady(networkClient, prevController, time, socketTimeoutMs)) - throw new SocketTimeoutException(s"Failed to connect within $socketTimeoutMs ms") - - // send the controlled shutdown request - val controlledShutdownApiVersion: Short = - if (config.interBrokerProtocolVersion.isLessThan(IBP_0_9_0)) 0 - else if (config.interBrokerProtocolVersion.isLessThan(IBP_2_2_IV0)) 1 - else if (config.interBrokerProtocolVersion.isLessThan(IBP_2_4_IV1)) 2 - else 3 - - val controlledShutdownRequest = new ControlledShutdownRequest.Builder( - new ControlledShutdownRequestData() - .setBrokerId(config.brokerId) - .setBrokerEpoch(kafkaController.brokerEpoch), - controlledShutdownApiVersion) - val request = networkClient.newClientRequest(prevController.idString, controlledShutdownRequest, - time.milliseconds(), true) - val clientResponse = NetworkClientUtils.sendAndReceive(networkClient, request, time) - - val shutdownResponse = clientResponse.responseBody.asInstanceOf[ControlledShutdownResponse] - if (shutdownResponse.error != Errors.NONE) { - info(s"Controlled shutdown request returned after ${clientResponse.requestLatencyMs}ms " + - s"with error ${shutdownResponse.error}") - } else if (shutdownResponse.data.remainingPartitions.isEmpty) { - shutdownSucceeded = true - info("Controlled shutdown request returned successfully " + - s"after ${clientResponse.requestLatencyMs}ms") - } else { - info(s"Controlled shutdown request returned after ${clientResponse.requestLatencyMs}ms " + - s"with ${shutdownResponse.data.remainingPartitions.size} partitions remaining to move") - - if (isDebugEnabled) { - debug("Remaining partitions to move during controlled shutdown: " + - s"${shutdownResponse.data.remainingPartitions}") - } - } - } - catch { - case ioe: IOException => - ioException = true - warn("Error during controlled shutdown, possibly because leader movement took longer than the " + - s"configured controller.socket.timeout.ms and/or request.timeout.ms: ${ioe.getMessage}") - // ignore and try again - } - } - if (!shutdownSucceeded && remainingRetries > 0) { - Thread.sleep(config.controlledShutdownRetryBackoffMs) - info(s"Retrying controlled shutdown ($remainingRetries retries remaining)") - } - } - } - finally - networkClient.close() - - shutdownSucceeded - } - - if (startupComplete.get() && config.controlledShutdownEnable) { - // We request the controller to do a controlled shutdown. On failure, we backoff for a configured period - // of time and try again for a configured number of retries. If all the attempt fails, we simply force - // the shutdown. - info("Starting controlled shutdown") - - _brokerState = BrokerState.PENDING_CONTROLLED_SHUTDOWN - - if (config.migrationEnabled && lifecycleManager != null && metadataCache.getControllerId.exists(_.isInstanceOf[KRaftCachedControllerId])) { - // For now we'll send the heartbeat with WantShutDown set so the KRaft controller can see a broker - // shutting down without waiting for the heartbeat to time out. 
- info("Notifying KRaft of controlled shutdown") - lifecycleManager.beginControlledShutdown() - try { - lifecycleManager.controlledShutdownFuture.get(5L, TimeUnit.MINUTES) - } catch { - case _: TimeoutException => - error("Timed out waiting for the controller to approve controlled shutdown") - case e: Throwable => - error("Got unexpected exception waiting for controlled shutdown future", e) - } - } - - val shutdownSucceeded = doControlledShutdown(config.controlledShutdownMaxRetries.intValue) - - if (!shutdownSucceeded) - warn("Proceeding to do an unclean shutdown as all the controlled shutdown attempts failed") - } - } - - /** - * Shutdown API for shutting down a single instance of the Kafka server. - * Shuts down the LogManager, the SocketServer and the log cleaner scheduler thread - */ - override def shutdown(timeout: Duration): Unit = { - try { - info("shutting down") - - if (isStartingUp.get) - throw new IllegalStateException("Kafka server is still starting up, cannot shut down!") - - // To ensure correct behavior under concurrent calls, we need to check `shutdownLatch` first since it gets updated - // last in the `if` block. If the order is reversed, we could shutdown twice or leave `isShuttingDown` set to - // `true` at the end of this method. - if (shutdownLatch.getCount > 0 && isShuttingDown.compareAndSet(false, true)) { - CoreUtils.swallow(controlledShutdown(), this) - _brokerState = BrokerState.SHUTTING_DOWN - - if (dynamicConfigManager != null) - CoreUtils.swallow(dynamicConfigManager.shutdown(), this) - - // Stop socket server to stop accepting any more connections and requests. - // Socket server will be shutdown towards the end of the sequence. - if (socketServer != null) - CoreUtils.swallow(socketServer.stopProcessingRequests(), this) - if (dataPlaneRequestHandlerPool != null) - CoreUtils.swallow(dataPlaneRequestHandlerPool.shutdown(), this) - if (controlPlaneRequestHandlerPool != null) - CoreUtils.swallow(controlPlaneRequestHandlerPool.shutdown(), this) - - /** - * We must shutdown the scheduler early because otherwise, the scheduler could touch other - * resources that might have been shutdown and cause exceptions. - * For example, if we didn't shutdown the scheduler first, when LogManager was closing - * partitions one by one, the scheduler might concurrently delete old segments due to - * retention. However, the old segments could have been closed by the LogManager, which would - * cause an IOException and subsequently mark logdir as offline. As a result, the broker would - * not flush the remaining partitions or write the clean shutdown marker. Ultimately, the - * broker would have to take hours to recover the log during restart. 
- */ - if (kafkaScheduler != null) - CoreUtils.swallow(kafkaScheduler.shutdown(), this) - - if (dataPlaneRequestProcessor != null) - CoreUtils.swallow(dataPlaneRequestProcessor.close(), this) - if (controlPlaneRequestProcessor != null) - CoreUtils.swallow(controlPlaneRequestProcessor.close(), this) - authorizer.foreach(Utils.closeQuietly(_, "authorizer")) - if (adminManager != null) - CoreUtils.swallow(adminManager.shutdown(), this) - - if (transactionCoordinator != null) - CoreUtils.swallow(transactionCoordinator.shutdown(), this) - if (groupCoordinator != null) - CoreUtils.swallow(groupCoordinator.shutdown(), this) - - if (tokenManager != null) - CoreUtils.swallow(tokenManager.shutdown(), this) - - if (replicaManager != null) - CoreUtils.swallow(replicaManager.shutdown(), this) - - if (alterPartitionManager != null) - CoreUtils.swallow(alterPartitionManager.shutdown(), this) - - if (forwardingManager.isDefined) - CoreUtils.swallow(forwardingManager.get.close(), this) - - if (clientToControllerChannelManager != null) - CoreUtils.swallow(clientToControllerChannelManager.shutdown(), this) - - if (logManager != null) - CoreUtils.swallow(logManager.shutdown(), this) - - if (kafkaController != null) - CoreUtils.swallow(kafkaController.shutdown(), this) - - // Close remote log manager before stopping processing requests, to give a chance to any - // of its underlying clients (especially in RemoteStorageManager and RemoteLogMetadataManager) - // to close gracefully. - remoteLogManagerOpt.foreach(Utils.closeQuietly(_, "remote log manager")) - - if (featureChangeListener != null) - CoreUtils.swallow(featureChangeListener.close(), this) - - Utils.closeQuietly(zkClient, "zk client") - - if (quotaManagers != null) - CoreUtils.swallow(quotaManagers.shutdown(), this) - - // Even though socket server is stopped much earlier, controller can generate - // response for controlled shutdown request. Shutdown server at the end to - // avoid any failures (e.g. when metrics are recorded) - if (socketServer != null) - CoreUtils.swallow(socketServer.shutdown(), this) - unregisterCurrentControllerIdMetric() - Utils.closeQuietly(metrics, "metrics") - Utils.closeQuietly(brokerTopicStats, "broker topic stats") - - // Clear all reconfigurable instances stored in DynamicBrokerConfig - config.dynamicConfig.clear() - - if (raftManager != null) - CoreUtils.swallow(raftManager.shutdown(), this) - - if (lifecycleManager != null) { - lifecycleManager.close() - } - _brokerState = BrokerState.NOT_RUNNING - - quorumControllerNodeProvider = null - - startupComplete.set(false) - isShuttingDown.set(false) - CoreUtils.swallow(AppInfoParser.unregisterAppInfo(Server.MetricsPrefix, config.brokerId.toString, metrics), this) - shutdownLatch.countDown() - info("shut down completed") - } - } - catch { - case e: Throwable => - fatal("Fatal error during KafkaServer shutdown.", e) - isShuttingDown.set(false) - throw e - } - } - - override def isShutdown(): Boolean = { - BrokerState.fromValue(brokerState.value()) match { - case BrokerState.SHUTTING_DOWN | BrokerState.NOT_RUNNING => true - case _ => false - } - } - - /** - * After calling shutdown(), use this API to wait until the shutdown is complete - */ - override def awaitShutdown(): Unit = shutdownLatch.await() - - def getLogManager: LogManager = logManager - - override def boundPort(listenerName: ListenerName): Int = socketServer.boundPort(listenerName) - - /** Return advertised listeners with the bound port (this may differ from the configured port if the latter is `0`). 
*/ - def advertisedListeners: Seq[EndPoint] = { - config.effectiveAdvertisedBrokerListeners.map { endPoint => - endPoint.copy(port = boundPort(endPoint.listenerName)) - } - } - - /** - * Generates new brokerId if enabled or reads from meta.properties based on following conditions - *
1. config has no broker.id provided and broker id generation is enabled, generates a broker.id based on Zookeeper's sequence
- * 2. config has broker.id and meta.properties contains broker.id if they don't match throws InconsistentBrokerIdException
- * 3. config has broker.id and there is no meta.properties file, creates new meta.properties and stores broker.id
              - * - * @return The brokerId. - */ - private def getOrGenerateBrokerId(metaPropsEnsemble: MetaPropertiesEnsemble): Int = { - if (config.brokerId >= 0) { - config.brokerId - } else if (metaPropsEnsemble.nodeId().isPresent) { - metaPropsEnsemble.nodeId().getAsInt - } else if (config.brokerIdGenerationEnable) { - generateBrokerId() - } else - throw new RuntimeException(s"No broker ID found, and ${config.brokerIdGenerationEnable} is disabled.") - } - - /** - * Return a sequence id generated by updating the broker sequence id path in ZK. - * Users can provide brokerId in the config. To avoid conflicts between ZK generated - * sequence id and configured brokerId, we increment the generated sequence id by KafkaConfig.MaxReservedBrokerId. - */ - private def generateBrokerId(): Int = { - try { - zkClient.generateBrokerSequenceId() + config.maxReservedBrokerId - } catch { - case e: Exception => - error("Failed to generate broker.id due to ", e) - throw new GenerateBrokerIdException("Failed to generate broker.id", e) - } - } -} diff --git a/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala b/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala index 702d0a4ccb8ef..51507e12043c4 100644 --- a/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala +++ b/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala @@ -16,12 +16,14 @@ */ package kafka.server -import kafka.log.AsyncOffsetReadFutureHolder import org.apache.kafka.common.errors.ApiException import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse -import org.apache.kafka.common.record.FileRecords.TimestampAndOffset +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder +import org.apache.kafka.storage.internals.log.OffsetResultHolder.FileRecordsOrError -class ListOffsetsPartitionStatus(val futureHolderOpt: Option[AsyncOffsetReadFutureHolder[Either[Exception, Option[TimestampAndOffset]]]], +import java.util.Optional + +class ListOffsetsPartitionStatus(val futureHolderOpt: Optional[AsyncOffsetReadFutureHolder[FileRecordsOrError]], val lastFetchableOffset: Option[Long], val maybeOffsetsError: Option[ApiException]) { @@ -36,7 +38,7 @@ class ListOffsetsPartitionStatus(val futureHolderOpt: Option[AsyncOffsetReadFutu object ListOffsetsPartitionStatus { def apply(responseOpt: Option[ListOffsetsPartitionResponse], - futureHolderOpt: Option[AsyncOffsetReadFutureHolder[Either[Exception, Option[TimestampAndOffset]]]] = None, + futureHolderOpt: Optional[AsyncOffsetReadFutureHolder[FileRecordsOrError]] = Optional.empty(), lastFetchableOffset: Option[Long] = None, maybeOffsetsError: Option[ApiException] = None): ListOffsetsPartitionStatus = { val status = new ListOffsetsPartitionStatus(futureHolderOpt, lastFetchableOffset, maybeOffsetsError) diff --git a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala index 03258295a41a3..1e2a6cd033e48 100644 --- a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala @@ -118,21 +118,21 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, override def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = { val partition = replicaManager.getPartitionOrException(topicPartition) val logStartOffset = partition.localLogOrException.logStartOffset - val epoch = 
partition.localLogOrException.leaderEpochCache.get.epochForOffset(logStartOffset) + val epoch = partition.localLogOrException.leaderEpochCache.epochForOffset(logStartOffset) new OffsetAndEpoch(logStartOffset, epoch.orElse(0)) } override def fetchLatestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = { val partition = replicaManager.getPartitionOrException(topicPartition) val logEndOffset = partition.localLogOrException.logEndOffset - val epoch = partition.localLogOrException.leaderEpochCache.get.epochForOffset(logEndOffset) + val epoch = partition.localLogOrException.leaderEpochCache.epochForOffset(logEndOffset) new OffsetAndEpoch(logEndOffset, epoch.orElse(0)) } override def fetchEarliestLocalOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = { val partition = replicaManager.getPartitionOrException(topicPartition) val localLogStartOffset = partition.localLogOrException.localLogStartOffset() - val epoch = partition.localLogOrException.leaderEpochCache.get.epochForOffset(localLogStartOffset) + val epoch = partition.localLogOrException.leaderEpochCache.epochForOffset(localLogStartOffset) new OffsetAndEpoch(localLogStartOffset, epoch.orElse(0)) } diff --git a/core/src/main/scala/kafka/server/MetadataCache.scala b/core/src/main/scala/kafka/server/MetadataCache.scala index 4b14f04483eaf..d980271a02968 100755 --- a/core/src/main/scala/kafka/server/MetadataCache.scala +++ b/core/src/main/scala/kafka/server/MetadataCache.scala @@ -17,30 +17,18 @@ package kafka.server -import kafka.server.metadata.{KRaftMetadataCache, ZkMetadataCache} +import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache} import org.apache.kafka.admin.BrokerMetadata -import org.apache.kafka.common.message.{MetadataResponseData, UpdateMetadataRequestData} +import org.apache.kafka.common.message.{DescribeClientQuotasRequestData, DescribeClientQuotasResponseData, DescribeUserScramCredentialsRequestData, DescribeUserScramCredentialsResponseData, MetadataResponseData, UpdateMetadataRequestData} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.{Cluster, Node, TopicPartition, Uuid} -import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.common.{FinalizedFeatures, KRaftVersion, MetadataVersion} import java.util import java.util.function.Supplier import scala.collection._ -/** - * Used to represent the controller id cached in the metadata cache of the broker. This trait is - * extended to represent if the controller is KRaft controller or Zk controller. - */ -sealed trait CachedControllerId { - val id: Int -} - -case class ZkCachedControllerId(id: Int) extends CachedControllerId -case class KRaftCachedControllerId(id: Int) extends CachedControllerId - -trait MetadataCache { +trait MetadataCache extends ConfigRepository { /** * Return topic metadata for a given set of topics and listener. See KafkaApis#handleTopicMetadataRequest for details * on the use of the two boolean flags. 
@@ -75,6 +63,8 @@ trait MetadataCache { def getAliveBrokerNodes(listenerName: ListenerName): Iterable[Node] + def getBrokerNodes(listenerName: ListenerName): Iterable[Node] + def getPartitionInfo(topic: String, partitionId: Int): Option[UpdateMetadataRequestData.UpdateMetadataPartitionState] /** @@ -99,8 +89,6 @@ trait MetadataCache { def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node] - def getControllerId: Option[CachedControllerId] - def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster def contains(topic: String): Boolean @@ -112,17 +100,13 @@ trait MetadataCache { def getRandomAliveBrokerId: Option[Int] def features(): FinalizedFeatures + + def describeClientQuotas(request: DescribeClientQuotasRequestData): DescribeClientQuotasResponseData + + def describeScramCredentials(request: DescribeUserScramCredentialsRequestData): DescribeUserScramCredentialsResponseData } object MetadataCache { - def zkMetadataCache(brokerId: Int, - metadataVersion: MetadataVersion, - brokerFeatures: BrokerFeatures = BrokerFeatures.createEmpty(), - zkMigrationEnabled: Boolean = false) - : ZkMetadataCache = { - new ZkMetadataCache(brokerId, metadataVersion, brokerFeatures, zkMigrationEnabled) - } - def kRaftMetadataCache( brokerId: Int, kraftVersionSupplier: Supplier[KRaftVersion] diff --git a/core/src/main/scala/kafka/server/MetadataSupport.scala b/core/src/main/scala/kafka/server/MetadataSupport.scala deleted file mode 100644 index 335df7c42d74c..0000000000000 --- a/core/src/main/scala/kafka/server/MetadataSupport.scala +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import kafka.controller.KafkaController -import kafka.network.RequestChannel -import kafka.server.metadata.{KRaftMetadataCache, ZkMetadataCache} -import kafka.zk.KafkaZkClient -import org.apache.kafka.common.requests.AbstractResponse - -sealed trait MetadataSupport { - /** - * Provide a uniform way of getting to the ForwardingManager, which is a shared concept - * despite being optional when using ZooKeeper and required when using Raft - */ - val forwardingManager: Option[ForwardingManager] - - /** - * Return this instance downcast for use with ZooKeeper - * - * @param createException function to create an exception to throw - * @return this instance downcast for use with ZooKeeper - * @throws Exception if this instance is not for ZooKeeper - */ - def requireZkOrThrow(createException: => Exception): ZkSupport - - /** - * Return this instance downcast for use with Raft - * - * @param createException function to create an exception to throw - * @return this instance downcast for use with Raft - * @throws Exception if this instance is not for Raft - */ - def requireRaftOrThrow(createException: => Exception): RaftSupport - - /** - * Confirm that this instance is consistent with the given config - * - * @param config the config to check for consistency with this instance - * @throws IllegalStateException if there is an inconsistency (Raft for a ZooKeeper config or vice-versa) - */ - def ensureConsistentWith(config: KafkaConfig): Unit - - def canForward(): Boolean - - def maybeForward( - request: RequestChannel.Request, - handler: RequestChannel.Request => Unit, - responseCallback: Option[AbstractResponse] => Unit - ): Unit = { - if (!request.isForwarded && canForward()) { - forwardingManager.get.forwardRequest(request, responseCallback) - } else { - handler(request) - } - } -} - -case class ZkSupport(adminManager: ZkAdminManager, - controller: KafkaController, - zkClient: KafkaZkClient, - forwardingManager: Option[ForwardingManager], - metadataCache: ZkMetadataCache, - brokerEpochManager: ZkBrokerEpochManager) extends MetadataSupport { - override def requireZkOrThrow(createException: => Exception): ZkSupport = this - - override def requireRaftOrThrow(createException: => Exception): RaftSupport = throw createException - - override def ensureConsistentWith(config: KafkaConfig): Unit = { - if (!config.requiresZookeeper) { - throw new IllegalStateException("Config specifies Raft but metadata support instance is for ZooKeeper") - } - } - - override def canForward(): Boolean = forwardingManager.isDefined && (!controller.isActive) - - def isBrokerEpochStale(brokerEpochInRequest: Long, isKRaftControllerRequest: Boolean): Boolean = { - brokerEpochManager.isBrokerEpochStale(brokerEpochInRequest, isKRaftControllerRequest) - } -} - -case class RaftSupport(fwdMgr: ForwardingManager, - metadataCache: KRaftMetadataCache) - extends MetadataSupport { - override val forwardingManager: Option[ForwardingManager] = Some(fwdMgr) - override def requireZkOrThrow(createException: => Exception): ZkSupport = throw createException - override def requireRaftOrThrow(createException: => Exception): RaftSupport = this - - override def ensureConsistentWith(config: KafkaConfig): Unit = { - if (config.requiresZookeeper) { - throw new IllegalStateException("Config specifies ZooKeeper but metadata support instance is for Raft") - } - } - - override def canForward(): Boolean = true -} diff --git a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala 
b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala index 846bbe58ff9e0..c353a82550316 100644 --- a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala +++ b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala @@ -17,13 +17,9 @@ package kafka.server -import java.util.concurrent.LinkedBlockingDeque -import java.util.concurrent.atomic.AtomicReference import kafka.raft.RaftManager -import kafka.server.metadata.ZkMetadataCache import kafka.utils.Logging import org.apache.kafka.clients._ -import org.apache.kafka.common.{Node, Reconfigurable} import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network._ import org.apache.kafka.common.protocol.Errors @@ -31,11 +27,14 @@ import org.apache.kafka.common.requests.AbstractRequest import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.{LogContext, Time} +import org.apache.kafka.common.{Node, Reconfigurable} import org.apache.kafka.server.common.{ApiMessageAndVersion, ControllerRequestCompletionHandler, NodeToControllerChannelManager} import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} import java.util import java.util.Optional +import java.util.concurrent.LinkedBlockingDeque +import java.util.concurrent.atomic.AtomicReference import scala.collection.Seq import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.{RichOption, RichOptionalInt} @@ -44,45 +43,13 @@ case class ControllerInformation( node: Option[Node], listenerName: ListenerName, securityProtocol: SecurityProtocol, - saslMechanism: String, - isZkController: Boolean + saslMechanism: String ) trait ControllerNodeProvider { def getControllerInfo(): ControllerInformation } -class MetadataCacheControllerNodeProvider( - val metadataCache: ZkMetadataCache, - val config: KafkaConfig, - val quorumControllerNodeProvider: () => Option[ControllerInformation] -) extends ControllerNodeProvider { - - private val zkControllerListenerName = config.controlPlaneListenerName.getOrElse(config.interBrokerListenerName) - private val zkControllerSecurityProtocol = config.controlPlaneSecurityProtocol.getOrElse(config.interBrokerSecurityProtocol) - private val zkControllerSaslMechanism = config.saslMechanismInterBrokerProtocol - - val emptyZkControllerInfo = ControllerInformation( - None, - zkControllerListenerName, - zkControllerSecurityProtocol, - zkControllerSaslMechanism, - isZkController = true) - - override def getControllerInfo(): ControllerInformation = { - metadataCache.getControllerId.map { - case ZkCachedControllerId(id) => ControllerInformation( - metadataCache.getAliveBrokerNode(id, zkControllerListenerName), - zkControllerListenerName, - zkControllerSecurityProtocol, - zkControllerSaslMechanism, - isZkController = true) - case KRaftCachedControllerId(_) => - quorumControllerNodeProvider.apply().getOrElse(emptyZkControllerInfo) - }.getOrElse(emptyZkControllerInfo) - } -} - object RaftControllerNodeProvider { def apply( raftManager: RaftManager[ApiMessageAndVersion], @@ -115,7 +82,7 @@ class RaftControllerNodeProvider( override def getControllerInfo(): ControllerInformation = ControllerInformation(raftManager.leaderAndEpoch.leaderId.toScala.flatMap(idToNode), - listenerName, securityProtocol, saslMechanism, isZkController = false) + listenerName, securityProtocol, saslMechanism) } /** @@ -157,7 +124,6 @@ class NodeToControllerChannelManagerImpl( controllerInfo.listenerName, 
controllerInfo.saslMechanism, time, - config.saslInterBrokerHandshakeRequestEnable, logContext ) channelBuilder match { @@ -199,8 +165,6 @@ class NodeToControllerChannelManagerImpl( val controllerInformation = controllerNodeProvider.getControllerInfo() new NodeToControllerRequestThread( buildNetworkClient(controllerInformation), - controllerInformation.isZkController, - buildNetworkClient, manualMetadataUpdater, controllerNodeProvider, config, @@ -244,8 +208,6 @@ case class NodeToControllerQueueItem( class NodeToControllerRequestThread( initialNetworkClient: KafkaClient, - var isNetworkClientForZkController: Boolean, - networkClientFactory: ControllerInformation => KafkaClient, metadataUpdater: ManualMetadataUpdater, controllerNodeProvider: ControllerNodeProvider, config: KafkaConfig, @@ -262,22 +224,6 @@ class NodeToControllerRequestThread( this.logIdent = logPrefix - private def maybeResetNetworkClient(controllerInformation: ControllerInformation): Unit = { - if (isNetworkClientForZkController != controllerInformation.isZkController) { - debug("Controller changed to " + (if (isNetworkClientForZkController) "kraft" else "zk") + " mode. " + - s"Resetting network client with new controller information : ${controllerInformation}") - // Close existing network client. - val oldClient = networkClient - oldClient.initiateClose() - oldClient.close() - - isNetworkClientForZkController = controllerInformation.isZkController - updateControllerAddress(controllerInformation.node.orNull) - controllerInformation.node.foreach(n => metadataUpdater.setNodes(Seq(n).asJava)) - networkClient = networkClientFactory(controllerInformation) - } - } - private val requestQueue = new LinkedBlockingDeque[NodeToControllerQueueItem]() private val activeController = new AtomicReference[Node](null) @@ -371,19 +317,13 @@ class NodeToControllerRequestThread( override def doWork(): Unit = { val controllerInformation = controllerNodeProvider.getControllerInfo() - maybeResetNetworkClient(controllerInformation) if (activeControllerAddress().isDefined) { super.pollOnce(Long.MaxValue) } else { debug("Controller isn't cached, looking for local metadata changes") controllerInformation.node match { case Some(controllerNode) => - val controllerType = if (controllerInformation.isZkController) { - "ZK" - } else { - "KRaft" - } - info(s"Recorded new $controllerType controller, from now on will use node $controllerNode") + info(s"Recorded new KRaft controller, from now on will use node $controllerNode") updateControllerAddress(controllerNode) metadataUpdater.setNodes(Seq(controllerNode).asJava) case None => diff --git a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala index a054e2954d4e7..94bb6f5140dca 100644 --- a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala @@ -29,7 +29,6 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEnd import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse, OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} -import org.apache.kafka.server.common.MetadataVersion.IBP_0_10_1_IV2 import org.apache.kafka.server.network.BrokerEndPoint import scala.jdk.CollectionConverters._ @@ -64,7 +63,7 @@ class RemoteLeaderEndPoint(logPrefix: String, private val maxBytes 
= brokerConfig.replicaFetchResponseMaxBytes private val fetchSize = brokerConfig.replicaFetchMaxBytes - override def isTruncationOnFetchSupported: Boolean = metadataVersionSupplier().isTruncationOnFetchSupported + override def isTruncationOnFetchSupported: Boolean = true override def initiateClose(): Unit = blockingSender.initiateClose() @@ -123,11 +122,7 @@ class RemoteLeaderEndPoint(logPrefix: String, .partitions.asScala.find(_.partitionIndex == topicPartition.partition).get Errors.forCode(responsePartition.errorCode) match { - case Errors.NONE => - if (metadataVersion.isAtLeast(IBP_0_10_1_IV2)) - new OffsetAndEpoch(responsePartition.offset, responsePartition.leaderEpoch) - else - new OffsetAndEpoch(responsePartition.oldStyleOffsets.get(0), responsePartition.leaderEpoch) + case Errors.NONE => new OffsetAndEpoch(responsePartition.offset, responsePartition.leaderEpoch) case error => throw error.exception } } @@ -148,8 +143,7 @@ class RemoteLeaderEndPoint(logPrefix: String, topic.partitions.add(epochData) } - val epochRequest = OffsetsForLeaderEpochRequest.Builder.forFollower( - metadataVersionSupplier().offsetForLeaderEpochRequestVersion, topics, brokerConfig.brokerId) + val epochRequest = OffsetsForLeaderEpochRequest.Builder.forFollower(topics, brokerConfig.brokerId) debug(s"Sending offset for leader epoch request $epochRequest") try { diff --git a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala index 403cb34180d3a..56492de34856f 100644 --- a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala @@ -169,8 +169,6 @@ class ReplicaAlterLogDirsThread(name: String, } } - override protected val isOffsetForLeaderEpochSupported: Boolean = true - /** * Truncate the log for each partition based on current replica's returned epoch and offset. 
* diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index bb073682bdfb6..7f0c6d41dbdf6 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -47,8 +47,6 @@ class ReplicaFetcherThread(name: String, // Visible for testing private[server] val partitionsWithNewHighWatermark = mutable.Buffer[TopicPartition]() - override protected val isOffsetForLeaderEpochSupported: Boolean = metadataVersionSupplier().isOffsetForLeaderEpochSupported - override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = { replicaMgr.localLogOrException(topicPartition).latestEpoch } diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index b986ee20cdba6..9504731f6839d 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -18,16 +18,14 @@ package kafka.server import com.yammer.metrics.core.Meter import kafka.cluster.{Partition, PartitionListener} -import kafka.controller.{KafkaController, StateChangeLogger} +import kafka.controller.StateChangeLogger import kafka.log.remote.RemoteLogManager -import kafka.log.{LogManager, OffsetResultHolder, UnifiedLog} +import kafka.log.{LogManager, UnifiedLog} import kafka.server.HostedPartition.Online import kafka.server.QuotaFactory.QuotaManagers import kafka.server.ReplicaManager.{AtMinIsrPartitionCountMetricName, FailedIsrUpdatesPerSecMetricName, IsrExpandsPerSecMetricName, IsrShrinksPerSecMetricName, LeaderCountMetricName, OfflineReplicaCountMetricName, PartitionCountMetricName, PartitionsWithLateTransactionsCountMetricName, ProducerIdCountMetricName, ReassigningPartitionsMetricName, UnderMinIsrPartitionCountMetricName, UnderReplicatedPartitionsMetricName, createLogReadResult, isListOffsetsTimestampUnsupported} -import kafka.server.metadata.ZkMetadataCache import kafka.server.share.DelayedShareFetch import kafka.utils._ -import kafka.zk.KafkaZkClient import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult @@ -51,13 +49,12 @@ import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.{Exit, Time} -import org.apache.kafka.common.{ElectionType, IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.image.{LocalReplicaChanges, MetadataImage, TopicsDelta} import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.metadata.LeaderConstants.NO_LEADER import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, common} import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition, TopicOptionalIdPartition} -import org.apache.kafka.server.common.MetadataVersion._ import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.purgatory.{DelayedOperationKey, DelayedOperationPurgatory, TopicPartitionOperationKey} @@ -65,7 +62,7 @@ import org.apache.kafka.server.share.fetch.{DelayedShareFetchKey, DelayedShareFe import 
org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} import org.apache.kafka.server.util.{Scheduler, ShutdownableThread} import org.apache.kafka.storage.internals.checkpoint.{LazyOffsetCheckpoints, OffsetCheckpointFile, OffsetCheckpoints} -import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogReadInfo, RecordValidationException, RemoteLogReadResult, RemoteStorageFetchInfo, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogReadInfo, OffsetResultHolder, RecordValidationException, RemoteLogReadResult, RemoteStorageFetchInfo, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.io.File @@ -278,7 +275,6 @@ class ReplicaManager(val config: KafkaConfig, val alterPartitionManager: AlterPartitionManager, val brokerTopicStats: BrokerTopicStats = new BrokerTopicStats(), val isShuttingDown: AtomicBoolean = new AtomicBoolean(false), - val zkClient: Option[KafkaZkClient] = None, delayedProducePurgatoryParam: Option[DelayedOperationPurgatory[DelayedProduce]] = None, delayedFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedFetch]] = None, delayedDeleteRecordsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedDeleteRecords]] = None, @@ -321,7 +317,7 @@ class ReplicaManager(val config: KafkaConfig, config.shareGroupConfig.shareFetchPurgatoryPurgeIntervalRequests)) /* epoch of the controller that last changed the leader */ - @volatile private[server] var controllerEpoch: Int = KafkaController.InitialControllerEpoch + @volatile private[server] var controllerEpoch: Int = 0 protected val localBrokerId = config.brokerId protected val allPartitions = new Pool[TopicPartition, HostedPartition]( valueFactory = Some(tp => HostedPartition.Online(Partition(tp, time, this))) @@ -340,13 +336,9 @@ class ReplicaManager(val config: KafkaConfig, private var logDirFailureHandler: LogDirFailureHandler = _ - private class LogDirFailureHandler(name: String, haltBrokerOnDirFailure: Boolean) extends ShutdownableThread(name) { + private class LogDirFailureHandler(name: String) extends ShutdownableThread(name) { override def doWork(): Unit = { val newOfflineLogDir = logDirFailureChannel.takeNextOfflineLogDir() - if (haltBrokerOnDirFailure) { - fatal(s"Halting broker because dir $newOfflineLogDir is offline") - Exit.halt(1) - } handleLogDirFailure(newOfflineLogDir) } } @@ -412,11 +404,7 @@ class ReplicaManager(val config: KafkaConfig, scheduler.schedule("isr-expiration", () => maybeShrinkIsr(), 0L, config.replicaLagTimeMaxMs / 2) scheduler.schedule("shutdown-idle-replica-alter-log-dirs-thread", () => shutdownIdleReplicaAlterLogDirsThread(), 0L, 10000L) - // If inter-broker protocol (IBP) < 1.0, the controller will send LeaderAndIsrRequest V0 which does not include isNew field. - // In this case, the broker receiving the request cannot determine whether it is safe to create a partition if a log directory has failed. 
- // Thus, we choose to halt the broker on any log directory failure if IBP < 1.0 - val haltBrokerOnFailure = metadataCache.metadataVersion().isLessThan(IBP_1_0_IV0) - logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler", haltBrokerOnFailure) + logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler") logDirFailureHandler.start() addPartitionsToTxnManager.foreach(_.start()) remoteLogManager.foreach(rlm => rlm.setDelayedOperationPurgatory(delayedRemoteListOffsetsPurgatory)) @@ -1141,7 +1129,7 @@ class ReplicaManager(val config: KafkaConfig, callback((errors ++ verificationErrors, verificationGuards.toMap)) } - addPartitionsToTxnManager.foreach(_.verifyTransaction( + addPartitionsToTxnManager.foreach(_.addOrVerifyTransaction( transactionalId = transactionalId, producerId = producerId, producerEpoch = producerEpoch, @@ -1172,11 +1160,11 @@ class ReplicaManager(val config: KafkaConfig, * Delete records on leader replicas of the partition, and wait for delete records operation be propagated to other replicas; * the callback function will be triggered either when timeout or logStartOffset of all live replicas have reached the specified offset */ - private def deleteRecordsOnLocalLog(offsetPerPartition: Map[TopicPartition, Long]): Map[TopicPartition, LogDeleteRecordsResult] = { + private def deleteRecordsOnLocalLog(offsetPerPartition: Map[TopicPartition, Long], allowInternalTopicDeletion: Boolean): Map[TopicPartition, LogDeleteRecordsResult] = { trace("Delete records on local logs to offsets [%s]".format(offsetPerPartition)) offsetPerPartition.map { case (topicPartition, requestedOffset) => - // reject delete records operation on internal topics - if (Topic.isInternal(topicPartition.topic)) { + // reject delete records operation for internal topics unless allowInternalTopicDeletion is true + if (Topic.isInternal(topicPartition.topic) && !allowInternalTopicDeletion) { (topicPartition, LogDeleteRecordsResult(-1L, -1L, Some(new InvalidTopicException(s"Cannot delete records of internal topic ${topicPartition.topic}")))) } else { try { @@ -1369,9 +1357,10 @@ class ReplicaManager(val config: KafkaConfig, def deleteRecords(timeout: Long, offsetPerPartition: Map[TopicPartition, Long], - responseCallback: Map[TopicPartition, DeleteRecordsPartitionResult] => Unit): Unit = { + responseCallback: Map[TopicPartition, DeleteRecordsPartitionResult] => Unit, + allowInternalTopicDeletion: Boolean = false): Unit = { val timeBeforeLocalDeleteRecords = time.milliseconds - val localDeleteRecordsResults = deleteRecordsOnLocalLog(offsetPerPartition) + val localDeleteRecordsResults = deleteRecordsOnLocalLog(offsetPerPartition, allowInternalTopicDeletion) debug("Delete records on local log in %d ms".format(time.milliseconds - timeBeforeLocalDeleteRecords)) val deleteRecordsStatus = localDeleteRecordsResults.map { case (topicPartition, result) => @@ -1537,31 +1526,35 @@ class ReplicaManager(val config: KafkaConfig, if (partition.currentLeaderEpoch == ListOffsetsResponse.UNKNOWN_EPOCH) Optional.empty() else Optional.of(partition.currentLeaderEpoch), fetchOnlyFromLeader) - val status = resultHolder match { - case OffsetResultHolder(Some(found), _) => + val status = { + if (resultHolder.timestampAndOffsetOpt().isPresent) { // This case is for normal topic that does not have remote storage. 
+ val timestampAndOffsetOpt = resultHolder.timestampAndOffsetOpt.get var partitionResponse = buildErrorResponse(Errors.NONE, partition) - if (resultHolder.lastFetchableOffset.isDefined && - found.offset >= resultHolder.lastFetchableOffset.get) { + if (resultHolder.lastFetchableOffset.isPresent && + timestampAndOffsetOpt.offset >= resultHolder.lastFetchableOffset.get) { resultHolder.maybeOffsetsError.map(e => throw e) } else { partitionResponse = new ListOffsetsPartitionResponse() .setPartitionIndex(partition.partitionIndex) .setErrorCode(Errors.NONE.code) - .setTimestamp(found.timestamp) - .setOffset(found.offset) - if (found.leaderEpoch.isPresent && version >= 4) - partitionResponse.setLeaderEpoch(found.leaderEpoch.get) + .setTimestamp(timestampAndOffsetOpt.timestamp) + .setOffset(timestampAndOffsetOpt.offset) + if (timestampAndOffsetOpt.leaderEpoch.isPresent && version >= 4) + partitionResponse.setLeaderEpoch(timestampAndOffsetOpt.leaderEpoch.get) } ListOffsetsPartitionStatus(Some(partitionResponse)) - case OffsetResultHolder(None, None) => + } else if (resultHolder.timestampAndOffsetOpt.isEmpty && resultHolder.futureHolderOpt.isEmpty) { // This is an empty offset response scenario resultHolder.maybeOffsetsError.map(e => throw e) ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.NONE, partition))) - case OffsetResultHolder(None, Some(futureHolder)) => + } else if (resultHolder.timestampAndOffsetOpt.isEmpty && resultHolder.futureHolderOpt.isPresent) { // This case is for topic enabled with remote storage and we want to search the timestamp in // remote storage using async fashion. - ListOffsetsPartitionStatus(None, Some(futureHolder), resultHolder.lastFetchableOffset, resultHolder.maybeOffsetsError) + ListOffsetsPartitionStatus(None, resultHolder.futureHolderOpt(), resultHolder.lastFetchableOffset.toScala.map(_.longValue()), resultHolder.maybeOffsetsError.toScala) + } else { + throw new IllegalStateException(s"Unexpected result holder state $resultHolder") + } } statusByPartition += topicPartition -> status } catch { @@ -1612,7 +1605,7 @@ class ReplicaManager(val config: KafkaConfig, } private def delayedRemoteListOffsetsRequired(responseByPartition: Map[TopicPartition, ListOffsetsPartitionStatus]): Boolean = { - responseByPartition.values.exists(status => status.futureHolderOpt.isDefined) + responseByPartition.values.exists(status => status.futureHolderOpt.isPresent) } def fetchOffsetForTimestamp(topicPartition: TopicPartition, @@ -1920,7 +1913,7 @@ class ReplicaManager(val config: KafkaConfig, val offset = fetchInfo.fetchOffset // In case of offset out of range errors, handle it for tiered storage only if all the below conditions are true. // 1) remote log manager is enabled and it is available - // 2) `log` instance should not be null here as that would have been caught earlier with NotLeaderForPartitionException or ReplicaNotAvailableException. + // 2) `log` instance should not be null here as that would have been caught earlier with NotLeaderOrFollowerException or ReplicaNotAvailableException. 
// 3) fetch offset is within the offset range of the remote storage layer if (remoteLogManager.isDefined && log != null && log.remoteLogEnabled() && log.logStartOffset <= offset && offset < log.localLogStartOffset()) @@ -2041,25 +2034,6 @@ class ReplicaManager(val config: KafkaConfig, def getLogConfig(topicPartition: TopicPartition): Option[LogConfig] = localLog(topicPartition).map(_.config) - def getMagic(topicPartition: TopicPartition): Option[Byte] = getLogConfig(topicPartition).map(_.recordVersion.value) - - def maybeUpdateMetadataCache(correlationId: Int, updateMetadataRequest: UpdateMetadataRequest) : Seq[TopicPartition] = { - replicaStateChangeLock synchronized { - if (updateMetadataRequest.controllerEpoch < controllerEpoch) { - val stateControllerEpochErrorMessage = s"Received update metadata request with correlation id $correlationId " + - s"from an old controller ${updateMetadataRequest.controllerId} with epoch ${updateMetadataRequest.controllerEpoch}. " + - s"Latest known controller epoch is $controllerEpoch" - stateChangeLogger.warn(stateControllerEpochErrorMessage) - throw new ControllerMovedException(stateChangeLogger.messageWithPrefix(stateControllerEpochErrorMessage)) - } else { - val zkMetadataCache = metadataCache.asInstanceOf[ZkMetadataCache] - val deletedPartitions = zkMetadataCache.updateMetadata(correlationId, updateMetadataRequest) - controllerEpoch = updateMetadataRequest.controllerEpoch - deletedPartitions - } - } - } - def becomeLeaderOrFollower(correlationId: Int, leaderAndIsrRequest: LeaderAndIsrRequest, onLeadershipChange: (Iterable[Partition], Iterable[Partition]) => Unit): LeaderAndIsrResponse = { @@ -2092,24 +2066,6 @@ class ReplicaManager(val config: KafkaConfig, s"Latest known controller epoch is $controllerEpoch") leaderAndIsrRequest.getErrorResponse(0, Errors.STALE_CONTROLLER_EPOCH.exception) } else { - // In migration mode, reconcile missed topic deletions when handling full LISR from KRaft controller. - // LISR "type" field was previously unspecified (0), so if we see it set to Full (2), then we know the - // request came from a KRaft controller. - // - // Note that we have to do this first, before anything else, since topics may be recreated with the same - // name, but a different ID. And in that case, we need to move aside the old version of those topics - // (with the obsolete topic ID) before doing anything else. - if (config.migrationEnabled && - leaderAndIsrRequest.isKRaftController && - leaderAndIsrRequest.requestType() == AbstractControlRequest.Type.FULL) - { - val strays = LogManager.findStrayReplicas(localBrokerId, leaderAndIsrRequest, logManager.allLogs) - stateChangeLogger.info(s"While handling full LeaderAndIsr request from KRaft " + - s"controller $controllerId with correlation id $correlationId, found ${strays.size} " + - "stray partition(s).") - updateStrayLogs(strays) - } - val responseMap = new mutable.HashMap[TopicPartition, Errors] controllerEpoch = leaderAndIsrRequest.controllerEpoch @@ -2455,6 +2411,7 @@ class ReplicaManager(val config: KafkaConfig, if (metadataCache.hasAliveBroker(newLeaderBrokerId)) { // Only change partition state when the leader is available if (partition.makeFollower(partitionState, highWatermarkCheckpoints, topicIds(partitionState.topicName))) { + // Skip invoking onBecomingFollower listeners as the listeners are not registered for zk-based features. partitionsToMakeFollower += partition } } else { @@ -2576,7 +2533,7 @@ class ReplicaManager(val config: KafkaConfig, * OffsetForLeaderEpoch request. 
*/ protected def initialFetchOffset(log: UnifiedLog): Long = { - if (metadataCache.metadataVersion().isTruncationOnFetchSupported && log.latestEpoch.nonEmpty) + if (log.latestEpoch.nonEmpty) log.logEndOffset else log.highWatermark @@ -2669,25 +2626,17 @@ class ReplicaManager(val config: KafkaConfig, s"for partitions ${partitionsWithOfflineFutureReplica.mkString(",")} because they are in the failed log directory $dir.") } logManager.handleLogDirFailure(dir) - if (dir == new File(config.metadataLogDir).getAbsolutePath && (config.processRoles.nonEmpty || config.migrationEnabled)) { + if (dir == new File(config.metadataLogDir).getAbsolutePath && config.processRoles.nonEmpty) { fatal(s"Shutdown broker because the metadata log dir $dir has failed") Exit.halt(1) } if (notifyController) { - if (config.migrationEnabled) { - fatal(s"Shutdown broker because some log directory has failed during migration mode: $dir") - Exit.halt(1) - } - if (zkClient.isEmpty) { - if (uuid.isDefined) { - directoryEventHandler.handleFailure(uuid.get) - } else { - fatal(s"Unable to propagate directory failure disabled because directory $dir has no UUID") - Exit.halt(1) - } + if (uuid.isDefined) { + directoryEventHandler.handleFailure(uuid.get) } else { - zkClient.get.propagateLogDirEvent(localBrokerId) + fatal(s"Unable to propagate directory failure disabled because directory $dir has no UUID") + Exit.halt(1) } } warn(s"Stopped serving replicas in dir $dir") @@ -2790,47 +2739,6 @@ class ReplicaManager(val config: KafkaConfig, } } - def electLeaders( - controller: KafkaController, - partitions: Set[TopicPartition], - electionType: ElectionType, - responseCallback: Map[TopicPartition, ApiError] => Unit, - requestTimeout: Int - ): Unit = { - - val deadline = time.milliseconds() + requestTimeout - - def electionCallback(results: Map[TopicPartition, Either[ApiError, Int]]): Unit = { - val expectedLeaders = mutable.Map.empty[TopicPartition, Int] - val failures = mutable.Map.empty[TopicPartition, ApiError] - results.foreach { - case (partition, Right(leader)) => expectedLeaders += partition -> leader - case (partition, Left(error)) => failures += partition -> error - } - if (expectedLeaders.nonEmpty) { - val watchKeys = expectedLeaders.iterator.map { - case (tp, _) => new TopicPartitionOperationKey(tp) - }.toList.asJava - - delayedElectLeaderPurgatory.tryCompleteElseWatch( - new DelayedElectLeader( - math.max(0, deadline - time.milliseconds()), - expectedLeaders, - failures, - this, - responseCallback - ), - watchKeys - ) - } else { - // There are no partitions actually being elected, so return immediately - responseCallback(failures) - } - } - - controller.electLeaders(partitions, electionType, electionCallback) - } - def activeProducerState(requestPartition: TopicPartition): DescribeProducersResponseData.PartitionResponse = { getPartitionOrError(requestPartition) match { case Left(error) => new DescribeProducersResponseData.PartitionResponse() @@ -2925,7 +2833,7 @@ class ReplicaManager(val config: KafkaConfig, val leaderChangedPartitions = new mutable.HashSet[Partition] val followerChangedPartitions = new mutable.HashSet[Partition] if (!localChanges.leaders.isEmpty) { - applyLocalLeadersDelta(leaderChangedPartitions, newImage, delta, lazyOffsetCheckpoints, localChanges.leaders.asScala, localChanges.directoryIds.asScala) + applyLocalLeadersDelta(leaderChangedPartitions, delta, lazyOffsetCheckpoints, localChanges.leaders.asScala, localChanges.directoryIds.asScala) } if (!localChanges.followers.isEmpty) { 
applyLocalFollowersDelta(followerChangedPartitions, newImage, delta, lazyOffsetCheckpoints, localChanges.followers.asScala, localChanges.directoryIds.asScala) @@ -2949,7 +2857,6 @@ class ReplicaManager(val config: KafkaConfig, private def applyLocalLeadersDelta( changedPartitions: mutable.Set[Partition], - newImage: MetadataImage, delta: TopicsDelta, offsetCheckpoints: OffsetCheckpoints, localLeaders: mutable.Map[TopicPartition, LocalReplicaChanges.PartitionInfo], @@ -3013,6 +2920,8 @@ class ReplicaManager(val config: KafkaConfig, // where this broker is not in the ISR are stopped. partitionsToStopFetching.put(tp, false) } else if (isNewLeaderEpoch) { + // Invoke the follower transition listeners for the partition. + partition.invokeOnBecomingFollowerListeners() // Otherwise, fetcher is restarted if the leader epoch has changed. partitionsToStartFetching.put(tp, partition) } diff --git a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala index dbc824021bea6..8229607b5be3d 100644 --- a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala +++ b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala @@ -17,46 +17,14 @@ package kafka.server -import kafka.cluster.Partition -import kafka.coordinator.transaction.TransactionCoordinator import kafka.network.RequestChannel import kafka.server.QuotaFactory.QuotaManagers import org.apache.kafka.common.errors.ClusterAuthorizationException -import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.network.Send import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse} import org.apache.kafka.common.utils.Time -import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.server.quota.ThrottleCallback -import java.util.OptionalInt - -object RequestHandlerHelper { - - def onLeadershipChange(groupCoordinator: GroupCoordinator, - txnCoordinator: TransactionCoordinator, - updatedLeaders: Iterable[Partition], - updatedFollowers: Iterable[Partition]): Unit = { - // for each new leader or follower, call coordinator to handle consumer group migration. 
- // this callback is invoked under the replica state change lock to ensure proper order of - // leadership changes - updatedLeaders.foreach { partition => - if (partition.topic == Topic.GROUP_METADATA_TOPIC_NAME) - groupCoordinator.onElection(partition.partitionId, partition.getLeaderEpoch) - else if (partition.topic == Topic.TRANSACTION_STATE_TOPIC_NAME) - txnCoordinator.onElection(partition.partitionId, partition.getLeaderEpoch) - } - - updatedFollowers.foreach { partition => - if (partition.topic == Topic.GROUP_METADATA_TOPIC_NAME) - groupCoordinator.onResignation(partition.partitionId, OptionalInt.of(partition.getLeaderEpoch)) - else if (partition.topic == Topic.TRANSACTION_STATE_TOPIC_NAME) - txnCoordinator.onResignation(partition.partitionId, Some(partition.getLeaderEpoch)) - } - } - -} - class RequestHandlerHelper( requestChannel: RequestChannel, quotas: QuotaManagers, diff --git a/core/src/main/scala/kafka/server/SharedServer.scala b/core/src/main/scala/kafka/server/SharedServer.scala index a465c38507791..66af33c169793 100644 --- a/core/src/main/scala/kafka/server/SharedServer.scala +++ b/core/src/main/scala/kafka/server/SharedServer.scala @@ -17,9 +17,10 @@ package kafka.server +import kafka.metrics.KafkaMetricsReporter import kafka.raft.KafkaRaftManager import kafka.server.Server.MetricsPrefix -import kafka.utils.{CoreUtils, Logging} +import kafka.utils.{CoreUtils, Logging, VerifiableProperties} import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.utils.{AppInfoParser, LogContext, Time, Utils} @@ -101,6 +102,9 @@ class SharedServer( val faultHandlerFactory: FaultHandlerFactory, val socketFactory: ServerSocketFactory ) extends Logging { + KafkaMetricsReporter.startReporters(VerifiableProperties(sharedServerConfig.originals)) + KafkaYammerMetrics.INSTANCE.configure(sharedServerConfig.originals) + private val logContext: LogContext = new LogContext(s"[SharedServer id=${sharedServerConfig.nodeId}] ") this.logIdent = logContext.logPrefix private var started = false @@ -264,7 +268,7 @@ class SharedServer( // This is only done in tests. metrics = new Metrics() } - sharedServerConfig.dynamicConfig.initialize(zkClientOpt = None, clientMetricsReceiverPluginOpt = None) + sharedServerConfig.dynamicConfig.initialize(clientMetricsReceiverPluginOpt = None) if (sharedServerConfig.processRoles.contains(ProcessRole.BrokerRole)) { brokerMetrics = new BrokerServerMetrics(metrics) diff --git a/core/src/main/scala/kafka/server/ZkAdminManager.scala b/core/src/main/scala/kafka/server/ZkAdminManager.scala deleted file mode 100644 index 99ff17289d314..0000000000000 --- a/core/src/main/scala/kafka/server/ZkAdminManager.scala +++ /dev/null @@ -1,1046 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server - -import java.util -import java.util.Properties -import kafka.common.TopicAlreadyMarkedForDeletionException -import kafka.server.ConfigAdminManager.{prepareIncrementalConfigs, toLoggableProps} -import kafka.server.metadata.ZkConfigRepository -import kafka.utils._ -import kafka.zk.{AdminZkClient, KafkaZkClient} -import org.apache.kafka.admin.AdminUtils -import org.apache.kafka.clients.admin.{AlterConfigOp, ScramMechanism} -import org.apache.kafka.common.Uuid -import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource} -import org.apache.kafka.common.errors.ThrottlingQuotaExceededException -import org.apache.kafka.common.errors.{ApiException, InvalidConfigurationException, InvalidPartitionsException, InvalidReplicaAssignmentException, InvalidRequestException, ReassignmentInProgressException, TopicExistsException, UnknownTopicOrPartitionException, UnsupportedVersionException} -import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult -import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic -import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic -import org.apache.kafka.common.message.CreateTopicsResponseData.{CreatableTopicConfigs, CreatableTopicResult} -import org.apache.kafka.common.message.{AlterUserScramCredentialsRequestData, AlterUserScramCredentialsResponseData, DescribeUserScramCredentialsResponseData} -import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData.CredentialInfo -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.security.scram.internals.{ScramMechanism => InternalScramMechanism} -import org.apache.kafka.server.policy.{AlterConfigPolicy, CreateTopicPolicy} -import org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} -import org.apache.kafka.common.requests.CreateTopicsRequest._ -import org.apache.kafka.common.requests.{AlterConfigsRequest, ApiError} -import org.apache.kafka.common.security.scram.internals.{ScramCredentialUtils, ScramFormatter} -import org.apache.kafka.common.utils.{Sanitizer, Utils} -import org.apache.kafka.server.common.AdminOperationException -import org.apache.kafka.server.config.{ConfigType, QuotaConfig, ZooKeeperInternals} -import org.apache.kafka.server.config.ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG -import org.apache.kafka.server.config.ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG -import org.apache.kafka.server.purgatory.{DelayedOperation, DelayedOperationPurgatory} -import org.apache.kafka.storage.internals.log.LogConfig - -import scala.collection.{Map, mutable, _} -import scala.jdk.CollectionConverters._ - -object ZkAdminManager { - def clientQuotaPropsToDoubleMap(props: Map[String, String]): Map[String, Double] = { - props.map { case (key, value) => - val doubleValue = try value.toDouble catch { - case _: NumberFormatException => - throw new IllegalStateException(s"Unexpected client quota configuration value: $key -> $value") - } - key -> doubleValue - } - } -} - - -class ZkAdminManager(val config: KafkaConfig, - val metrics: Metrics, - val metadataCache: MetadataCache, - val zkClient: KafkaZkClient) extends Logging { - - 
this.logIdent = "[Admin Manager on Broker " + config.brokerId + "]: " - - private val topicPurgatory = new DelayedOperationPurgatory[DelayedOperation]("topic", config.brokerId) - private val adminZkClient = new AdminZkClient(zkClient, Some(config)) - private val configHelper = new ConfigHelper(metadataCache, config, new ZkConfigRepository(adminZkClient)) - - private val createTopicPolicy = - Option(config.getConfiguredInstance(CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, classOf[CreateTopicPolicy])) - - private val alterConfigPolicy = - Option(config.getConfiguredInstance(ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, classOf[AlterConfigPolicy])) - - def hasDelayedTopicOperations: Boolean = topicPurgatory.numDelayed != 0 - - private val defaultNumPartitions = config.numPartitions.intValue() - private val defaultReplicationFactor = config.defaultReplicationFactor.shortValue() - - /** - * Try to complete delayed topic operations with the request key - */ - def tryCompleteDelayedTopicOperations(topic: String): Unit = { - val key = TopicKey(topic) - val completed = topicPurgatory.checkAndComplete(key) - debug(s"Request key ${key.keyLabel} unblocked $completed topic requests.") - } - - private def validateTopicCreatePolicy(topic: CreatableTopic, - resolvedNumPartitions: Int, - resolvedReplicationFactor: Short, - assignments: Map[Int, Seq[Int]]): Unit = { - createTopicPolicy.foreach { policy => - // Use `null` for unset fields in the public API - val numPartitions: java.lang.Integer = - if (topic.assignments().isEmpty) resolvedNumPartitions else null - val replicationFactor: java.lang.Short = - if (topic.assignments().isEmpty) resolvedReplicationFactor else null - val javaAssignments = if (topic.assignments().isEmpty) { - null - } else { - assignments.map { case (k, v) => - (k: java.lang.Integer) -> v.map(i => i: java.lang.Integer).asJava - }.asJava - } - val javaConfigs = new java.util.HashMap[String, String] - topic.configs.forEach(config => javaConfigs.put(config.name, config.value)) - policy.validate(new RequestMetadata(topic.name, numPartitions, replicationFactor, - javaAssignments, javaConfigs)) - } - } - - private def maybePopulateMetadataAndConfigs(metadataAndConfigs: Map[String, CreatableTopicResult], - topicName: String, - configs: Properties, - assignments: Map[Int, Seq[Int]]): Unit = { - metadataAndConfigs.get(topicName).foreach { result => - val logConfig = LogConfig.fromProps(config.extractLogConfigMap, configs) - val createEntry = configHelper.createTopicConfigEntry(logConfig, configs, includeSynonyms = false, includeDocumentation = false)(_, _) - val topicConfigs = configHelper.allConfigs(logConfig).map { case (k, v) => - val entry = createEntry(k, v) - new CreatableTopicConfigs() - .setName(k) - .setValue(entry.value) - .setIsSensitive(entry.isSensitive) - .setReadOnly(entry.readOnly) - .setConfigSource(entry.configSource) - }.toList.asJava - result.setConfigs(topicConfigs) - result.setNumPartitions(assignments.size) - result.setReplicationFactor(assignments(0).size.toShort) - } - } - - private def populateIds(metadataAndConfigs: Map[String, CreatableTopicResult], - topicName: String) : Unit = { - metadataAndConfigs.get(topicName).foreach { result => - result.setTopicId(zkClient.getTopicIdsForTopics(Predef.Set(result.name())).getOrElse(result.name(), Uuid.ZERO_UUID)) - } - } - - /** - * Create topics and wait until the topics have been completely created. - * The callback function will be triggered either when timeout, error or the topics are created. 
- */ - def createTopics(timeout: Int, - validateOnly: Boolean, - toCreate: Map[String, CreatableTopic], - includeConfigsAndMetadata: Map[String, CreatableTopicResult], - controllerMutationQuota: ControllerMutationQuota, - responseCallback: Map[String, ApiError] => Unit): Unit = { - - // 1. map over topics creating assignment and calling zookeeper - val brokers = metadataCache.getAliveBrokers() - val metadata = toCreate.values.map(topic => - try { - if (metadataCache.contains(topic.name)) - throw new TopicExistsException(s"Topic '${topic.name}' already exists.") - - val nullConfigs = topic.configs.asScala.filter(_.value == null).map(_.name) - if (nullConfigs.nonEmpty) - throw new InvalidConfigurationException(s"Null value not supported for topic configs: ${nullConfigs.mkString(",")}") - - if ((topic.numPartitions != NO_NUM_PARTITIONS || topic.replicationFactor != NO_REPLICATION_FACTOR) - && !topic.assignments().isEmpty) { - throw new InvalidRequestException("Both numPartitions or replicationFactor and replicasAssignments were set. " + - "Both cannot be used at the same time.") - } - - val resolvedNumPartitions = if (topic.numPartitions == NO_NUM_PARTITIONS) - defaultNumPartitions else topic.numPartitions - val resolvedReplicationFactor = if (topic.replicationFactor == NO_REPLICATION_FACTOR) - defaultReplicationFactor else topic.replicationFactor - - val assignments = if (topic.assignments.isEmpty) { - CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers( - brokers.asJavaCollection, resolvedNumPartitions, resolvedReplicationFactor)) - } else { - val assignments = new mutable.HashMap[Int, Seq[Int]] - // Note: we don't check that replicaAssignment contains unknown brokers - unlike in add-partitions case, - // this follows the existing logic in TopicCommand - topic.assignments.forEach { assignment => - assignments(assignment.partitionIndex) = assignment.brokerIds.asScala.map(a => a: Int) - } - assignments - } - trace(s"Assignments for topic $topic are $assignments ") - - val configs = new Properties() - topic.configs.forEach(entry => configs.setProperty(entry.name, entry.value)) - adminZkClient.validateTopicCreate(topic.name, assignments, configs) - validateTopicCreatePolicy(topic, resolvedNumPartitions, resolvedReplicationFactor, assignments) - - // For responses with DescribeConfigs permission, populate metadata and configs. It is - // safe to populate it before creating the topic because the values are unset if the - // creation fails. - maybePopulateMetadataAndConfigs(includeConfigsAndMetadata, topic.name, configs, assignments) - - if (validateOnly) { - CreatePartitionsMetadata(topic.name, assignments.keySet) - } else { - controllerMutationQuota.record(assignments.size) - adminZkClient.createTopicWithAssignment(topic.name, configs, assignments, validate = false, config.usesTopicId) - populateIds(includeConfigsAndMetadata, topic.name) - CreatePartitionsMetadata(topic.name, assignments.keySet) - } - } catch { - // Log client errors at a lower level than unexpected exceptions - case e: TopicExistsException => - debug(s"Topic creation failed since topic '${topic.name}' already exists.", e) - CreatePartitionsMetadata(topic.name, e) - case e: ThrottlingQuotaExceededException => - debug(s"Topic creation not allowed because quota is violated. 
Delay time: ${e.throttleTimeMs}") - CreatePartitionsMetadata(topic.name, e) - case e: ApiException => - info(s"Error processing create topic request $topic", e) - CreatePartitionsMetadata(topic.name, e) - case e: ConfigException => - info(s"Error processing create topic request $topic", e) - CreatePartitionsMetadata(topic.name, new InvalidConfigurationException(e.getMessage, e.getCause)) - case e: Throwable => - error(s"Error processing create topic request $topic", e) - CreatePartitionsMetadata(topic.name, e) - }).toBuffer - - // 2. if timeout <= 0, validateOnly or no topics can proceed return immediately - if (timeout <= 0 || validateOnly || !metadata.exists(_.error.is(Errors.NONE))) { - val results = metadata.map { createTopicMetadata => - // ignore topics that already have errors - if (createTopicMetadata.error.isSuccess && !validateOnly) { - (createTopicMetadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null)) - } else { - (createTopicMetadata.topic, createTopicMetadata.error) - } - }.toMap - responseCallback(results) - } else { - // 3. else pass the assignments and errors to the delayed operation and set the keys - val delayedCreate = new DelayedCreatePartitions(timeout, metadata, this, - responseCallback) - val delayedCreateKeys = toCreate.values.map(topic => TopicKey(topic.name)).toList - // try to complete the request immediately, otherwise put it into the purgatory - topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys.asJava) - } - } - - /** - * Delete topics and wait until the topics have been completely deleted. - * The callback function will be triggered either when timeout, error or the topics are deleted. - */ - def deleteTopics(timeout: Int, - topics: Set[String], - controllerMutationQuota: ControllerMutationQuota, - responseCallback: Map[String, Errors] => Unit): Unit = { - // 1. map over topics calling the asynchronous delete - val metadata = topics.map { topic => - try { - controllerMutationQuota.record(metadataCache.numPartitions(topic).getOrElse(0).toDouble) - adminZkClient.deleteTopic(topic) - DeleteTopicMetadata(topic, Errors.NONE) - } catch { - case _: TopicAlreadyMarkedForDeletionException => - // swallow the exception, and still track deletion allowing multiple calls to wait for deletion - DeleteTopicMetadata(topic, Errors.NONE) - case e: ThrottlingQuotaExceededException => - debug(s"Topic deletion not allowed because quota is violated. Delay time: ${e.throttleTimeMs}") - DeleteTopicMetadata(topic, e) - case e: Throwable => - error(s"Error processing delete topic request for topic $topic", e) - DeleteTopicMetadata(topic, e) - } - } - - // 2. if timeout <= 0 or no topics can proceed return immediately - if (timeout <= 0 || !metadata.exists(_.error == Errors.NONE)) { - val results = metadata.map { deleteTopicMetadata => - // ignore topics that already have errors - if (deleteTopicMetadata.error == Errors.NONE) { - (deleteTopicMetadata.topic, Errors.REQUEST_TIMED_OUT) - } else { - (deleteTopicMetadata.topic, deleteTopicMetadata.error) - } - }.toMap - responseCallback(results) - } else { - // 3. 
else pass the topics and errors to the delayed operation and set the keys - val delayedDelete = new DelayedDeleteTopics(timeout, metadata.toSeq, this, responseCallback) - val delayedDeleteKeys = topics.map(TopicKey).toList - // try to complete the request immediately, otherwise put it into the purgatory - topicPurgatory.tryCompleteElseWatch(delayedDelete, delayedDeleteKeys.asJava) - } - } - - def createPartitions(timeoutMs: Int, - newPartitions: Seq[CreatePartitionsTopic], - validateOnly: Boolean, - controllerMutationQuota: ControllerMutationQuota, - callback: Map[String, ApiError] => Unit): Unit = { - val allBrokers = adminZkClient.getBrokerMetadatas() - val allBrokerIds = allBrokers.map(_.id) - - // 1. map over topics creating assignment and calling AdminUtils - val metadata = newPartitions.map { newPartition => - val topic = newPartition.name - - try { - val existingAssignment = zkClient.getFullReplicaAssignmentForTopics(immutable.Set(topic)).map { - case (topicPartition, assignment) => - if (assignment.isBeingReassigned) { - // We prevent adding partitions while topic reassignment is in progress, to protect from a race condition - // between the controller thread processing reassignment update and createPartitions(this) request. - throw new ReassignmentInProgressException(s"A partition reassignment is in progress for the topic '$topic'.") - } - topicPartition.partition -> assignment - } - if (existingAssignment.isEmpty) - throw new UnknownTopicOrPartitionException(s"The topic '$topic' does not exist.") - - val oldNumPartitions = existingAssignment.size - val newNumPartitions = newPartition.count - val numPartitionsIncrement = newNumPartitions - oldNumPartitions - if (numPartitionsIncrement < 0) { - throw new InvalidPartitionsException( - s"Topic currently has $oldNumPartitions partitions, which is higher than the requested $newNumPartitions.") - } else if (numPartitionsIncrement == 0) { - throw new InvalidPartitionsException(s"Topic already has $oldNumPartitions partitions.") - } - - val newPartitionsAssignment = Option(newPartition.assignments).map { assignmentMap => - val assignments = assignmentMap.asScala.map { - createPartitionAssignment => createPartitionAssignment.brokerIds.asScala.map(_.toInt) - } - val unknownBrokers = assignments.flatten.toSet -- allBrokerIds - if (unknownBrokers.nonEmpty) - throw new InvalidReplicaAssignmentException( - s"Unknown broker(s) in replica assignment: ${unknownBrokers.mkString(", ")}.") - - if (assignments.size != numPartitionsIncrement) - throw new InvalidReplicaAssignmentException( - s"Increasing the number of partitions by $numPartitionsIncrement " + - s"but ${assignments.size} assignments provided.") - - assignments.zipWithIndex.map { case (replicas, index) => - existingAssignment.size + index -> replicas - }.toMap - } - - val assignmentForNewPartitions = adminZkClient.createNewPartitionsAssignment( - topic, existingAssignment, allBrokers, newPartition.count, newPartitionsAssignment) - - if (validateOnly) { - CreatePartitionsMetadata(topic, (existingAssignment ++ assignmentForNewPartitions).keySet) - } else { - controllerMutationQuota.record(numPartitionsIncrement) - val updatedReplicaAssignment = adminZkClient.createPartitionsWithAssignment( - topic, existingAssignment, assignmentForNewPartitions) - CreatePartitionsMetadata(topic, updatedReplicaAssignment.keySet) - } - } catch { - case e: AdminOperationException => - CreatePartitionsMetadata(topic, e) - case e: ThrottlingQuotaExceededException => - debug(s"Partition(s) creation not allowed 
because quota is violated. Delay time: ${e.throttleTimeMs}") - CreatePartitionsMetadata(topic, e) - case e: ApiException => - CreatePartitionsMetadata(topic, e) - } - } - - // 2. if timeout <= 0, validateOnly or no topics can proceed return immediately - if (timeoutMs <= 0 || validateOnly || !metadata.exists(_.error.is(Errors.NONE))) { - val results = metadata.map { createPartitionMetadata => - // ignore topics that already have errors - if (createPartitionMetadata.error.isSuccess && !validateOnly) { - (createPartitionMetadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null)) - } else { - (createPartitionMetadata.topic, createPartitionMetadata.error) - } - }.toMap - callback(results) - } else { - // 3. else pass the assignments and errors to the delayed operation and set the keys - val delayedCreate = new DelayedCreatePartitions(timeoutMs, metadata, this, callback) - val delayedCreateKeys = newPartitions.map(createPartitionTopic => TopicKey(createPartitionTopic.name)).toList - // try to complete the request immediately, otherwise put it into the purgatory - topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys.asJava) - } - } - - def alterConfigs(configs: Map[ConfigResource, AlterConfigsRequest.Config], validateOnly: Boolean): Map[ConfigResource, ApiError] = { - configs.map { case (resource, config) => - - try { - val configEntriesMap = config.entries.asScala.map(entry => (entry.name, entry.value)).toMap - - val configProps = new Properties - config.entries.asScala.filter(_.value != null).foreach { configEntry => - configProps.setProperty(configEntry.name, configEntry.value) - } - - resource.`type` match { - case ConfigResource.Type.TOPIC => alterTopicConfigs(resource, validateOnly, configProps, configEntriesMap) - case ConfigResource.Type.BROKER => alterBrokerConfigs(resource, validateOnly, configProps, configEntriesMap) - case resourceType => - throw new InvalidRequestException(s"AlterConfigs is only supported for topics and brokers, but resource type is $resourceType") - } - } catch { - case e @ (_: ConfigException | _: IllegalArgumentException) => - val message = s"Invalid config value for resource $resource: ${e.getMessage}" - info(message) - resource -> ApiError.fromThrowable(new InvalidConfigurationException(message, e)) - case e: Throwable => - val configProps = new Properties - config.entries.asScala.filter(_.value != null).foreach { configEntry => - configProps.setProperty(configEntry.name, configEntry.value) - } - // Log client errors at a lower level than unexpected exceptions - val message = s"Error processing alter configs request for resource $resource, config ${toLoggableProps(resource, configProps).mkString(",")}" - if (e.isInstanceOf[ApiException]) - info(message, e) - else - error(message, e) - resource -> ApiError.fromThrowable(e) - } - }.toMap - } - - private def alterTopicConfigs(resource: ConfigResource, validateOnly: Boolean, - configProps: Properties, configEntriesMap: Map[String, String]): (ConfigResource, ApiError) = { - val topic = resource.name - if (topic.isEmpty) { - throw new InvalidRequestException("Default topic resources are not allowed.") - } - - if (!metadataCache.contains(topic)) - throw new UnknownTopicOrPartitionException(s"The topic '$topic' does not exist.") - - adminZkClient.validateTopicConfig(topic, configProps) - validateConfigPolicy(resource, configEntriesMap) - if (!validateOnly) { - info(s"Updating topic $topic with new configuration : ${toLoggableProps(resource, configProps).mkString(",")}") - 
adminZkClient.changeTopicConfig(topic, configProps) - } - - resource -> ApiError.NONE - } - - private def alterBrokerConfigs(resource: ConfigResource, validateOnly: Boolean, - configProps: Properties, configEntriesMap: Map[String, String]): (ConfigResource, ApiError) = { - val brokerId = getBrokerId(resource) - val perBrokerConfig = brokerId.nonEmpty - this.config.dynamicConfig.validate(configProps, perBrokerConfig) - validateConfigPolicy(resource, configEntriesMap) - if (!validateOnly) { - if (perBrokerConfig) - this.config.dynamicConfig.reloadUpdatedFilesWithoutConfigChange(configProps) - - if (perBrokerConfig) - info(s"Updating broker ${brokerId.get} with new configuration : ${toLoggableProps(resource, configProps).mkString(",")}") - else - info(s"Updating brokers with new configuration : ${toLoggableProps(resource, configProps).mkString(",")}") - - adminZkClient.changeBrokerConfig(brokerId, - this.config.dynamicConfig.toPersistentProps(configProps, perBrokerConfig)) - } - - resource -> ApiError.NONE - } - - private def getBrokerId(resource: ConfigResource) = { - if (resource.name == null || resource.name.isEmpty) - None - else { - val id = resourceNameToBrokerId(resource.name) - if (id != this.config.brokerId) - throw new InvalidRequestException(s"Unexpected broker id, expected ${this.config.brokerId}, but received ${resource.name}") - Some(id) - } - } - - private def validateConfigPolicy(resource: ConfigResource, configEntriesMap: Map[String, String]): Unit = { - alterConfigPolicy match { - case Some(policy) => - policy.validate(new AlterConfigPolicy.RequestMetadata( - new ConfigResource(resource.`type`(), resource.name), configEntriesMap.asJava)) - case None => - } - } - - def incrementalAlterConfigs(configs: Map[ConfigResource, Seq[AlterConfigOp]], validateOnly: Boolean): Map[ConfigResource, ApiError] = { - configs.map { case (resource, alterConfigOps) => - try { - val configEntriesMap = alterConfigOps.map(entry => (entry.configEntry.name, entry.configEntry.value)).toMap - - resource.`type` match { - case ConfigResource.Type.TOPIC => - if (resource.name.isEmpty) { - throw new InvalidRequestException("Default topic resources are not allowed.") - } - val configProps = adminZkClient.fetchEntityConfig(ConfigType.TOPIC, resource.name) - prepareIncrementalConfigs(alterConfigOps, configProps, LogConfig.configKeys.asScala) - alterTopicConfigs(resource, validateOnly, configProps, configEntriesMap) - - case ConfigResource.Type.BROKER => - val brokerId = getBrokerId(resource) - val perBrokerConfig = brokerId.nonEmpty - - val persistentProps = if (perBrokerConfig) adminZkClient.fetchEntityConfig(ConfigType.BROKER, brokerId.get.toString) - else adminZkClient.fetchEntityConfig(ConfigType.BROKER, ZooKeeperInternals.DEFAULT_STRING) - - val configProps = this.config.dynamicConfig.fromPersistentProps(persistentProps, perBrokerConfig) - prepareIncrementalConfigs(alterConfigOps, configProps, KafkaConfig.configKeys) - alterBrokerConfigs(resource, validateOnly, configProps, configEntriesMap) - - case resourceType => - throw new InvalidRequestException(s"AlterConfigs is only supported for topics and brokers, but resource type is $resourceType") - } - } catch { - case e @ (_: ConfigException | _: IllegalArgumentException) => - val message = s"Invalid config value for resource $resource: ${e.getMessage}" - info(message) - resource -> ApiError.fromThrowable(new InvalidConfigurationException(message, e)) - case e: Throwable => - // Log client errors at a lower level than unexpected exceptions - val message = 
s"Error processing alter configs request for resource $resource, config $alterConfigOps" - if (e.isInstanceOf[ApiException]) - info(message, e) - else - error(message, e) - resource -> ApiError.fromThrowable(e) - } - }.toMap - } - - def shutdown(): Unit = { - topicPurgatory.shutdown() - createTopicPolicy.foreach(Utils.closeQuietly(_, "create topic policy")) - alterConfigPolicy.foreach(Utils.closeQuietly(_, "alter config policy")) - } - - private def resourceNameToBrokerId(resourceName: String): Int = { - try resourceName.toInt catch { - case _: NumberFormatException => - throw new InvalidRequestException(s"Broker id must be an integer, but it is: $resourceName") - } - } - - private def sanitizeEntityName(entityName: String): String = - Option(entityName) match { - case None => ZooKeeperInternals.DEFAULT_STRING - case Some(name) => Sanitizer.sanitize(name) - } - - private def desanitizeEntityName(sanitizedEntityName: String): String = - sanitizedEntityName match { - case ZooKeeperInternals.DEFAULT_STRING => null - case name => Sanitizer.desanitize(name) - } - - private def parseAndSanitizeQuotaEntity(entity: ClientQuotaEntity): (Option[String], Option[String], Option[String]) = { - if (entity.entries.isEmpty) - throw new InvalidRequestException("Invalid empty client quota entity") - - var user: Option[String] = None - var clientId: Option[String] = None - var ip: Option[String] = None - entity.entries.forEach { (entityType, entityName) => - val sanitizedEntityName = Some(sanitizeEntityName(entityName)) - entityType match { - case ClientQuotaEntity.USER => user = sanitizedEntityName - case ClientQuotaEntity.CLIENT_ID => clientId = sanitizedEntityName - case ClientQuotaEntity.IP => ip = sanitizedEntityName - case _ => throw new InvalidRequestException(s"Unhandled client quota entity type: $entityType") - } - if (entityName != null && entityName.isEmpty) - throw new InvalidRequestException(s"Empty $entityType not supported") - } - (user, clientId, ip) - } - - private def userClientIdToEntity(user: Option[String], clientId: Option[String]): ClientQuotaEntity = { - new ClientQuotaEntity((user.map(u => ClientQuotaEntity.USER -> u) ++ clientId.map(c => ClientQuotaEntity.CLIENT_ID -> c)).toMap.asJava) - } - - def describeClientQuotas(filter: ClientQuotaFilter): Map[ClientQuotaEntity, Map[String, Double]] = { - var userComponent: Option[ClientQuotaFilterComponent] = None - var clientIdComponent: Option[ClientQuotaFilterComponent] = None - var ipComponent: Option[ClientQuotaFilterComponent] = None - filter.components.forEach { component => - component.entityType match { - case ClientQuotaEntity.USER => - if (userComponent.isDefined) - throw new InvalidRequestException(s"Duplicate user filter component entity type") - userComponent = Some(component) - case ClientQuotaEntity.CLIENT_ID => - if (clientIdComponent.isDefined) - throw new InvalidRequestException(s"Duplicate client filter component entity type") - clientIdComponent = Some(component) - case ClientQuotaEntity.IP => - if (ipComponent.isDefined) - throw new InvalidRequestException(s"Duplicate ip filter component entity type") - ipComponent = Some(component) - case "" => - throw new InvalidRequestException(s"Unexpected empty filter component entity type") - case et => - // Supplying other entity types is not yet supported. 
- throw new UnsupportedVersionException(s"Custom entity type '$et' not supported") - } - } - if ((userComponent.isDefined || clientIdComponent.isDefined) && ipComponent.isDefined) - throw new InvalidRequestException(s"Invalid entity filter component combination, IP filter component should not be used with " + - s"user or clientId filter component.") - - val userClientQuotas = if (ipComponent.isEmpty) - handleDescribeClientQuotas(userComponent, clientIdComponent, filter.strict) - else - Map.empty - - val ipQuotas = if (userComponent.isEmpty && clientIdComponent.isEmpty) - handleDescribeIpQuotas(ipComponent, filter.strict) - else - Map.empty - - (userClientQuotas ++ ipQuotas).toMap - } - - private def wantExact(component: Option[ClientQuotaFilterComponent]): Boolean = component.exists(_.`match` != null) - - private def toOption(opt: java.util.Optional[String]): Option[String] = { - if (opt == null) - None - else if (opt.isPresent) - Some(opt.get) - else - Some(null) - } - - private def sanitized(name: Option[String]): String = name.map(n => sanitizeEntityName(n)).getOrElse("") - - private def handleDescribeClientQuotas(userComponent: Option[ClientQuotaFilterComponent], - clientIdComponent: Option[ClientQuotaFilterComponent], - strict: Boolean): Map[ClientQuotaEntity, Map[String, Double]] = { - - val user = userComponent.flatMap(c => toOption(c.`match`)) - val clientId = clientIdComponent.flatMap(c => toOption(c.`match`)) - - val sanitizedUser = sanitized(user) - val sanitizedClientId = sanitized(clientId) - - val exactUser = wantExact(userComponent) - val exactClientId = wantExact(clientIdComponent) - - def wantExcluded(component: Option[ClientQuotaFilterComponent]): Boolean = strict && component.isEmpty - val excludeUser = wantExcluded(userComponent) - val excludeClientId = wantExcluded(clientIdComponent) - - val userEntries = if (exactUser && excludeClientId) - Map((Some(user.get), None) -> adminZkClient.fetchEntityConfig(ConfigType.USER, sanitizedUser)) - else if (!excludeUser && !exactClientId) - adminZkClient.fetchAllEntityConfigs(ConfigType.USER).map { case (name, props) => - (Some(desanitizeEntityName(name)), None) -> props - } - else - Map.empty - - val clientIdEntries = if (excludeUser && exactClientId) - Map((None, Some(clientId.get)) -> adminZkClient.fetchEntityConfig(ConfigType.CLIENT, sanitizedClientId)) - else if (!exactUser && !excludeClientId) - adminZkClient.fetchAllEntityConfigs(ConfigType.CLIENT).map { case (name, props) => - (None, Some(desanitizeEntityName(name))) -> props - } - else - Map.empty - - val bothEntries = if (exactUser && exactClientId) - Map((Some(user.get), Some(clientId.get)) -> - adminZkClient.fetchEntityConfig(ConfigType.USER, s"${sanitizedUser}/clients/${sanitizedClientId}")) - else if (!excludeUser && !excludeClientId) - adminZkClient.fetchAllChildEntityConfigs(ConfigType.USER, ConfigType.CLIENT).map { case (name, props) => - val components = name.split("/") - if (components.size != 3 || components(1) != "clients") - throw new IllegalArgumentException(s"Unexpected config path: $name") - (Some(desanitizeEntityName(components(0))), Some(desanitizeEntityName(components(2)))) -> props - } - else - Map.empty - - def matches(nameComponent: Option[ClientQuotaFilterComponent], name: Option[String]): Boolean = nameComponent match { - case Some(component) => - toOption(component.`match`) match { - case Some(n) => name.contains(n) - case None => name.isDefined - } - case None => - name.isEmpty || !strict - } - - (userEntries ++ clientIdEntries ++ 
bothEntries).flatMap { case ((u, c), p) => - val quotaProps = p.asScala.filter { case (key, _) => QuotaConfig.isClientOrUserQuotaConfig(key) } - if (quotaProps.nonEmpty && matches(userComponent, u) && matches(clientIdComponent, c)) - Some(userClientIdToEntity(u, c) -> ZkAdminManager.clientQuotaPropsToDoubleMap(quotaProps)) - else - None - }.toMap - } - - private def handleDescribeIpQuotas(ipComponent: Option[ClientQuotaFilterComponent], strict: Boolean): Map[ClientQuotaEntity, Map[String, Double]] = { - val ip = ipComponent.flatMap(c => toOption(c.`match`)) - val exactIp = wantExact(ipComponent) - val allIps = ipComponent.exists(_.`match` == null) || (ipComponent.isEmpty && !strict) - val ipEntries = if (exactIp) - Map(Some(ip.get) -> adminZkClient.fetchEntityConfig(ConfigType.IP, sanitized(ip))) - else if (allIps) - adminZkClient.fetchAllEntityConfigs(ConfigType.IP).map { case (name, props) => - Some(desanitizeEntityName(name)) -> props - } - else - Map.empty - - def ipToQuotaEntity(ip: Option[String]): ClientQuotaEntity = { - new ClientQuotaEntity(ip.map(ipName => ClientQuotaEntity.IP -> ipName).toMap.asJava) - } - - ipEntries.flatMap { case (ip, props) => - val ipQuotaProps = props.asScala.filter { case (key, _) => DynamicConfig.Ip.names.contains(key) } - if (ipQuotaProps.nonEmpty) - Some(ipToQuotaEntity(ip) -> ZkAdminManager.clientQuotaPropsToDoubleMap(ipQuotaProps)) - else - None - } - } - - def alterClientQuotas(entries: Seq[ClientQuotaAlteration], validateOnly: Boolean): Map[ClientQuotaEntity, ApiError] = { - def alterEntityQuotas(entity: ClientQuotaEntity, ops: Iterable[ClientQuotaAlteration.Op]): Unit = { - val (path, configType, configKeys, isUserClientId) = parseAndSanitizeQuotaEntity(entity) match { - case (Some(user), Some(clientId), None) => (user + "/clients/" + clientId, ConfigType.USER, DynamicConfig.User.configKeys, true) - case (Some(user), None, None) => (user, ConfigType.USER, DynamicConfig.User.configKeys, false) - case (None, Some(clientId), None) => (clientId, ConfigType.CLIENT, DynamicConfig.Client.configKeys, false) - case (None, None, Some(ip)) => - if (!DynamicConfig.Ip.isValidIpEntity(ip)) - throw new InvalidRequestException(s"$ip is not a valid IP or resolvable host.") - (ip, ConfigType.IP, DynamicConfig.Ip.configKeys, false) - case (_, _, Some(_)) => throw new InvalidRequestException(s"Invalid quota entity combination, " + - s"IP entity should not be used with user/client ID entity.") - case _ => throw new InvalidRequestException("Invalid client quota entity") - } - - val props = adminZkClient.fetchEntityConfig(configType, path) - ops.foreach { op => - op.value match { - case null => - props.remove(op.key) - case value => configKeys.get(op.key) match { - case null => - throw new InvalidRequestException(s"Invalid configuration key ${op.key}") - case key => key.`type` match { - case ConfigDef.Type.DOUBLE => - props.setProperty(op.key, value.toString) - case ConfigDef.Type.LONG | ConfigDef.Type.INT => - val epsilon = 1e-6 - val intValue = if (key.`type` == ConfigDef.Type.LONG) - (value + epsilon).toLong - else - (value + epsilon).toInt - if ((intValue.toDouble - value).abs > epsilon) - throw new InvalidRequestException(s"Configuration ${op.key} must be a ${key.`type`} value") - props.setProperty(op.key, intValue.toString) - case _ => - throw new IllegalStateException(s"Unexpected config type ${key.`type`}") - } - } - } - } - if (!validateOnly) - adminZkClient.changeConfigs(configType, path, props, isUserClientId) - } - entries.map { entry => - val apiError = try 
{ - alterEntityQuotas(entry.entity, entry.ops.asScala) - ApiError.NONE - } catch { - case e: Throwable => - info(s"Error encountered while updating client quotas", e) - ApiError.fromThrowable(e) - } - entry.entity -> apiError - }.toMap - } - - private val usernameMustNotBeEmptyMsg = "Username must not be empty" - private val errorProcessingDescribe = "Error processing describe user SCRAM credential configs request" - private val attemptToDescribeUserThatDoesNotExist = "Attempt to describe a user credential that does not exist" - - def describeUserScramCredentials(users: Option[Seq[String]]): DescribeUserScramCredentialsResponseData = { - val describingAllUsers = users.isEmpty || users.get.isEmpty - val retval = new DescribeUserScramCredentialsResponseData() - val userResults = mutable.Map[String, DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult]() - - def addToResultsIfHasScramCredential(user: String, userConfig: Properties, explicitUser: Boolean = false): Unit = { - val result = new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult().setUser(user) - val configKeys = userConfig.stringPropertyNames - val hasScramCredential = ScramMechanism.values().toList.exists(key => key != ScramMechanism.UNKNOWN && configKeys.contains(key.mechanismName)) - if (hasScramCredential) { - val credentialInfos = new util.ArrayList[CredentialInfo] - try { - ScramMechanism.values().filter(_ != ScramMechanism.UNKNOWN).foreach { mechanism => - val propertyValue = userConfig.getProperty(mechanism.mechanismName) - if (propertyValue != null) { - val iterations = ScramCredentialUtils.credentialFromString(propertyValue).iterations - credentialInfos.add(new CredentialInfo().setMechanism(mechanism.`type`).setIterations(iterations)) - } - } - result.setCredentialInfos(credentialInfos) - } catch { - case e: Exception => { // should generally never happen, but just in case bad data gets in... 
- val apiError = apiErrorFrom(e, errorProcessingDescribe) - result.setErrorCode(apiError.error.code).setErrorMessage(apiError.error.message) - } - } - userResults += (user -> result) - } else if (explicitUser) { - // it is an error to request credentials for a user that has no credentials - result.setErrorCode(Errors.RESOURCE_NOT_FOUND.code).setErrorMessage(s"$attemptToDescribeUserThatDoesNotExist: $user") - userResults += (user -> result) - } - } - - def collectRetrievedResults(): Unit = { - if (describingAllUsers) { - val usersSorted = SortedSet.empty[String] ++ userResults.keys - usersSorted.foreach { user => retval.results.add(userResults(user)) } - } else { - // be sure to only include a single copy of a result for any user requested multiple times - users.get.distinct.foreach { user => retval.results.add(userResults(user)) } - } - } - - try { - if (describingAllUsers) - adminZkClient.fetchAllEntityConfigs(ConfigType.USER).foreach { - case (user, properties) => addToResultsIfHasScramCredential(Sanitizer.desanitize(user), properties) } - else { - // describing specific users - val illegalUsers = users.get.filter(_.isEmpty).toSet - illegalUsers.foreach { user => - userResults += (user -> new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult() - .setUser(user) - .setErrorCode(Errors.RESOURCE_NOT_FOUND.code) - .setErrorMessage(usernameMustNotBeEmptyMsg)) } - val duplicatedUsers = users.get.groupBy(identity).filter( - userAndOccurrencesTuple => userAndOccurrencesTuple._2.length > 1).keys - duplicatedUsers.filterNot(illegalUsers.contains).foreach { user => - userResults += (user -> new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult() - .setUser(user) - .setErrorCode(Errors.DUPLICATE_RESOURCE.code) - .setErrorMessage(s"Cannot describe SCRAM credentials for the same user twice in a single request: $user")) } - val usersToSkip = illegalUsers ++ duplicatedUsers - users.get.filterNot(usersToSkip.contains).foreach { user => - try { - val userConfigs = adminZkClient.fetchEntityConfig(ConfigType.USER, Sanitizer.sanitize(user)) - addToResultsIfHasScramCredential(user, userConfigs, explicitUser = true) - } catch { - case e: Exception => { - val apiError = apiErrorFrom(e, errorProcessingDescribe) - userResults += (user -> new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult() - .setUser(user) - .setErrorCode(apiError.error.code) - .setErrorMessage(apiError.error.message)) - } - } - } - } - collectRetrievedResults() - } catch { - case e: Exception => { - // this should generally only happen when we get a failure trying to retrieve all user configs from ZooKeeper - val apiError = apiErrorFrom(e, errorProcessingDescribe) - retval.setErrorCode(apiError.error.code).setErrorMessage(apiError.messageWithFallback()) - } - } - retval - } - - private def apiErrorFrom(e: Exception, message: String): ApiError = { - if (e.isInstanceOf[ApiException]) - info(message, e) - else - error(message, e) - ApiError.fromThrowable(e) - } - - private case class requestStatus(user: String, mechanism: Option[ScramMechanism], legalRequest: Boolean, iterations: Int) {} - - def alterUserScramCredentials(upsertions: Seq[AlterUserScramCredentialsRequestData.ScramCredentialUpsertion], - deletions: Seq[AlterUserScramCredentialsRequestData.ScramCredentialDeletion]): AlterUserScramCredentialsResponseData = { - - def scramMechanism(mechanism: Byte): ScramMechanism = { - ScramMechanism.fromType(mechanism) - } - - def mechanismName(mechanism: Byte): String = { 
- scramMechanism(mechanism).mechanismName - } - - val retval = new AlterUserScramCredentialsResponseData() - - // fail any user that is invalid due to an empty user name, an unknown SCRAM mechanism, or unacceptable number of iterations - val maxIterations = 16384 - val illegalUpsertions = upsertions.map(upsertion => - if (upsertion.name.isEmpty) - requestStatus(upsertion.name, None, legalRequest = false, upsertion.iterations) // no determined mechanism -- empty user is the cause of failure - else { - val publicScramMechanism = scramMechanism(upsertion.mechanism) - if (publicScramMechanism == ScramMechanism.UNKNOWN) { - requestStatus(upsertion.name, Some(publicScramMechanism), legalRequest = false, upsertion.iterations) // unknown mechanism is the cause of failure - } else { - if (upsertion.iterations < InternalScramMechanism.forMechanismName(publicScramMechanism.mechanismName).minIterations - || upsertion.iterations > maxIterations) { - requestStatus(upsertion.name, Some(publicScramMechanism), legalRequest = false, upsertion.iterations) // known mechanism, bad iterations is the cause of failure - } else { - requestStatus(upsertion.name, Some(publicScramMechanism), legalRequest = true, upsertion.iterations) // legal - } - } - }).filter { !_.legalRequest } - val illegalDeletions = deletions.map(deletion => - if (deletion.name.isEmpty) { - requestStatus(deletion.name, None, legalRequest = false, 0) // no determined mechanism -- empty user is the cause of failure - } else { - val publicScramMechanism = scramMechanism(deletion.mechanism) - requestStatus(deletion.name, Some(publicScramMechanism), publicScramMechanism != ScramMechanism.UNKNOWN, 0) - }).filter { !_.legalRequest } - // map user names to error messages - val unknownScramMechanismMsg = "Unknown SCRAM mechanism" - val tooFewIterationsMsg = "Too few iterations" - val tooManyIterationsMsg = "Too many iterations" - val illegalRequestsByUser = - illegalDeletions.map(requestStatus => - if (requestStatus.user.isEmpty) { - (requestStatus.user, usernameMustNotBeEmptyMsg) - } else { - (requestStatus.user, unknownScramMechanismMsg) - } - ).toMap ++ illegalUpsertions.map(requestStatus => - if (requestStatus.user.isEmpty) { - (requestStatus.user, usernameMustNotBeEmptyMsg) - } else if (requestStatus.mechanism.contains(ScramMechanism.UNKNOWN)) { - (requestStatus.user, unknownScramMechanismMsg) - } else { - (requestStatus.user, if (requestStatus.iterations > maxIterations) {tooManyIterationsMsg} else tooFewIterationsMsg) - } - ).toMap - - illegalRequestsByUser.foreachEntry { (user, errorMessage) => - retval.results.add(new AlterUserScramCredentialsResult().setUser(user) - .setErrorCode(if (errorMessage == unknownScramMechanismMsg) {Errors.UNSUPPORTED_SASL_MECHANISM.code} else {Errors.UNACCEPTABLE_CREDENTIAL.code}) - .setErrorMessage(errorMessage)) } - - val invalidUsers = (illegalUpsertions ++ illegalDeletions).map(_.user).toSet - val initiallyValidUserMechanismPairs = upsertions.filter(upsertion => !invalidUsers.contains(upsertion.name)).map(upsertion => (upsertion.name, upsertion.mechanism)) ++ - deletions.filter(deletion => !invalidUsers.contains(deletion.name)).map(deletion => (deletion.name, deletion.mechanism)) - - val usersWithDuplicateUserMechanismPairs = initiallyValidUserMechanismPairs.groupBy(identity).filter( - userMechanismPairAndOccurrencesTuple => userMechanismPairAndOccurrencesTuple._2.length > 1).keys.map(userMechanismPair => userMechanismPair._1).toSet - usersWithDuplicateUserMechanismPairs.foreach { user => - 
retval.results.add(new AlterUserScramCredentialsResult() - .setUser(user) - .setErrorCode(Errors.DUPLICATE_RESOURCE.code).setErrorMessage("A user credential cannot be altered twice in the same request")) } - - def potentiallyValidUserMechanismPairs = initiallyValidUserMechanismPairs.filter(pair => !usersWithDuplicateUserMechanismPairs.contains(pair._1)) - - val potentiallyValidUsers = potentiallyValidUserMechanismPairs.map(_._1).toSet - val configsByPotentiallyValidUser = potentiallyValidUsers.map(user => (user, adminZkClient.fetchEntityConfig(ConfigType.USER, Sanitizer.sanitize(user)))).toMap - - // check for deletion of a credential that does not exist - val invalidDeletions = deletions.filter(deletion => potentiallyValidUsers.contains(deletion.name)).filter(deletion => - configsByPotentiallyValidUser(deletion.name).getProperty(mechanismName(deletion.mechanism)) == null) - val invalidUsersDueToInvalidDeletions = invalidDeletions.map(_.name).toSet - invalidUsersDueToInvalidDeletions.foreach { user => - retval.results.add(new AlterUserScramCredentialsResult() - .setUser(user) - .setErrorCode(Errors.RESOURCE_NOT_FOUND.code).setErrorMessage("Attempt to delete a user credential that does not exist")) } - - // now prepare the new set of property values for users that don't have any issues identified above, - // keeping track of ones that fail - val usersToTryToAlter = potentiallyValidUsers.diff(invalidUsersDueToInvalidDeletions) - val usersFailedToPrepareProperties = usersToTryToAlter.map(user => { - try { - // deletions: remove property keys - deletions.filter(deletion => usersToTryToAlter.contains(deletion.name)).foreach { deletion => - configsByPotentiallyValidUser(deletion.name).remove(mechanismName(deletion.mechanism)) } - // upsertions: put property key/value - upsertions.filter(upsertion => usersToTryToAlter.contains(upsertion.name)).foreach { upsertion => - val mechanism = InternalScramMechanism.forMechanismName(mechanismName(upsertion.mechanism)) - val credential = new ScramFormatter(mechanism) - .generateCredential(upsertion.salt, upsertion.saltedPassword, upsertion.iterations) - configsByPotentiallyValidUser(upsertion.name).put(mechanismName(upsertion.mechanism), ScramCredentialUtils.credentialToString(credential)) } - (user) // success, 1 element, won't be matched - } catch { - case e: Exception => - info(s"Error encountered while altering user SCRAM credentials", e) - (user, e) // fail, 2 elements, will be matched - } - }).collect { case (user: String, exception: Exception) => (user, exception) }.toMap - - // now persist the properties we have prepared, again keeping track of whatever fails - val usersFailedToPersist = usersToTryToAlter.filterNot(usersFailedToPrepareProperties.contains).map(user => { - try { - adminZkClient.changeConfigs(ConfigType.USER, Sanitizer.sanitize(user), configsByPotentiallyValidUser(user)) - (user) // success, 1 element, won't be matched - } catch { - case e: Exception => - info(s"Error encountered while altering user SCRAM credentials", e) - (user, e) // fail, 2 elements, will be matched - } - }).collect { case (user: String, exception: Exception) => (user, exception) }.toMap - - // report failures - usersFailedToPrepareProperties.++(usersFailedToPersist).foreachEntry { (user, exception) => - val error = Errors.forException(exception) - retval.results.add(new AlterUserScramCredentialsResult() - .setUser(user) - .setErrorCode(error.code) - .setErrorMessage(error.message)) } - - // report successes - 
usersToTryToAlter.filterNot(usersFailedToPrepareProperties.contains).filterNot(usersFailedToPersist.contains).foreach { user => - retval.results.add(new AlterUserScramCredentialsResult() - .setUser(user) - .setErrorCode(Errors.NONE.code)) } - - retval - } -} diff --git a/core/src/main/scala/kafka/server/ZkAlterPartitionManager.scala b/core/src/main/scala/kafka/server/ZkAlterPartitionManager.scala deleted file mode 100644 index 942ead0d6d0dc..0000000000000 --- a/core/src/main/scala/kafka/server/ZkAlterPartitionManager.scala +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.server - -import kafka.utils.{Logging, ReplicationUtils} -import kafka.zk.KafkaZkClient -import org.apache.kafka.common.TopicPartition - -import java.util.concurrent.atomic.AtomicLong -import java.util.concurrent.CompletableFuture -import org.apache.kafka.common.TopicIdPartition -import org.apache.kafka.common.errors.InvalidUpdateVersionException -import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.util.Scheduler - -import scala.collection.mutable - -/** - * @param checkIntervalMs How often to check for ISR - * @param maxDelayMs Maximum time that an ISR change may be delayed before sending the notification - * @param lingerMs Maximum time to await additional changes before sending the notification - */ -case class IsrChangePropagationConfig(checkIntervalMs: Long, maxDelayMs: Long, lingerMs: Long) - -object ZkAlterPartitionManager { - // This field is mutable to allow overriding change notification behavior in test cases - @volatile var DefaultIsrPropagationConfig: IsrChangePropagationConfig = IsrChangePropagationConfig( - checkIntervalMs = 2500, - lingerMs = 5000, - maxDelayMs = 60000, - ) -} - -class ZkAlterPartitionManager(scheduler: Scheduler, time: Time, zkClient: KafkaZkClient) extends AlterPartitionManager with Logging { - - private val isrChangeNotificationConfig = ZkAlterPartitionManager.DefaultIsrPropagationConfig - // Visible for testing - private[server] val isrChangeSet: mutable.Set[TopicPartition] = new mutable.HashSet[TopicPartition]() - private val lastIsrChangeMs = new AtomicLong(time.milliseconds()) - private val lastIsrPropagationMs = new AtomicLong(time.milliseconds()) - - override def start(): Unit = { - scheduler.schedule("isr-change-propagation", () => maybePropagateIsrChanges(), 0L, - isrChangeNotificationConfig.checkIntervalMs) - } - - override def submit( - topicIdPartition: TopicIdPartition, - leaderAndIsr: LeaderAndIsr, - controllerEpoch: Int - ): CompletableFuture[LeaderAndIsr]= { - debug(s"Writing new ISR ${leaderAndIsr.isr} to ZooKeeper with version " + - s"${leaderAndIsr.partitionEpoch} for partition $topicIdPartition") - - val 
(updateSucceeded, newVersion) = ReplicationUtils.updateLeaderAndIsr(zkClient, topicIdPartition.topicPartition, - leaderAndIsr, controllerEpoch) - - val future = new CompletableFuture[LeaderAndIsr]() - if (updateSucceeded) { - // Track which partitions need to be propagated to the controller - isrChangeSet synchronized { - isrChangeSet += topicIdPartition.topicPartition - lastIsrChangeMs.set(time.milliseconds()) - } - - // We rely on Partition#isrState being properly set to the pending ISR at this point since we are synchronously - // applying the callback - future.complete(leaderAndIsr.withPartitionEpoch(newVersion)) - } else { - future.completeExceptionally(new InvalidUpdateVersionException( - s"ISR update $leaderAndIsr for partition $topicIdPartition with controller epoch $controllerEpoch " + - "failed with an invalid version error")) - } - future - } - - /** - * This function periodically runs to see if ISR needs to be propagated. It propagates ISR when: - * 1. There is ISR change not propagated yet. - * 2. There is no ISR Change in the last five seconds, or it has been more than 60 seconds since the last ISR propagation. - * This allows an occasional ISR change to be propagated within a few seconds, and avoids overwhelming controller and - * other brokers when large amount of ISR change occurs. - */ - private[server] def maybePropagateIsrChanges(): Unit = { - val now = time.milliseconds() - isrChangeSet synchronized { - if (isrChangeSet.nonEmpty && - (lastIsrChangeMs.get() + isrChangeNotificationConfig.lingerMs < now || - lastIsrPropagationMs.get() + isrChangeNotificationConfig.maxDelayMs < now)) { - zkClient.propagateIsrChanges(isrChangeSet) - isrChangeSet.clear() - lastIsrPropagationMs.set(now) - } - } - } -} diff --git a/core/src/main/scala/kafka/server/ZkBrokerEpochManager.scala b/core/src/main/scala/kafka/server/ZkBrokerEpochManager.scala deleted file mode 100644 index 36b2815444576..0000000000000 --- a/core/src/main/scala/kafka/server/ZkBrokerEpochManager.scala +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
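// --- Editor's sketch (illustrative only, not part of this patch) ---------------------
// The deleted ZkAlterPartitionManager above batches ISR change notifications: it only
// propagates when at least one change is pending AND either no further change has arrived
// for lingerMs, or maxDelayMs has elapsed since the last propagation. A self-contained
// version of that debounce rule (hypothetical names, no ZooKeeper or Kafka dependencies):
final class IsrChangeDebouncer(lingerMs: Long, maxDelayMs: Long) {
  private var pendingChanges = 0
  private var lastChangeMs = 0L
  private var lastPropagationMs = 0L

  def recordChange(nowMs: Long): Unit = {
    pendingChanges += 1
    lastChangeMs = nowMs
  }

  // Propagate when something is pending and the changes have either gone quiet for
  // lingerMs or have been waiting longer than maxDelayMs since the last propagation.
  def maybePropagate(nowMs: Long)(propagate: Int => Unit): Unit = {
    if (pendingChanges > 0 &&
        (lastChangeMs + lingerMs < nowMs || lastPropagationMs + maxDelayMs < nowMs)) {
      propagate(pendingChanges)
      pendingChanges = 0
      lastPropagationMs = nowMs
    }
  }
}
// -------------------------------------------------------------------------------------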
- */ - -package kafka.server - -import kafka.controller.KafkaController -import org.apache.kafka.common.requests.AbstractControlRequest - -class ZkBrokerEpochManager(metadataCache: MetadataCache, - controller: KafkaController, - lifecycleManagerOpt: Option[BrokerLifecycleManager]) { - def get(): Long = { - lifecycleManagerOpt match { - case Some(lifecycleManager) => metadataCache.getControllerId match { - case Some(_: ZkCachedControllerId) => controller.brokerEpoch - case Some(_: KRaftCachedControllerId) => lifecycleManager.brokerEpoch - case None => controller.brokerEpoch - } - case None => controller.brokerEpoch - } - } - - def isBrokerEpochStale(brokerEpochInRequest: Long, isKRaftControllerRequest: Boolean): Boolean = { - if (brokerEpochInRequest == AbstractControlRequest.UNKNOWN_BROKER_EPOCH) { - false - } else if (isKRaftControllerRequest) { - if (lifecycleManagerOpt.isDefined) { - brokerEpochInRequest < lifecycleManagerOpt.get.brokerEpoch - } else { - throw new IllegalStateException("Expected BrokerLifecycleManager to be non-null.") - } - } else { - // brokerEpochInRequest > controller.brokerEpoch is possible in rare scenarios where the controller gets notified - // about the new broker epoch and sends a control request with this epoch before the broker learns about it - brokerEpochInRequest < controller.brokerEpoch - } - } -} diff --git a/core/src/main/scala/kafka/server/ZkConfigManager.scala b/core/src/main/scala/kafka/server/ZkConfigManager.scala deleted file mode 100644 index 1a4d99587a9e8..0000000000000 --- a/core/src/main/scala/kafka/server/ZkConfigManager.scala +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server - -import java.nio.charset.StandardCharsets -import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener} -import kafka.utils.{Json, Logging} -import kafka.utils.json.JsonObject -import kafka.zk.{AdminZkClient, ConfigEntityChangeNotificationSequenceZNode, ConfigEntityChangeNotificationZNode, KafkaZkClient} -import org.apache.kafka.common.config.types.Password -import org.apache.kafka.common.security.scram.internals.ScramMechanism -import org.apache.kafka.server.config.ConfigType - -import scala.jdk.CollectionConverters._ -import scala.collection._ - -/** - * This class initiates and carries out config changes for all entities defined in ConfigType. - * - * It works as follows. - * - * Config is stored under the path: /config/entityType/entityName - * E.g. /config/topics/ and /config/clients/ - * This znode stores the overrides for this entity in properties format with defaults stored using entityName "". - * Multiple entity names may be specified (eg. quotas) using a hierarchical path: - * E.g. 
/config/users/<user>/clients/<clientId> - * - * To avoid watching all topics for changes instead we have a notification path - * /config/changes - * The ZkConfigManager has a child watch on this path. - * - * To update a config we first update the config properties. Then we create a new sequential - * znode under the change path which contains the name of the entityType and entityName that was updated, say - * /config/changes/config_change_13321 - * The sequential znode contains data in this format: {"version" : 1, "entity_type":"topic/client", "entity_name" : "topic_name/client_id"} - * This is just a notification--the actual config change is stored only once under the /config/entityType/entityName path. - * Version 2 of notifications has the format: {"version" : 2, "entity_path":"entity_type/entity_name"} - * Multiple entities may be specified as a hierarchical path (eg. users/<user>/clients/<clientId>). - * - * This will fire a watcher on all brokers. This watcher works as follows. It reads all the config change notifications. - * It keeps track of the highest config change suffix number it has applied previously. For any previously applied change it finds - * it checks if this notification is larger than a static expiration time (say 10mins) and if so it deletes this notification. - * For any new changes it reads the new configuration, combines it with the defaults, and updates the existing config. - * - * Note that config is always read from the config path in zk, the notification is just a trigger to do so. So if a broker is - * down and misses a change that is fine--when it restarts it will be loading the full config anyway. Note also that - * if there are two consecutive config changes it is possible that only the last one will be applied (since by the time the - * broker reads the config the both changes may have been made). In this case the broker would needlessly refresh the config twice, - * but that is harmless. - * - * On restart the config manager re-processes all notifications. This will usually be wasted work, but avoids any race conditions - * on startup where a change might be missed between the initial config load and registering for change notifications. - * - */ -class ZkConfigManager( - private val zkClient: KafkaZkClient, - private val configHandlers: Map[String, ConfigHandler], -) extends Logging { - val adminZkClient = new AdminZkClient(zkClient) - - object ConfigChangedNotificationHandler extends NotificationHandler { - override def processNotification(jsonBytes: Array[Byte]): Unit = { - // Ignore non-json notifications because they can be from the deprecated TopicConfigManager - Json.parseBytes(jsonBytes).foreach { js => - val jsObject = js.asJsonObjectOption.getOrElse { - throw new IllegalArgumentException("Config change notification has an unexpected value. The format is:" + - """{"version" : 1, "entity_type":"topics/clients", "entity_name" : "topic_name/client_id"} or """ + - """{"version" : 2, "entity_path":"entity_type/entity_name"}. 
""" + - s"Received: ${new String(jsonBytes, StandardCharsets.UTF_8)}") - } - jsObject("version").to[Int] match { - case 1 => processEntityConfigChangeVersion1(jsonBytes, jsObject) - case 2 => processEntityConfigChangeVersion2(jsonBytes, jsObject) - case version => throw new IllegalArgumentException("Config change notification has unsupported version " + - s"'$version', supported versions are 1 and 2.") - } - } - } - - private def processEntityConfigChangeVersion1(jsonBytes: Array[Byte], js: JsonObject): Unit = { - val validConfigTypes = Set(ConfigType.TOPIC, ConfigType.CLIENT) - val entityType = js.get("entity_type").flatMap(_.to[Option[String]]).filter(validConfigTypes).getOrElse { - throw new IllegalArgumentException("Version 1 config change notification must have 'entity_type' set to " + - s"'clients' or 'topics'. Received: ${new String(jsonBytes, StandardCharsets.UTF_8)}") - } - - val entity = js.get("entity_name").flatMap(_.to[Option[String]]).getOrElse { - throw new IllegalArgumentException("Version 1 config change notification does not specify 'entity_name'. " + - s"Received: ${new String(jsonBytes, StandardCharsets.UTF_8)}") - } - - val entityConfig = adminZkClient.fetchEntityConfig(entityType, entity) - info(s"Processing override for entityType: $entityType, entity: $entity with config: $entityConfig") - configHandlers(entityType).processConfigChanges(entity, entityConfig) - - } - - private def processEntityConfigChangeVersion2(jsonBytes: Array[Byte], js: JsonObject): Unit = { - - val entityPath = js.get("entity_path").flatMap(_.to[Option[String]]).getOrElse { - throw new IllegalArgumentException(s"Version 2 config change notification must specify 'entity_path'. " + - s"Received: ${new String(jsonBytes, StandardCharsets.UTF_8)}") - } - - val index = entityPath.indexOf('/') - val rootEntityType = entityPath.substring(0, index) - if (index < 0 || !configHandlers.contains(rootEntityType)) { - val entityTypes = configHandlers.keys.map(entityType => s"'$entityType'/").mkString(", ") - throw new IllegalArgumentException("Version 2 config change notification must have 'entity_path' starting with " + - s"one of $entityTypes. 
Received: ${new String(jsonBytes, StandardCharsets.UTF_8)}") - } - val fullSanitizedEntityName = entityPath.substring(index + 1) - - val entityConfig = adminZkClient.fetchEntityConfig(rootEntityType, fullSanitizedEntityName) - val loggableConfig = entityConfig.asScala.map { - case (k, v) => (k, if (ScramMechanism.isScram(k)) Password.HIDDEN else v) - } - info(s"Processing override for entityPath: $entityPath with config: $loggableConfig") - configHandlers(rootEntityType).processConfigChanges(fullSanitizedEntityName, entityConfig) - - } - } - - private val configChangeListener = new ZkNodeChangeNotificationListener(zkClient, ConfigEntityChangeNotificationZNode.path, - ConfigEntityChangeNotificationSequenceZNode.SequenceNumberPrefix, ConfigChangedNotificationHandler) - - /** - * Begin watching for config changes - */ - def startup(): Unit = { - configChangeListener.init() - - // Apply all existing client/user configs to the ClientIdConfigHandler/UserConfigHandler to bootstrap the overrides - configHandlers.foreach { - case (ConfigType.USER, handler) => - adminZkClient.fetchAllEntityConfigs(ConfigType.USER).foreach { - case (sanitizedUser, properties) => handler.processConfigChanges(sanitizedUser, properties) - } - adminZkClient.fetchAllChildEntityConfigs(ConfigType.USER, ConfigType.CLIENT).foreach { - case (sanitizedUserClientId, properties) => handler.processConfigChanges(sanitizedUserClientId, properties) - } - case (configType, handler) => - adminZkClient.fetchAllEntityConfigs(configType).foreach { - case (entityName, properties) => handler.processConfigChanges(entityName, properties) - } - } - } - - def shutdown(): Unit = { - configChangeListener.close() - } -} - diff --git a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index 957677c2b4181..cc8d16b2e7c8e 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -17,7 +17,7 @@ package kafka.server.metadata -import java.util.{OptionalInt, Properties} +import java.util.OptionalInt import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager import kafka.server.{KafkaConfig, ReplicaManager} @@ -243,10 +243,6 @@ class BrokerMetadataPublisher( } } - def reloadUpdatedFilesWithoutConfigChange(props: Properties): Unit = { - config.dynamicConfig.reloadUpdatedFilesWithoutConfigChange(props) - } - /** * Update the coordinator of local replica changes: election and resignation. * diff --git a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala index 5fad48f8a71e5..8235cc1b3611f 100644 --- a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala +++ b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala @@ -17,7 +17,7 @@ package kafka.server.metadata -import kafka.server.{CachedControllerId, KRaftCachedControllerId, MetadataCache} +import kafka.server.MetadataCache import kafka.utils.Logging import org.apache.kafka.admin.BrokerMetadata import org.apache.kafka.common._ @@ -49,7 +49,7 @@ import scala.util.control.Breaks._ class KRaftMetadataCache( val brokerId: Int, val kraftVersionSupplier: Supplier[KRaftVersion] -) extends MetadataCache with Logging with ConfigRepository { +) extends MetadataCache with Logging { this.logIdent = s"[MetadataCache brokerId=$brokerId] " // This is the cache state. 
Every MetadataImage instance is immutable, and updates @@ -151,9 +151,7 @@ class KRaftMetadataCache( * @param topicName The name of the topic. * @param listenerName The listener name. * @param startIndex The smallest index of the partitions to be included in the result. - * @param upperIndex The upper limit of the index of the partitions to be included in the result. - * Note that, the upper index can be larger than the largest partition index in - * this topic. + * * @return A collection of topic partition metadata and next partition index (-1 means * no next partition). */ @@ -271,7 +269,7 @@ class KRaftMetadataCache( * * @param topics The iterator of topics and their corresponding first partition id to fetch. * @param listenerName The listener name. - * @param firstTopicPartitionStartIndex The start partition index for the first topic + * @param topicPartitionStartIndex The start partition index for the first topic * @param maximumNumberOfPartitions The max number of partitions to return. * @param ignoreTopicsWithExceptions Whether ignore the topics with exception. */ @@ -381,6 +379,10 @@ class KRaftMetadataCache( flatMap(_.node(listenerName.value()).toScala).toSeq } + override def getBrokerNodes(listenerName: ListenerName): Seq[Node] = { + _currentImage.cluster().brokers().values().asScala.flatMap(_.node(listenerName.value()).toScala).toSeq + } + // Does NOT include offline replica metadata override def getPartitionInfo(topicName: String, partitionId: Int): Option[UpdateMetadataPartitionState] = { Option(_currentImage.topics().getTopic(topicName)). @@ -446,15 +448,6 @@ class KRaftMetadataCache( result } - /** - * Choose a random broker node to report as the controller. We do this because we want - * the client to send requests destined for the controller to a random broker. - * Clients do not have direct access to the controller in the KRaft world, as explained - * in KIP-590. - */ - override def getControllerId: Option[CachedControllerId] = - getRandomAliveBroker(_currentImage).map(KRaftCachedControllerId) - override def getRandomAliveBrokerId: Option[Int] = { getRandomAliveBroker(_currentImage) } @@ -537,11 +530,11 @@ class KRaftMetadataCache( override def config(configResource: ConfigResource): Properties = _currentImage.configs().configProperties(configResource) - def describeClientQuotas(request: DescribeClientQuotasRequestData): DescribeClientQuotasResponseData = { + override def describeClientQuotas(request: DescribeClientQuotasRequestData): DescribeClientQuotasResponseData = { _currentImage.clientQuotas().describe(request) } - def describeScramCredentials(request: DescribeUserScramCredentialsRequestData): DescribeUserScramCredentialsResponseData = { + override def describeScramCredentials(request: DescribeUserScramCredentialsRequestData): DescribeUserScramCredentialsResponseData = { _currentImage.scram().describe(request) } diff --git a/core/src/main/scala/kafka/server/metadata/OffsetTrackingListener.scala b/core/src/main/scala/kafka/server/metadata/OffsetTrackingListener.scala deleted file mode 100644 index fb6213fb99fd8..0000000000000 --- a/core/src/main/scala/kafka/server/metadata/OffsetTrackingListener.scala +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.server.metadata - -import org.apache.kafka.raft.{BatchReader, RaftClient} -import org.apache.kafka.server.common.ApiMessageAndVersion -import org.apache.kafka.snapshot.SnapshotReader - -/** - * A simple Raft listener that only keeps track of the highest offset seen. Used for registration of ZK - * brokers with the KRaft controller during a KIP-866 migration. - */ -class OffsetTrackingListener extends RaftClient.Listener[ApiMessageAndVersion] { - @volatile private var _highestOffset = 0L - - def highestOffset: Long = _highestOffset - - override def handleCommit(reader: BatchReader[ApiMessageAndVersion]): Unit = { - reader.lastOffset() - var index = 0 - while (reader.hasNext) { - index += 1 - reader.next() - } - _highestOffset = reader.lastOffset().orElse(reader.baseOffset() + index) - reader.close() - } - - override def handleLoadSnapshot(reader: SnapshotReader[ApiMessageAndVersion]): Unit = { - _highestOffset = reader.lastContainedLogOffset() - reader.close() - } -} diff --git a/core/src/main/scala/kafka/server/metadata/ZkConfigRepository.scala b/core/src/main/scala/kafka/server/metadata/ZkConfigRepository.scala deleted file mode 100644 index 74e73512961cb..0000000000000 --- a/core/src/main/scala/kafka/server/metadata/ZkConfigRepository.scala +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server.metadata - -import java.util.Properties -import kafka.zk.{AdminZkClient, KafkaZkClient} -import org.apache.kafka.common.config.ConfigResource -import org.apache.kafka.common.config.ConfigResource.Type -import org.apache.kafka.common.errors.InvalidRequestException -import org.apache.kafka.server.config.{ConfigType, ZooKeeperInternals} - - -object ZkConfigRepository { - def apply(zkClient: KafkaZkClient): ZkConfigRepository = - new ZkConfigRepository(new AdminZkClient(zkClient)) -} - -class ZkConfigRepository(adminZkClient: AdminZkClient) extends ConfigRepository { - override def config(configResource: ConfigResource): Properties = { - val configTypeForZk = configResource.`type` match { - case Type.TOPIC => ConfigType.TOPIC - case Type.BROKER => ConfigType.BROKER - case Type.CLIENT_METRICS => throw new InvalidRequestException("Config type client-metrics is only supported on KRaft clusters") - case Type.GROUP => throw new InvalidRequestException("Config type groups is only supported on KRaft clusters") - case tpe => throw new IllegalArgumentException(s"Unsupported config type: $tpe") - } - // ZK stores cluster configs under "". - val effectiveName = if (configResource.`type`.equals(Type.BROKER) && - configResource.name.isEmpty) { - ZooKeeperInternals.DEFAULT_STRING - } else { - configResource.name - } - adminZkClient.fetchEntityConfig(configTypeForZk, effectiveName) - } -} diff --git a/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala deleted file mode 100755 index 3205a24aa4447..0000000000000 --- a/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala +++ /dev/null @@ -1,715 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server.metadata - -import java.util -import java.util.{Collections, Optional} -import java.util.concurrent.locks.{ReentrantLock, ReentrantReadWriteLock} -import scala.collection.{Seq, Set, mutable} -import scala.jdk.CollectionConverters._ -import kafka.cluster.{Broker, EndPoint} -import kafka.controller.StateChangeLogger -import kafka.server.{CachedControllerId, KRaftCachedControllerId, MetadataCache, ZkCachedControllerId} -import kafka.utils.CoreUtils._ -import kafka.utils.Logging -import org.apache.kafka.admin.BrokerMetadata -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataPartitionState, UpdateMetadataTopicState} -import org.apache.kafka.common.{Cluster, Node, PartitionInfo, TopicPartition, Uuid} -import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic -import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition -import org.apache.kafka.common.message.UpdateMetadataRequestData -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.requests.{AbstractControlRequest, ApiVersionsResponse, MetadataResponse, UpdateMetadataRequest} -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} - -import java.util.concurrent.{ThreadLocalRandom, TimeUnit} -import scala.concurrent.TimeoutException -import scala.math.max - -// Raised whenever there was an error in updating the FinalizedFeatureCache with features. -class FeatureCacheUpdateException(message: String) extends RuntimeException(message) { -} - -trait ZkFinalizedFeatureCache { - def waitUntilFeatureEpochOrThrow(minExpectedEpoch: Long, timeoutMs: Long): Unit - - def getFeatureOption: Option[FinalizedFeatures] -} - -case class MetadataSnapshot(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]], - topicIds: Map[String, Uuid], - controllerId: Option[CachedControllerId], - aliveBrokers: mutable.LongMap[Broker], - aliveNodes: mutable.LongMap[collection.Map[ListenerName, Node]]) { - val topicNames: Map[Uuid, String] = topicIds.map { case (topicName, topicId) => (topicId, topicName) } -} - -object ZkMetadataCache { - def transformKRaftControllerFullMetadataRequest( - currentMetadata: MetadataSnapshot, - requestControllerEpoch: Int, - requestTopicStates: util.List[UpdateMetadataTopicState], - handleLogMessage: String => Unit, - ): util.List[UpdateMetadataTopicState] = { - val topicIdToNewState = new util.HashMap[Uuid, UpdateMetadataTopicState]() - requestTopicStates.forEach(state => topicIdToNewState.put(state.topicId(), state)) - val newRequestTopicStates = new util.ArrayList[UpdateMetadataTopicState]() - currentMetadata.topicNames.foreachEntry((id, name) => { - try { - Option(topicIdToNewState.get(id)) match { - case None => - currentMetadata.partitionStates.get(name) match { - case None => handleLogMessage(s"Error: topic $name appeared in currentMetadata.topicNames, " + - "but not in currentMetadata.partitionStates.") - case Some(curPartitionStates) => - handleLogMessage(s"Removing topic $name with ID $id from the metadata cache since " + - "the full UMR did not include it.") - newRequestTopicStates.add(createDeletionEntries(name, - id, - curPartitionStates.values, - requestControllerEpoch)) - } - case 
Some(newTopicState) => - val indexToState = new util.HashMap[Integer, UpdateMetadataPartitionState] - newTopicState.partitionStates().forEach(part => indexToState.put(part.partitionIndex, part)) - currentMetadata.partitionStates.get(name) match { - case None => handleLogMessage(s"Error: topic $name appeared in currentMetadata.topicNames, " + - "but not in currentMetadata.partitionStates.") - case Some(curPartitionStates) => - curPartitionStates.foreach(state => indexToState.remove(state._1.toInt)) - if (!indexToState.isEmpty) { - handleLogMessage(s"Removing ${indexToState.size()} partition(s) from topic $name with " + - s"ID $id from the metadata cache since the full UMR did not include them.") - newRequestTopicStates.add(createDeletionEntries(name, - id, - indexToState.values().asScala, - requestControllerEpoch)) - } - } - } - } catch { - case e: Exception => handleLogMessage(s"Error: $e") - } - }) - if (newRequestTopicStates.isEmpty) { - // If the output is the same as the input, optimize by just returning the input. - requestTopicStates - } else { - // If the output has some new entries, they should all appear at the beginning. This will - // ensure that the old stuff is cleared out before the new stuff is added. We will need a - // new list for this, of course. - newRequestTopicStates.addAll(requestTopicStates) - newRequestTopicStates - } - } - - def createDeletionEntries( - topicName: String, - topicId: Uuid, - partitions: Iterable[UpdateMetadataPartitionState], - requestControllerEpoch: Int - ): UpdateMetadataTopicState = { - val topicState = new UpdateMetadataRequestData.UpdateMetadataTopicState() - .setTopicId(topicId) - .setTopicName(topicName) - .setPartitionStates(new util.ArrayList()) - partitions.foreach(partition => { - val lisr = LeaderAndIsr.duringDelete(partition.isr()) - val newPartitionState = new UpdateMetadataPartitionState() - .setPartitionIndex(partition.partitionIndex()) - .setTopicName(topicName) - .setLeader(lisr.leader) - .setLeaderEpoch(lisr.leaderEpoch) - .setControllerEpoch(requestControllerEpoch) - .setReplicas(partition.replicas()) - .setZkVersion(lisr.partitionEpoch) - .setIsr(lisr.isr) - topicState.partitionStates().add(newPartitionState) - }) - topicState - } -} - -/** - * A cache for the state (e.g., current leader) of each partition. This cache is updated through - * UpdateMetadataRequest from the controller. Every broker maintains the same cache, asynchronously. - */ -class ZkMetadataCache( - brokerId: Int, - metadataVersion: MetadataVersion, - brokerFeatures: BrokerFeatures, - zkMigrationEnabled: Boolean = false) - extends MetadataCache with ZkFinalizedFeatureCache with Logging { - - private val partitionMetadataLock = new ReentrantReadWriteLock() - //this is the cache state. every MetadataSnapshot instance is immutable, and updates (performed under a lock) - //replace the value with a completely new one. this means reads (which are not under any lock) need to grab - //the value of this var (into a val) ONCE and retain that read copy for the duration of their operation. - //multiple reads of this value risk getting different snapshots. 
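// Editor's illustrative sketch (not part of this patch; all names below are hypothetical):
// the "read the @volatile snapshot exactly once" rule that the comment above describes.
// A reader copies the volatile reference into a local val and uses only that copy for its
// whole operation, so a concurrent replacement cannot hand it two different snapshots.
object SnapshotReadSketch {
  final case class Snapshot(partitionCounts: Map[String, Int])

  @volatile private var snapshot: Snapshot = Snapshot(Map.empty)

  // Writers replace the immutable value wholesale; they never mutate it in place.
  def update(newSnapshot: Snapshot): Unit = snapshot = newSnapshot

  def describe(topic: String): String = {
    val current = snapshot // single volatile read, retained for the rest of the method
    val forTopic = current.partitionCounts.getOrElse(topic, 0)
    val total = current.partitionCounts.values.sum
    s"$topic has $forTopic of $total cached partition(s)"
  }
}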
- @volatile private var metadataSnapshot: MetadataSnapshot = MetadataSnapshot( - partitionStates = mutable.AnyRefMap.empty, - topicIds = Map.empty, - controllerId = None, - aliveBrokers = mutable.LongMap.empty, - aliveNodes = mutable.LongMap.empty) - - this.logIdent = s"[MetadataCache brokerId=$brokerId] " - private val stateChangeLogger = new StateChangeLogger(brokerId, inControllerContext = false, None) - - // Features are updated via ZK notification (see FinalizedFeatureChangeListener) - @volatile private var _features: Option[FinalizedFeatures] = Option.empty - private val featureLock = new ReentrantLock() - private val featureCond = featureLock.newCondition() - - // This method is the main hotspot when it comes to the performance of metadata requests, - // we should be careful about adding additional logic here. Relatedly, `brokers` is - // `List[Integer]` instead of `List[Int]` to avoid a collection copy. - // filterUnavailableEndpoints exists to support v0 MetadataResponses - private def maybeFilterAliveReplicas(snapshot: MetadataSnapshot, - brokers: java.util.List[Integer], - listenerName: ListenerName, - filterUnavailableEndpoints: Boolean): java.util.List[Integer] = { - if (!filterUnavailableEndpoints) { - brokers - } else { - val res = new util.ArrayList[Integer](math.min(snapshot.aliveBrokers.size, brokers.size)) - for (brokerId <- brokers.asScala) { - if (hasAliveEndpoint(snapshot, brokerId, listenerName)) - res.add(brokerId) - } - res - } - } - - // errorUnavailableEndpoints exists to support v0 MetadataResponses - // If errorUnavailableListeners=true, return LISTENER_NOT_FOUND if listener is missing on the broker. - // Otherwise, return LEADER_NOT_AVAILABLE for broker unavailable and missing listener (Metadata response v5 and below). 
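// Editor's illustrative sketch (hypothetical helper, not in the patch): the error-selection
// rule from the comment above, reduced to a pure function. "Leader alive" means the broker
// is registered at all; the listener check only matters once the broker is known to be alive.
object MetadataErrorSketch {
  import org.apache.kafka.common.protocol.Errors

  def errorForMissingLeaderEndpoint(leaderIsAlive: Boolean, errorUnavailableListeners: Boolean): Errors = {
    if (!leaderIsAlive) Errors.LEADER_NOT_AVAILABLE               // broker itself is gone
    else if (errorUnavailableListeners) Errors.LISTENER_NOT_FOUND // newer clients get the precise error
    else Errors.LEADER_NOT_AVAILABLE                              // Metadata v5 and below only know this code
  }
}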
- private def getPartitionMetadata(snapshot: MetadataSnapshot, topic: String, listenerName: ListenerName, errorUnavailableEndpoints: Boolean, - errorUnavailableListeners: Boolean): Option[Iterable[MetadataResponsePartition]] = { - snapshot.partitionStates.get(topic).map { partitions => - partitions.map { case (partitionId, partitionState) => - val topicPartition = new TopicPartition(topic, partitionId.toInt) - val leaderBrokerId = partitionState.leader - val leaderEpoch = partitionState.leaderEpoch - val maybeLeader = getAliveEndpoint(snapshot, leaderBrokerId, listenerName) - - val replicas = partitionState.replicas - val filteredReplicas = maybeFilterAliveReplicas(snapshot, replicas, listenerName, errorUnavailableEndpoints) - - val isr = partitionState.isr - val filteredIsr = maybeFilterAliveReplicas(snapshot, isr, listenerName, errorUnavailableEndpoints) - - val offlineReplicas = partitionState.offlineReplicas - - maybeLeader match { - case None => - val error = if (!snapshot.aliveBrokers.contains(leaderBrokerId)) { // we are already holding the read lock - debug(s"Error while fetching metadata for $topicPartition: leader not available") - Errors.LEADER_NOT_AVAILABLE - } else { - debug(s"Error while fetching metadata for $topicPartition: listener $listenerName " + - s"not found on leader $leaderBrokerId") - if (errorUnavailableListeners) Errors.LISTENER_NOT_FOUND else Errors.LEADER_NOT_AVAILABLE - } - - new MetadataResponsePartition() - .setErrorCode(error.code) - .setPartitionIndex(partitionId.toInt) - .setLeaderId(MetadataResponse.NO_LEADER_ID) - .setLeaderEpoch(leaderEpoch) - .setReplicaNodes(filteredReplicas) - .setIsrNodes(filteredIsr) - .setOfflineReplicas(offlineReplicas) - - case Some(_) => - val error = if (filteredReplicas.size < replicas.size) { - debug(s"Error while fetching metadata for $topicPartition: replica information not available for " + - s"following brokers ${replicas.asScala.filterNot(filteredReplicas.contains).mkString(",")}") - Errors.REPLICA_NOT_AVAILABLE - } else if (filteredIsr.size < isr.size) { - debug(s"Error while fetching metadata for $topicPartition: in sync replica information not available for " + - s"following brokers ${isr.asScala.filterNot(filteredIsr.contains).mkString(",")}") - Errors.REPLICA_NOT_AVAILABLE - } else { - Errors.NONE - } - - new MetadataResponsePartition() - .setErrorCode(error.code) - .setPartitionIndex(partitionId.toInt) - .setLeaderId(maybeLeader.map(_.id()).getOrElse(MetadataResponse.NO_LEADER_ID)) - .setLeaderEpoch(leaderEpoch) - .setReplicaNodes(filteredReplicas) - .setIsrNodes(filteredIsr) - .setOfflineReplicas(offlineReplicas) - } - } - } - } - - /** - * Check whether a broker is alive and has a registered listener matching the provided name. - * This method was added to avoid unnecessary allocations in [[maybeFilterAliveReplicas]], which is - * a hotspot in metadata handling. - */ - private def hasAliveEndpoint(snapshot: MetadataSnapshot, brokerId: Int, listenerName: ListenerName): Boolean = { - snapshot.aliveNodes.get(brokerId).exists(_.contains(listenerName)) - } - - /** - * Get the endpoint matching the provided listener if the broker is alive. Note that listeners can - * be added dynamically, so a broker with a missing listener could be a transient error. - * - * @return None if broker is not alive or if the broker does not have a listener named `listenerName`. 
- */ - private def getAliveEndpoint(snapshot: MetadataSnapshot, brokerId: Int, listenerName: ListenerName): Option[Node] = { - snapshot.aliveNodes.get(brokerId).flatMap(_.get(listenerName)) - } - - // errorUnavailableEndpoints exists to support v0 MetadataResponses - def getTopicMetadata(topics: Set[String], - listenerName: ListenerName, - errorUnavailableEndpoints: Boolean = false, - errorUnavailableListeners: Boolean = false): Seq[MetadataResponseTopic] = { - val snapshot = metadataSnapshot - topics.toSeq.flatMap { topic => - getPartitionMetadata(snapshot, topic, listenerName, errorUnavailableEndpoints, errorUnavailableListeners).map { partitionMetadata => - new MetadataResponseTopic() - .setErrorCode(Errors.NONE.code) - .setName(topic) - .setTopicId(snapshot.topicIds.getOrElse(topic, Uuid.ZERO_UUID)) - .setIsInternal(Topic.isInternal(topic)) - .setPartitions(partitionMetadata.toBuffer.asJava) - } - } - } - - def topicNamesToIds(): util.Map[String, Uuid] = { - Collections.unmodifiableMap(metadataSnapshot.topicIds.asJava) - } - - def topicIdsToNames(): util.Map[Uuid, String] = { - Collections.unmodifiableMap(metadataSnapshot.topicNames.asJava) - } - - /** - * This method returns a map from topic names to IDs and a map from topic IDs to names - */ - def topicIdInfo(): (util.Map[String, Uuid], util.Map[Uuid, String]) = { - val snapshot = metadataSnapshot - (Collections.unmodifiableMap(snapshot.topicIds.asJava), Collections.unmodifiableMap(snapshot.topicNames.asJava)) - } - - override def getAllTopics(): Set[String] = { - getAllTopics(metadataSnapshot) - } - - override def getTopicPartitions(topicName: String): Set[TopicPartition] = { - metadataSnapshot.partitionStates.getOrElse(topicName, Map.empty).values. - map(p => new TopicPartition(topicName, p.partitionIndex())).toSet - } - - private def getAllTopics(snapshot: MetadataSnapshot): Set[String] = { - snapshot.partitionStates.keySet - } - - private def getAllPartitions(snapshot: MetadataSnapshot): Map[TopicPartition, UpdateMetadataPartitionState] = { - snapshot.partitionStates.flatMap { case (topic, partitionStates) => - partitionStates.map { case (partition, state) => (new TopicPartition(topic, partition.toInt), state) } - }.toMap - } - - override def hasAliveBroker(brokerId: Int): Boolean = metadataSnapshot.aliveBrokers.contains(brokerId) - - override def getAliveBrokers(): Iterable[BrokerMetadata] = { - metadataSnapshot.aliveBrokers.values.map(b => new BrokerMetadata(b.id, Optional.ofNullable(b.rack.orNull))) - } - - override def getAliveBrokerNode(brokerId: Int, listenerName: ListenerName): Option[Node] = { - val snapshot = metadataSnapshot - snapshot.aliveBrokers.get(brokerId).flatMap(_.getNode(listenerName)) - } - - override def getAliveBrokerNodes(listenerName: ListenerName): Iterable[Node] = { - metadataSnapshot.aliveBrokers.values.flatMap(_.getNode(listenerName)) - } - - def getTopicId(topicName: String): Uuid = { - metadataSnapshot.topicIds.getOrElse(topicName, Uuid.ZERO_UUID) - } - - def getTopicName(topicId: Uuid): Option[String] = { - metadataSnapshot.topicNames.get(topicId) - } - - private def addOrUpdatePartitionInfo(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]], - topic: String, - partitionId: Int, - stateInfo: UpdateMetadataPartitionState): Unit = { - val infos = partitionStates.getOrElseUpdate(topic, mutable.LongMap.empty) - infos(partitionId) = stateInfo - } - - def getPartitionInfo(topic: String, partitionId: Int): Option[UpdateMetadataPartitionState] = { - 
metadataSnapshot.partitionStates.get(topic).flatMap(_.get(partitionId)) - } - - def numPartitions(topic: String): Option[Int] = { - metadataSnapshot.partitionStates.get(topic).map(_.size) - } - - // if the leader is not known, return None; - // if the leader is known and corresponding node is available, return Some(node) - // if the leader is known but corresponding node with the listener name is not available, return Some(NO_NODE) - def getPartitionLeaderEndpoint(topic: String, partitionId: Int, listenerName: ListenerName): Option[Node] = { - val snapshot = metadataSnapshot - snapshot.partitionStates.get(topic).flatMap(_.get(partitionId)) map { partitionInfo => - val leaderId = partitionInfo.leader - - snapshot.aliveNodes.get(leaderId) match { - case Some(nodeMap) => - nodeMap.getOrElse(listenerName, Node.noNode) - case None => - Node.noNode - } - } - } - - def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node] = { - val snapshot = metadataSnapshot - snapshot.partitionStates.get(tp.topic).flatMap(_.get(tp.partition)).map { partitionInfo => - val replicaIds = partitionInfo.replicas - replicaIds.asScala - .map(replicaId => replicaId.intValue() -> { - snapshot.aliveBrokers.get(replicaId.longValue()) match { - case Some(broker) => - broker.getNode(listenerName).getOrElse(Node.noNode()) - case None => - Node.noNode() - } - }).toMap - .filter(pair => pair match { - case (_, node) => !node.isEmpty - }) - }.getOrElse(Map.empty[Int, Node]) - } - - def getControllerId: Option[CachedControllerId] = { - metadataSnapshot.controllerId - } - - def getRandomAliveBrokerId: Option[Int] = { - val aliveBrokers = metadataSnapshot.aliveBrokers.values.toList - Some(aliveBrokers(ThreadLocalRandom.current().nextInt(aliveBrokers.size)).id) - } - - def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster = { - val snapshot = metadataSnapshot - val nodes = snapshot.aliveNodes.flatMap { case (id, nodesByListener) => - nodesByListener.get(listenerName).map { node => - id -> node - } - } - - def node(id: Integer): Node = { - nodes.getOrElse(id.toLong, new Node(id, "", -1)) - } - - def controllerId(snapshot: MetadataSnapshot): Option[Node] = { - snapshot.controllerId.flatMap { - case ZkCachedControllerId(id) => getAliveBrokerNode(id, listenerName) - case KRaftCachedControllerId(_) => getRandomAliveBrokerId.flatMap(getAliveBrokerNode(_, listenerName)) - } - } - - val partitions = getAllPartitions(snapshot) - .filter { case (_, state) => state.leader != LeaderAndIsr.LEADER_DURING_DELETE } - .map { case (tp, state) => - new PartitionInfo(tp.topic, tp.partition, node(state.leader), - state.replicas.asScala.map(node).toArray, - state.isr.asScala.map(node).toArray, - state.offlineReplicas.asScala.map(node).toArray) - } - val unauthorizedTopics = Collections.emptySet[String] - val internalTopics = getAllTopics(snapshot).filter(Topic.isInternal).asJava - new Cluster(clusterId, nodes.values.toBuffer.asJava, - partitions.toBuffer.asJava, - unauthorizedTopics, internalTopics, - controllerId(snapshot).orNull) - } - - // This method returns the deleted TopicPartitions received from UpdateMetadataRequest. - // Note: if this ZK broker is migrating to KRaft, a singular UMR may sometimes both delete a - // partition and re-create a new partition with that same name. In that case, it will not appear - // in the return value of this function. 
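// Editor's illustrative sketch (simplified, hypothetical types): why a partition that the
// same UpdateMetadataRequest deletes and then re-creates never shows up in the "deleted"
// result described above -- the later re-add removes it from the set again, mirroring the
// LinkedHashSet add/remove bookkeeping in updateMetadata() below.
object DeletedPartitionsSketch {
  final case class PartitionUpdate(topic: String, partition: Int, leaderDuringDelete: Boolean)

  def deletedAfterApplying(updates: Seq[PartitionUpdate]): Seq[(String, Int)] = {
    val deleted = scala.collection.mutable.LinkedHashSet.empty[(String, Int)]
    updates.foreach { u =>
      val tp = (u.topic, u.partition)
      if (u.leaderDuringDelete) deleted += tp // marked for deletion
      else deleted -= tp                      // re-created later in the same request
    }
    deleted.toSeq
  }
}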
- def updateMetadata( - correlationId: Int, - originalUpdateMetadataRequest: UpdateMetadataRequest - ): Seq[TopicPartition] = { - var updateMetadataRequest = originalUpdateMetadataRequest - inWriteLock(partitionMetadataLock) { - if ( - updateMetadataRequest.isKRaftController && - updateMetadataRequest.updateType() == AbstractControlRequest.Type.FULL - ) { - if (updateMetadataRequest.version() < 8) { - stateChangeLogger.error(s"Received UpdateMetadataRequest with Type=FULL (2), but version of " + - updateMetadataRequest.version() + ", which should not be possible. Not treating this as a full " + - "metadata update") - } else if (!zkMigrationEnabled) { - stateChangeLogger.error(s"Received UpdateMetadataRequest with Type=FULL (2), but ZK migrations " + - s"are not enabled on this broker. Not treating this as a full metadata update") - } else { - // When handling a UMR from a KRaft controller, we may have to insert some partition - // deletions at the beginning, to handle the different way topic deletion works in KRaft - // mode (and also migration mode). - // - // After we've done that, we re-create the whole UpdateMetadataRequest object using the - // updated list of topic info. This ensures that UpdateMetadataRequest.normalize is called - // on the new, updated topic data. Note that we don't mutate the old request object; it may - // be used elsewhere. - val newTopicStates = ZkMetadataCache.transformKRaftControllerFullMetadataRequest( - metadataSnapshot, - updateMetadataRequest.controllerEpoch(), - updateMetadataRequest.topicStates(), - logMessage => if (logMessage.startsWith("Error")) { - stateChangeLogger.error(logMessage) - } else { - stateChangeLogger.info(logMessage) - }) - - // It would be nice if we could call duplicate() here, but we don't want to copy the - // old topicStates array. That would be quite costly, and we're not going to use it anyway. - // Instead, we copy each field that we need. - val originalRequestData = updateMetadataRequest.data() - val newData = new UpdateMetadataRequestData(). - setControllerId(originalRequestData.controllerId()). - setIsKRaftController(originalRequestData.isKRaftController). - setType(originalRequestData.`type`()). - setControllerEpoch(originalRequestData.controllerEpoch()). - setBrokerEpoch(originalRequestData.brokerEpoch()). - setTopicStates(newTopicStates). - setLiveBrokers(originalRequestData.liveBrokers()) - updateMetadataRequest = new UpdateMetadataRequest(newData, updateMetadataRequest.version()) - } - } - - val aliveBrokers = new mutable.LongMap[Broker](metadataSnapshot.aliveBrokers.size) - val aliveNodes = new mutable.LongMap[collection.Map[ListenerName, Node]](metadataSnapshot.aliveNodes.size) - val controllerIdOpt: Option[CachedControllerId] = updateMetadataRequest.controllerId match { - case id if id < 0 => None - case id => - if (updateMetadataRequest.isKRaftController) - Some(KRaftCachedControllerId(id)) - else - Some(ZkCachedControllerId(id)) - } - - updateMetadataRequest.liveBrokers.forEach { broker => - // `aliveNodes` is a hot path for metadata requests for large clusters, so we use java.util.HashMap which - // is a bit faster than scala.collection.mutable.HashMap. When we drop support for Scala 2.10, we could - // move to `AnyRefMap`, which has comparable performance. 
- val nodes = new java.util.HashMap[ListenerName, Node] - val endPoints = new mutable.ArrayBuffer[EndPoint] - broker.endpoints.forEach { ep => - val listenerName = new ListenerName(ep.listener) - endPoints += new EndPoint(ep.host, ep.port, listenerName, SecurityProtocol.forId(ep.securityProtocol)) - nodes.put(listenerName, new Node(broker.id, ep.host, ep.port, broker.rack())) - } - aliveBrokers(broker.id) = Broker(broker.id, endPoints, Option(broker.rack)) - aliveNodes(broker.id) = nodes.asScala - } - aliveNodes.get(brokerId).foreach { listenerMap => - val listeners = listenerMap.keySet - if (!aliveNodes.values.forall(_.keySet == listeners)) - error(s"Listeners are not identical across brokers: $aliveNodes") - } - - val topicIds = mutable.Map.empty[String, Uuid] - topicIds ++= metadataSnapshot.topicIds - val (newTopicIds, newZeroIds) = updateMetadataRequest.topicStates().asScala - .map(topicState => (topicState.topicName(), topicState.topicId())) - .partition { case (_, topicId) => topicId != Uuid.ZERO_UUID } - newZeroIds.foreach { case (zeroIdTopic, _) => topicIds.remove(zeroIdTopic) } - topicIds ++= newTopicIds.toMap - - val deletedPartitions = new java.util.LinkedHashSet[TopicPartition] - if (!updateMetadataRequest.partitionStates.iterator.hasNext) { - metadataSnapshot = MetadataSnapshot(metadataSnapshot.partitionStates, topicIds.toMap, - controllerIdOpt, aliveBrokers, aliveNodes) - } else { - //since kafka may do partial metadata updates, we start by copying the previous state - val partitionStates = new mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]](metadataSnapshot.partitionStates.size) - metadataSnapshot.partitionStates.foreachEntry { (topic, oldPartitionStates) => - val copy = new mutable.LongMap[UpdateMetadataPartitionState](oldPartitionStates.size) - copy ++= oldPartitionStates - partitionStates(topic) = copy - } - - val traceEnabled = stateChangeLogger.isTraceEnabled - val controllerId = updateMetadataRequest.controllerId - val controllerEpoch = updateMetadataRequest.controllerEpoch - val newStates = updateMetadataRequest.partitionStates.asScala - newStates.foreach { state => - // per-partition logging here can be very expensive due going through all partitions in the cluster - val tp = new TopicPartition(state.topicName, state.partitionIndex) - if (state.leader == LeaderAndIsr.LEADER_DURING_DELETE) { - removePartitionInfo(partitionStates, topicIds, tp.topic, tp.partition) - if (traceEnabled) - stateChangeLogger.trace(s"Deleted partition $tp from metadata cache in response to UpdateMetadata " + - s"request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId") - deletedPartitions.add(tp) - } else { - addOrUpdatePartitionInfo(partitionStates, tp.topic, tp.partition, state) - deletedPartitions.remove(tp) - if (traceEnabled) - stateChangeLogger.trace(s"Cached leader info $state for partition $tp in response to " + - s"UpdateMetadata request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId") - } - } - val cachedPartitionsCount = newStates.size - deletedPartitions.size - stateChangeLogger.info(s"Add $cachedPartitionsCount partitions and deleted ${deletedPartitions.size} partitions from metadata cache " + - s"in response to UpdateMetadata request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId") - - metadataSnapshot = MetadataSnapshot(partitionStates, topicIds.toMap, controllerIdOpt, aliveBrokers, aliveNodes) - } - 
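// Editor's illustrative sketch (hypothetical types): the shape of the full-request handling
// earlier in this method -- topics the cache knows about but the full UMR omits become
// deletion entries placed in front of the request's own topic states, and the original list
// is never mutated; a new list is built only when something had to be added.
object FullUpdateSketch {
  final case class TopicState(name: String, delete: Boolean = false)

  def withImpliedDeletions(cachedTopics: Set[String], requested: List[TopicState]): List[TopicState] = {
    val requestedNames = requested.map(_.name).toSet
    val implied = (cachedTopics -- requestedNames).toList.sorted.map(name => TopicState(name, delete = true))
    if (implied.isEmpty) requested // nothing missing: reuse the input untouched
    else implied ::: requested     // deletions first, then the original states
  }
}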
deletedPartitions.asScala.toSeq - } - } - - def contains(topic: String): Boolean = { - metadataSnapshot.partitionStates.contains(topic) - } - - def contains(tp: TopicPartition): Boolean = getPartitionInfo(tp.topic, tp.partition).isDefined - - private def removePartitionInfo(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]], - topicIds: mutable.Map[String, Uuid], topic: String, partitionId: Int): Boolean = { - partitionStates.get(topic).exists { infos => - infos.remove(partitionId) - if (infos.isEmpty) { - partitionStates.remove(topic) - topicIds.remove(topic) - } - true - } - } - - override def metadataVersion(): MetadataVersion = metadataVersion - - override def features(): FinalizedFeatures = _features match { - case Some(features) => features - case None => new FinalizedFeatures(metadataVersion, - Collections.emptyMap(), - ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, - false) - } - - /** - * Updates the cache to the latestFeatures, and updates the existing epoch to latestEpoch. - * Expects that the latestEpoch should be always greater than the existing epoch (when the - * existing epoch is defined). - * - * @param latestFeatures the latest finalized features to be set in the cache - * @param latestEpoch the latest epoch value to be set in the cache - * - * @throws FeatureCacheUpdateException if the cache update operation fails - * due to invalid parameters or incompatibilities with the broker's - * supported features. In such a case, the existing cache contents are - * not modified. - */ - def updateFeaturesOrThrow(latestFeatures: Map[String, Short], latestEpoch: Long): Unit = { - val latest = new FinalizedFeatures(metadataVersion, - latestFeatures.map(kv => (kv._1, kv._2.asInstanceOf[java.lang.Short])).asJava, - latestEpoch, - false) - val existing = _features - if (existing.isDefined && existing.get.finalizedFeaturesEpoch() > latest.finalizedFeaturesEpoch()) { - val errorMsg = s"FinalizedFeatureCache update failed due to invalid epoch in new $latest." + - s" The existing cache contents are $existing." - throw new FeatureCacheUpdateException(errorMsg) - } else { - val incompatibleFeatures = brokerFeatures.incompatibleFeatures( - latest.finalizedFeatures().asScala.map(kv => (kv._1, kv._2.toShort: java.lang.Short)).toMap.asJava) - if (!incompatibleFeatures.isEmpty) { - val errorMsg = "FinalizedFeatureCache update failed since feature compatibility" + - s" checks failed! Supported ${brokerFeatures.supportedFeatures} has incompatibilities" + - s" with the latest $latest." - throw new FeatureCacheUpdateException(errorMsg) - } else { - val logMsg = s"Updated cache from existing $existing to latest $latest." - inLock(featureLock) { - _features = Some(latest) - featureCond.signalAll() - } - info(logMsg) - } - } - } - - /** - * Clears all existing finalized features and epoch from the cache. - */ - def clearFeatures(): Unit = { - inLock(featureLock) { - _features = None - featureCond.signalAll() - } - } - - /** - * Waits no more than timeoutMs for the cache's feature epoch to reach an epoch >= minExpectedEpoch. - * - * @param minExpectedEpoch the minimum expected epoch to be reached by the cache - * (should be >= 0) - * @param timeoutMs the timeout (in milli seconds) - * - * @throws TimeoutException if the cache's epoch has not reached at least - * minExpectedEpoch within timeoutMs. 
- */ - def waitUntilFeatureEpochOrThrow(minExpectedEpoch: Long, timeoutMs: Long): Unit = { - if (minExpectedEpoch < 0L) { - throw new IllegalArgumentException( - s"Expected minExpectedEpoch >= 0, but $minExpectedEpoch was provided.") - } - - if (timeoutMs < 0L) { - throw new IllegalArgumentException(s"Expected timeoutMs >= 0, but $timeoutMs was provided.") - } - val waitEndTimeNanos = System.nanoTime() + (timeoutMs * 1000000) - inLock(featureLock) { - while (!(_features.isDefined && _features.get.finalizedFeaturesEpoch() >= minExpectedEpoch)) { - val nowNanos = System.nanoTime() - if (nowNanos > waitEndTimeNanos) { - throw new TimeoutException( - s"Timed out after waiting for ${timeoutMs}ms for required condition to be met." + - s" Current epoch: ${_features.map(fe => fe.finalizedFeaturesEpoch()).getOrElse("")}.") - } - val sleepTimeMs = max(1L, (waitEndTimeNanos - nowNanos) / 1000000) - featureCond.await(sleepTimeMs, TimeUnit.MILLISECONDS) - } - } - } - - override def getFeatureOption: Option[FinalizedFeatures] = _features -} diff --git a/core/src/main/scala/kafka/tools/DumpLogSegments.scala b/core/src/main/scala/kafka/tools/DumpLogSegments.scala index 2b846b4c1362e..7b7b765c930f0 100755 --- a/core/src/main/scala/kafka/tools/DumpLogSegments.scala +++ b/core/src/main/scala/kafka/tools/DumpLogSegments.scala @@ -21,7 +21,6 @@ import com.fasterxml.jackson.databind.JsonNode import java.io._ import com.fasterxml.jackson.databind.node.{IntNode, JsonNodeFactory, ObjectNode, TextNode} -import kafka.coordinator.transaction.TransactionLog import kafka.log._ import kafka.utils.CoreUtils import org.apache.kafka.clients.consumer.internals.ConsumerProtocol @@ -34,14 +33,16 @@ import org.apache.kafka.common.message.SnapshotFooterRecordJsonConverter import org.apache.kafka.common.message.SnapshotHeaderRecordJsonConverter import org.apache.kafka.common.message.VotersRecordJsonConverter import org.apache.kafka.common.metadata.{MetadataJsonConverters, MetadataRecordType} -import org.apache.kafka.common.protocol.{ByteBufferAccessor, Message} +import org.apache.kafka.common.protocol.{ApiMessage, ByteBufferAccessor, Message} import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils -import org.apache.kafka.coordinator.group.generated.{ConsumerGroupCurrentMemberAssignmentKey, ConsumerGroupCurrentMemberAssignmentKeyJsonConverter, ConsumerGroupCurrentMemberAssignmentValue, ConsumerGroupCurrentMemberAssignmentValueJsonConverter, ConsumerGroupMemberMetadataKey, ConsumerGroupMemberMetadataKeyJsonConverter, ConsumerGroupMemberMetadataValue, ConsumerGroupMemberMetadataValueJsonConverter, ConsumerGroupMetadataKey, ConsumerGroupMetadataKeyJsonConverter, ConsumerGroupMetadataValue, ConsumerGroupMetadataValueJsonConverter, ConsumerGroupPartitionMetadataKey, ConsumerGroupPartitionMetadataKeyJsonConverter, ConsumerGroupPartitionMetadataValue, ConsumerGroupPartitionMetadataValueJsonConverter, ConsumerGroupTargetAssignmentMemberKey, ConsumerGroupTargetAssignmentMemberKeyJsonConverter, ConsumerGroupTargetAssignmentMemberValue, ConsumerGroupTargetAssignmentMemberValueJsonConverter, ConsumerGroupTargetAssignmentMetadataKey, ConsumerGroupTargetAssignmentMetadataKeyJsonConverter, ConsumerGroupTargetAssignmentMetadataValue, ConsumerGroupTargetAssignmentMetadataValueJsonConverter, GroupMetadataKey, GroupMetadataKeyJsonConverter, GroupMetadataValue, GroupMetadataValueJsonConverter, OffsetCommitKey, OffsetCommitKeyJsonConverter, OffsetCommitValue, OffsetCommitValueJsonConverter, 
ShareGroupCurrentMemberAssignmentKey, ShareGroupCurrentMemberAssignmentKeyJsonConverter, ShareGroupCurrentMemberAssignmentValue, ShareGroupCurrentMemberAssignmentValueJsonConverter, ShareGroupMemberMetadataKey, ShareGroupMemberMetadataKeyJsonConverter, ShareGroupMemberMetadataValue, ShareGroupMemberMetadataValueJsonConverter, ShareGroupMetadataKey, ShareGroupMetadataKeyJsonConverter, ShareGroupMetadataValue, ShareGroupMetadataValueJsonConverter, ShareGroupPartitionMetadataKey, ShareGroupPartitionMetadataKeyJsonConverter, ShareGroupPartitionMetadataValue, ShareGroupPartitionMetadataValueJsonConverter, ShareGroupStatePartitionMetadataKey, ShareGroupStatePartitionMetadataKeyJsonConverter, ShareGroupStatePartitionMetadataValue, ShareGroupStatePartitionMetadataValueJsonConverter, ShareGroupTargetAssignmentMemberKey, ShareGroupTargetAssignmentMemberKeyJsonConverter, ShareGroupTargetAssignmentMemberValue, ShareGroupTargetAssignmentMemberValueJsonConverter, ShareGroupTargetAssignmentMetadataKey, ShareGroupTargetAssignmentMetadataKeyJsonConverter, ShareGroupTargetAssignmentMetadataValue, ShareGroupTargetAssignmentMetadataValueJsonConverter} +import org.apache.kafka.coordinator.group.generated.{CoordinatorRecordJsonConverters => GroupCoordinatorRecordJsonConverters, CoordinatorRecordType => GroupCoordinatorRecordType, GroupMetadataValue, GroupMetadataValueJsonConverter} import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader.UnknownRecordTypeException import org.apache.kafka.coordinator.group.GroupCoordinatorRecordSerde import org.apache.kafka.coordinator.share.ShareCoordinatorRecordSerde -import org.apache.kafka.coordinator.share.generated.{ShareSnapshotKey, ShareSnapshotKeyJsonConverter, ShareSnapshotValue, ShareSnapshotValueJsonConverter, ShareUpdateKey, ShareUpdateKeyJsonConverter, ShareUpdateValue, ShareUpdateValueJsonConverter} +import org.apache.kafka.coordinator.share.generated.{CoordinatorRecordJsonConverters => ShareCoordinatorRecordJsonConverters} +import org.apache.kafka.coordinator.transaction.TransactionCoordinatorRecordSerde +import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogKeyJsonConverter, TransactionLogValue, TransactionLogValueJsonConverter} import org.apache.kafka.metadata.MetadataRecordSerde import org.apache.kafka.metadata.bootstrap.BootstrapDirectory import org.apache.kafka.snapshot.Snapshots @@ -422,44 +423,10 @@ object DumpLogSegments { class OffsetsMessageParser extends MessageParser[String, String] { private val serde = new GroupCoordinatorRecordSerde() - private def prepareKey(message: Message, version: Short): String = { - val messageAsJson = message match { - case m: OffsetCommitKey => - OffsetCommitKeyJsonConverter.write(m, version) - case m: GroupMetadataKey => - GroupMetadataKeyJsonConverter.write(m, version) - case m: ConsumerGroupMetadataKey => - ConsumerGroupMetadataKeyJsonConverter.write(m, version) - case m: ConsumerGroupPartitionMetadataKey => - ConsumerGroupPartitionMetadataKeyJsonConverter.write(m, version) - case m: ConsumerGroupMemberMetadataKey => - ConsumerGroupMemberMetadataKeyJsonConverter.write(m, version) - case m: ConsumerGroupTargetAssignmentMetadataKey => - ConsumerGroupTargetAssignmentMetadataKeyJsonConverter.write(m, version) - case m: ConsumerGroupTargetAssignmentMemberKey => - ConsumerGroupTargetAssignmentMemberKeyJsonConverter.write(m, version) - case m: ConsumerGroupCurrentMemberAssignmentKey => - ConsumerGroupCurrentMemberAssignmentKeyJsonConverter.write(m, version) - case m: 
ShareGroupMetadataKey => - ShareGroupMetadataKeyJsonConverter.write(m, version) - case m: ShareGroupPartitionMetadataKey => - ShareGroupPartitionMetadataKeyJsonConverter.write(m, version) - case m: ShareGroupMemberMetadataKey => - ShareGroupMemberMetadataKeyJsonConverter.write(m, version) - case m: ShareGroupTargetAssignmentMetadataKey => - ShareGroupTargetAssignmentMetadataKeyJsonConverter.write(m, version) - case m: ShareGroupTargetAssignmentMemberKey => - ShareGroupTargetAssignmentMemberKeyJsonConverter.write(m, version) - case m: ShareGroupCurrentMemberAssignmentKey => - ShareGroupCurrentMemberAssignmentKeyJsonConverter.write(m, version) - case m: ShareGroupStatePartitionMetadataKey => - ShareGroupStatePartitionMetadataKeyJsonConverter.write(m, version) - case _ => throw new UnknownRecordTypeException(version) - } - + private def prepareKey(message: ApiMessage): String = { val json = new ObjectNode(JsonNodeFactory.instance) - json.set("type", new TextNode(version.toString)) - json.set("data", messageAsJson) + json.set("type", new TextNode(message.apiKey.toString)) + json.set("data", GroupCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message)) json.toString } @@ -516,39 +483,11 @@ object DumpLogSegments { json } - private def prepareValue(message: Message, version: Short): String = { - val messageAsJson = message match { - case m: OffsetCommitValue => - OffsetCommitValueJsonConverter.write(m, version) - case m: GroupMetadataValue => - prepareGroupMetadataValue(m, version) - case m: ConsumerGroupMetadataValue => - ConsumerGroupMetadataValueJsonConverter.write(m, version) - case m: ConsumerGroupPartitionMetadataValue => - ConsumerGroupPartitionMetadataValueJsonConverter.write(m, version) - case m: ConsumerGroupMemberMetadataValue => - ConsumerGroupMemberMetadataValueJsonConverter.write(m, version) - case m: ConsumerGroupTargetAssignmentMetadataValue => - ConsumerGroupTargetAssignmentMetadataValueJsonConverter.write(m, version) - case m: ConsumerGroupTargetAssignmentMemberValue => - ConsumerGroupTargetAssignmentMemberValueJsonConverter.write(m, version) - case m: ConsumerGroupCurrentMemberAssignmentValue => - ConsumerGroupCurrentMemberAssignmentValueJsonConverter.write(m, version) - case m: ShareGroupMetadataValue => - ShareGroupMetadataValueJsonConverter.write(m, version) - case m: ShareGroupPartitionMetadataValue => - ShareGroupPartitionMetadataValueJsonConverter.write(m, version) - case m: ShareGroupMemberMetadataValue => - ShareGroupMemberMetadataValueJsonConverter.write(m, version) - case m: ShareGroupTargetAssignmentMetadataValue => - ShareGroupTargetAssignmentMetadataValueJsonConverter.write(m, version) - case m: ShareGroupTargetAssignmentMemberValue => - ShareGroupTargetAssignmentMemberValueJsonConverter.write(m, version) - case m: ShareGroupCurrentMemberAssignmentValue => - ShareGroupCurrentMemberAssignmentValueJsonConverter.write(m, version) - case m: ShareGroupStatePartitionMetadataValue => - ShareGroupStatePartitionMetadataValueJsonConverter.write(m, version) - case _ => throw new IllegalStateException(s"Message value ${message.getClass.getSimpleName} is not supported.") + private def prepareValue(message: ApiMessage, version: Short): String = { + val messageAsJson = if (message.apiKey == GroupCoordinatorRecordType.GROUP_METADATA.id) { + prepareGroupMetadataValue(message.asInstanceOf[GroupMetadataValue], version) + } else { + GroupCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version) } val json = new ObjectNode(JsonNodeFactory.instance) @@ -565,7 
+504,7 @@ object DumpLogSegments { try { val r = serde.deserialize(record.key, record.value) ( - Some(prepareKey(r.key.message, r.key.version)), + Some(prepareKey(r.key.message)), Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) ) } catch { @@ -584,9 +523,61 @@ object DumpLogSegments { } } - private class TransactionLogMessageParser extends MessageParser[String, String] { + // Package private for testing. + class TransactionLogMessageParser extends MessageParser[String, String] { + private val serde = new TransactionCoordinatorRecordSerde() + + private def prepareKey(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: TransactionLogKey => + TransactionLogKeyJsonConverter.write(m, version) + case _ => throw new UnknownRecordTypeException(version) + } + + val json = new ObjectNode(JsonNodeFactory.instance) + json.set("type", new TextNode(version.toString)) + json.set("data", messageAsJson) + json.toString + } + + private def prepareValue(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: TransactionLogValue => + TransactionLogValueJsonConverter.write(m, version) + case _ => throw new UnknownRecordTypeException(version) + } + + val json = new ObjectNode(JsonNodeFactory.instance) + json.set("version", new TextNode(version.toString)) + json.set("data", messageAsJson) + json.toString + } + override def parse(record: Record): (Option[String], Option[String]) = { - TransactionLog.formatRecordKeyAndValue(record) + if (!record.hasKey) + throw new RuntimeException(s"Failed to decode message at offset ${record.offset} using offset " + + "transaction-log decoder (message had a missing key)") + + try { + val r = serde.deserialize(record.key, record.value) + ( + Some(prepareKey(r.key.message, r.key.version)), + Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) + ) + } catch { + case e: UnknownRecordTypeException => + ( + Some(s"Unknown record type ${e.unknownType} at offset ${record.offset}, skipping."), + None + ) + + case e: Throwable => + e.printStackTrace() + ( + Some(s"Error at offset ${record.offset}, skipping. 
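Editor's note (not part of the patch): the refactored coordinator parsers above all emit the same small JSON envelope around the generated converters. A minimal sketch of that shape, assuming the `CoordinatorRecordJsonConverters` facade imported earlier in this diff and Kafka's `ApiMessage` interface; the object name is made up.

```scala
import com.fasterxml.jackson.databind.node.{JsonNodeFactory, ObjectNode, TextNode}
import org.apache.kafka.common.protocol.ApiMessage
import org.apache.kafka.coordinator.group.generated.{CoordinatorRecordJsonConverters => GroupCoordinatorRecordJsonConverters}

// Hypothetical helper mirroring the prepareKey/prepareValue pattern in the hunks above.
object CoordinatorJsonEnvelopeSketch {
  // Key envelope: "type" carries the record's apiKey (the record type id).
  def keyJson(message: ApiMessage): String = {
    val json = new ObjectNode(JsonNodeFactory.instance)
    json.set("type", new TextNode(message.apiKey.toString))
    json.set("data", GroupCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message))
    json.toString
  }

  // Value envelope: "version" carries the schema version the value was written with.
  def valueJson(message: ApiMessage, version: Short): String = {
    val json = new ObjectNode(JsonNodeFactory.instance)
    json.set("version", new TextNode(version.toString))
    json.set("data", GroupCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version))
    json.toString
  }
}
```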
${e.getMessage}"), + None + ) + } } } @@ -634,34 +625,17 @@ object DumpLogSegments { class ShareGroupStateMessageParser extends MessageParser[String, String] { private val serde = new ShareCoordinatorRecordSerde() - private def prepareKey(message: Message, version: Short): String = { - val messageAsJson = message match { - case m: ShareSnapshotKey => - ShareSnapshotKeyJsonConverter.write(m, version) - case m: ShareUpdateKey => - ShareUpdateKeyJsonConverter.write(m, version) - case _ => throw new UnknownRecordTypeException(version) - } - - jsonString(messageAsJson, version) - } - - private def prepareValue(message: Message, version: Short): String = { - val messageAsJson = message match { - case m: ShareSnapshotValue => - ShareSnapshotValueJsonConverter.write(m, version) - case m: ShareUpdateValue => - ShareUpdateValueJsonConverter.write(m, version) - case _ => throw new IllegalStateException(s"Message value ${message.getClass.getSimpleName} is not supported.") - } - - jsonString(messageAsJson, version) + private def prepareKey(message: ApiMessage): String = { + val json = new ObjectNode(JsonNodeFactory.instance) + json.set("type", new TextNode(message.apiKey.toString)) + json.set("data", ShareCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message)) + json.toString } - private def jsonString(jsonNode: JsonNode, version: Short): String = { + private def prepareValue(message: ApiMessage, version: Short): String = { val json = new ObjectNode(JsonNodeFactory.instance) - json.set("type", new TextNode(version.toString)) - json.set("data", jsonNode) + json.set("version", new TextNode(version.toString)) + json.set("data", ShareCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version)) json.toString } @@ -673,7 +647,7 @@ object DumpLogSegments { try { val r = serde.deserialize(record.key, record.value) ( - Some(prepareKey(r.key.message, r.key.version)), + Some(prepareKey(r.key.message)), Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) ) } catch { diff --git a/core/src/main/scala/kafka/tools/StorageTool.scala b/core/src/main/scala/kafka/tools/StorageTool.scala index 29734bd8d8d20..253a6dc94bb42 100644 --- a/core/src/main/scala/kafka/tools/StorageTool.scala +++ b/core/src/main/scala/kafka/tools/StorageTool.scala @@ -27,7 +27,7 @@ import net.sourceforge.argparse4j.inf.{ArgumentParserException, Namespace, Subpa import net.sourceforge.argparse4j.internal.HelpScreenException import org.apache.kafka.common.Uuid import org.apache.kafka.common.utils.{Exit, Utils} -import org.apache.kafka.server.common.{Features, MetadataVersion} +import org.apache.kafka.server.common.{Feature, MetadataVersion} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} import org.apache.kafka.metadata.storage.{Formatter, FormatterException} import org.apache.kafka.raft.{DynamicVoters, QuorumConfig} @@ -88,11 +88,11 @@ object StorageTool extends Logging { 0 case "version-mapping" => - runVersionMappingCommand(namespace, printStream, Features.PRODUCTION_FEATURES) + runVersionMappingCommand(namespace, printStream, Feature.PRODUCTION_FEATURES) 0 case "feature-dependencies" => - runFeatureDependenciesCommand(namespace, printStream, Features.PRODUCTION_FEATURES) + runFeatureDependenciesCommand(namespace, printStream, Feature.PRODUCTION_FEATURES) 0 case "random-uuid" => @@ -143,14 +143,14 @@ object StorageTool extends Logging { if (namespace.getBoolean("standalone")) { 
formatter.setInitialControllers(createStandaloneDynamicVoters(config)) } - if (!namespace.getBoolean("no_initial_controllers")) { + if (namespace.getBoolean("no_initial_controllers")) { + formatter.setNoInitialControllersFlag(true) + } else { if (config.processRoles.contains(ProcessRole.ControllerRole)) { - if (config.quorumConfig.voters().isEmpty) { - if (formatter.initialVoters().isEmpty()) { - throw new TerseFailure("Because " + QuorumConfig.QUORUM_VOTERS_CONFIG + - " is not set on this controller, you must specify one of the following: " + - "--standalone, --initial-controllers, or --no-initial-controllers."); - } + if (config.quorumConfig.voters().isEmpty && formatter.initialVoters().isEmpty) { + throw new TerseFailure("Because " + QuorumConfig.QUORUM_VOTERS_CONFIG + + " is not set on this controller, you must specify one of the following: " + + "--standalone, --initial-controllers, or --no-initial-controllers."); } } } @@ -171,7 +171,7 @@ object StorageTool extends Logging { def runVersionMappingCommand( namespace: Namespace, printStream: PrintStream, - validFeatures: java.util.List[Features] + validFeatures: java.util.List[Feature] ): Unit = { val releaseVersion = Option(namespace.getString("release_version")).getOrElse(MetadataVersion.LATEST_PRODUCTION.toString) try { @@ -181,7 +181,7 @@ object StorageTool extends Logging { printStream.print(f"metadata.version=$metadataVersionLevel%d ($releaseVersion%s)%n") for (feature <- validFeatures.asScala) { - val featureLevel = feature.defaultValue(metadataVersion) + val featureLevel = feature.defaultLevel(metadataVersion) printStream.print(f"${feature.featureName}%s=$featureLevel%d%n") } } catch { @@ -194,7 +194,7 @@ object StorageTool extends Logging { def runFeatureDependenciesCommand( namespace: Namespace, printStream: PrintStream, - validFeatures: java.util.List[Features] + validFeatures: java.util.List[Feature] ): Unit = { val featureArgs = Option(namespace.getList[String]("feature")).map(_.asScala.toList).getOrElse(List.empty) @@ -314,7 +314,7 @@ object StorageTool extends Logging { formatParser.addArgument("--release-version", "-r") .action(store()) .help(s"The release version to use for the initial feature settings. The minimum is " + - s"${MetadataVersion.IBP_3_0_IV0}; the default is ${MetadataVersion.LATEST_PRODUCTION}") + s"${MetadataVersion.IBP_3_0_IV1}; the default is ${MetadataVersion.LATEST_PRODUCTION}") formatParser.addArgument("--feature", "-f") .help("The setting to use for a specific feature, in feature=level format. For example: `kraft.version=1`.") @@ -347,7 +347,7 @@ object StorageTool extends Logging { versionMappingParser.addArgument("--release-version", "-r") .action(store()) .help(s"The release version to use for the corresponding feature mapping. The minimum is " + - s"${MetadataVersion.IBP_3_0_IV0}; the default is ${MetadataVersion.LATEST_PRODUCTION}") + s"${MetadataVersion.IBP_3_0_IV1}; the default is ${MetadataVersion.LATEST_PRODUCTION}") } private def addFeatureDependenciesParser(subparsers: Subparsers): Unit = { diff --git a/core/src/main/scala/kafka/utils/DelayedItem.scala b/core/src/main/scala/kafka/utils/DelayedItem.scala deleted file mode 100644 index cfb87719a0058..0000000000000 --- a/core/src/main/scala/kafka/utils/DelayedItem.scala +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
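Editor's note (not part of the patch): the reworked `format` branch above can be read as a small decision function. A hedged restatement with invented names; `controller.quorum.voters` is the value of `QuorumConfig.QUORUM_VOTERS_CONFIG` used in the error message.

```scala
// Illustrative only: mirrors the --no-initial-controllers handling above.
def validateControllerQuorumFlags(
  noInitialControllers: Boolean,   // --no-initial-controllers was passed
  isController: Boolean,           // process.roles contains "controller"
  quorumVotersConfigured: Boolean, // controller.quorum.voters is set
  initialVotersProvided: Boolean   // --standalone or --initial-controllers supplied voters
): Either[String, Unit] =
  if (noInitialControllers)
    Right(()) // corresponds to formatter.setNoInitialControllersFlag(true)
  else if (isController && !quorumVotersConfigured && !initialVotersProvided)
    Left("Because controller.quorum.voters is not set on this controller, you must specify one of " +
      "the following: --standalone, --initial-controllers, or --no-initial-controllers.")
  else
    Right(())
```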
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import java.util.concurrent._ - -import org.apache.kafka.common.utils.Time - -import scala.math._ - -class DelayedItem(val delayMs: Long) extends Delayed with Logging { - - private val dueMs = Time.SYSTEM.milliseconds + delayMs - - def this(delay: Long, unit: TimeUnit) = this(unit.toMillis(delay)) - - /** - * The remaining delay time - */ - def getDelay(unit: TimeUnit): Long = { - unit.convert(max(dueMs - Time.SYSTEM.milliseconds, 0), TimeUnit.MILLISECONDS) - } - - def compareTo(d: Delayed): Int = { - val other = d.asInstanceOf[DelayedItem] - java.lang.Long.compare(dueMs, other.dueMs) - } - -} diff --git a/core/src/main/scala/kafka/utils/Log4jController.scala b/core/src/main/scala/kafka/utils/Log4jController.scala index 0d54c74e07542..4bc022dadfeba 100755 --- a/core/src/main/scala/kafka/utils/Log4jController.scala +++ b/core/src/main/scala/kafka/utils/Log4jController.scala @@ -17,83 +17,90 @@ package kafka.utils -import java.util -import java.util.Locale - import org.apache.kafka.common.utils.Utils -import org.apache.log4j.{Level, LogManager, Logger} +import org.apache.logging.log4j.core.LoggerContext +import org.apache.logging.log4j.core.config.Configurator +import org.apache.logging.log4j.{Level, LogManager} -import scala.collection.mutable +import java.util +import java.util.Locale import scala.jdk.CollectionConverters._ object Log4jController { - val ROOT_LOGGER = "root" - private def resolveLevel(logger: Logger): String = { - var name = logger.getName - var level = logger.getLevel - while (level == null) { - val index = name.lastIndexOf(".") - if (index > 0) { - name = name.substring(0, index) - val ancestor = existingLogger(name) - if (ancestor != null) { - level = ancestor.getLevel - } - } else { - level = existingLogger(ROOT_LOGGER).getLevel - } - } - level.toString - } + /** + * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. + * + * The root logger's name is changed in log4j2 to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]) but for backward- + * compatibility. Kafka keeps its original root logger name. It is why here is a dedicated definition for the root logger name. + */ + val ROOT_LOGGER = "root" /** - * Returns a map of the log4j loggers and their assigned log level. - * If a logger does not have a log level assigned, we return the root logger's log level - */ - def loggers: mutable.Map[String, String] = { - val logs = new mutable.HashMap[String, String]() - val rootLoggerLvl = existingLogger(ROOT_LOGGER).getLevel.toString - logs.put(ROOT_LOGGER, rootLoggerLvl) - - val loggers = LogManager.getCurrentLoggers - while (loggers.hasMoreElements) { - val logger = loggers.nextElement().asInstanceOf[Logger] - if (logger != null) { - logs.put(logger.getName, resolveLevel(logger)) - } - } - logs + * Returns a map of the log4j loggers and their assigned log level. 
+ * If a logger does not have a log level assigned, we return the log level of the first ancestor with a level configured. + */ + def loggers: Map[String, String] = { + val logContext = LogManager.getContext(false).asInstanceOf[LoggerContext] + val rootLoggerLevel = logContext.getRootLogger.getLevel.toString + + // Loggers defined in the configuration + val configured = logContext.getConfiguration.getLoggers.asScala + .values + .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + // Loggers actually running + val actual = logContext.getLoggers.asScala + .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + (configured ++ actual) + (ROOT_LOGGER -> rootLoggerLevel) } /** - * Sets the log level of a particular logger - */ + * Sets the log level of a particular logger. If the given logLevel is not an available log4j level + * (i.e., one of OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL) it falls back to DEBUG. + * + * @see [[Level.toLevel]] + */ def logLevel(loggerName: String, logLevel: String): Boolean = { - val log = existingLogger(loggerName) - if (!Utils.isBlank(loggerName) && !Utils.isBlank(logLevel) && log != null) { - log.setLevel(Level.toLevel(logLevel.toUpperCase(Locale.ROOT))) + if (Utils.isBlank(loggerName) || Utils.isBlank(logLevel)) + return false + + val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT)) + + if (loggerName == ROOT_LOGGER) { + Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, level) true + } else { + if (loggerExists(loggerName) && level != null) { + Configurator.setAllLevels(loggerName, level) + true + } + else false } - else false } def unsetLogLevel(loggerName: String): Boolean = { - val log = existingLogger(loggerName) - if (!Utils.isBlank(loggerName) && log != null) { - log.setLevel(null) + if (loggerName == ROOT_LOGGER) { + Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, null) true + } else { + if (loggerExists(loggerName)) { + Configurator.setAllLevels(loggerName, null) + true + } + else false } - else false } - def loggerExists(loggerName: String): Boolean = existingLogger(loggerName) != null - - private def existingLogger(loggerName: String) = - if (loggerName == ROOT_LOGGER) - LogManager.getRootLogger - else LogManager.exists(loggerName) + def loggerExists(loggerName: String): Boolean = loggers.contains(loggerName) } /** @@ -113,15 +120,7 @@ class Log4jController extends Log4jControllerMBean { def getLogLevel(loggerName: String): String = { - val log = Log4jController.existingLogger(loggerName) - if (log != null) { - val level = log.getLevel - if (level != null) - log.getLevel.toString - else - Log4jController.resolveLevel(log) - } - else "No such logger." + Log4jController.loggers.getOrElse(loggerName, "No such logger.") } def setLogLevel(loggerName: String, level: String): Boolean = Log4jController.logLevel(loggerName, level) diff --git a/core/src/main/scala/kafka/utils/ReplicationUtils.scala b/core/src/main/scala/kafka/utils/ReplicationUtils.scala deleted file mode 100644 index 8cb03f4553312..0000000000000 --- a/core/src/main/scala/kafka/utils/ReplicationUtils.scala +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
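Editor's note (not part of the patch): the Log4jController rewrite above swaps the log4j 1.x `Logger`/`LogManager` calls for log4j2's `Configurator` and core `LoggerContext`. A small self-contained sketch of those calls; the logger name is invented.

```scala
import org.apache.logging.log4j.{Level, LogManager}
import org.apache.logging.log4j.core.LoggerContext
import org.apache.logging.log4j.core.config.Configurator

object DynamicLogLevelSketch extends App {
  // Change the effective level of a logger and its children, as logLevel() now does.
  Configurator.setAllLevels("kafka.server.KafkaApis", Level.toLevel("DEBUG"))

  // Inspect the running LoggerContext, as loggers() now does.
  val ctx = LogManager.getContext(false).asInstanceOf[LoggerContext]
  println(s"root level: ${ctx.getRootLogger.getLevel}")
  ctx.getLoggers.forEach(l => println(s"${l.getName} -> ${l.getLevel}"))
}
```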
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import kafka.controller.LeaderIsrAndControllerEpoch -import kafka.zk._ -import org.apache.kafka.common.TopicPartition -import org.apache.kafka.metadata.LeaderAndIsr - -import scala.jdk.CollectionConverters._ - -object ReplicationUtils extends Logging { - - def updateLeaderAndIsr(zkClient: KafkaZkClient, partition: TopicPartition, newLeaderAndIsr: LeaderAndIsr, - controllerEpoch: Int): (Boolean, Int) = { - debug(s"Updated ISR for $partition to ${newLeaderAndIsr.isr.asScala.mkString(",")}") - val path = TopicPartitionStateZNode.path(partition) - val newLeaderData = TopicPartitionStateZNode.encode(LeaderIsrAndControllerEpoch(newLeaderAndIsr, controllerEpoch)) - // use the epoch of the controller that made the leadership decision, instead of the current controller epoch - val updatePersistentPath: (Boolean, Int) = zkClient.conditionalUpdatePath(path, newLeaderData, - newLeaderAndIsr.partitionEpoch, Some(checkLeaderAndIsrZkData)) - updatePersistentPath - } - - private def checkLeaderAndIsrZkData(zkClient: KafkaZkClient, path: String, expectedLeaderAndIsrInfo: Array[Byte]): (Boolean, Int) = { - try { - val (writtenLeaderOpt, writtenStat) = zkClient.getDataAndStat(path) - val expectedLeaderOpt = TopicPartitionStateZNode.decode(expectedLeaderAndIsrInfo, writtenStat) - val succeeded = writtenLeaderOpt.exists { writtenData => - val writtenLeaderOpt = TopicPartitionStateZNode.decode(writtenData, writtenStat) - (expectedLeaderOpt, writtenLeaderOpt) match { - case (Some(expectedLeader), Some(writtenLeader)) if expectedLeader == writtenLeader => true - case _ => false - } - } - if (succeeded) (true, writtenStat.getVersion) - else (false, -1) - } catch { - case _: Exception => (false, -1) - } - } - -} diff --git a/core/src/main/scala/kafka/utils/ToolsUtils.scala b/core/src/main/scala/kafka/utils/ToolsUtils.scala deleted file mode 100644 index 7831ee64d1e7d..0000000000000 --- a/core/src/main/scala/kafka/utils/ToolsUtils.scala +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ -package kafka.utils - -import joptsimple.OptionParser -import org.apache.kafka.server.util.CommandLineUtils - -object ToolsUtils { - /** - * This is a simple wrapper around `CommandLineUtils.printUsageAndExit`. - * It is needed for tools migration (KAFKA-14525), as there is no Java equivalent for return type `Nothing`. - * Can be removed once ZooKeeper related code are deleted. - * - * @param parser Command line options parser. - * @param message Error message. - */ - def printUsageAndExit(parser: OptionParser, message: String): Nothing = { - CommandLineUtils.printUsageAndExit(parser, message) - throw new AssertionError("printUsageAndExit should not return, but it did.") - } -} diff --git a/core/src/main/scala/kafka/utils/VersionInfo.scala b/core/src/main/scala/kafka/utils/VersionInfo.scala deleted file mode 100644 index 203488a64b5c2..0000000000000 --- a/core/src/main/scala/kafka/utils/VersionInfo.scala +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.utils - -import org.apache.kafka.common.utils.AppInfoParser - -object VersionInfo { - - def main(args: Array[String]): Unit = { - System.out.println(getVersionString) - System.exit(0) - } - - def getVersion: String = { - AppInfoParser.getVersion - } - - def getCommit: String = { - AppInfoParser.getCommitId - } - - def getVersionString: String = { - s"$getVersion (Commit:$getCommit)" - } -} diff --git a/core/src/main/scala/kafka/zk/AdminZkClient.scala b/core/src/main/scala/kafka/zk/AdminZkClient.scala deleted file mode 100644 index 290802c5d11f0..0000000000000 --- a/core/src/main/scala/kafka/zk/AdminZkClient.scala +++ /dev/null @@ -1,596 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ -package kafka.zk - -import java.util.{Collections, Optional, Properties} -import kafka.admin.RackAwareMode -import kafka.common.TopicAlreadyMarkedForDeletionException -import kafka.controller.ReplicaAssignment -import kafka.server.{DynamicConfig, KafkaConfig} -import kafka.utils._ -import org.apache.kafka.admin.{AdminUtils, BrokerMetadata} -import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.common.errors._ -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.server.common.AdminOperationException -import org.apache.kafka.server.config.{ConfigType, ZooKeeperInternals} -import org.apache.kafka.storage.internals.log.LogConfig -import org.apache.zookeeper.KeeperException.NodeExistsException - -import scala.jdk.CollectionConverters._ -import scala.collection.{Map, Seq} - -/** - * Provides admin related methods for interacting with ZooKeeper. - * - * This is an internal class and no compatibility guarantees are provided, - * see org.apache.kafka.clients.admin.AdminClient for publicly supported APIs. - */ -class AdminZkClient(zkClient: KafkaZkClient, - kafkaConfig: Option[KafkaConfig] = None) extends Logging { - - /** - * Creates the topic with given configuration - * @param topic topic name to create - * @param partitions Number of partitions to be set - * @param replicationFactor Replication factor - * @param topicConfig topic configs - * @param rackAwareMode rack aware mode for replica assignment - * @param usesTopicId Boolean indicating whether the topic ID will be created - */ - def createTopic(topic: String, - partitions: Int, - replicationFactor: Int, - topicConfig: Properties = new Properties, - rackAwareMode: RackAwareMode = RackAwareMode.Enforced, - usesTopicId: Boolean = false): Unit = { - val brokerMetadatas = getBrokerMetadatas(rackAwareMode).asJava - val replicaAssignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(brokerMetadatas, partitions, replicationFactor)) - createTopicWithAssignment(topic, topicConfig, replicaAssignment, usesTopicId = usesTopicId) - } - - /** - * Gets broker metadata list - * - * @param rackAwareMode rack aware mode for replica assignment - * @param brokerList The brokers to gather metadata about. - * @return The metadata for each broker that was found. - */ - def getBrokerMetadatas(rackAwareMode: RackAwareMode = RackAwareMode.Enforced, - brokerList: Option[Seq[Int]] = None): Seq[BrokerMetadata] = { - val allBrokers = zkClient.getAllBrokersInCluster - val brokers = brokerList.map(brokerIds => allBrokers.filter(b => brokerIds.contains(b.id))).getOrElse(allBrokers) - val brokersWithRack = brokers.filter(_.rack.nonEmpty) - if (rackAwareMode == RackAwareMode.Enforced && brokersWithRack.nonEmpty && brokersWithRack.size < brokers.size) { - throw new AdminOperationException("Not all brokers have rack information. Add --disable-rack-aware in command line" + - " to make replica assignment without rack information.") - } - val brokerMetadatas = rackAwareMode match { - case RackAwareMode.Disabled => brokers.map(broker => new BrokerMetadata(broker.id, Optional.empty())) - case RackAwareMode.Safe if brokersWithRack.size < brokers.size => - brokers.map(broker => new BrokerMetadata(broker.id, Optional.empty())) - case _ => brokers.map(broker => new BrokerMetadata(broker.id, Optional.ofNullable(broker.rack.orNull))) - } - brokerMetadatas.sortBy(_.id) - } - - /** - * Create topic and optionally validate its parameters. Note that this method is used by the - * TopicCommand as well. 
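Editor's note (not part of the patch): the deleted `createTopic` above delegates replica placement to `org.apache.kafka.admin.AdminUtils`, which is not removed by this change. A hedged usage sketch, assuming the `Collection`-based overload called in the diff; broker ids and racks are invented.

```scala
import java.util.Optional
import org.apache.kafka.admin.{AdminUtils, BrokerMetadata}
import scala.jdk.CollectionConverters._

object ReplicaPlacementSketch extends App {
  val brokers = Seq(
    new BrokerMetadata(0, Optional.of("rack-a")),
    new BrokerMetadata(1, Optional.of("rack-b")),
    new BrokerMetadata(2, Optional.of("rack-c"))
  ).asJava

  // 6 partitions, replication factor 2: returns partitionId -> replica broker ids.
  val assignment = AdminUtils.assignReplicasToBrokers(brokers, 6, 2)
  assignment.asScala.toSeq.sortBy(_._1.intValue).foreach { case (partition, replicas) =>
    println(s"partition $partition -> ${replicas.asScala.mkString(",")}")
  }
}
```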
- * - * @param topic The name of the topic - * @param config The config of the topic - * @param partitionReplicaAssignment The assignments of the topic - * @param validate Boolean indicating if parameters must be validated or not (true by default) - * @param usesTopicId Boolean indicating whether the topic ID will be created - */ - def createTopicWithAssignment(topic: String, - config: Properties, - partitionReplicaAssignment: Map[Int, Seq[Int]], - validate: Boolean = true, - usesTopicId: Boolean = false): Unit = { - if (validate) - validateTopicCreate(topic, partitionReplicaAssignment, config) - - info(s"Creating topic $topic with configuration $config and initial partition " + - s"assignment $partitionReplicaAssignment") - - // write out the config if there is any, this isn't transactional with the partition assignments - zkClient.setOrCreateEntityConfigs(ConfigType.TOPIC, topic, config) - - // create the partition assignment - writeTopicPartitionAssignment(topic, partitionReplicaAssignment.map { case (k, v) => k -> ReplicaAssignment(v) }, - isUpdate = false, usesTopicId) - } - - /** - * Validate topic creation parameters. Note that this method is indirectly used by the - * TopicCommand via the `createTopicWithAssignment` method. - * - * @param topic The name of the topic - * @param partitionReplicaAssignment The assignments of the topic - * @param config The config of the topic - */ - def validateTopicCreate(topic: String, - partitionReplicaAssignment: Map[Int, Seq[Int]], - config: Properties): Unit = { - Topic.validate(topic) - if (zkClient.isTopicMarkedForDeletion(topic)) { - throw new TopicExistsException(s"Topic '$topic' is marked for deletion.") - } - if (zkClient.topicExists(topic)) - throw new TopicExistsException(s"Topic '$topic' already exists.") - else if (Topic.hasCollisionChars(topic)) { - val allTopics = zkClient.getAllTopicsInCluster() - // check again in case the topic was created in the meantime, otherwise the - // topic could potentially collide with itself - if (allTopics.contains(topic)) - throw new TopicExistsException(s"Topic '$topic' already exists.") - val collidingTopics = allTopics.filter(Topic.hasCollision(topic, _)) - if (collidingTopics.nonEmpty) { - throw new InvalidTopicException(s"Topic '$topic' collides with existing topics: ${collidingTopics.mkString(", ")}") - } - } - - if (partitionReplicaAssignment.values.map(_.size).toSet.size != 1) - throw new InvalidReplicaAssignmentException("All partitions should have the same number of replicas") - - partitionReplicaAssignment.values.foreach(reps => - if (reps.size != reps.toSet.size) - throw new InvalidReplicaAssignmentException("Duplicate replica assignment found: " + partitionReplicaAssignment) - ) - - val partitionSize = partitionReplicaAssignment.size - val sequenceSum = partitionSize * (partitionSize - 1) / 2 - if (partitionReplicaAssignment.size != partitionReplicaAssignment.toSet.size || - partitionReplicaAssignment.keys.filter(_ >= 0).sum != sequenceSum) - throw new InvalidReplicaAssignmentException("partitions should be a consecutive 0-based integer sequence") - - LogConfig.validate(Collections.emptyMap(), config, - kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.isRemoteStorageSystemEnabled()), - true) - } - - private def writeTopicPartitionAssignment(topic: String, replicaAssignment: Map[Int, ReplicaAssignment], - isUpdate: Boolean, usesTopicId: Boolean = false): Unit = { - try { - val assignment = replicaAssignment.map { case 
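Editor's note (not part of the patch): the deleted `validateTopicCreate` above checks that the proposed partition ids form a consecutive 0-based sequence by comparing their sum with n*(n-1)/2, a total that only the set {0, ..., n-1} of distinct non-negative ids can attain. A tiny standalone illustration:

```scala
// Sketch of the arithmetic check used by the removed validateTopicCreate (illustrative only).
def isConsecutiveZeroBased(partitionIds: Set[Int]): Boolean = {
  val n = partitionIds.size
  partitionIds.forall(_ >= 0) && partitionIds.sum == n * (n - 1) / 2
}

assert(isConsecutiveZeroBased(Set(0, 1, 2, 3)))  // 0..3: sum 6 == 4*3/2
assert(!isConsecutiveZeroBased(Set(0, 1, 3, 4))) // gap at 2: sum 8 != 6
```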
(partitionId, replicas) => (new TopicPartition(topic,partitionId), replicas) }.toMap - - if (!isUpdate) { - val topicIdOpt = if (usesTopicId) Some(Uuid.randomUuid()) else None - zkClient.createTopicAssignment(topic, topicIdOpt, assignment.map { case (k, v) => k -> v.replicas }) - } else { - val topicIds = zkClient.getTopicIdsForTopics(Set(topic)) - zkClient.setTopicAssignment(topic, topicIds.get(topic), assignment) - } - debug("Updated path %s with %s for replica assignment".format(TopicZNode.path(topic), assignment)) - } catch { - case _: NodeExistsException => throw new TopicExistsException(s"Topic '$topic' already exists.") - case e2: Throwable => throw new AdminOperationException(e2.toString) - } - } - - /** - * Creates a delete path for a given topic - * @param topic Topic name to delete - */ - def deleteTopic(topic: String): Unit = { - if (zkClient.topicExists(topic)) { - try { - zkClient.createDeleteTopicPath(topic) - } catch { - case _: NodeExistsException => throw new TopicAlreadyMarkedForDeletionException( - "topic %s is already marked for deletion".format(topic)) - case e: Throwable => throw new AdminOperationException(e.getMessage) - } - } else { - throw new UnknownTopicOrPartitionException(s"Topic `$topic` to delete does not exist") - } - } - - /** - * Add partitions to existing topic with optional replica assignment. Note that this - * method is used by the TopicCommand. - * - * @param topic Topic for adding partitions to - * @param existingAssignment A map from partition id to its assignment - * @param allBrokers All brokers in the cluster - * @param numPartitions Number of partitions to be set - * @param replicaAssignment Manual replica assignment, or none - * @param validateOnly If true, validate the parameters without actually adding the partitions - * @return the updated replica assignment - */ - def addPartitions(topic: String, - existingAssignment: Map[Int, ReplicaAssignment], - allBrokers: Seq[BrokerMetadata], - numPartitions: Int = 1, - replicaAssignment: Option[Map[Int, Seq[Int]]] = None, - validateOnly: Boolean = false): Map[Int, Seq[Int]] = { - - val proposedAssignmentForNewPartitions = createNewPartitionsAssignment( - topic, - existingAssignment, - allBrokers, - numPartitions, - replicaAssignment - ) - - if (validateOnly) { - (existingAssignment ++ proposedAssignmentForNewPartitions) - .map { case (k, v) => k -> v.replicas } - } else { - createPartitionsWithAssignment(topic, existingAssignment, proposedAssignmentForNewPartitions) - .map { case (k, v) => k -> v.replicas } - } - } - - /** - * Create assignment to add the given number of partitions while validating the - * provided arguments. - * - * @param topic Topic for adding partitions to - * @param existingAssignment A map from partition id to its assignment - * @param allBrokers All brokers in the cluster - * @param numPartitions Number of partitions to be set - * @param replicaAssignment Manual replica assignment, or none - * @return the assignment for the new partitions - */ - def createNewPartitionsAssignment(topic: String, - existingAssignment: Map[Int, ReplicaAssignment], - allBrokers: Seq[BrokerMetadata], - numPartitions: Int = 1, - replicaAssignment: Option[Map[Int, Seq[Int]]] = None): Map[Int, ReplicaAssignment] = { - val existingAssignmentPartition0 = existingAssignment.getOrElse(0, - throw new AdminOperationException( - s"Unexpected existing replica assignment for topic '$topic', partition id 0 is missing. 
" + - s"Assignment: $existingAssignment")).replicas - - val partitionsToAdd = numPartitions - existingAssignment.size - if (partitionsToAdd <= 0) - throw new InvalidPartitionsException( - s"The number of partitions for a topic can only be increased. " + - s"Topic $topic currently has ${existingAssignment.size} partitions, " + - s"$numPartitions would not be an increase.") - - replicaAssignment.foreach { proposedReplicaAssignment => - validateReplicaAssignment(proposedReplicaAssignment, existingAssignmentPartition0.size, - allBrokers.map(_.id).toSet) - } - - val proposedAssignmentForNewPartitions = replicaAssignment.getOrElse { - val startIndex = math.max(0, allBrokers.indexWhere(_.id >= existingAssignmentPartition0.head)) - CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(allBrokers.asJava, partitionsToAdd, existingAssignmentPartition0.size, - startIndex, existingAssignment.size)) - } - - proposedAssignmentForNewPartitions.map { case (tp, replicas) => - tp -> ReplicaAssignment(replicas, List(), List()) - } - } - - /** - * Add partitions to the existing topic with the provided assignment. This method does - * not validate the provided assignments. Validation must be done beforehand. - * - * @param topic Topic for adding partitions to - * @param existingAssignment A map from partition id to its assignment - * @param newPartitionAssignment The assignments to add - * @return the updated replica assignment - */ - def createPartitionsWithAssignment(topic: String, - existingAssignment: Map[Int, ReplicaAssignment], - newPartitionAssignment: Map[Int, ReplicaAssignment]): Map[Int, ReplicaAssignment] = { - - info(s"Creating ${newPartitionAssignment.size} partitions for '$topic' with the following replica assignment: " + - s"$newPartitionAssignment.") - - val combinedAssignment = existingAssignment ++ newPartitionAssignment - - writeTopicPartitionAssignment(topic, combinedAssignment, isUpdate = true) - - combinedAssignment - } - - private def validateReplicaAssignment(replicaAssignment: Map[Int, Seq[Int]], - expectedReplicationFactor: Int, - availableBrokerIds: Set[Int]): Unit = { - - replicaAssignment.foreachEntry { (partitionId, replicas) => - if (replicas.isEmpty) - throw new InvalidReplicaAssignmentException( - s"Cannot have replication factor of 0 for partition id $partitionId.") - if (replicas.size != replicas.toSet.size) - throw new InvalidReplicaAssignmentException( - s"Duplicate brokers not allowed in replica assignment: " + - s"${replicas.mkString(", ")} for partition id $partitionId.") - if (!replicas.toSet.subsetOf(availableBrokerIds)) - throw new BrokerNotAvailableException( - s"Some brokers specified for partition id $partitionId are not available. 
" + - s"Specified brokers: ${replicas.mkString(", ")}, " + - s"available brokers: ${availableBrokerIds.mkString(", ")}.") - partitionId -> replicas.size - } - val badRepFactors = replicaAssignment.collect { - case (partition, replicas) if replicas.size != expectedReplicationFactor => partition -> replicas.size - } - if (badRepFactors.nonEmpty) { - val sortedBadRepFactors = badRepFactors.toSeq.sortBy { case (partitionId, _) => partitionId } - val partitions = sortedBadRepFactors.map { case (partitionId, _) => partitionId } - val repFactors = sortedBadRepFactors.map { case (_, rf) => rf } - throw new InvalidReplicaAssignmentException(s"Inconsistent replication factor between partitions, " + - s"partition 0 has $expectedReplicationFactor while partitions [${partitions.mkString(", ")}] have " + - s"replication factors [${repFactors.mkString(", ")}], respectively.") - } - } - - /** - * Parse broker from entity name to integer id - * @param broker The broker entity name to parse - * @return Integer brokerId after successfully parsed or default None - */ - def parseBroker(broker: String): Option[Int] = { - broker match { - case ZooKeeperInternals.DEFAULT_STRING => None - case _ => - try Some(broker.toInt) - catch { - case _: NumberFormatException => - throw new IllegalArgumentException(s"Error parsing broker $broker. The broker's Entity Name must be a single integer value") - } - } - } - - /** - * Change the configs for a given entityType and entityName - * @param entityType The entityType of the configs that will be changed - * @param entityName The entityName of the entityType - * @param configs The config of the entityName - * @param isUserClientId If true, this entity is user and clientId entity - */ - def changeConfigs(entityType: String, entityName: String, configs: Properties, isUserClientId: Boolean = false): Unit = { - - entityType match { - case ConfigType.TOPIC => changeTopicConfig(entityName, configs) - case ConfigType.CLIENT => changeClientIdConfig(entityName, configs) - case ConfigType.USER => changeUserOrUserClientIdConfig(entityName, configs, isUserClientId) - case ConfigType.BROKER => changeBrokerConfig(parseBroker(entityName), configs) - case ConfigType.IP => changeIpConfig(entityName, configs) - case _ => throw new IllegalArgumentException(s"$entityType is not a known entityType. Should be one of List(${String.join(", ",ConfigType.ALL)})") - } - } - - /** - * Try to clean quota nodes in zk, if the configs of the node are empty and there are no children left, - * to avoid infinite growth of quota nodes - * @param entityType The entityType of the node we are trying to clean - * @param entityName The entityName of the entityType - * @param isUserClientId If true, this entity is user and clientId entity - * @return True, if the node is deleted - */ - private def tryCleanQuotaNodes(entityType: String, entityName: String, isUserClientId: Boolean): Boolean = { - val currPath = ConfigEntityZNode.path(entityType, entityName) - if (zkClient.getChildren(currPath).isEmpty) { - var pathToDelete = currPath - // If the entity is user and clientId, we need to do some further check if the parent user node is also empty - // after current userClientId node deleted. If so, we also need to try cleaning the corresponding user node. 
- if (isUserClientId) { - val user = entityName.substring(0, entityName.indexOf("/")) - val clientId = entityName.substring(entityName.lastIndexOf("/") + 1) - val clientsPath = ConfigEntityZNode.path(ConfigType.USER, user + "/" + ConfigType.CLIENT) - val clientsChildren = zkClient.getChildren(clientsPath) - // If current client is the only child of clients, the node of clients can also be deleted. - if (clientsChildren == Seq(clientId)) { - pathToDelete = clientsPath - val userData = fetchEntityConfig(ConfigType.USER, user) - val userPath = ConfigEntityZNode.path(ConfigType.USER, user) - val userChildren = zkClient.getChildren(userPath) - // If the configs of the user are empty and the clients node is the only child of the user, - // the node of user can also be deleted. - if (userData.isEmpty && userChildren == Seq(ConfigType.CLIENT)) { - pathToDelete = userPath - } - } - } - info(s"Deleting zk node $pathToDelete since node of entityType $entityType and entityName $entityName is empty.") - zkClient.deletePath(pathToDelete) - true - } else - false - } - - /** - * Update the config for a client and create a change notification so the change will propagate to other brokers. - * If clientId is <default>, default clientId config is updated. ClientId configs are used only if <user, clientId> - * and <user> configs are not specified. - * - * @param sanitizedClientId: The sanitized clientId for which configs are being changed - * @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or - * existing configs need to be deleted, it should be done prior to invoking this API - * - */ - def changeClientIdConfig(sanitizedClientId: String, configs: Properties): Unit = { - DynamicConfig.Client.validate(configs) - changeEntityConfig(ConfigType.CLIENT, sanitizedClientId, configs) - } - - /** - * Update the config for a <user> or <user, clientId> and create a change notification so the change will propagate to other brokers. - * User and/or clientId components of the path may be <default>, indicating that the configuration is the default - * value to be applied if a more specific override is not configured. - * - * @param sanitizedEntityName: <sanitizedUserPrincipal> or <sanitizedUserPrincipal>/clients/<clientId> - * @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or - * existing configs need to be deleted, it should be done prior to invoking this API - * @param isUserClientId If true, this entity is user and clientId entity - * - */ - def changeUserOrUserClientIdConfig(sanitizedEntityName: String, configs: Properties, isUserClientId: Boolean = false): Unit = { - if (sanitizedEntityName == ZooKeeperInternals.DEFAULT_STRING || sanitizedEntityName.contains("/clients")) - DynamicConfig.Client.validate(configs) - else - DynamicConfig.User.validate(configs) - changeEntityConfig(ConfigType.USER, sanitizedEntityName, configs, isUserClientId) - } - - /** - * Validates the IP configs. - * @param ip ip for which configs are being validated - * @param configs properties to validate for the IP - */ - private def validateIpConfig(ip: String, configs: Properties): Unit = { - if (!DynamicConfig.Ip.isValidIpEntity(ip)) - throw new AdminOperationException(s"$ip is not a valid IP or resolvable host.") - DynamicConfig.Ip.validate(configs) - } - - /** - * Update the config for an IP. These overrides will be persisted between sessions, and will override any default - * IP properties.
- * @param ip ip for which configs are being updated - * @param configs properties to update for the IP - */ - def changeIpConfig(ip: String, configs: Properties): Unit = { - validateIpConfig(ip, configs) - changeEntityConfig(ConfigType.IP, ip, configs) - } - - /** - * validates the topic configs - * @param topic topic for which configs are being validated - * @param configs properties to validate for the topic - */ - def validateTopicConfig(topic: String, configs: Properties): Unit = { - Topic.validate(topic) - if (!zkClient.topicExists(topic)) - throw new UnknownTopicOrPartitionException(s"Topic '$topic' does not exist.") - // remove the topic overrides - LogConfig.validate(Collections.emptyMap(), configs, - kafkaConfig.map(_.extractLogConfigMap).getOrElse(Collections.emptyMap()), - kafkaConfig.exists(_.remoteLogManagerConfig.isRemoteStorageSystemEnabled()), true) - } - - /** - * Update the config for an existing topic and create a change notification so the change will propagate to other brokers - * - * @param topic: The topic for which configs are being changed - * @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or - * existing configs need to be deleted, it should be done prior to invoking this API - * - */ - def changeTopicConfig(topic: String, configs: Properties): Unit = { - validateTopicConfig(topic, configs) - changeEntityConfig(ConfigType.TOPIC, topic, configs) - } - - /** - * Override the broker config on some set of brokers. These overrides will be persisted between sessions, and will - * override any defaults entered in the broker's config files - * - * @param brokers: The list of brokers to apply config changes to - * @param configs: The config to change, as properties - */ - def changeBrokerConfig(brokers: Seq[Int], configs: Properties): Unit = { - validateBrokerConfig(configs) - brokers.foreach { - broker => changeEntityConfig(ConfigType.BROKER, broker.toString, configs) - } - } - - /** - * Override a broker override or broker default config. These overrides will be persisted between sessions, and will - * override any defaults entered in the broker's config files - * - * @param broker: The broker to apply config changes to or None to update dynamic default configs - * @param configs: The config to change, as properties - */ - def changeBrokerConfig(broker: Option[Int], configs: Properties): Unit = { - validateBrokerConfig(configs) - changeEntityConfig(ConfigType.BROKER, broker.map(_.toString).getOrElse(ZooKeeperInternals.DEFAULT_STRING), configs) - } - - /** - * Validate dynamic broker configs. Since broker configs may contain custom configs, the validation - * only verifies that the provided config does not contain any static configs. - * @param configs configs to validate - */ - private def validateBrokerConfig(configs: Properties): Unit = { - DynamicConfig.Broker.validate(configs) - } - - private def changeEntityConfig(rootEntityType: String, fullSanitizedEntityName: String, configs: Properties, isUserClientId: Boolean = false): Unit = { - val sanitizedEntityPath = rootEntityType + '/' + fullSanitizedEntityName - var needUpdateConfigs = true - // If the entityType is quota and node is empty, which means if the configs are empty and no children left, - // we should try to clean up to avoid continuous increment of zk nodes. 
- if ((ConfigType.CLIENT.equals(rootEntityType) || ConfigType.USER.equals(rootEntityType) || ConfigType.IP.equals(rootEntityType)) && configs.isEmpty) { - if (tryCleanQuotaNodes(rootEntityType, fullSanitizedEntityName, isUserClientId)) { - needUpdateConfigs = false - } - } - if (needUpdateConfigs) { - zkClient.setOrCreateEntityConfigs(rootEntityType, fullSanitizedEntityName, configs) - } - - // create the change notification - zkClient.createConfigChangeNotification(sanitizedEntityPath) - } - - /** - * Read the entity (topic, broker, client, user, <user, client> or <ip>) config (if any) from zk - * sanitizedEntityName is <topic>, <broker>, <client-id>, <user>, <user>/clients/<client-id> or <ip>. - * @param rootEntityType entityType for which configs are being fetched - * @param sanitizedEntityName entityName of the entityType - * @return The successfully gathered configs - */ - def fetchEntityConfig(rootEntityType: String, sanitizedEntityName: String): Properties = { - zkClient.getEntityConfigs(rootEntityType, sanitizedEntityName) - } - - /** - * Gets all the entity configs for a given entityType - * @param entityType entityType for which configs are being fetched - * @return The successfully gathered configs of the entityType - */ - def fetchAllEntityConfigs(entityType: String): Map[String, Properties] = - zkClient.getAllEntitiesWithConfig(entityType).map(entity => (entity, fetchEntityConfig(entityType, entity))).toMap - - /** - * Gets all the entity configs for a given childEntityType - * @param rootEntityType rootEntityType for which configs are being fetched - * @param childEntityType childEntityType of the rootEntityType - * @return The successfully gathered configs of the childEntityType - */ - def fetchAllChildEntityConfigs(rootEntityType: String, childEntityType: String): Map[String, Properties] = { - def entityPaths(rootPath: Option[String]): Seq[String] = { - val root = rootPath match { - case Some(path) => rootEntityType + '/' + path - case None => rootEntityType - } - val entityNames = zkClient.getAllEntitiesWithConfig(root) - rootPath match { - case Some(path) => entityNames.map(entityName => path + '/' + entityName) - case None => entityNames - } - } - entityPaths(None) - .flatMap(entity => entityPaths(Some(entity + '/' + childEntityType))) - .map(entityPath => (entityPath, fetchEntityConfig(rootEntityType, entityPath))).toMap - } - -} - diff --git a/core/src/main/scala/kafka/zk/KafkaZkClient.scala b/core/src/main/scala/kafka/zk/KafkaZkClient.scala deleted file mode 100644 index 106d5075dc400..0000000000000 --- a/core/src/main/scala/kafka/zk/KafkaZkClient.scala +++ /dev/null @@ -1,2405 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
-*/ -package kafka.zk - -import java.util.Properties -import kafka.cluster.Broker -import kafka.controller.{KafkaController, LeaderIsrAndControllerEpoch, ReplicaAssignment} -import kafka.server.KafkaConfig -import kafka.utils.Logging -import kafka.zk.TopicZNode.TopicIdReplicaAssignment -import kafka.zookeeper._ -import org.apache.kafka.common.errors.ControllerMovedException -import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} -import org.apache.kafka.common.security.JaasUtils -import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation} -import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} -import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.metadata.migration.ZkMigrationLeadershipState -import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.config.{ConfigType, ZkConfigs} -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.storage.internals.log.LogConfig -import org.apache.zookeeper.KeeperException.{Code, NodeExistsException} -import org.apache.zookeeper.OpResult.{CheckResult, CreateResult, ErrorResult, SetDataResult} -import org.apache.zookeeper.client.ZKClientConfig -import org.apache.zookeeper.common.ZKConfig -import org.apache.zookeeper.data.{ACL, Stat} -import org.apache.zookeeper.{CreateMode, KeeperException, OpResult, ZooKeeper} - -import java.lang.{Long => JLong} -import scala.collection.{Map, Seq, mutable} -import scala.jdk.CollectionConverters._ - -sealed trait KRaftRegistrationResult -case class FailedRegistrationResult() extends KRaftRegistrationResult -case class SuccessfulRegistrationResult(zkControllerEpoch: Int, controllerEpochZkVersion: Int) extends KRaftRegistrationResult - -/** - * Provides higher level Kafka-specific operations on top of the pipelined [[kafka.zookeeper.ZooKeeperClient]]. - * - * Implementation note: this class includes methods for various components (Controller, Configs, Old Consumer, etc.) - * and returns instances of classes from the calling packages in some cases. This is not ideal, but it made it - * easier to migrate away from `ZkUtils` (since removed). We should revisit this. We should also consider whether a - * monolithic [[kafka.zk.ZkData]] is the way to go. - */ -class KafkaZkClient private[zk] ( - zooKeeperClient: ZooKeeperClient, - isSecure: Boolean, - time: Time, - enableEntityConfigControllerCheck: Boolean -) extends AutoCloseable with Logging { - - private val metricsGroup: KafkaMetricsGroup = new KafkaMetricsGroup("kafka.server", "ZooKeeperClientMetrics") - - private val latencyMetric = metricsGroup.newHistogram("ZooKeeperRequestLatencyMs") - - import KafkaZkClient._ - - // Only for testing - private[kafka] def currentZooKeeper: ZooKeeper = zooKeeperClient.currentZooKeeper - - // This variable holds the Zookeeper session id at the moment a Broker gets registered in Zookeeper and the subsequent - // updates of the session id. It is possible that the session id changes over the time for 'Session expired'. - // This code is part of the work around done in the KAFKA-7165, once ZOOKEEPER-2985 is complete, this code must - // be deleted. - private var currentZooKeeperSessionId: Long = -1 - - /** - * Create a sequential persistent path. That is, the znode will not be automatically deleted upon client's disconnect - * and a monotonically increasing number will be appended to its name. 
- * - * @param path the path to create (with the monotonically increasing number appended) - * @param data the znode data - * @return the created path (including the appended monotonically increasing number) - */ - private[kafka] def createSequentialPersistentPath(path: String, data: Array[Byte]): String = { - val createRequest = CreateRequest(path, data, defaultAcls(path), CreateMode.PERSISTENT_SEQUENTIAL) - val createResponse = retryRequestUntilConnected(createRequest) - createResponse.maybeThrow() - createResponse.name - } - - /** - * Registers the broker in zookeeper and return the broker epoch. - * @param brokerInfo payload of the broker znode - * @return broker epoch (znode create transaction id) - */ - def registerBroker(brokerInfo: BrokerInfo): Long = { - val path = brokerInfo.path - val stat = checkedEphemeralCreate(path, brokerInfo.toJsonBytes) - info(s"Registered broker ${brokerInfo.broker.id} at path $path with addresses: " + - s"${brokerInfo.broker.endPoints.map(_.connectionString).mkString(",")}, czxid (broker epoch): ${stat.getCzxid}") - stat.getCzxid - } - - /** - * Registers a given broker in zookeeper as the controller and increments controller epoch. - * @param controllerId the id of the broker that is to be registered as the controller. - * @return the (updated controller epoch, epoch zkVersion) tuple - * @throws ControllerMovedException if fail to create /controller or fail to increment controller epoch. - */ - def registerControllerAndIncrementControllerEpoch(controllerId: Int): (Int, Int) = { - val timestamp = time.milliseconds() - - // Read /controller_epoch to get the current controller epoch and zkVersion, - // create /controller_epoch with initial value if not exists - val (curEpoch, curEpochZkVersion) = getControllerEpoch - .map(e => (e._1, e._2.getVersion)) - .getOrElse(maybeCreateControllerEpochZNode()) - - // Create /controller and update /controller_epoch atomically - val newControllerEpoch = curEpoch + 1 - val expectedControllerEpochZkVersion = curEpochZkVersion - - debug(s"Try to create ${ControllerZNode.path} and increment controller epoch to $newControllerEpoch with expected controller epoch zkVersion $expectedControllerEpochZkVersion") - - def checkControllerAndEpoch(): (Int, Int) = { - val curControllerId = getControllerId.getOrElse(throw new ControllerMovedException( - s"The ephemeral node at ${ControllerZNode.path} went away while checking whether the controller election succeeds. " + - s"Aborting controller startup procedure")) - if (controllerId == curControllerId) { - val (epoch, stat) = getControllerEpoch.getOrElse( - throw new IllegalStateException(s"${ControllerEpochZNode.path} existed before but goes away while trying to read it")) - - // If the epoch is the same as newControllerEpoch, it is safe to infer that the returned epoch zkVersion - // is associated with the current broker during controller election because we already knew that the zk - // transaction succeeds based on the controller znode verification. Other rounds of controller - // election will result in larger epoch number written in zk. - if (epoch == newControllerEpoch) - return (newControllerEpoch, stat.getVersion) - } - throw new ControllerMovedException("Controller moved to another broker. 
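Editor's note (not part of the patch): `registerBroker` above relies on two ZooKeeper primitives that are easy to demonstrate in isolation: an ephemeral znode for liveness and the create transaction id (czxid) as the broker epoch. A hedged sketch against the plain ZooKeeper client; the path, payload, and error handling are simplified.

```scala
import java.nio.charset.StandardCharsets
import org.apache.zookeeper.{CreateMode, ZooDefs, ZooKeeper}
import org.apache.zookeeper.data.Stat

// Illustrative only: ephemeral registration plus czxid-as-epoch, as described above.
def registerEphemeral(zk: ZooKeeper, path: String, payload: String): Long = {
  zk.create(path, payload.getBytes(StandardCharsets.UTF_8),
    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL)
  val stat = new Stat()
  zk.getData(path, false, stat)
  stat.getCzxid // the znode's create transaction id doubles as the broker epoch
}
```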
Aborting controller startup procedure") - } - - def tryCreateControllerZNodeAndIncrementEpoch(): (Int, Int) = { - val response = retryRequestUntilConnected( - MultiRequest(Seq( - CreateOp(ControllerZNode.path, ControllerZNode.encode(controllerId, timestamp), defaultAcls(ControllerZNode.path), CreateMode.EPHEMERAL), - SetDataOp(ControllerEpochZNode.path, ControllerEpochZNode.encode(newControllerEpoch), expectedControllerEpochZkVersion))) - ) - response.resultCode match { - case Code.NODEEXISTS | Code.BADVERSION => checkControllerAndEpoch() - case Code.OK => - val setDataResult = response.zkOpResults(1).rawOpResult.asInstanceOf[SetDataResult] - (newControllerEpoch, setDataResult.getStat.getVersion) - case code => throw KeeperException.create(code) - } - } - - tryCreateControllerZNodeAndIncrementEpoch() - } - - /** - * Registers a given KRaft controller in zookeeper as the active controller. Unlike the ZK equivalent of this method, - * this creates /controller as a persistent znode. This prevents ZK brokers from attempting to claim the controller - * leadership during a KRaft leadership failover. - * - * This method is called at the beginning of a KRaft migration and during subsequent KRaft leadership changes during - * the migration. - * - * To ensure that the KRaft controller epoch exceeds the current ZK controller epoch, this registration algorithm - * uses a conditional update on the /controller and /controller_epoch znodes. - * - * If a new controller is registered concurrently with this registration, one of the two will fail the CAS - * operation on /controller_epoch. For KRaft, we have an extra guard against the registered KRaft epoch going - * backwards. If a KRaft controller had previously registered, an additional CAS operation is done on the /controller - * ZNode to ensure that the KRaft epoch being registered is newer. - * - * @param kraftControllerId ID of the KRaft controller node - * @param kraftControllerEpoch Epoch of the KRaft controller node - * @return A result object containing the written ZK controller epoch and version, or nothing. - */ - def tryRegisterKRaftControllerAsActiveController(kraftControllerId: Int, kraftControllerEpoch: Int): KRaftRegistrationResult = { - val timestamp = time.milliseconds() - val curEpochOpt: Option[(Int, Int)] = getControllerEpoch.map(e => (e._1, e._2.getVersion)) - val controllerOpt = getControllerRegistration - - // If we have a KRaft epoch registered in /controller, and it is not _older_ than the requested epoch, throw an error. - controllerOpt.flatMap(_.kraftEpoch).foreach { kraftEpochInZk => - if (kraftEpochInZk >= kraftControllerEpoch) { - throw new ControllerMovedException(s"Cannot register KRaft controller $kraftControllerId with epoch $kraftControllerEpoch " + - s"as the current controller register in ZK has the same or newer epoch $kraftEpochInZk.") - } - } - - curEpochOpt match { - case None => - throw new IllegalStateException(s"Cannot register KRaft controller $kraftControllerId as the active controller " + - s"since there is no ZK controller epoch present.") - case Some((curEpoch: Int, curEpochZk: Int)) => - val newControllerEpoch = curEpoch + 1 - - val response = controllerOpt match { - case Some(controller) => - info(s"KRaft controller $kraftControllerId overwriting ${ControllerZNode.path} to become the active " + - s"controller with ZK epoch $newControllerEpoch. 
The previous controller was ${controller.broker}.") - retryRequestUntilConnected( - MultiRequest(Seq( - SetDataOp(ControllerEpochZNode.path, ControllerEpochZNode.encode(newControllerEpoch), curEpochZk), - DeleteOp(ControllerZNode.path, controller.zkVersion), - CreateOp(ControllerZNode.path, ControllerZNode.encode(kraftControllerId, timestamp, kraftControllerEpoch), - defaultAcls(ControllerZNode.path), CreateMode.PERSISTENT))) - ) - case None => - info(s"KRaft controller $kraftControllerId creating ${ControllerZNode.path} to become the active " + - s"controller with ZK epoch $newControllerEpoch. There was no active controller.") - retryRequestUntilConnected( - MultiRequest(Seq( - SetDataOp(ControllerEpochZNode.path, ControllerEpochZNode.encode(newControllerEpoch), curEpochZk), - CreateOp(ControllerZNode.path, ControllerZNode.encode(kraftControllerId, timestamp, kraftControllerEpoch), - defaultAcls(ControllerZNode.path), CreateMode.PERSISTENT))) - ) - } - - val failureSuffix = s"while trying to register KRaft controller $kraftControllerId with ZK epoch " + - s"$newControllerEpoch. KRaft controller was not registered." - response.resultCode match { - case Code.OK => - info(s"Successfully registered KRaft controller $kraftControllerId with ZK epoch $newControllerEpoch") - // First op is always SetData on /controller_epoch - val setDataResult = response.zkOpResults.head.rawOpResult.asInstanceOf[SetDataResult] - SuccessfulRegistrationResult(newControllerEpoch, setDataResult.getStat.getVersion) - case Code.BADVERSION => - info(s"The ZK controller epoch changed $failureSuffix") - FailedRegistrationResult() - case Code.NONODE => - info(s"The ephemeral node at ${ControllerZNode.path} went away $failureSuffix") - FailedRegistrationResult() - case Code.NODEEXISTS => - info(s"The ephemeral node at ${ControllerZNode.path} was created by another controller $failureSuffix") - FailedRegistrationResult() - case code => - error(s"ZooKeeper had an error $failureSuffix") - throw KeeperException.create(code) - } - } - } - - private def maybeCreateControllerEpochZNode(): (Int, Int) = { - createControllerEpochRaw(KafkaController.InitialControllerEpoch).resultCode match { - case Code.OK => - info(s"Successfully created ${ControllerEpochZNode.path} with initial epoch ${KafkaController.InitialControllerEpoch}") - (KafkaController.InitialControllerEpoch, KafkaController.InitialControllerEpochZkVersion) - case Code.NODEEXISTS => - val (epoch, stat) = getControllerEpoch.getOrElse(throw new IllegalStateException(s"${ControllerEpochZNode.path} existed before but goes away while trying to read it")) - (epoch, stat.getVersion) - case code => - throw KeeperException.create(code) - } - } - - def updateBrokerInfo(brokerInfo: BrokerInfo): Unit = { - val brokerIdPath = brokerInfo.path - val setDataRequest = SetDataRequest(brokerIdPath, brokerInfo.toJsonBytes, ZkVersion.MatchAnyVersion) - val response = retryRequestUntilConnected(setDataRequest) - response.maybeThrow() - info("Updated broker %d at path %s with addresses: %s".format(brokerInfo.broker.id, brokerIdPath, brokerInfo.broker.endPoints)) - } - - /** - * Gets topic partition states for the given partitions. - * @param partitions the partitions for which we want to get states. - * @return sequence of GetDataResponses whose contexts are the partitions they are associated with. 
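To illustrate how a caller might consume the KRaftRegistrationResult returned by tryRegisterKRaftControllerAsActiveController above, here is a minimal Scala sketch. It assumes a KafkaZkClient instance named zkClient and treats SuccessfulRegistrationResult and FailedRegistrationResult as plain case classes, as their two-argument and zero-argument uses in the removed code suggest; imports of the Kafka-internal types are omitted.

def registerKRaftController(zkClient: KafkaZkClient, nodeId: Int, kraftEpoch: Int): Option[(Int, Int)] = {
  zkClient.tryRegisterKRaftControllerAsActiveController(nodeId, kraftEpoch) match {
    case SuccessfulRegistrationResult(zkControllerEpoch, epochZkVersion) =>
      // Won the multi-op CAS over /controller_epoch and /controller.
      Some((zkControllerEpoch, epochZkVersion))
    case FailedRegistrationResult() =>
      // Another controller changed /controller or /controller_epoch first; the caller decides whether to retry.
      None
  }
}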
- */ - def getTopicPartitionStatesRaw(partitions: Seq[TopicPartition]): Seq[GetDataResponse] = { - val getDataRequests = partitions.map { partition => - GetDataRequest(TopicPartitionStateZNode.path(partition), ctx = Some(partition)) - } - retryRequestsUntilConnected(getDataRequests) - } - - /** - * Sets topic partition states for the given partitions. - * @param leaderIsrAndControllerEpochs the partition states of each partition whose state we wish to set. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @return sequence of SetDataResponse whose contexts are the partitions they are associated with. - */ - def setTopicPartitionStatesRaw(leaderIsrAndControllerEpochs: Map[TopicPartition, LeaderIsrAndControllerEpoch], expectedControllerEpochZkVersion: Int): Seq[SetDataResponse] = { - val setDataRequests = leaderIsrAndControllerEpochs.map { case (partition, leaderIsrAndControllerEpoch) => - val path = TopicPartitionStateZNode.path(partition) - val data = TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch) - SetDataRequest(path, data, leaderIsrAndControllerEpoch.leaderAndIsr.partitionEpoch, Some(partition)) - } - retryRequestsUntilConnected(setDataRequests.toSeq, expectedControllerEpochZkVersion) - } - - /** - * Creates topic partition state znodes for the given partitions. - * @param leaderIsrAndControllerEpochs the partition states of each partition whose state we wish to set. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @return sequence of CreateResponse whose contexts are the partitions they are associated with. - */ - def createTopicPartitionStatesRaw(leaderIsrAndControllerEpochs: Map[TopicPartition, LeaderIsrAndControllerEpoch], expectedControllerEpochZkVersion: Int): Seq[CreateResponse] = { - createTopicPartitions(leaderIsrAndControllerEpochs.keys.map(_.topic).toSeq.distinct, expectedControllerEpochZkVersion) - createTopicPartition(leaderIsrAndControllerEpochs.keys.toSeq, expectedControllerEpochZkVersion) - val createRequests = leaderIsrAndControllerEpochs.map { case (partition, leaderIsrAndControllerEpoch) => - val path = TopicPartitionStateZNode.path(partition) - val data = TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch) - CreateRequest(path, data, defaultAcls(path), CreateMode.PERSISTENT, Some(partition)) - } - retryRequestsUntilConnected(createRequests.toSeq, expectedControllerEpochZkVersion) - } - - /** - * Sets the controller epoch conditioned on the given epochZkVersion. - * @param epoch the epoch to set - * @param epochZkVersion the expected version number of the epoch znode. - * @return SetDataResponse - */ - def setControllerEpochRaw(epoch: Int, epochZkVersion: Int): SetDataResponse = { - val setDataRequest = SetDataRequest(ControllerEpochZNode.path, ControllerEpochZNode.encode(epoch), epochZkVersion) - retryRequestUntilConnected(setDataRequest) - } - - /** - * Creates the controller epoch znode. - * @param epoch the epoch to set - * @return CreateResponse - */ - def createControllerEpochRaw(epoch: Int): CreateResponse = { - val createRequest = CreateRequest(ControllerEpochZNode.path, ControllerEpochZNode.encode(epoch), - defaultAcls(ControllerEpochZNode.path), CreateMode.PERSISTENT) - retryRequestUntilConnected(createRequest) - } - - /** - * Update the partition states of multiple partitions in zookeeper. - * @param leaderAndIsrs The partition states to update. - * @param controllerEpoch The current controller epoch. 
- * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @return UpdateLeaderAndIsrResult instance containing per partition results. - */ - def updateLeaderAndIsr( - leaderAndIsrs: Map[TopicPartition, LeaderAndIsr], - controllerEpoch: Int, - expectedControllerEpochZkVersion: Int - ): UpdateLeaderAndIsrResult = { - val leaderIsrAndControllerEpochs = leaderAndIsrs.map { case (partition, leaderAndIsr) => - partition -> LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch) - } - val setDataResponses = try { - setTopicPartitionStatesRaw(leaderIsrAndControllerEpochs, expectedControllerEpochZkVersion) - } catch { - case e: ControllerMovedException => throw e - case e: Exception => - return UpdateLeaderAndIsrResult(leaderAndIsrs.keys.iterator.map(_ -> Left(e)).toMap, Seq.empty) - } - - val updatesToRetry = mutable.Buffer.empty[TopicPartition] - val finished = setDataResponses.iterator.flatMap { setDataResponse => - val partition = setDataResponse.ctx.get.asInstanceOf[TopicPartition] - setDataResponse.resultCode match { - case Code.OK => - val updatedLeaderAndIsr = leaderAndIsrs(partition).withPartitionEpoch(setDataResponse.stat.getVersion) - Some(partition -> Right(updatedLeaderAndIsr)) - case Code.BADVERSION => - // Update the buffer for partitions to retry - updatesToRetry += partition - None - case _ => - Some(partition -> Left(setDataResponse.resultException.get)) - } - }.toMap - - UpdateLeaderAndIsrResult(finished, updatesToRetry) - } - - /** - * Get log configs that merge local configs with topic-level configs in zookeeper. - * @param topics The topics to get log configs for. - * @param config The local configs. - * @return A tuple of two values: - * 1. The successfully gathered log configs - * 2. Exceptions corresponding to failed log config lookups. 
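The updateLeaderAndIsr method above splits its outcome into finished partitions and partitions that hit BADVERSION. A minimal sketch of how a controller-side caller might consume that result; it assumes zkClient is a KafkaZkClient and that the two-field shape of UpdateLeaderAndIsrResult matches its constructor call in the removed code.

import org.apache.kafka.common.TopicPartition

def applyLeaderAndIsrUpdates(
  zkClient: KafkaZkClient,
  leaderAndIsrs: Map[TopicPartition, LeaderAndIsr],
  controllerEpoch: Int,
  epochZkVersion: Int
): Seq[TopicPartition] = {
  val UpdateLeaderAndIsrResult(finished, toRetry) =
    zkClient.updateLeaderAndIsr(leaderAndIsrs, controllerEpoch, epochZkVersion)
  finished.foreach {
    case (partition, Right(updated)) =>
      println(s"$partition written with partition epoch ${updated.partitionEpoch}")
    case (partition, Left(error)) =>
      println(s"Failed to update $partition: ${error.getMessage}")
  }
  // BADVERSION means the stored partition epoch moved on; re-read the state before retrying these.
  toRetry
}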
- */ - def getLogConfigs( - topics: Set[String], - config: java.util.Map[String, AnyRef] - ): (Map[String, LogConfig], Map[String, Exception]) = { - val logConfigs = mutable.Map.empty[String, LogConfig] - val failed = mutable.Map.empty[String, Exception] - val configResponses = try { - getTopicConfigs(topics) - } catch { - case e: Exception => - topics.foreach(topic => failed.put(topic, e)) - return (logConfigs.toMap, failed.toMap) - } - configResponses.foreach { configResponse => - val topic = configResponse.ctx.get.asInstanceOf[String] - configResponse.resultCode match { - case Code.OK => - val overrides = ConfigEntityZNode.decode(configResponse.data) - val logConfig = LogConfig.fromProps(config, overrides) - logConfigs.put(topic, logConfig) - case Code.NONODE => - val logConfig = LogConfig.fromProps(config, new Properties) - logConfigs.put(topic, logConfig) - case _ => failed.put(topic, configResponse.resultException.get) - } - } - (logConfigs.toMap, failed.toMap) - } - - /** - * Get entity configs for a given entity name - * @param rootEntityType entity type - * @param sanitizedEntityName entity name - * @return The successfully gathered log configs - */ - def getEntityConfigs(rootEntityType: String, sanitizedEntityName: String): Properties = { - val getDataRequest = GetDataRequest(ConfigEntityZNode.path(rootEntityType, sanitizedEntityName)) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - - getDataResponse.resultCode match { - case Code.OK => - ConfigEntityZNode.decode(getDataResponse.data) - case Code.NONODE => new Properties() - case _ => throw getDataResponse.resultException.get - } - } - - def getEntitiesConfigs(rootEntityType: String, sanitizedEntityNames: Set[String]): Map[String, Properties] = { - val getDataRequests: Seq[GetDataRequest] = sanitizedEntityNames.map { entityName => - GetDataRequest(ConfigEntityZNode.path(rootEntityType, entityName), Some(entityName)) - }.toSeq - - val getDataResponses = retryRequestsUntilConnected(getDataRequests) - getDataResponses.map { response => - val entityName = response.ctx.get.asInstanceOf[String] - response.resultCode match { - case Code.OK => - entityName -> ConfigEntityZNode.decode(response.data) - case Code.NONODE => - entityName -> new Properties() - case _ => throw response.resultException.get - } - }.toMap - } - - /** - * Sets or creates the entity znode path with the given configs depending - * on whether it already exists or not. - * - * If enableEntityConfigControllerCheck is set, this method will ensure that a ZK controller is defined and - * that it is not modified within the duration of this call. This is done to prevent configs from being - * created or modified while the ZK to KRaft migration is taking place. - * - * The only case where enableEntityConfigControllerCheck should be false is when being called by ConfigCommand, - * i.e., "kafka-configs.sh --zookeeper". This is an old behavior we have kept around to allow users to setup - * SCRAM credentials and other configs before the cluster is started for the first time. - * - * If this is method is called concurrently, the last writer wins. In cases where we update configs and then - * partition assignment (i.e. create topic), it's possible for one thread to set this and the other to set the - * partition assignment. As such, the recommendation is to never call create topic for the same topic with different - * configs/partition assignment concurrently. 
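A short sketch of using getLogConfigs above, which returns both the successfully merged configs and the per-topic lookup failures. The zkClient instance, the empty broker-default map, and the helper name are illustrative.

def describeTopicConfigs(zkClient: KafkaZkClient, topics: Set[String]): Unit = {
  // Real callers pass the broker's default log configs here; an empty map keeps the sketch small.
  val brokerDefaults: java.util.Map[String, AnyRef] = new java.util.HashMap[String, AnyRef]()
  val (logConfigs, failures) = zkClient.getLogConfigs(topics, brokerDefaults)
  logConfigs.foreach { case (topic, logConfig) =>
    println(s"$topic -> $logConfig")
  }
  failures.foreach { case (topic, e) =>
    println(s"Could not read config overrides for $topic: ${e.getMessage}")
  }
}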
- * - * @param rootEntityType entity type - * @param sanitizedEntityName entity name - * @throws KeeperException if there is an error while setting or creating the znode - * @throws ControllerMovedException if no controller is defined, or a KRaft controller is defined - */ - def setOrCreateEntityConfigs(rootEntityType: String, sanitizedEntityName: String, config: Properties): Unit = { - val controllerZkVersionOpt: Option[Int] = if (enableEntityConfigControllerCheck) { - val controllerRegistration = getControllerRegistration match { - case Some(registration) => registration - case None => - // This case is mainly here to make tests less flaky (by virtue of retries). - // In practice, we always expect a /controller ZNode to exist - throw new ControllerMovedException(s"Cannot set entity configs for $rootEntityType $sanitizedEntityName " + - s"when there is no controller.") - } - - // If there is a KRaft controller defined, don't even attempt this write. The broker will soon get a UMR - // from the new KRaft controller that lets it know about the new controller. It will then forward - // IncrementalAlterConfig requests instead of processing directly. - if (controllerRegistration.kraftEpoch.exists(epoch => epoch > 0)) { - throw new ControllerMovedException(s"Cannot set entity configs for $rootEntityType $sanitizedEntityName " + - s"directly when there is a KRaft controller.") - } - - Some(controllerRegistration.zkVersion) - } else { - logger.warn("Setting entity configs without any checks on the controller.") - None - } - - def set(configData: Array[Byte]): SetDataResponse = { - // Since we're guarding against the controller switching to KRaft, we need to check that the controller hasn't - // changed during this method. We do that here by adding a CheckOp on the controller ZNode. The reason we - // don't use the controller epoch zkVersion here is that we can't consistently read the controller and - // controller epoch. This does _not_ guard against the existing "last writer wins" behavior of this method. 
- controllerZkVersionOpt match { - case Some(controllerZkVersion) => - val multi = MultiRequest(Seq( - CheckOp(ControllerZNode.path, controllerZkVersion), - SetDataOp(ConfigEntityZNode.path(rootEntityType, sanitizedEntityName), configData, ZkVersion.MatchAnyVersion) - )) - val results = retryRequestUntilConnected(multi) - unwrapResponseWithControllerEpochCheck(results).asInstanceOf[SetDataResponse] - case None => - val setDataRequest = SetDataRequest(ConfigEntityZNode.path(rootEntityType, sanitizedEntityName), configData, ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(setDataRequest) - } - } - - def createOrSet(configData: Array[Byte]): Unit = { - val path = ConfigEntityZNode.path(rootEntityType, sanitizedEntityName) - try createRecursive(path, configData) - catch { - case _: NodeExistsException => set(configData).maybeThrow() - } - } - - val configData = ConfigEntityZNode.encode(config) - - val setDataResponse = set(configData) - setDataResponse.resultCode match { - case Code.NONODE => createOrSet(configData) - case _ => setDataResponse.maybeThrow() - } - } - - /** - * Returns all the entities for a given entityType - * @param entityType entity type - * @return List of all entity names - */ - def getAllEntitiesWithConfig(entityType: String): Seq[String] = { - getChildren(ConfigEntityTypeZNode.path(entityType)) - } - - /** - * Creates config change notification - * @param sanitizedEntityPath sanitizedEntityPath path to write - * @throws KeeperException if there is an error while setting or creating the znode - */ - def createConfigChangeNotification(sanitizedEntityPath: String): Unit = { - makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path) - val path = ConfigEntityChangeNotificationSequenceZNode.createPath - val createRequest = CreateRequest(path, ConfigEntityChangeNotificationSequenceZNode.encode(sanitizedEntityPath), defaultAcls(path), CreateMode.PERSISTENT_SEQUENTIAL) - val createResponse = retryRequestUntilConnected(createRequest) - createResponse.maybeThrow() - } - - /** - * Gets all brokers in the cluster. - * @return sequence of brokers in the cluster. - */ - def getAllBrokersInCluster: Seq[Broker] = { - val brokerIds = getSortedBrokerList - val getDataRequests = brokerIds.map(brokerId => GetDataRequest(BrokerIdZNode.path(brokerId), ctx = Some(brokerId))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests) - getDataResponses.flatMap { getDataResponse => - val brokerId = getDataResponse.ctx.get.asInstanceOf[Int] - getDataResponse.resultCode match { - case Code.OK => - Option(BrokerIdZNode.decode(brokerId, getDataResponse.data).broker) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - } - - /** - * Gets all brokers with broker epoch in the cluster. - * @return map of broker to epoch in the cluster. 
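setOrCreateEntityConfigs pairs naturally with getEntityConfigs for a read-modify-write of topic-level overrides. A minimal sketch, assuming a zkClient instance; the retention.ms value is illustrative and, as the Scaladoc above notes, concurrent writers follow last-writer-wins.

import java.util.Properties

def overrideTopicRetention(zkClient: KafkaZkClient, topic: String, retentionMs: Long): Unit = {
  val overrides: Properties = zkClient.getEntityConfigs(ConfigType.TOPIC, topic)
  overrides.setProperty("retention.ms", retentionMs.toString)
  // Creates the config znode if it does not exist yet, otherwise overwrites it (last writer wins).
  zkClient.setOrCreateEntityConfigs(ConfigType.TOPIC, topic, overrides)
}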
- */ - def getAllBrokerAndEpochsInCluster: Map[Broker, Long] = { - val brokerIds = getSortedBrokerList - val getDataRequests = brokerIds.map(brokerId => GetDataRequest(BrokerIdZNode.path(brokerId), ctx = Some(brokerId))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests) - getDataResponses.flatMap { getDataResponse => - val brokerId = getDataResponse.ctx.get.asInstanceOf[Int] - getDataResponse.resultCode match { - case Code.OK => - Some((BrokerIdZNode.decode(brokerId, getDataResponse.data).broker, getDataResponse.stat.getCzxid)) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - }.toMap - } - - /** - * Get a broker from ZK - * @return an optional Broker - */ - def getBroker(brokerId: Int): Option[Broker] = { - val getDataRequest = GetDataRequest(BrokerIdZNode.path(brokerId)) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => - Option(BrokerIdZNode.decode(brokerId, getDataResponse.data).broker) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Gets the list of sorted broker Ids - */ - def getSortedBrokerList: Seq[Int] = getChildren(BrokerIdsZNode.path).map(_.toInt).sorted - - /** - * Gets all topics in the cluster. - * @param registerWatch indicates if a watch must be registered or not - * @return sequence of topics in the cluster. - */ - def getAllTopicsInCluster(registerWatch: Boolean = false): Set[String] = { - val getChildrenResponse = retryRequestUntilConnected( - GetChildrenRequest(TopicsZNode.path, registerWatch)) - getChildrenResponse.resultCode match { - case Code.OK => getChildrenResponse.children.toSet - case Code.NONODE => Set.empty - case _ => throw getChildrenResponse.resultException.get - } - } - - /** - * Checks the topic existence - * @param topicName the name of the topic to check - * @return true if topic exists else false - */ - def topicExists(topicName: String): Boolean = { - pathExists(TopicZNode.path(topicName)) - } - - /** - * Adds a topic ID to existing topic and replica assignments - * @param topicIdReplicaAssignments the TopicIDReplicaAssignments to add a topic ID to - * @return the updated TopicIdReplicaAssignments including the newly created topic IDs - */ - def setTopicIds(topicIdReplicaAssignments: collection.Set[TopicIdReplicaAssignment], - expectedControllerEpochZkVersion: Int): Set[TopicIdReplicaAssignment] = { - val updatedAssignments = topicIdReplicaAssignments.map { - case TopicIdReplicaAssignment(topic, None, assignments) => - TopicIdReplicaAssignment(topic, Some(Uuid.randomUuid()), assignments) - case TopicIdReplicaAssignment(topic, Some(_), _) => - throw new IllegalArgumentException("TopicIdReplicaAssignment for " + topic + " already contains a topic ID.") - }.toSet - - val setDataRequests = updatedAssignments.map { case TopicIdReplicaAssignment(topic, topicIdOpt, assignments) => - SetDataRequest(TopicZNode.path(topic), TopicZNode.encode(topicIdOpt, assignments), ZkVersion.MatchAnyVersion) - }.toSeq - - retryRequestsUntilConnected(setDataRequests, expectedControllerEpochZkVersion) - updatedAssignments - } - - /** - * Sets the topic znode with the given assignment. - * @param topic the topic whose assignment is being set. - * @param topicId unique topic ID for the topic if the version supports it - * @param assignment the partition to replica mapping to set for the given topic - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. 
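A small sketch of reading the broker registrations together with their epochs (the czxid of each registration znode) via getAllBrokerAndEpochsInCluster; zkClient and the helper name are assumptions.

def describeRegisteredBrokers(zkClient: KafkaZkClient): Unit = {
  val brokersAndEpochs: Map[Broker, Long] = zkClient.getAllBrokerAndEpochsInCluster
  brokersAndEpochs.toSeq.sortBy { case (broker, _) => broker.id }.foreach { case (broker, epoch) =>
    // The broker epoch is the czxid of the broker's registration znode.
    println(s"broker ${broker.id} (epoch $epoch): ${broker.endPoints.map(_.connectionString).mkString(",")}")
  }
}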
- * @return SetDataResponse - */ - def setTopicAssignmentRaw(topic: String, - topicId: Option[Uuid], - assignment: collection.Map[TopicPartition, ReplicaAssignment], - expectedControllerEpochZkVersion: Int): SetDataResponse = { - val setDataRequest = SetDataRequest(TopicZNode.path(topic), TopicZNode.encode(topicId, assignment), ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(setDataRequest, expectedControllerEpochZkVersion) - } - - /** - * Sets the topic znode with the given assignment. - * @param topic the topic whose assignment is being set. - * @param topicId unique topic ID for the topic if the version supports it - * @param assignment the partition to replica mapping to set for the given topic - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @throws KeeperException if there is an error while setting assignment - */ - def setTopicAssignment(topic: String, - topicId: Option[Uuid], - assignment: Map[TopicPartition, ReplicaAssignment], - expectedControllerEpochZkVersion: Int = ZkVersion.MatchAnyVersion): Unit = { - val setDataResponse = setTopicAssignmentRaw(topic, topicId, assignment, expectedControllerEpochZkVersion) - setDataResponse.maybeThrow() - } - - /** - * Create the topic znode with the given assignment. - * @param topic the topic whose assignment is being set. - * @param topicId unique topic ID for the topic if the version supports it - * @param assignment the partition to replica mapping to set for the given topic - * @throws KeeperException if there is an error while creating assignment - */ - def createTopicAssignment(topic: String, topicId: Option[Uuid], assignment: Map[TopicPartition, Seq[Int]]): Unit = { - val persistedAssignments = assignment.map { case (k, v) => k -> ReplicaAssignment(v) } - createRecursive(TopicZNode.path(topic), TopicZNode.encode(topicId, persistedAssignments)) - } - - /** - * Gets the log dir event notifications as strings. These strings are the znode names and not the absolute znode path. - * @return sequence of znode names and not the absolute znode path. - */ - def getAllLogDirEventNotifications: Seq[String] = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(LogDirEventNotificationZNode.path, registerWatch = true)) - getChildrenResponse.resultCode match { - case Code.OK => getChildrenResponse.children.map(LogDirEventNotificationSequenceZNode.sequenceNumber) - case Code.NONODE => Seq.empty - case _ => throw getChildrenResponse.resultException.get - } - } - - /** - * Reads each of the log dir event notifications associated with the given sequence numbers and extracts the broker ids. - * @param sequenceNumbers the sequence numbers associated with the log dir event notifications. - * @return broker ids associated with the given log dir event notifications. - */ - def getBrokerIdsFromLogDirEvents(sequenceNumbers: Seq[String]): Seq[Int] = { - val getDataRequests = sequenceNumbers.map { sequenceNumber => - GetDataRequest(LogDirEventNotificationSequenceZNode.path(sequenceNumber)) - } - val getDataResponses = retryRequestsUntilConnected(getDataRequests) - getDataResponses.flatMap { getDataResponse => - getDataResponse.resultCode match { - case Code.OK => LogDirEventNotificationSequenceZNode.decode(getDataResponse.data) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - } - - /** - * Deletes all log dir event notifications. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. 
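createTopicAssignment above writes the topic znode with a replica assignment and an optional topic ID. A hedged sketch with illustrative broker ids and a freshly generated Uuid, mirroring what setTopicIds does for topics created without an ID; zkClient is assumed.

import org.apache.kafka.common.{TopicPartition, Uuid}

def createTopicZNode(zkClient: KafkaZkClient, topic: String): Unit = {
  // Replica lists are illustrative; broker ids 1, 2 and 3 are assumed to exist.
  val assignment: Map[TopicPartition, Seq[Int]] = Map(
    new TopicPartition(topic, 0) -> Seq(1, 2, 3),
    new TopicPartition(topic, 1) -> Seq(2, 3, 1),
    new TopicPartition(topic, 2) -> Seq(3, 1, 2)
  )
  zkClient.createTopicAssignment(topic, Some(Uuid.randomUuid()), assignment)
}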
- */ - def deleteLogDirEventNotifications(expectedControllerEpochZkVersion: Int): Unit = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(LogDirEventNotificationZNode.path, registerWatch = true)) - if (getChildrenResponse.resultCode == Code.OK) { - deleteLogDirEventNotifications(getChildrenResponse.children.map(LogDirEventNotificationSequenceZNode.sequenceNumber), expectedControllerEpochZkVersion) - } else if (getChildrenResponse.resultCode != Code.NONODE) { - getChildrenResponse.maybeThrow() - } - } - - /** - * Deletes the log dir event notifications associated with the given sequence numbers. - * @param sequenceNumbers the sequence numbers associated with the log dir event notifications to be deleted. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteLogDirEventNotifications(sequenceNumbers: Seq[String], expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequests = sequenceNumbers.map { sequenceNumber => - DeleteRequest(LogDirEventNotificationSequenceZNode.path(sequenceNumber), ZkVersion.MatchAnyVersion) - } - retryRequestsUntilConnected(deleteRequests, expectedControllerEpochZkVersion) - } - - /** - * Gets the topic IDs for the given topics. - * @param topics the topics we wish to retrieve the Topic IDs for - * @return the Topic IDs - */ - def getTopicIdsForTopics(topics: Set[String]): Map[String, Uuid] = { - val getDataRequests = topics.map(topic => GetDataRequest(TopicZNode.path(topic), ctx = Some(topic))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests.toSeq) - getDataResponses.map { getDataResponse => - val topic = getDataResponse.ctx.get.asInstanceOf[String] - getDataResponse.resultCode match { - case Code.OK => Some(TopicZNode.decode(topic, getDataResponse.data)) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - }.filter(_.flatMap(_.topicId).isDefined) - .map(_.get) - .map(topicIdAssignment => (topicIdAssignment.topic, topicIdAssignment.topicId.get)) - .toMap - } - - /** - * Gets the replica assignments for the given topics. - * This function does not return information about which replicas are being added or removed from the assignment. - * @param topics the topics whose partitions we wish to get the assignments for. - * @return the replica assignment for each partition from the given topics. - */ - def getReplicaAssignmentForTopics(topics: Set[String]): Map[TopicPartition, Seq[Int]] = { - getFullReplicaAssignmentForTopics(topics).map { case (k, v) => k -> v.replicas } - } - - /** - * Gets the TopicID and replica assignments for the given topics. - * @param topics the topics whose partitions we wish to get the assignments for. - * @return the TopicIdReplicaAssignment for each partition for the given topics. - */ - def getReplicaAssignmentAndTopicIdForTopics(topics: Set[String]): Set[TopicIdReplicaAssignment] = { - val getDataRequests = topics.map(topic => GetDataRequest(TopicZNode.path(topic), ctx = Some(topic))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests.toSeq) - getDataResponses.map { getDataResponse => - val topic = getDataResponse.ctx.get.asInstanceOf[String] - getDataResponse.resultCode match { - case Code.OK => TopicZNode.decode(topic, getDataResponse.data) - case Code.NONODE => TopicIdReplicaAssignment(topic, None, Map.empty[TopicPartition, ReplicaAssignment]) - case _ => throw getDataResponse.resultException.get - } - }.toSet - } - - /** - * Gets the replica assignments for the given topics. 
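Combining getTopicIdsForTopics with getReplicaAssignmentForTopics gives a quick per-topic summary. A minimal sketch; zkClient and the helper name are assumptions.

def summarizeTopics(zkClient: KafkaZkClient, topics: Set[String]): Unit = {
  val topicIds = zkClient.getTopicIdsForTopics(topics)
  val assignments = zkClient.getReplicaAssignmentForTopics(topics)
  topics.foreach { topic =>
    val id = topicIds.get(topic).map(_.toString).getOrElse("<none>")
    val partitionCount = assignments.count { case (tp, _) => tp.topic == topic }
    println(s"$topic (topicId=$id): $partitionCount partitions")
  }
}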
- * @param topics the topics whose partitions we wish to get the assignments for. - * @return the full replica assignment for each partition from the given topics. - */ - def getFullReplicaAssignmentForTopics(topics: Set[String]): Map[TopicPartition, ReplicaAssignment] = { - val getDataRequests = topics.map(topic => GetDataRequest(TopicZNode.path(topic), ctx = Some(topic))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests.toSeq) - getDataResponses.flatMap { getDataResponse => - val topic = getDataResponse.ctx.get.asInstanceOf[String] - getDataResponse.resultCode match { - case Code.OK => TopicZNode.decode(topic, getDataResponse.data).assignment - case Code.NONODE => Map.empty[TopicPartition, ReplicaAssignment] - case _ => throw getDataResponse.resultException.get - } - }.toMap - } - - /** - * Gets partition the assignments for the given topics. - * @param topics the topics whose partitions we wish to get the assignments for. - * @return the partition assignment for each partition from the given topics. - */ - def getPartitionAssignmentForTopics(topics: Set[String]): Map[String, Map[Int, ReplicaAssignment]] = { - val getDataRequests = topics.map(topic => GetDataRequest(TopicZNode.path(topic), ctx = Some(topic))) - val getDataResponses = retryRequestsUntilConnected(getDataRequests.toSeq) - getDataResponses.flatMap { getDataResponse => - val topic = getDataResponse.ctx.get.asInstanceOf[String] - getDataResponse.resultCode match { - case Code.OK => - val partitionMap = TopicZNode.decode(topic, getDataResponse.data).assignment.map { case (k, v) => (k.partition, v) } - Map(topic -> partitionMap) - case Code.NONODE => Map.empty[String, Map[Int, ReplicaAssignment]] - case _ => throw getDataResponse.resultException.get - } - }.toMap - } - - /** - * Gets the partition numbers for the given topics - * @param topics the topics whose partitions we wish to get. - * @return the partition array for each topic from the given topics. - */ - def getPartitionsForTopics(topics: Set[String]): Map[String, Seq[Int]] = { - getPartitionAssignmentForTopics(topics).map { topicAndPartitionMap => - val topic = topicAndPartitionMap._1 - val partitionMap = topicAndPartitionMap._2 - topic -> partitionMap.keys.toSeq.sorted - } - } - - /** - * Gets the partition count for a given topic - * @param topic The topic to get partition count for. - * @return optional integer that is Some if the topic exists and None otherwise. - */ - def getTopicPartitionCount(topic: String): Option[Int] = { - val topicData = getReplicaAssignmentForTopics(Set(topic)) - if (topicData.nonEmpty) - Some(topicData.size) - else - None - } - - /** - * Gets the assigned replicas for a specific topic and partition - * @param topicPartition TopicAndPartition to get assigned replicas for . 
- * @return List of assigned replicas - */ - def getReplicasForPartition(topicPartition: TopicPartition): Seq[Int] = { - val topicData = getReplicaAssignmentForTopics(Set(topicPartition.topic)) - topicData.getOrElse(topicPartition, Seq.empty) - } - - /** - * Gets all partitions in the cluster - * @return all partitions in the cluster - */ - def getAllPartitions: Set[TopicPartition] = { - val topics = getChildren(TopicsZNode.path) - if (topics == null) Set.empty - else { - topics.flatMap { topic => - // The partitions path may not exist if the topic is in the process of being deleted - getChildren(TopicPartitionsZNode.path(topic)).map(_.toInt).map(new TopicPartition(topic, _)) - }.toSet - } - } - - /** - * Gets the data and version at the given zk path - * @param path zk node path - * @return A tuple of 2 elements, where first element is zk node data as an array of bytes - * and second element is zk node version. - * returns (None, ZkVersion.UnknownVersion) if node doesn't exist and throws exception for any error - */ - def getDataAndVersion(path: String): (Option[Array[Byte]], Int) = { - val (data, stat) = getDataAndStat(path) - stat match { - case ZkStat.NoStat => (data, ZkVersion.UnknownVersion) - case _ => (data, stat.getVersion) - } - } - - /** - * Gets the data and Stat at the given zk path - * @param path zk node path - * @return A tuple of 2 elements, where first element is zk node data as an array of bytes - * and second element is zk node stats. - * returns (None, ZkStat.NoStat) if node doesn't exists and throws exception for any error - */ - def getDataAndStat(path: String): (Option[Array[Byte]], Stat) = { - val getDataRequest = GetDataRequest(path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - - getDataResponse.resultCode match { - case Code.OK => (Option(getDataResponse.data), getDataResponse.stat) - case Code.NONODE => (None, ZkStat.NoStat) - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Gets all the child nodes at a given zk node path - * @param path the path to check - * @return list of child node names - */ - def getChildren(path : String): Seq[String] = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(path, registerWatch = true)) - getChildrenResponse.resultCode match { - case Code.OK => getChildrenResponse.children - case Code.NONODE => Seq.empty - case _ => throw getChildrenResponse.resultException.get - } - } - - /** - * Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the path doesn't - * exist, the current version is not the expected version, etc.) return (false, ZkVersion.UnknownVersion) - * - * When there is a ConnectionLossException during the conditional update, ZookeeperClient will retry the update and may fail - * since the previous update may have succeeded (but the stored zkVersion no longer matches the expected one). - * In this case, we will run the optionalChecker to further check if the previous write did indeed succeeded. 
- */ - def conditionalUpdatePath(path: String, data: Array[Byte], expectVersion: Int, - optionalChecker: Option[(KafkaZkClient, String, Array[Byte]) => (Boolean,Int)] = None): (Boolean, Int) = { - - val setDataRequest = SetDataRequest(path, data, expectVersion) - val setDataResponse = retryRequestUntilConnected(setDataRequest) - - setDataResponse.resultCode match { - case Code.OK => - debug("Conditional update of path %s with value %s and expected version %d succeeded, returning the new version: %d" - .format(path, Utils.utf8(data), expectVersion, setDataResponse.stat.getVersion)) - (true, setDataResponse.stat.getVersion) - - case Code.BADVERSION => - optionalChecker match { - case Some(checker) => checker(this, path, data) - case _ => - debug("Checker method is not passed skipping zkData match") - debug("Conditional update of path %s with data %s and expected version %d failed due to %s" - .format(path, Utils.utf8(data), expectVersion, setDataResponse.resultException.get.getMessage)) - (false, ZkVersion.UnknownVersion) - } - - case Code.NONODE => - debug("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, - Utils.utf8(data), expectVersion, setDataResponse.resultException.get.getMessage)) - (false, ZkVersion.UnknownVersion) - - case _ => - debug("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, - Utils.utf8(data), expectVersion, setDataResponse.resultException.get.getMessage)) - throw setDataResponse.resultException.get - } - } - - /** - * Creates the delete topic znode. - * @param topicName topic name - * @throws KeeperException if there is an error while setting or creating the znode - */ - def createDeleteTopicPath(topicName: String): Unit = { - createRecursive(DeleteTopicsTopicZNode.path(topicName)) - } - - /** - * Checks if topic is marked for deletion - * @param topic the name of the topic to check - * @return true if topic is marked for deletion, else false - */ - def isTopicMarkedForDeletion(topic: String): Boolean = { - pathExists(DeleteTopicsTopicZNode.path(topic)) - } - - /** - * Get all topics marked for deletion. - * @return sequence of topics marked for deletion. - */ - def getTopicDeletions: Seq[String] = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(DeleteTopicsZNode.path, registerWatch = true)) - getChildrenResponse.resultCode match { - case Code.OK => getChildrenResponse.children - case Code.NONODE => Seq.empty - case _ => throw getChildrenResponse.resultException.get - } - } - - /** - * Remove the given topics from the topics marked for deletion. - * @param topics the topics to remove. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteTopicDeletions(topics: Seq[String], expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequests = topics.map(topic => DeleteRequest(DeleteTopicsTopicZNode.path(topic), ZkVersion.MatchAnyVersion)) - retryRequestsUntilConnected(deleteRequests, expectedControllerEpochZkVersion) - } - - /** - * Returns all reassignments. - * @return the reassignments for each partition. 
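The Scaladoc for conditionalUpdatePath above describes the optionalChecker hook: after a connection-loss retry returns BADVERSION, the checker can re-read the node and decide whether the earlier write in fact succeeded. A sketch of one such checker that compares the stored payload with the payload we tried to write; zkClient and the helper name are assumptions.

def writeWithRecheck(zkClient: KafkaZkClient, path: String, newData: Array[Byte]): (Boolean, Int) = {
  val (_, currentVersion) = zkClient.getDataAndVersion(path)
  val checker: (KafkaZkClient, String, Array[Byte]) => (Boolean, Int) = (client, p, data) => {
    val (storedData, storedVersion) = client.getDataAndVersion(p)
    // If our payload is already stored, the retried write that reported BADVERSION actually succeeded.
    if (storedData.exists(stored => java.util.Arrays.equals(stored, data))) (true, storedVersion)
    else (false, ZkVersion.UnknownVersion)
  }
  zkClient.conditionalUpdatePath(path, newData, currentVersion, Some(checker))
}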
- */ - def getPartitionReassignment: collection.Map[TopicPartition, Seq[Int]] = { - val getDataRequest = GetDataRequest(ReassignPartitionsZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => - ReassignPartitionsZNode.decode(getDataResponse.data) match { - case Left(e) => - logger.warn(s"Ignoring partition reassignment due to invalid json: ${e.getMessage}", e) - Map.empty[TopicPartition, Seq[Int]] - case Right(assignments) => assignments - } - case Code.NONODE => Map.empty - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Sets or creates the partition reassignment znode with the given reassignment depending on whether it already - * exists or not. - * - * @param reassignment the reassignment to set on the reassignment znode - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @throws KeeperException if there is an error while setting or creating the znode - * @deprecated Use the PartitionReassignment Kafka API instead - */ - @Deprecated - def setOrCreatePartitionReassignment(reassignment: collection.Map[TopicPartition, Seq[Int]], expectedControllerEpochZkVersion: Int): Unit = { - - def set(reassignmentData: Array[Byte]): SetDataResponse = { - val setDataRequest = SetDataRequest(ReassignPartitionsZNode.path, reassignmentData, ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(setDataRequest, expectedControllerEpochZkVersion) - } - - def create(reassignmentData: Array[Byte]): CreateResponse = { - val createRequest = CreateRequest(ReassignPartitionsZNode.path, reassignmentData, defaultAcls(ReassignPartitionsZNode.path), - CreateMode.PERSISTENT) - retryRequestUntilConnected(createRequest, expectedControllerEpochZkVersion) - } - - val reassignmentData = ReassignPartitionsZNode.encode(reassignment) - val setDataResponse = set(reassignmentData) - setDataResponse.resultCode match { - case Code.NONODE => - val createDataResponse = create(reassignmentData) - createDataResponse.maybeThrow() - case _ => setDataResponse.maybeThrow() - } - } - - /** - * Creates the partition reassignment znode with the given reassignment. - * @param reassignment the reassignment to set on the reassignment znode. - * @throws KeeperException if there is an error while creating the znode. - */ - def createPartitionReassignment(reassignment: Map[TopicPartition, Seq[Int]]): Unit = { - createRecursive(ReassignPartitionsZNode.path, ReassignPartitionsZNode.encode(reassignment)) - } - - /** - * Deletes the partition reassignment znode. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deletePartitionReassignment(expectedControllerEpochZkVersion: Int): Unit = { - deletePath(ReassignPartitionsZNode.path, expectedControllerEpochZkVersion) - } - - /** - * Checks if reassign partitions is in progress. - * @return true if reassign partitions is in progress, else false. - */ - def reassignPartitionsInProgress: Boolean = { - pathExists(ReassignPartitionsZNode.path) - } - - /** - * Gets topic partition states for the given partitions. - * @param partitions the partitions for which we want to get states. - * @return map containing LeaderIsrAndControllerEpoch of each partition for we were able to lookup the partition state. 
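A small sketch around the reassignment helpers above: check whether a reassignment znode already exists before creating a new one. zkClient and the helper name are assumptions.

import org.apache.kafka.common.TopicPartition

def maybeSubmitReassignment(zkClient: KafkaZkClient, proposed: Map[TopicPartition, Seq[Int]]): Unit = {
  if (zkClient.reassignPartitionsInProgress) {
    val inFlight = zkClient.getPartitionReassignment
    println(s"Reassignment already in progress for ${inFlight.size} partitions; not submitting a new one.")
  } else {
    // Throws a KeeperException if another client creates the reassignment znode concurrently.
    zkClient.createPartitionReassignment(proposed)
  }
}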
- */ - def getTopicPartitionStates(partitions: Seq[TopicPartition]): Map[TopicPartition, LeaderIsrAndControllerEpoch] = { - val getDataResponses = getTopicPartitionStatesRaw(partitions) - getDataResponses.flatMap { getDataResponse => - val partition = getDataResponse.ctx.get.asInstanceOf[TopicPartition] - getDataResponse.resultCode match { - case Code.OK => TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat).map(partition -> _) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - }.toMap - } - - /** - * Gets topic partition state for the given partition. - * @param partition the partition for which we want to get state. - * @return LeaderIsrAndControllerEpoch of the partition state if exists, else None - */ - def getTopicPartitionState(partition: TopicPartition): Option[LeaderIsrAndControllerEpoch] = { - val getDataResponse = getTopicPartitionStatesRaw(Seq(partition)).head - if (getDataResponse.resultCode == Code.OK) { - TopicPartitionStateZNode.decode(getDataResponse.data, getDataResponse.stat) - } else if (getDataResponse.resultCode == Code.NONODE) { - None - } else { - throw getDataResponse.resultException.get - } - } - - /** - * Gets the leader for a given partition - * @param partition The partition for which we want to get leader. - * @return optional integer if the leader exists and None otherwise. - */ - def getLeaderForPartition(partition: TopicPartition): Option[Int] = - getTopicPartitionState(partition).map(_.leaderAndIsr.leader) - - /** - * Gets the in-sync replicas (ISR) for a specific topicPartition - * @param partition The partition for which we want to get ISR. - * @return optional ISR if exists and None otherwise - */ - def getInSyncReplicasForPartition(partition: TopicPartition): Option[Seq[Int]] = - getTopicPartitionState(partition).map(_.leaderAndIsr.isr.asScala.map(_.toInt)) - - - /** - * Gets the leader epoch for a specific topicPartition - * @param partition The partition for which we want to get the leader epoch - * @return optional integer if the leader exists and None otherwise - */ - def getEpochForPartition(partition: TopicPartition): Option[Int] = { - getTopicPartitionState(partition).map(_.leaderAndIsr.leaderEpoch) - } - - /** - * Gets the isr change notifications as strings. These strings are the znode names and not the absolute znode path. - * @return sequence of znode names and not the absolute znode path. - */ - def getAllIsrChangeNotifications: Seq[String] = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(IsrChangeNotificationZNode.path, registerWatch = true)) - getChildrenResponse.resultCode match { - case Code.OK => getChildrenResponse.children.map(IsrChangeNotificationSequenceZNode.sequenceNumber) - case Code.NONODE => Seq.empty - case _ => throw getChildrenResponse.resultException.get - } - } - - /** - * Reads each of the isr change notifications associated with the given sequence numbers and extracts the partitions. - * @param sequenceNumbers the sequence numbers associated with the isr change notifications. - * @return partitions associated with the given isr change notifications. 
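The per-partition state accessors above compose into a simple description of a partition's leadership. A minimal sketch; zkClient and the helper name are assumptions.

import org.apache.kafka.common.TopicPartition

def describePartitionState(zkClient: KafkaZkClient, partition: TopicPartition): Unit = {
  val leader = zkClient.getLeaderForPartition(partition)
  val leaderEpoch = zkClient.getEpochForPartition(partition)
  val isr = zkClient.getInSyncReplicasForPartition(partition)
  println(s"$partition leader=${leader.getOrElse("none")} " +
    s"leaderEpoch=${leaderEpoch.getOrElse("none")} isr=${isr.getOrElse(Seq.empty).mkString(",")}")
}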
- */ - def getPartitionsFromIsrChangeNotifications(sequenceNumbers: Seq[String]): Seq[TopicPartition] = { - val getDataRequests = sequenceNumbers.map { sequenceNumber => - GetDataRequest(IsrChangeNotificationSequenceZNode.path(sequenceNumber)) - } - val getDataResponses = retryRequestsUntilConnected(getDataRequests) - getDataResponses.flatMap { getDataResponse => - getDataResponse.resultCode match { - case Code.OK => IsrChangeNotificationSequenceZNode.decode(getDataResponse.data) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - } - - /** - * Deletes all isr change notifications. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteIsrChangeNotifications(expectedControllerEpochZkVersion: Int): Unit = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(IsrChangeNotificationZNode.path, registerWatch = true)) - if (getChildrenResponse.resultCode == Code.OK) { - deleteIsrChangeNotifications(getChildrenResponse.children.map(IsrChangeNotificationSequenceZNode.sequenceNumber), expectedControllerEpochZkVersion) - } else if (getChildrenResponse.resultCode != Code.NONODE) { - getChildrenResponse.maybeThrow() - } - } - - /** - * Deletes the isr change notifications associated with the given sequence numbers. - * @param sequenceNumbers the sequence numbers associated with the isr change notifications to be deleted. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteIsrChangeNotifications(sequenceNumbers: Seq[String], expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequests = sequenceNumbers.map { sequenceNumber => - DeleteRequest(IsrChangeNotificationSequenceZNode.path(sequenceNumber), ZkVersion.MatchAnyVersion) - } - retryRequestsUntilConnected(deleteRequests, expectedControllerEpochZkVersion) - } - - /** - * Creates preferred replica election znode with partitions undergoing election - * @param partitions the set of partitions - * @throws KeeperException if there is an error while creating the znode - */ - def createPreferredReplicaElection(partitions: Set[TopicPartition]): Unit = { - createRecursive(PreferredReplicaElectionZNode.path, PreferredReplicaElectionZNode.encode(partitions)) - } - - /** - * Gets the partitions marked for preferred replica election. - * @return sequence of partitions. - */ - def getPreferredReplicaElection: Set[TopicPartition] = { - val getDataRequest = GetDataRequest(PreferredReplicaElectionZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => PreferredReplicaElectionZNode.decode(getDataResponse.data) - case Code.NONODE => Set.empty - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Deletes the preferred replica election znode. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deletePreferredReplicaElection(expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequest = DeleteRequest(PreferredReplicaElectionZNode.path, ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(deleteRequest, expectedControllerEpochZkVersion) - } - - /** - * Gets the controller id. - * @return optional integer that is Some if the controller znode exists and can be parsed and None otherwise. 
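The ISR change notification helpers above are meant to be used as a read-then-delete loop: list the sequence nodes, decode the affected partitions, then delete exactly the notifications that were read. A sketch, assuming zkClient and a caller that already holds the expected controller epoch zkVersion.

import org.apache.kafka.common.TopicPartition

def drainIsrChangeNotifications(zkClient: KafkaZkClient, epochZkVersion: Int): Seq[TopicPartition] = {
  // The returned strings are child znode names (sequence numbers), not absolute paths.
  val sequenceNumbers = zkClient.getAllIsrChangeNotifications
  val partitions = zkClient.getPartitionsFromIsrChangeNotifications(sequenceNumbers)
  // Delete only the notifications that were read, so notifications written concurrently survive.
  zkClient.deleteIsrChangeNotifications(sequenceNumbers, epochZkVersion)
  partitions
}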
- */ - def getControllerId: Option[Int] = { - val getDataRequest = GetDataRequest(ControllerZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => ControllerZNode.decode(getDataResponse.data) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - - private def getControllerRegistration: Option[ZKControllerRegistration] = { - val getDataRequest = GetDataRequest(ControllerZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => Some(ControllerZNode.decodeController(getDataResponse.data, getDataResponse.stat.getVersion)) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Deletes the controller znode. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteController(expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequest = DeleteRequest(ControllerZNode.path, ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(deleteRequest, expectedControllerEpochZkVersion) - } - - /** - * Gets the controller epoch. - * @return optional (Int, Stat) that is Some if the controller epoch path exists and None otherwise. - */ - def getControllerEpoch: Option[(Int, Stat)] = { - val getDataRequest = GetDataRequest(ControllerEpochZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => - val epoch = ControllerEpochZNode.decode(getDataResponse.data) - Option(epoch, getDataResponse.stat) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Recursively deletes the topic znode. - * @param topic the topic whose topic znode we wish to delete. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteTopicZNode(topic: String, expectedControllerEpochZkVersion: Int): Unit = { - deleteRecursive(TopicZNode.path(topic), expectedControllerEpochZkVersion) - } - - /** - * Deletes the topic configs for the given topics. - * @param topics the topics whose configs we wish to delete. - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - */ - def deleteTopicConfigs(topics: Seq[String], expectedControllerEpochZkVersion: Int): Unit = { - val deleteRequests = topics.map(topic => DeleteRequest(ConfigEntityZNode.path(ConfigType.TOPIC, topic), - ZkVersion.MatchAnyVersion)) - retryRequestsUntilConnected(deleteRequests, expectedControllerEpochZkVersion) - } - - //Acl management methods - - /** - * Creates the required zk nodes for Acl storage and Acl change storage. 
- */ - def createAclPaths(): Unit = { - ZkAclStore.stores.foreach(store => { - createRecursive(store.aclPath, throwIfPathExists = false) - AclEntry.RESOURCE_TYPES.forEach(resourceType => createRecursive(store.path(resourceType), throwIfPathExists = false)) - }) - - ZkAclChangeStore.stores.foreach(store => createRecursive(store.aclChangePath, throwIfPathExists = false)) - } - - /** - * Gets VersionedAcls for a given Resource - * @param resource Resource to get VersionedAcls for - * @return VersionedAcls - */ - def getVersionedAclsForResource(resource: ResourcePattern): ZkData.VersionedAcls = { - val getDataRequest = GetDataRequest(ResourceZNode.path(resource)) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => ResourceZNode.decode(getDataResponse.data, getDataResponse.stat) - case Code.NONODE => ZkData.VersionedAcls(Set.empty, ZkVersion.UnknownVersion) - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Sets or creates the resource znode path with the given acls and expected zk version depending - * on whether it already exists or not. - * @param resource - * @param aclsSet - * @param expectedVersion - * @return true if the update was successful and the new version - */ - def conditionalSetAclsForResource(resource: ResourcePattern, - aclsSet: Set[AclEntry], - expectedVersion: Int): (Boolean, Int) = { - def set(aclData: Array[Byte], expectedVersion: Int): SetDataResponse = { - val setDataRequest = SetDataRequest(ResourceZNode.path(resource), aclData, expectedVersion) - retryRequestUntilConnected(setDataRequest) - } - - if (expectedVersion < 0) - throw new IllegalArgumentException(s"Invalid version $expectedVersion provided for conditional update") - - val aclData = ResourceZNode.encode(aclsSet) - - val setDataResponse = set(aclData, expectedVersion) - setDataResponse.resultCode match { - case Code.OK => (true, setDataResponse.stat.getVersion) - case Code.NONODE | Code.BADVERSION => (false, ZkVersion.UnknownVersion) - case _ => throw setDataResponse.resultException.get - } - } - - def createAclsForResourceIfNotExists(resource: ResourcePattern, aclsSet: Set[AclEntry]): (Boolean, Int) = { - def create(aclData: Array[Byte]): CreateResponse = { - val path = ResourceZNode.path(resource) - val createRequest = CreateRequest(path, aclData, defaultAcls(path), CreateMode.PERSISTENT) - retryRequestUntilConnected(createRequest) - } - - val aclData = ResourceZNode.encode(aclsSet) - - val createResponse = create(aclData) - createResponse.resultCode match { - case Code.OK => (true, 0) - case Code.NODEEXISTS => (false, ZkVersion.UnknownVersion) - case _ => throw createResponse.resultException.get - } - } - - /** - * Creates an Acl change notification message. 
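The ACL helpers above support an optimistic read-modify-write: read the versioned ACLs, merge, then either create the znode or conditionally set it at the observed version, retrying on a lost race. A hedged sketch that destructures VersionedAcls positionally (matching its two-argument construction in the removed code) and finishes with an ACL change notification; zkClient and the helper name are assumptions.

def addAclsForResource(zkClient: KafkaZkClient, resource: ResourcePattern, newAcls: Set[AclEntry]): Unit = {
  var written = false
  while (!written) {
    val ZkData.VersionedAcls(currentAcls, zkVersion) = zkClient.getVersionedAclsForResource(resource)
    val merged = currentAcls ++ newAcls
    val (updated, _) =
      if (zkVersion == ZkVersion.UnknownVersion)
        // No ACL znode yet: create it, or loop again if another writer beat us to it.
        zkClient.createAclsForResourceIfNotExists(resource, merged)
      else
        // Conditional set: returns false on BADVERSION or if the znode disappeared, so we loop and re-read.
        zkClient.conditionalSetAclsForResource(resource, merged, zkVersion)
    written = updated
  }
  // Tell brokers to re-load ACLs for this resource pattern.
  zkClient.createAclChangeNotification(resource)
}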
- * @param resource resource pattern that has changed - */ - def createAclChangeNotification(resource: ResourcePattern): Unit = { - val aclChange = ZkAclStore(resource.patternType).changeStore.createChangeNode(resource) - val createRequest = CreateRequest(aclChange.path, aclChange.bytes, defaultAcls(aclChange.path), CreateMode.PERSISTENT_SEQUENTIAL) - val createResponse = retryRequestUntilConnected(createRequest) - createResponse.maybeThrow() - } - - def propagateLogDirEvent(brokerId: Int): Unit = { - val logDirEventNotificationPath: String = createSequentialPersistentPath( - LogDirEventNotificationZNode.path + "/" + LogDirEventNotificationSequenceZNode.SequenceNumberPrefix, - LogDirEventNotificationSequenceZNode.encode(brokerId)) - debug(s"Added $logDirEventNotificationPath for broker $brokerId") - } - - def propagateIsrChanges(isrChangeSet: collection.Set[TopicPartition]): Unit = { - val isrChangeNotificationPath: String = createSequentialPersistentPath(IsrChangeNotificationSequenceZNode.path(), - IsrChangeNotificationSequenceZNode.encode(isrChangeSet)) - debug(s"Added $isrChangeNotificationPath for $isrChangeSet") - } - - /** - * Deletes all Acl change notifications. - * @throws KeeperException if there is an error while deleting Acl change notifications - */ - def deleteAclChangeNotifications(): Unit = { - ZkAclChangeStore.stores.foreach(store => { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(store.aclChangePath, registerWatch = true)) - if (getChildrenResponse.resultCode == Code.OK) { - deleteAclChangeNotifications(store.aclChangePath, getChildrenResponse.children) - } else if (getChildrenResponse.resultCode != Code.NONODE) { - getChildrenResponse.maybeThrow() - } - }) - } - - /** - * Deletes the Acl change notifications associated with the given sequence nodes - * - * @param aclChangePath the root path - * @param sequenceNodes the name of the node to delete. - */ - private def deleteAclChangeNotifications(aclChangePath: String, sequenceNodes: Seq[String]): Unit = { - val deleteRequests = sequenceNodes.map { sequenceNode => - DeleteRequest(s"$aclChangePath/$sequenceNode", ZkVersion.MatchAnyVersion) - } - - val deleteResponses = retryRequestsUntilConnected(deleteRequests) - deleteResponses.foreach { deleteResponse => - if (deleteResponse.resultCode != Code.NONODE) { - deleteResponse.maybeThrow() - } - } - } - - /** - * Gets the resource types, for which ACLs are stored, for the supplied resource pattern type. - * @param patternType The resource pattern type to retrieve the names for. - * @return list of resource type names - */ - def getResourceTypes(patternType: PatternType): Seq[String] = { - getChildren(ZkAclStore(patternType).aclPath) - } - - /** - * Gets the resource names, for which ACLs are stored, for a given resource type and pattern type - * @param patternType The resource pattern type to retrieve the names for. - * @param resourceType Resource type to retrieve the names for. 
- * @return list of resource names - */ - def getResourceNames(patternType: PatternType, resourceType: ResourceType): Seq[String] = { - getChildren(ZkAclStore(patternType).path(resourceType)) - } - - /** - * Deletes the given Resource node - * @param resource - * @return delete status - */ - def deleteResource(resource: ResourcePattern): Boolean = { - deleteRecursive(ResourceZNode.path(resource)) - } - - /** - * checks the resource existence - * @param resource - * @return existence status - */ - def resourceExists(resource: ResourcePattern): Boolean = { - pathExists(ResourceZNode.path(resource)) - } - - /** - * Conditional delete the resource node - * @param resource - * @param expectedVersion - * @return return true if it succeeds, false otherwise (the current version is not the expected version) - */ - def conditionalDelete(resource: ResourcePattern, expectedVersion: Int): Boolean = { - val deleteRequest = DeleteRequest(ResourceZNode.path(resource), expectedVersion) - val deleteResponse = retryRequestUntilConnected(deleteRequest) - deleteResponse.resultCode match { - case Code.OK | Code.NONODE => true - case Code.BADVERSION => false - case _ => throw deleteResponse.resultException.get - } - } - - /** - * Deletes the zk node recursively - * @param path path to delete - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. - * @param recursiveDelete enable recursive delete - * @return KeeperException if there is an error while deleting the path - */ - def deletePath(path: String, expectedControllerEpochZkVersion: Int = ZkVersion.MatchAnyVersion, recursiveDelete: Boolean = true): Unit = { - if (recursiveDelete) - deleteRecursive(path, expectedControllerEpochZkVersion) - else { - val deleteRequest = DeleteRequest(path, ZkVersion.MatchAnyVersion) - val deleteResponse = retryRequestUntilConnected(deleteRequest, expectedControllerEpochZkVersion) - if (deleteResponse.resultCode != Code.OK && deleteResponse.resultCode != Code.NONODE) { - throw deleteResponse.resultException.get - } - } - } - - /** - * Creates the required zk nodes for Delegation Token storage - */ - def createDelegationTokenPaths(): Unit = { - createRecursive(DelegationTokenChangeNotificationZNode.path, throwIfPathExists = false) - createRecursive(DelegationTokensZNode.path, throwIfPathExists = false) - } - - /** - * Creates Delegation Token change notification message - * @param tokenId token Id - */ - def createTokenChangeNotification(tokenId: String): Unit = { - val path = DelegationTokenChangeNotificationSequenceZNode.createPath - val createRequest = CreateRequest(path, DelegationTokenChangeNotificationSequenceZNode.encode(tokenId), defaultAcls(path), CreateMode.PERSISTENT_SEQUENTIAL) - val createResponse = retryRequestUntilConnected(createRequest) - createResponse.resultException.foreach(e => throw e) - } - - /** - * Sets or creates token info znode with the given token details depending on whether it already - * exists or not. 
- * - * @param token the token to set on the token znode - * @throws KeeperException if there is an error while setting or creating the znode - */ - def setOrCreateDelegationToken(token: DelegationToken): Unit = { - - def set(tokenData: Array[Byte]): SetDataResponse = { - val setDataRequest = SetDataRequest(DelegationTokenInfoZNode.path(token.tokenInfo().tokenId()), tokenData, ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(setDataRequest) - } - - def create(tokenData: Array[Byte]): CreateResponse = { - val path = DelegationTokenInfoZNode.path(token.tokenInfo().tokenId()) - val createRequest = CreateRequest(path, tokenData, defaultAcls(path), CreateMode.PERSISTENT) - retryRequestUntilConnected(createRequest) - } - - val tokenInfo = DelegationTokenInfoZNode.encode(token.tokenInfo()) - val setDataResponse = set(tokenInfo) - setDataResponse.resultCode match { - case Code.NONODE => - val createDataResponse = create(tokenInfo) - createDataResponse.maybeThrow() - case _ => setDataResponse.maybeThrow() - } - } - - /** - * Gets the Delegation Token Info - * @return optional TokenInfo that is Some if the token znode exists and can be parsed and None otherwise. - */ - def getDelegationTokenInfo(delegationTokenId: String): Option[TokenInformation] = { - val getDataRequest = GetDataRequest(DelegationTokenInfoZNode.path(delegationTokenId)) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => DelegationTokenInfoZNode.decode(getDataResponse.data) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Deletes the given Delegation token node - * @param delegationTokenId - * @return delete status - */ - def deleteDelegationToken(delegationTokenId: String): Boolean = { - deleteRecursive(DelegationTokenInfoZNode.path(delegationTokenId)) - } - - /** - * This registers a ZNodeChangeHandler and attempts to register a watcher with an ExistsRequest, which allows data - * watcher registrations on paths which might not even exist. 
- * - * @param zNodeChangeHandler - * @return `true` if the path exists or `false` if it does not - * @throws KeeperException if an error is returned by ZooKeeper - */ - def registerZNodeChangeHandlerAndCheckExistence(zNodeChangeHandler: ZNodeChangeHandler): Boolean = { - zooKeeperClient.registerZNodeChangeHandler(zNodeChangeHandler) - val existsResponse = retryRequestUntilConnected(ExistsRequest(zNodeChangeHandler.path)) - existsResponse.resultCode match { - case Code.OK => true - case Code.NONODE => false - case _ => throw existsResponse.resultException.get - } - } - - /** - * See ZooKeeperClient.registerZNodeChangeHandler - * @param zNodeChangeHandler - */ - def registerZNodeChangeHandler(zNodeChangeHandler: ZNodeChangeHandler): Unit = { - zooKeeperClient.registerZNodeChangeHandler(zNodeChangeHandler) - } - - /** - * See ZooKeeperClient.unregisterZNodeChangeHandler - * @param path - */ - def unregisterZNodeChangeHandler(path: String): Unit = { - zooKeeperClient.unregisterZNodeChangeHandler(path) - } - - /** - * See ZooKeeperClient.registerZNodeChildChangeHandler - * @param zNodeChildChangeHandler - */ - def registerZNodeChildChangeHandler(zNodeChildChangeHandler: ZNodeChildChangeHandler): Unit = { - zooKeeperClient.registerZNodeChildChangeHandler(zNodeChildChangeHandler) - } - - /** - * See ZooKeeperClient.unregisterZNodeChildChangeHandler - * @param path - */ - def unregisterZNodeChildChangeHandler(path: String): Unit = { - zooKeeperClient.unregisterZNodeChildChangeHandler(path) - } - - /** - * - * @param stateChangeHandler - */ - def registerStateChangeHandler(stateChangeHandler: StateChangeHandler): Unit = { - zooKeeperClient.registerStateChangeHandler(stateChangeHandler) - } - - /** - * - * @param name - */ - def unregisterStateChangeHandler(name: String): Unit = { - zooKeeperClient.unregisterStateChangeHandler(name) - } - - /** - * Close the underlying ZooKeeperClient. - */ - def close(): Unit = { - metricsGroup.removeMetric("ZooKeeperRequestLatencyMs") - zooKeeperClient.close() - } - - /** - * Get the committed offset for a topic partition and group - * @param group the group we wish to get offset for - * @param topicPartition the topic partition we wish to get the offset for - * @return optional long that is Some if there was an offset committed for topic partition, group and None otherwise. - */ - def getConsumerOffset(group: String, topicPartition: TopicPartition): Option[Long] = { - val getDataRequest = GetDataRequest(ConsumerOffset.path(group, topicPartition.topic, topicPartition.partition)) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => ConsumerOffset.decode(getDataResponse.data) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - /** - * Set the committed offset for a topic partition and group - * @param group the group whose offset is being set - * @param topicPartition the topic partition whose offset is being set - * @param offset the offset value - */ - def setOrCreateConsumerOffset(group: String, topicPartition: TopicPartition, offset: Long): Unit = { - val setDataResponse = setConsumerOffset(group, topicPartition, offset) - if (setDataResponse.resultCode == Code.NONODE) { - createConsumerOffset(group, topicPartition, offset) - } else { - setDataResponse.maybeThrow() - } - } - - /** - * Get the cluster id. - * @return optional cluster id in String. 
- */ - def getClusterId: Option[String] = { - val getDataRequest = GetDataRequest(ClusterIdZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => Some(ClusterIdZNode.fromJson(getDataResponse.data)) - case Code.NONODE => None - case _ => throw getDataResponse.resultException.get - } - } - - def getOrCreateMigrationState(initialState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val getDataRequest = GetDataRequest(MigrationZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => - MigrationZNode.decode(getDataResponse.data, getDataResponse.stat.getVersion, getDataResponse.stat.getMtime) - case Code.NONODE => - createInitialMigrationState(initialState) - case _ => throw getDataResponse.resultException.get - } - } - - private def createInitialMigrationState(initialState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val createRequest = CreateRequest( - MigrationZNode.path, - MigrationZNode.encode(initialState), - defaultAcls(MigrationZNode.path), - CreateMode.PERSISTENT) - val response = retryRequestUntilConnected(createRequest) - response.maybeThrow() - initialState.withMigrationZkVersion(0) - } - - def updateMigrationState(migrationState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val req = SetDataRequest(MigrationZNode.path, MigrationZNode.encode(migrationState), migrationState.migrationZkVersion()) - val resp = retryRequestUntilConnected(req) - resp.maybeThrow() - migrationState.withMigrationZkVersion(resp.stat.getVersion) - } - - /** - * Return the ACLs of the node of the given path - * @param path the given path for the node - * @return the ACL array of the given node. - */ - def getAcl(path: String): Seq[ACL] = { - val getAclRequest = GetAclRequest(path) - val getAclResponse = retryRequestUntilConnected(getAclRequest) - getAclResponse.resultCode match { - case Code.OK => getAclResponse.acl - case _ => throw getAclResponse.resultException.get - } - } - - /** - * sets the ACLs to the node of the given path - * @param path the given path for the node - * @param acl the given acl for the node - */ - def setAcl(path: String, acl: Seq[ACL]): Unit = { - val setAclRequest = SetAclRequest(path, acl, ZkVersion.MatchAnyVersion) - val setAclResponse = retryRequestUntilConnected(setAclRequest) - setAclResponse.maybeThrow() - } - - /** - * Create the cluster Id. If the cluster id already exists, return the current cluster id. - * @return cluster id - */ - def createOrGetClusterId(proposedClusterId: String): String = { - try { - createRecursive(ClusterIdZNode.path, ClusterIdZNode.toJson(proposedClusterId)) - proposedClusterId - } catch { - case _: NodeExistsException => getClusterId.getOrElse( - throw new KafkaException("Failed to get cluster id from Zookeeper. This can happen if /cluster/id is deleted from Zookeeper.")) - } - } - - /** - * Generate a broker id by updating the broker sequence id path in ZK and return the version of the path. - * The version is incremented by one on every update starting from 1. 
- * @return sequence number as the broker id - */ - def generateBrokerSequenceId(): Int = { - val setDataRequest = SetDataRequest(BrokerSequenceIdZNode.path, Array.empty[Byte], ZkVersion.MatchAnyVersion) - val setDataResponse = retryRequestUntilConnected(setDataRequest) - setDataResponse.resultCode match { - case Code.OK => setDataResponse.stat.getVersion - case Code.NONODE => - // maker sure the path exists - createRecursive(BrokerSequenceIdZNode.path, Array.empty[Byte], throwIfPathExists = false) - generateBrokerSequenceId() - case _ => throw setDataResponse.resultException.get - } - } - - /** - * Pre-create top level paths in ZK if needed. - */ - def createTopLevelPaths(): Unit = { - ZkData.PersistentZkPaths.foreach(makeSurePersistentPathExists) - } - - /** - * Make sure a persistent path exists in ZK. - * @param path - */ - def makeSurePersistentPathExists(path: String): Unit = { - createRecursive(path, data = null, throwIfPathExists = false) - } - - def createFeatureZNode(nodeContents: FeatureZNode): Unit = { - val createRequest = CreateRequest( - FeatureZNode.path, - FeatureZNode.encode(nodeContents), - defaultAcls(FeatureZNode.path), - CreateMode.PERSISTENT) - val response = retryRequestUntilConnected(createRequest) - response.maybeThrow() - } - - def updateFeatureZNode(nodeContents: FeatureZNode): Int = { - val setRequest = SetDataRequest( - FeatureZNode.path, - FeatureZNode.encode(nodeContents), - ZkVersion.MatchAnyVersion) - val response = retryRequestUntilConnected(setRequest) - response.maybeThrow() - response.stat.getVersion - } - - def deleteFeatureZNode(): Unit = { - deletePath(FeatureZNode.path, ZkVersion.MatchAnyVersion, recursiveDelete = false) - } - - private def setConsumerOffset(group: String, topicPartition: TopicPartition, offset: Long): SetDataResponse = { - val setDataRequest = SetDataRequest(ConsumerOffset.path(group, topicPartition.topic, topicPartition.partition), - ConsumerOffset.encode(offset), ZkVersion.MatchAnyVersion) - retryRequestUntilConnected(setDataRequest) - } - - private def createConsumerOffset(group: String, topicPartition: TopicPartition, offset: Long): Unit = { - val path = ConsumerOffset.path(group, topicPartition.topic, topicPartition.partition) - createRecursive(path, ConsumerOffset.encode(offset)) - } - - /** - * Deletes the given zk path recursively - * @param path - * @param expectedControllerEpochZkVersion expected controller epoch zkVersion. 
- * @return true if path gets deleted successfully, false if root path doesn't exist - * @throws KeeperException if there is an error while deleting the znodes - */ - def deleteRecursive(path: String, expectedControllerEpochZkVersion: Int = ZkVersion.MatchAnyVersion): Boolean = { - val getChildrenResponse = retryRequestUntilConnected(GetChildrenRequest(path, registerWatch = true)) - getChildrenResponse.resultCode match { - case Code.OK => - getChildrenResponse.children.foreach(child => deleteRecursive(s"$path/$child", expectedControllerEpochZkVersion)) - val deleteResponse = retryRequestUntilConnected(DeleteRequest(path, ZkVersion.MatchAnyVersion), expectedControllerEpochZkVersion) - if (deleteResponse.resultCode != Code.OK && deleteResponse.resultCode != Code.NONODE) - throw deleteResponse.resultException.get - true - case Code.NONODE => false - case _ => throw getChildrenResponse.resultException.get - } - } - - def pathExists(path: String): Boolean = { - val existsRequest = ExistsRequest(path) - val existsResponse = retryRequestUntilConnected(existsRequest) - existsResponse.resultCode match { - case Code.OK => true - case Code.NONODE => false - case _ => throw existsResponse.resultException.get - } - } - - private[kafka] def createRecursive(path: String, data: Array[Byte] = null, throwIfPathExists: Boolean = true): Unit = { - - def parentPath(path: String): String = { - val indexOfLastSlash = path.lastIndexOf("/") - if (indexOfLastSlash == -1) throw new IllegalArgumentException(s"Invalid path $path") - path.substring(0, indexOfLastSlash) - } - - def createRecursive0(path: String): Unit = { - val createRequest = CreateRequest(path, null, defaultAcls(path), CreateMode.PERSISTENT) - var createResponse = retryRequestUntilConnected(createRequest) - if (createResponse.resultCode == Code.NONODE) { - createRecursive0(parentPath(path)) - createResponse = retryRequestUntilConnected(createRequest) - if (createResponse.resultCode != Code.OK && createResponse.resultCode != Code.NODEEXISTS) { - throw createResponse.resultException.get - } - } else if (createResponse.resultCode != Code.OK && createResponse.resultCode != Code.NODEEXISTS) { - throw createResponse.resultException.get - } - } - - val createRequest = CreateRequest(path, data, defaultAcls(path), CreateMode.PERSISTENT) - var createResponse = retryRequestUntilConnected(createRequest) - - if (throwIfPathExists && createResponse.resultCode == Code.NODEEXISTS) { - createResponse.maybeThrow() - } else if (createResponse.resultCode == Code.NONODE) { - createRecursive0(parentPath(path)) - createResponse = retryRequestUntilConnected(createRequest) - if (throwIfPathExists || createResponse.resultCode != Code.NODEEXISTS) - createResponse.maybeThrow() - } else if (createResponse.resultCode != Code.NODEEXISTS) - createResponse.maybeThrow() - - } - - private def createTopicPartition(partitions: Seq[TopicPartition], expectedControllerEpochZkVersion: Int): Seq[CreateResponse] = { - val createRequests = partitions.map { partition => - val path = TopicPartitionZNode.path(partition) - CreateRequest(path, null, defaultAcls(path), CreateMode.PERSISTENT, Some(partition)) - } - retryRequestsUntilConnected(createRequests, expectedControllerEpochZkVersion) - } - - private def createTopicPartitions(topics: Seq[String], expectedControllerEpochZkVersion: Int): Seq[CreateResponse] = { - val createRequests = topics.map { topic => - val path = TopicPartitionsZNode.path(topic) - CreateRequest(path, null, defaultAcls(path), CreateMode.PERSISTENT, Some(topic)) - } - 
retryRequestsUntilConnected(createRequests, expectedControllerEpochZkVersion) - } - - private def getTopicConfigs(topics: Set[String]): Seq[GetDataResponse] = { - val getDataRequests: Seq[GetDataRequest] = topics.iterator.map { topic => - GetDataRequest(ConfigEntityZNode.path(ConfigType.TOPIC, topic), ctx = Some(topic)) - }.toBuffer - - retryRequestsUntilConnected(getDataRequests) - } - - def defaultAcls(path: String): Seq[ACL] = ZkData.defaultAcls(isSecure, path) - - def secure: Boolean = isSecure - - private[zk] def retryRequestUntilConnected[Req <: AsyncRequest](request: Req, expectedControllerZkVersion: Int = ZkVersion.MatchAnyVersion): Req#Response = { - retryRequestsUntilConnected(Seq(request), expectedControllerZkVersion).head - } - - private def retryRequestsUntilConnected[Req <: AsyncRequest](requests: Seq[Req], expectedControllerZkVersion: Int): Seq[Req#Response] = { - expectedControllerZkVersion match { - case ZkVersion.MatchAnyVersion => retryRequestsUntilConnected(requests) - case version if version >= 0 => - retryRequestsUntilConnected(requests.map(wrapRequestWithControllerEpochCheck(_, version))) - .map(unwrapResponseWithControllerEpochCheck(_).asInstanceOf[Req#Response]) - case invalidVersion => - throw new IllegalArgumentException(s"Expected controller epoch zkVersion $invalidVersion should be non-negative or equal to ${ZkVersion.MatchAnyVersion}") - } - } - - /** - * Safely performs a sequence of writes to ZooKeeper as part of a KRaft migration. For each request in {@code requests}, we - * wrap the operation in a multi-op transaction that includes a check op on /controller_epoch and /migration. This ensures - * that another KRaft controller or another ZK controller has unexpectedly taken leadership. - * - * In cases of KRaft failover during a migration, it is possible that a write is attempted before the old KRaft controller - * receives the new leader information. In this case, the check op on /migration acts as a guard against multiple writers. - * - * The multi-op for the last request in {@code requests} is used to update the /migration node with the latest migration - * state. This effectively checkpoints the progress of the migration in ZK relative to the metadata log. - * - * Each multi-op request is atomic. The overall sequence of multi-op requests is not atomic and we may fail during any - * of them. When the KRaft controller recovers the migration state, it will re-apply all of the writes needed to update - * the ZK state with the latest KRaft state. In the case of Create or Delete operations, these will fail if applied - * twice, so we need to ignore NodeExists and NoNode failures for those cases. - * - * @param requests A sequence of ZK requests. Only Create, Delete, and SetData are supported. - * @param migrationState The current migration state. This is written out as part of the final multi-op request. - * @return The new version of /migration ZNode and the sequence of responses for the given requests. - */ - def retryMigrationRequestsUntilConnected[Req <: AsyncRequest](requests: Seq[Req], - migrationState: ZkMigrationLeadershipState): (Int, Seq[Req#Response]) = { - - if (requests.isEmpty) { - return (migrationState.migrationZkVersion(), Seq.empty) - } - - def wrapMigrationRequest(request: Req, lastRequestInBatch: Boolean): MultiRequest = { - // Wrap a single request with the multi-op transactional request. 
- val checkOp = CheckOp(ControllerEpochZNode.path, migrationState.zkControllerEpochZkVersion()) - val migrationOp = if (lastRequestInBatch) { - SetDataOp(MigrationZNode.path, MigrationZNode.encode(migrationState), migrationState.migrationZkVersion()) - } else { - CheckOp(MigrationZNode.path, migrationState.migrationZkVersion()) - } - - request match { - case CreateRequest(path, data, acl, createMode, ctx) => - MultiRequest(Seq(checkOp, migrationOp, CreateOp(path, data, acl, createMode)), ctx) - case DeleteRequest(path, version, ctx) => - MultiRequest(Seq(checkOp, migrationOp, DeleteOp(path, version)), ctx) - case SetDataRequest(path, data, version, ctx) => - MultiRequest(Seq(checkOp, migrationOp, SetDataOp(path, data, version)), ctx) - case _ => throw new IllegalStateException(s"$request does not need controller epoch check") - } - } - - def handleUnwrappedMigrationResult(migrationOp: ZkOp, migrationResult: OpResult): Int = { - // Handle just the operation that updated /migration ZNode - val (path: String, data: Option[Array[Byte]], version: Int) = migrationOp match { - case CheckOp(path, version) => (path, None, version) - case SetDataOp(path, data, version) => (path, Some(data), version) - case _ => throw new IllegalStateException("Unexpected result on /migration znode") - } - - migrationResult match { - case _: CheckResult => version - case setDataResult: SetDataResult => setDataResult.getStat.getVersion - case errorResult: ErrorResult => - if (path.equals(MigrationZNode.path)) { - val errorCode = Code.get(errorResult.getErr) - if (errorCode == Code.BADVERSION) { - data match { - case Some(value) => - val failedPayload = MigrationZNode.decode(value, version, -1) - throw new RuntimeException( - s"Conditional update on KRaft Migration ZNode failed. Sent zkVersion = $version. The failed " + - s"write was: $failedPayload. This indicates that another KRaft controller is making writes to ZooKeeper.") - case None => - throw new RuntimeException(s"Check op on KRaft Migration ZNode failed. Sent zkVersion = $version. 
" + - s"This indicates that another KRaft controller is making writes to ZooKeeper.") - } - } else if (errorCode == Code.OK) { - // This means the Check or SetData op would have been ok, but failed because of another operation in this multi-op - version - } else { - throw KeeperException.create(errorCode, path) - } - } else { - throw new RuntimeException(s"Got migration result for incorrect path $path") - } - case _ => throw new RuntimeException( - s"Expected either CheckResult, SetDataResult, or ErrorResult for migration op, but saw $migrationResult") - } - } - - def unwrapMigrationResponse(response: AsyncResponse, lastRequestInBatch: Boolean): (AsyncResponse, Int) = { - response match { - case MultiResponse(resultCode, _, ctx, zkOpResults, responseMetadata) => - zkOpResults match { - case Seq(ZkOpResult(checkOp: CheckOp, checkOpResult), ZkOpResult(migrationOp: CheckOp, migrationResult), zkOpResult) => - // Matches all requests except or the last one (CheckOp on /migration) - if (lastRequestInBatch) { - throw new IllegalStateException("Should not see a Check operation on /migration in the last request.") - } - handleUnwrappedCheckOp(checkOp, checkOpResult) - val migrationVersion = handleUnwrappedMigrationResult(migrationOp, migrationResult) - (handleUnwrappedZkOp(zkOpResult, resultCode, ctx, responseMetadata), migrationVersion) - case Seq(ZkOpResult(checkOp: CheckOp, checkOpResult), ZkOpResult(migrationOp: SetDataOp, migrationResult), zkOpResult) => - // Matches the last request in a batch (SetDataOp on /migration) - if (!lastRequestInBatch) { - throw new IllegalStateException("Should only see a SetData operation on /migration in the last request.") - } - handleUnwrappedCheckOp(checkOp, checkOpResult) - val migrationVersion = handleUnwrappedMigrationResult(migrationOp, migrationResult) - (handleUnwrappedZkOp(zkOpResult, resultCode, ctx, responseMetadata), migrationVersion) - case null => throw KeeperException.create(resultCode) - case _ => throw new IllegalStateException( - s"Cannot unwrap $response because it does not contain the expected operations for a migration operation.") - } - case _ => throw new IllegalStateException(s"Cannot unwrap $response because it is not a MultiResponse") - } - } - - migrationState.zkControllerEpochZkVersion() match { - case ZkVersion.MatchAnyVersion => throw new IllegalArgumentException( - s"Expected a controller epoch zkVersion when making migration writes, not -1.") - case version if version >= 0 => - logger.trace(s"Performing ${requests.size} migration update(s) with migrationState=$migrationState") - val wrappedRequests = requests.map(req => wrapMigrationRequest(req, req == requests.last)) - val results = retryRequestsUntilConnected(wrappedRequests) - val unwrappedResults = results.map(resp => unwrapMigrationResponse(resp, resp == results.last)) - val migrationZkVersion = unwrappedResults.last._2 - // Return the new version of /migration and the sequence of responses to the original requests - (migrationZkVersion, unwrappedResults.map(_._1.asInstanceOf[Req#Response])) - case invalidVersion => - throw new IllegalArgumentException( - s"Expected controller epoch zkVersion $invalidVersion should be non-negative or equal to ${ZkVersion.MatchAnyVersion}") - } - } - - private def retryRequestsUntilConnected[Req <: AsyncRequest](requests: Seq[Req]): Seq[Req#Response] = { - val remainingRequests = new mutable.ArrayBuffer(requests.size) ++= requests - val responses = new mutable.ArrayBuffer[Req#Response] - while (remainingRequests.nonEmpty) { - val 
batchResponses = zooKeeperClient.handleRequests(remainingRequests) - - batchResponses.foreach(response => latencyMetric.update(response.metadata.responseTimeMs)) - - // Only execute slow path if we find a response with CONNECTIONLOSS - if (batchResponses.exists(_.resultCode == Code.CONNECTIONLOSS)) { - val requestResponsePairs = remainingRequests.zip(batchResponses) - - remainingRequests.clear() - requestResponsePairs.foreach { case (request, response) => - if (response.resultCode == Code.CONNECTIONLOSS) - remainingRequests += request - else - responses += response - } - - if (remainingRequests.nonEmpty) - zooKeeperClient.waitUntilConnected() - } else { - remainingRequests.clear() - responses ++= batchResponses - } - } - responses - } - - private def checkedEphemeralCreate(path: String, data: Array[Byte]): Stat = { - val checkedEphemeral = new CheckedEphemeral(path, data) - info(s"Creating $path (is it secure? $isSecure)") - val stat = checkedEphemeral.create() - info(s"Stat of the created znode at $path is: $stat") - stat - } - - private def isZKSessionIdDiffFromCurrentZKSessionId: Boolean = { - zooKeeperClient.sessionId != currentZooKeeperSessionId - } - - private def isZKSessionTheEphemeralOwner(ephemeralOwnerId: Long): Boolean = { - ephemeralOwnerId == currentZooKeeperSessionId - } - - private[zk] def shouldReCreateEphemeralZNode(ephemeralOwnerId: Long): Boolean = { - isZKSessionTheEphemeralOwner(ephemeralOwnerId) && isZKSessionIdDiffFromCurrentZKSessionId - } - - private def updateCurrentZKSessionId(newSessionId: Long): Unit = { - currentZooKeeperSessionId = newSessionId - } - - private class CheckedEphemeral(path: String, data: Array[Byte]) extends Logging { - def create(): Stat = { - val response = retryRequestUntilConnected( - MultiRequest(Seq( - CreateOp(path, null, defaultAcls(path), CreateMode.EPHEMERAL), - SetDataOp(path, data, 0))) - ) - val stat = response.resultCode match { - case Code.OK => - val setDataResult = response.zkOpResults(1).rawOpResult.asInstanceOf[SetDataResult] - setDataResult.getStat - case Code.NODEEXISTS => - getAfterNodeExists - case code => - error(s"Error while creating ephemeral at $path with return code: $code") - throw KeeperException.create(code) - } - - // At this point, we need to save a reference to the zookeeper session id. - // This is done here since the Zookeeper session id may not be available at the Object creation time. - // This is assuming the 'retryRequestUntilConnected' method got connected and a valid session id is present. - // This code is part of the workaround done in the KAFKA-7165, once ZOOKEEPER-2985 is complete, this code - // must be deleted. - updateCurrentZKSessionId(zooKeeperClient.sessionId) - - stat - } - - // This method is part of the work around done in the KAFKA-7165, once ZOOKEEPER-2985 is complete, this code must - // be deleted. 
- private def delete(): Code = { - val deleteRequest = DeleteRequest(path, ZkVersion.MatchAnyVersion) - val deleteResponse = retryRequestUntilConnected(deleteRequest) - deleteResponse.resultCode match { - case code@ Code.OK => code - case code@ Code.NONODE => code - case code => - error(s"Error while deleting ephemeral node at $path with return code: $code") - code - } - } - - private def reCreate(): Stat = { - val codeAfterDelete = delete() - val codeAfterReCreate = codeAfterDelete - debug(s"Result of znode ephemeral deletion at $path is: $codeAfterDelete") - if (codeAfterDelete == Code.OK || codeAfterDelete == Code.NONODE) { - create() - } else { - throw KeeperException.create(codeAfterReCreate) - } - } - - private def getAfterNodeExists: Stat = { - val getDataRequest = GetDataRequest(path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - val ephemeralOwnerId = getDataResponse.stat.getEphemeralOwner - getDataResponse.resultCode match { - // At this point, the Zookeeper session could be different (due a 'Session expired') from the one that initially - // registered the Broker into the Zookeeper ephemeral node, but the znode is still present in ZooKeeper. - // The expected behaviour is that Zookeeper server removes the ephemeral node associated with the expired session - // but due an already reported bug in Zookeeper (ZOOKEEPER-2985) this is not happening, so, the following check - // will validate if this Broker got registered with the previous (expired) session and try to register again, - // deleting the ephemeral node and creating it again. - // This code is part of the work around done in the KAFKA-7165, once ZOOKEEPER-2985 is complete, this code must - // be deleted. - case Code.OK if shouldReCreateEphemeralZNode(ephemeralOwnerId) => - info(s"Was not possible to create the ephemeral at $path, node already exists and owner " + - s"'0x${JLong.toHexString(ephemeralOwnerId)}' does not match current session '0x${JLong.toHexString(zooKeeperClient.sessionId)}'" + - s", trying to delete and re-create it with the newest Zookeeper session") - reCreate() - case Code.OK if ephemeralOwnerId != zooKeeperClient.sessionId => - error(s"Error while creating ephemeral at $path, node already exists and owner " + - s"'0x${JLong.toHexString(ephemeralOwnerId)}' does not match current session '0x${JLong.toHexString(zooKeeperClient.sessionId)}'") - throw KeeperException.create(Code.NODEEXISTS) - case Code.OK => - getDataResponse.stat - case Code.NONODE => - info(s"The ephemeral node at $path went away while reading it, attempting create() again") - create() - case code => - error(s"Error while creating ephemeral at $path as it already exists and error getting the node data due to $code") - throw KeeperException.create(code) - } - } - } -} - -object KafkaZkClient { - - /** - * @param finishedPartitions Partitions that finished either in successfully - * updated partition states or failed with an exception. - * @param partitionsToRetry The partitions that we should retry due to a zookeeper BADVERSION conflict. Version conflicts - * can occur if the partition leader updated partition state while the controller attempted to - * update partition state. - */ - case class UpdateLeaderAndIsrResult( - finishedPartitions: Map[TopicPartition, Either[Exception, LeaderAndIsr]], - partitionsToRetry: Seq[TopicPartition] - ) - - /** - * Create an instance of this class with the provided parameters. - * - * The metric group and type are preserved by default for compatibility with previous versions. 
- */ - def apply(connectString: String, - isSecure: Boolean, - sessionTimeoutMs: Int, - connectionTimeoutMs: Int, - maxInFlightRequests: Int, - time: Time, - name: String, - zkClientConfig: ZKClientConfig, - metricGroup: String = "kafka.server", - metricType: String = "SessionExpireListener", - createChrootIfNecessary: Boolean = false, - enableEntityConfigControllerCheck: Boolean = true - ): KafkaZkClient = { - - /* ZooKeeper 3.6.0 changed the default configuration for JUTE_MAXBUFFER from 4 MB to 1 MB. - * This causes a regression if Kafka tries to retrieve a large amount of data across many - * znodes – in such a case the ZooKeeper client will repeatedly emit a message of the form - * "java.io.IOException: Packet len <####> is out of range". - * - * We restore the 3.4.x/3.5.x behavior unless the caller has set the property (note that ZKConfig - * auto configures itself if certain system properties have been set). - * - * See https://github.com/apache/zookeeper/pull/1129 for the details on why the behavior - * changed in 3.6.0. - */ - if (zkClientConfig.getProperty(ZKConfig.JUTE_MAXBUFFER) == null) - zkClientConfig.setProperty(ZKConfig.JUTE_MAXBUFFER, (4096 * 1024).toString) - - if (createChrootIfNecessary) { - val chrootIndex = connectString.indexOf("/") - if (chrootIndex > 0) { - val zkConnWithoutChrootForChrootCreation = connectString.substring(0, chrootIndex) - val zkClientForChrootCreation = apply(zkConnWithoutChrootForChrootCreation, isSecure, sessionTimeoutMs, - connectionTimeoutMs, maxInFlightRequests, time, name, zkClientConfig, metricGroup, metricType) - try { - val chroot = connectString.substring(chrootIndex) - if (!zkClientForChrootCreation.pathExists(chroot)) { - zkClientForChrootCreation.makeSurePersistentPathExists(chroot) - } - } finally { - zkClientForChrootCreation.close() - } - } - } - val zooKeeperClient = new ZooKeeperClient(connectString, sessionTimeoutMs, connectionTimeoutMs, maxInFlightRequests, - time, metricGroup, metricType, zkClientConfig, name) - new KafkaZkClient(zooKeeperClient, isSecure, time, enableEntityConfigControllerCheck) - } - - // A helper function to transform a regular request into a MultiRequest - // with the check on controller epoch znode zkVersion. - // This is used for fencing zookeeper updates in controller. - private def wrapRequestWithControllerEpochCheck(request: AsyncRequest, expectedControllerZkVersion: Int): MultiRequest = { - val checkOp = CheckOp(ControllerEpochZNode.path, expectedControllerZkVersion) - request match { - case CreateRequest(path, data, acl, createMode, ctx) => - MultiRequest(Seq(checkOp, CreateOp(path, data, acl, createMode)), ctx) - case DeleteRequest(path, version, ctx) => - MultiRequest(Seq(checkOp, DeleteOp(path, version)), ctx) - case SetDataRequest(path, data, version, ctx) => - MultiRequest(Seq(checkOp, SetDataOp(path, data, version)), ctx) - case _ => throw new IllegalStateException(s"$request does not need controller epoch check") - } - } - - private def handleUnwrappedCheckOp(checkOp: CheckOp, checkOpResult: OpResult): Unit = { - checkOpResult match { - case errorResult: ErrorResult => - if (checkOp.path.equals(ControllerEpochZNode.path)) { - val errorCode = Code.get(errorResult.getErr) - if (errorCode == Code.BADVERSION) - // Throw ControllerMovedException when the zkVersionCheck is performed on the controller epoch znode and the check fails - throw new ControllerMovedException(s"Controller epoch zkVersion check fails. 
Expected zkVersion = ${checkOp.version}") - else if (errorCode != Code.OK) - throw KeeperException.create(errorCode, checkOp.path) - } - case _ => - } - } - - private def handleUnwrappedZkOp(zkOpResult: ZkOpResult, - resultCode: Code, - ctx: Option[Any], - responseMetadata: ResponseMetadata): AsyncResponse = { - val rawOpResult = zkOpResult.rawOpResult - zkOpResult.zkOp match { - case createOp: CreateOp => - val name = rawOpResult match { - case c: CreateResult => c.getPath - case _ => null - } - CreateResponse(resultCode, createOp.path, ctx, name, responseMetadata) - case deleteOp: DeleteOp => - DeleteResponse(resultCode, deleteOp.path, ctx, responseMetadata) - case setDataOp: SetDataOp => - val stat = rawOpResult match { - case s: SetDataResult => s.getStat - case _ => null - } - SetDataResponse(resultCode, setDataOp.path, ctx, stat, responseMetadata) - case zkOp => throw new IllegalStateException(s"Unexpected zkOp: $zkOp") - } - } - - // A helper function to transform a MultiResponse with the check on - // controller epoch znode zkVersion back into a regular response. - // ControllerMovedException will be thrown if the controller epoch - // znode zkVersion check fails. This is used for fencing zookeeper - // updates in controller. - private def unwrapResponseWithControllerEpochCheck(response: AsyncResponse): AsyncResponse = { - response match { - case MultiResponse(resultCode, _, ctx, zkOpResults, responseMetadata) => - zkOpResults match { - // In normal ZK writes, we just have a MultiOp with a CheckOp and the actual operation we're performing - case Seq(ZkOpResult(checkOp: CheckOp, checkOpResult), zkOpResult) => - handleUnwrappedCheckOp(checkOp, checkOpResult) - handleUnwrappedZkOp(zkOpResult, resultCode, ctx, responseMetadata) - case null => throw KeeperException.create(resultCode) - case _ => throw new IllegalStateException(s"Cannot unwrap $response because the first zookeeper op is not check op in original MultiRequest") - } - case _ => throw new IllegalStateException(s"Cannot unwrap $response because it is not a MultiResponse") - } - } - - def createZkClient(name: String, time: Time, config: KafkaConfig, zkClientConfig: ZKClientConfig): KafkaZkClient = { - val secureAclsEnabled = config.zkEnableSecureAcls - val isZkSecurityEnabled = JaasUtils.isZkSaslEnabled || KafkaConfig.zkTlsClientAuthEnabled(zkClientConfig) - - if (secureAclsEnabled && !isZkSecurityEnabled) - throw new java.lang.SecurityException( - s"${ZkConfigs.ZK_ENABLE_SECURE_ACLS_CONFIG} is true, but ZooKeeper client TLS configuration identifying at least " + - s"${ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG}, ${ZkConfigs.ZK_CLIENT_CNXN_SOCKET_CONFIG}, and " + - s"${ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG} was not present and the verification of the JAAS login file failed " + - s"${JaasUtils.zkSecuritySysConfigString}") - - KafkaZkClient(config.zkConnect, secureAclsEnabled, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, - config.zkMaxInFlightRequests, time, name = name, zkClientConfig = zkClientConfig, - createChrootIfNecessary = true) - } -} diff --git a/core/src/main/scala/kafka/zk/ZkData.scala b/core/src/main/scala/kafka/zk/ZkData.scala deleted file mode 100644 index 7c1ec8ab56577..0000000000000 --- a/core/src/main/scala/kafka/zk/ZkData.scala +++ /dev/null @@ -1,1140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -package kafka.zk - -import java.nio.charset.StandardCharsets.UTF_8 -import java.util -import java.util.Properties -import com.fasterxml.jackson.annotation.JsonProperty -import com.fasterxml.jackson.core.JsonProcessingException -import kafka.cluster.{Broker, EndPoint} -import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener} -import kafka.controller.{IsrChangeNotificationHandler, LeaderIsrAndControllerEpoch, ReplicaAssignment} -import kafka.server.DelegationTokenManagerZk -import kafka.utils.Json -import kafka.utils.json.JsonObject -import org.apache.kafka.common.errors.UnsupportedVersionException -import org.apache.kafka.common.feature.Features._ -import org.apache.kafka.common.feature.{Features, SupportedVersionRange} -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.security.token.delegation.TokenInformation -import org.apache.kafka.common.utils.{SecurityUtils, Time} -import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationLeadershipState -import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.common.{MetadataVersion, ProducerIdsBlock} -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_0_IV1, IBP_2_7_IV0} -import org.apache.kafka.server.config.ConfigType -import org.apache.zookeeper.ZooDefs -import org.apache.zookeeper.data.{ACL, Stat} - -import scala.beans.BeanProperty -import scala.collection.mutable.ArrayBuffer -import scala.collection.{Map, Seq, immutable, mutable} -import scala.jdk.CollectionConverters._ -import scala.util.{Failure, Success, Try} - -// This file contains objects for encoding/decoding data stored in ZooKeeper nodes (znodes). 
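For context on the pattern used throughout the deleted ZkData.scala: each znode gets a small object that pairs a `path` with `encode`/`decode` helpers for the JSON payload stored at that path. The sketch below is a minimal, hypothetical illustration of that shape, not code from the removed file; `ExampleCounterZNode`, its path, and its payload schema are invented, and plain string handling stands in for `kafka.utils.Json`.

import java.nio.charset.StandardCharsets.UTF_8

// Hypothetical znode helper mirroring the path/encode/decode convention used below.
object ExampleCounterZNode {
  // Invented znode location; the real objects build paths such as "/brokers/ids/<id>".
  def path = "/example/counter"

  // Bytes written to the znode, e.g. {"version":1,"value":42}.
  def encode(value: Long): Array[Byte] =
    s"""{"version":1,"value":$value}""".getBytes(UTF_8)

  // Deliberately naive parse of the payload; the real objects delegate to kafka.utils.Json.
  def decode(bytes: Array[Byte]): Option[Long] =
    "\"value\":(\\d+)".r
      .findFirstMatchIn(new String(bytes, UTF_8))
      .map(_.group(1).toLong)
}

ControllerZNode, BrokerIdZNode, TopicZNode and the other objects that follow all take this same shape, differing only in path layout and payload schema.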
- -object ControllerZNode { - def path = "/controller" - def encode(brokerId: Int, timestamp: Long, kraftControllerEpoch: Int = -1): Array[Byte] = { - Json.encodeAsBytes(Map( - "version" -> 2, - "brokerid" -> brokerId, - "timestamp" -> timestamp.toString, - "kraftControllerEpoch" -> kraftControllerEpoch).asJava) - } - def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js => - js.asJsonObject("brokerid").to[Int] - } - def decodeController(bytes: Array[Byte], zkVersion: Int): ZKControllerRegistration = Json.tryParseBytes(bytes) match { - case Right(json) => - val controller = json.asJsonObject - val brokerId = controller("brokerid").to[Int] - val kraftControllerEpoch = controller.get("kraftControllerEpoch").map(j => j.to[Int]) - ZKControllerRegistration(brokerId, kraftControllerEpoch, zkVersion) - - case Left(err) => - throw new KafkaException(s"Failed to parse ZooKeeper registration for controller: ${new String(bytes, UTF_8)}", err) - } -} - -case class ZKControllerRegistration(broker: Int, kraftEpoch: Option[Int], zkVersion: Int) - -object ControllerEpochZNode { - def path = "/controller_epoch" - def encode(epoch: Int): Array[Byte] = epoch.toString.getBytes(UTF_8) - def decode(bytes: Array[Byte]): Int = new String(bytes, UTF_8).toInt -} - -object ConfigZNode { - def path = "/config" -} - -object BrokersZNode { - def path = "/brokers" -} - -object BrokerIdsZNode { - def path = s"${BrokersZNode.path}/ids" - def encode: Array[Byte] = null -} - -object BrokerInfo { - - /** - * - Create a broker info with v5 json format if the metadataVersion is 2.7.x or above. - * - Create a broker info with v4 json format (which includes multiple endpoints and rack) if - * the metadataVersion is 0.10.0.X or above but lesser than 2.7.x. - * - Register the broker with v2 json format otherwise. - * - * Due to KAFKA-3100, 0.9.0.0 broker and old clients will break if JSON version is above 2. - * - * We include v2 to make it possible for the broker to migrate from 0.9.0.0 to 0.10.0.X or above - * without having to upgrade to 0.9.0.1 first (clients have to be upgraded to 0.9.0.1 in - * any case). - */ - def apply(broker: Broker, metadataVersion: MetadataVersion, jmxPort: Int): BrokerInfo = { - val version = { - if (metadataVersion.isAtLeast(IBP_2_7_IV0)) - 5 - else if (metadataVersion.isAtLeast(IBP_0_10_0_IV1)) - 4 - else - 2 - } - BrokerInfo(broker, version, jmxPort) - } - -} - -case class BrokerInfo(broker: Broker, version: Int, jmxPort: Int) { - val path: String = BrokerIdZNode.path(broker.id) - def toJsonBytes: Array[Byte] = BrokerIdZNode.encode(this) -} - -object BrokerIdZNode { - private val HostKey = "host" - private val PortKey = "port" - private val VersionKey = "version" - private val EndpointsKey = "endpoints" - private val RackKey = "rack" - private val JmxPortKey = "jmx_port" - private val ListenerSecurityProtocolMapKey = "listener_security_protocol_map" - private val TimestampKey = "timestamp" - private val FeaturesKey = "features" - - def path(id: Int) = s"${BrokerIdsZNode.path}/$id" - - /** - * Encode to JSON bytes. - * - * The JSON format includes a top level host and port for compatibility with older clients. 
- */ - def encode(version: Int, host: String, port: Int, advertisedEndpoints: Seq[EndPoint], jmxPort: Int, - rack: Option[String], features: Features[SupportedVersionRange]): Array[Byte] = { - val jsonMap = collection.mutable.Map(VersionKey -> version, - HostKey -> host, - PortKey -> port, - EndpointsKey -> advertisedEndpoints.map(_.connectionString).toBuffer.asJava, - JmxPortKey -> jmxPort, - TimestampKey -> Time.SYSTEM.milliseconds().toString - ) - rack.foreach(rack => if (version >= 3) jsonMap += (RackKey -> rack)) - - if (version >= 4) { - jsonMap += (ListenerSecurityProtocolMapKey -> advertisedEndpoints.map { endPoint => - endPoint.listenerName.value -> endPoint.securityProtocol.name - }.toMap.asJava) - } - - if (version >= 5) { - jsonMap += (FeaturesKey -> features.toMap) - } - Json.encodeAsBytes(jsonMap.asJava) - } - - def encode(brokerInfo: BrokerInfo): Array[Byte] = { - val broker = brokerInfo.broker - // the default host and port are here for compatibility with older clients that only support PLAINTEXT - // we choose the first plaintext port, if there is one - // or we register an empty endpoint, which means that older clients will not be able to connect - val plaintextEndpoint = broker.endPoints.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).getOrElse( - new EndPoint(null, -1, null, null)) - encode(brokerInfo.version, plaintextEndpoint.host, plaintextEndpoint.port, broker.endPoints, brokerInfo.jmxPort, - broker.rack, broker.features) - } - - private def featuresAsJavaMap(brokerInfo: JsonObject): util.Map[String, util.Map[String, java.lang.Short]] = { - FeatureZNode.asJavaMap(brokerInfo - .get(FeaturesKey) - .flatMap(_.to[Option[Map[String, Map[String, Int]]]]) - .map(theMap => theMap.map { - case(featureName, versionsInfo) => featureName -> versionsInfo.map { - case(label, version) => label -> version.asInstanceOf[Short] - }.toMap - }.toMap) - .getOrElse(Map[String, Map[String, Short]]())) - } - - /** - * Create a BrokerInfo object from id and JSON bytes. 
- * - * @param id - * @param jsonBytes - * - * Version 1 JSON schema for a broker is: - * { - * "version":1, - * "host":"localhost", - * "port":9092 - * "jmx_port":9999, - * "timestamp":"2233345666" - * } - * - * Version 2 JSON schema for a broker is: - * { - * "version":2, - * "host":"localhost", - * "port":9092, - * "jmx_port":9999, - * "timestamp":"2233345666", - * "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"] - * } - * - * Version 3 JSON schema for a broker is: - * { - * "version":3, - * "host":"localhost", - * "port":9092, - * "jmx_port":9999, - * "timestamp":"2233345666", - * "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"], - * "rack":"dc1" - * } - * - * Version 4 JSON schema for a broker is: - * { - * "version":4, - * "host":"localhost", - * "port":9092, - * "jmx_port":9999, - * "timestamp":"2233345666", - * "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - * "rack":"dc1" - * } - * - * Version 5 (current) JSON schema for a broker is: - * { - * "version":5, - * "host":"localhost", - * "port":9092, - * "jmx_port":9999, - * "timestamp":"2233345666", - * "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - * "rack":"dc1", - * "features": {"feature": {"min_version":1, "first_active_version":2, "max_version":3}} - * } - */ - def decode(id: Int, jsonBytes: Array[Byte]): BrokerInfo = { - Json.tryParseBytes(jsonBytes) match { - case Right(js) => - val brokerInfo = js.asJsonObject - val version = brokerInfo(VersionKey).to[Int] - val jmxPort = brokerInfo(JmxPortKey).to[Int] - - val endpoints = - if (version < 1) - throw new KafkaException("Unsupported version of broker registration: " + - s"${new String(jsonBytes, UTF_8)}") - else if (version == 1) { - val host = brokerInfo(HostKey).to[String] - val port = brokerInfo(PortKey).to[Int] - val securityProtocol = SecurityProtocol.PLAINTEXT - val endPoint = new EndPoint(host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol) - Seq(endPoint) - } - else { - val securityProtocolMap = brokerInfo.get(ListenerSecurityProtocolMapKey) match { - case None => SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO - case Some(m) => { - val result = new java.util.HashMap[ListenerName, SecurityProtocol]() - m.to[Map[String, String]].foreach { - case (k, v) => result.put( - new ListenerName(k), SecurityProtocol.forName(v)) - } - result - } - } - val listenersString = brokerInfo(EndpointsKey).to[Seq[String]].mkString(",") - SocketServerConfigs.listenerListToEndPoints(listenersString, securityProtocolMap). 
- asScala.map(EndPoint.fromJava(_)) - } - - val rack = brokerInfo.get(RackKey).flatMap(_.to[Option[String]]) - val features = featuresAsJavaMap(brokerInfo) - BrokerInfo( - Broker(id, endpoints, rack, fromSupportedFeaturesMap(features)), version, jmxPort) - case Left(e) => - throw new KafkaException(s"Failed to parse ZooKeeper registration for broker $id: " + - s"${new String(jsonBytes, UTF_8)}", e) - } - } -} - -object TopicsZNode { - def path = s"${BrokersZNode.path}/topics" -} - -object TopicZNode { - case class TopicIdReplicaAssignment(topic: String, - topicId: Option[Uuid], - assignment: Map[TopicPartition, ReplicaAssignment]) - def path(topic: String) = s"${TopicsZNode.path}/$topic" - def encode(topicId: Option[Uuid], - assignment: collection.Map[TopicPartition, ReplicaAssignment]): Array[Byte] = { - val replicaAssignmentJson = mutable.Map[String, util.List[Int]]() - val addingReplicasAssignmentJson = mutable.Map[String, util.List[Int]]() - val removingReplicasAssignmentJson = mutable.Map[String, util.List[Int]]() - - for ((partition, replicaAssignment) <- assignment) { - replicaAssignmentJson += (partition.partition.toString -> replicaAssignment.replicas.asJava) - if (replicaAssignment.addingReplicas.nonEmpty) - addingReplicasAssignmentJson += (partition.partition.toString -> replicaAssignment.addingReplicas.asJava) - if (replicaAssignment.removingReplicas.nonEmpty) - removingReplicasAssignmentJson += (partition.partition.toString -> replicaAssignment.removingReplicas.asJava) - } - - val topicAssignment = mutable.Map( - "version" -> 3, - "partitions" -> replicaAssignmentJson.asJava, - "adding_replicas" -> addingReplicasAssignmentJson.asJava, - "removing_replicas" -> removingReplicasAssignmentJson.asJava - ) - topicId.foreach(id => topicAssignment += "topic_id" -> id.toString) - - Json.encodeAsBytes(topicAssignment.asJava) - } - def decode(topic: String, bytes: Array[Byte]): TopicIdReplicaAssignment = { - def getReplicas(replicasJsonOpt: Option[JsonObject], partition: String): Seq[Int] = { - replicasJsonOpt match { - case Some(replicasJson) => replicasJson.get(partition) match { - case Some(ar) => ar.to[Seq[Int]] - case None => Seq.empty[Int] - } - case None => Seq.empty[Int] - } - } - - Json.parseBytes(bytes).map { js => - val assignmentJson = js.asJsonObject - val topicId = assignmentJson.get("topic_id").map(_.to[String]).map(Uuid.fromString) - val addingReplicasJsonOpt = assignmentJson.get("adding_replicas").map(_.asJsonObject) - val removingReplicasJsonOpt = assignmentJson.get("removing_replicas").map(_.asJsonObject) - val partitionsJsonOpt = assignmentJson.get("partitions").map(_.asJsonObject) - val partitions = partitionsJsonOpt.map { partitionsJson => - partitionsJson.iterator.map { case (partition, replicas) => - new TopicPartition(topic, partition.toInt) -> ReplicaAssignment( - replicas.to[Seq[Int]], - getReplicas(addingReplicasJsonOpt, partition), - getReplicas(removingReplicasJsonOpt, partition) - ) - }.toMap - }.getOrElse(immutable.Map.empty[TopicPartition, ReplicaAssignment]) - - TopicIdReplicaAssignment(topic, topicId, partitions) - }.getOrElse(TopicIdReplicaAssignment(topic, None, Map.empty[TopicPartition, ReplicaAssignment])) - } -} - -object TopicPartitionsZNode { - def path(topic: String) = s"${TopicZNode.path(topic)}/partitions" -} - -object TopicPartitionZNode { - def path(partition: TopicPartition) = s"${TopicPartitionsZNode.path(partition.topic)}/${partition.partition}" -} - -object TopicPartitionStateZNode { - def path(partition: TopicPartition) = 
s"${TopicPartitionZNode.path(partition)}/state" - - def encode(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch): Array[Byte] = { - val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr - val controllerEpoch = leaderIsrAndControllerEpoch.controllerEpoch - var partitionState = Map( - "version" -> 1, - "leader" -> leaderAndIsr.leader, - "leader_epoch" -> leaderAndIsr.leaderEpoch, - "controller_epoch" -> controllerEpoch, - "isr" -> leaderAndIsr.isr - ) - - if (leaderAndIsr.leaderRecoveryState != LeaderRecoveryState.RECOVERED) { - partitionState = partitionState ++ Seq("leader_recovery_state" -> leaderAndIsr.leaderRecoveryState.value.toInt) - } - - Json.encodeAsBytes(partitionState.asJava) - } - - def decode(bytes: Array[Byte], stat: Stat): Option[LeaderIsrAndControllerEpoch] = { - Json.parseBytes(bytes).map { js => - val leaderIsrAndEpochInfo = js.asJsonObject - val leader = leaderIsrAndEpochInfo("leader").to[Int] - val epoch = leaderIsrAndEpochInfo("leader_epoch").to[Int] - val isr = leaderIsrAndEpochInfo("isr").to[List[Int]] - val recovery = leaderIsrAndEpochInfo - .get("leader_recovery_state") - .map(jsonValue => LeaderRecoveryState.of(jsonValue.to[Int].toByte)) - .getOrElse(LeaderRecoveryState.RECOVERED) - val controllerEpoch = leaderIsrAndEpochInfo("controller_epoch").to[Int] - - val zkPathVersion = stat.getVersion - LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader, epoch, isr.map(Int.box).asJava, recovery, zkPathVersion), controllerEpoch) - } - } -} - -object ConfigEntityTypeZNode { - def path(entityType: String) = s"${ConfigZNode.path}/$entityType" -} - -object ConfigEntityZNode { - def path(entityType: String, entityName: String) = s"${ConfigEntityTypeZNode.path(entityType)}/$entityName" - def encode(config: Properties): Array[Byte] = { - Json.encodeAsBytes(Map("version" -> 1, "config" -> config).asJava) - } - def decode(bytes: Array[Byte]): Properties = { - val props = new Properties() - if (bytes != null) { - Json.parseBytes(bytes).foreach { js => - val configOpt = js.asJsonObjectOption.flatMap(_.get("config").flatMap(_.asJsonObjectOption)) - configOpt.foreach(config => config.iterator.foreach { case (k, v) => props.setProperty(k, v.to[String]) }) - } - } - props - } -} - -object ConfigEntityChangeNotificationZNode { - def path = s"${ConfigZNode.path}/changes" -} - -object ConfigEntityChangeNotificationSequenceZNode { - val SequenceNumberPrefix = "config_change_" - def createPath = s"${ConfigEntityChangeNotificationZNode.path}/$SequenceNumberPrefix" - def encode(sanitizedEntityPath: String): Array[Byte] = Json.encodeAsBytes( - Map("version" -> 2, "entity_path" -> sanitizedEntityPath).asJava) -} - -object IsrChangeNotificationZNode { - def path = "/isr_change_notification" -} - -object IsrChangeNotificationSequenceZNode { - val SequenceNumberPrefix = "isr_change_" - def path(sequenceNumber: String = "") = s"${IsrChangeNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber" - def encode(partitions: collection.Set[TopicPartition]): Array[Byte] = { - val partitionsJson = partitions.map(partition => Map("topic" -> partition.topic, "partition" -> partition.partition).asJava) - Json.encodeAsBytes(Map("version" -> IsrChangeNotificationHandler.Version, "partitions" -> partitionsJson.asJava).asJava) - } - - def decode(bytes: Array[Byte]): Set[TopicPartition] = { - Json.parseBytes(bytes).map { js => - val partitionsJson = js.asJsonObject("partitions").asJsonArray - partitionsJson.iterator.map { partitionsJson => - val partitionJson = partitionsJson.asJsonObject - val 
topic = partitionJson("topic").to[String] - val partition = partitionJson("partition").to[Int] - new TopicPartition(topic, partition) - } - } - }.map(_.toSet).getOrElse(Set.empty) - def sequenceNumber(path: String): String = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length) -} - -object LogDirEventNotificationZNode { - def path = "/log_dir_event_notification" -} - -object LogDirEventNotificationSequenceZNode { - val SequenceNumberPrefix = "log_dir_event_" - private val LogDirFailureEvent = 1 - def path(sequenceNumber: String) = s"${LogDirEventNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber" - def encode(brokerId: Int): Array[Byte] = { - Json.encodeAsBytes(Map("version" -> 1, "broker" -> brokerId, "event" -> LogDirFailureEvent).asJava) - } - def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js => - js.asJsonObject("broker").to[Int] - } - def sequenceNumber(path: String): String = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length) -} - -object AdminZNode { - def path = "/admin" -} - -object DeleteTopicsZNode { - def path = s"${AdminZNode.path}/delete_topics" -} - -object DeleteTopicsTopicZNode { - def path(topic: String) = s"${DeleteTopicsZNode.path}/$topic" -} - -/** - * The znode for initiating a partition reassignment. - * @deprecated Since 2.4, use the PartitionReassignment Kafka API instead. - */ -object ReassignPartitionsZNode { - - /** - * The assignment of brokers for a `TopicPartition`. - * - * A replica assignment consists of a `topic`, `partition` and a list of `replicas`, which - * represent the broker ids that the `TopicPartition` is assigned to. - */ - case class ReplicaAssignment(@BeanProperty @JsonProperty("topic") topic: String, - @BeanProperty @JsonProperty("partition") partition: Int, - @BeanProperty @JsonProperty("replicas") replicas: java.util.List[Int]) - - /** - * An assignment consists of a `version` and a list of `partitions`, which represent the - * assignment of topic-partitions to brokers. 
- * @deprecated Use the PartitionReassignment Kafka API instead - */ - @Deprecated - case class LegacyPartitionAssignment(@BeanProperty @JsonProperty("version") version: Int, - @BeanProperty @JsonProperty("partitions") partitions: java.util.List[ReplicaAssignment]) - - def path = s"${AdminZNode.path}/reassign_partitions" - - def encode(reassignmentMap: collection.Map[TopicPartition, Seq[Int]]): Array[Byte] = { - val reassignment = LegacyPartitionAssignment(1, - reassignmentMap.toSeq.map { case (tp, replicas) => - ReplicaAssignment(tp.topic, tp.partition, replicas.asJava) - }.asJava - ) - Json.encodeAsBytes(reassignment) - } - - def decode(bytes: Array[Byte]): Either[JsonProcessingException, collection.Map[TopicPartition, Seq[Int]]] = - Json.parseBytesAs[LegacyPartitionAssignment](bytes).map { partitionAssignment => - partitionAssignment.partitions.asScala.iterator.map { replicaAssignment => - new TopicPartition(replicaAssignment.topic, replicaAssignment.partition) -> replicaAssignment.replicas.asScala - }.toMap - } -} - -object PreferredReplicaElectionZNode { - def path = s"${AdminZNode.path}/preferred_replica_election" - def encode(partitions: Set[TopicPartition]): Array[Byte] = { - val jsonMap = Map("version" -> 1, - "partitions" -> partitions.map(tp => Map("topic" -> tp.topic, "partition" -> tp.partition).asJava).asJava) - Json.encodeAsBytes(jsonMap.asJava) - } - def decode(bytes: Array[Byte]): Set[TopicPartition] = Json.parseBytes(bytes).map { js => - val partitionsJson = js.asJsonObject("partitions").asJsonArray - partitionsJson.iterator.map { partitionsJson => - val partitionJson = partitionsJson.asJsonObject - val topic = partitionJson("topic").to[String] - val partition = partitionJson("partition").to[Int] - new TopicPartition(topic, partition) - } - }.map(_.toSet).getOrElse(Set.empty) -} - -//old consumer path znode -object ConsumerPathZNode { - def path = "/consumers" -} - -object ConsumerOffset { - def path(group: String, topic: String, partition: Integer) = s"${ConsumerPathZNode.path}/$group/offsets/$topic/$partition" - def encode(offset: Long): Array[Byte] = offset.toString.getBytes(UTF_8) - def decode(bytes: Array[Byte]): Option[Long] = Option(bytes).map(new String(_, UTF_8).toLong) -} - -object ZkVersion { - val MatchAnyVersion: Int = -1 // if used in a conditional set, matches any version (the value should match ZooKeeper codebase) - val UnknownVersion: Int = -2 // Version returned from get if node does not exist (internal constant for Kafka codebase, unused value in ZK) -} - -object ZkStat { - val NoStat = new Stat() -} - -object StateChangeHandlers { - val ControllerHandler = "controller-state-change-handler" - def zkNodeChangeListenerHandler(seqNodeRoot: String) = s"change-notification-$seqNodeRoot" -} - -/** - * Acls for resources are stored in ZK under two root paths: - *
- *   • [[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl'.
- *     The format is JSON. See [[kafka.zk.ResourceZNode]] for details.
- *   • All other patterns are stored under '/kafka-acl-extended/pattern-type'.
- *     The format is JSON. See [[kafka.zk.ResourceZNode]] for details.
- *
- * Under each root node there will be one child node per resource type (Topic, Cluster, Group, etc).
- * Under each resourceType there will be a unique child for each resource pattern and the data for that child will contain
- * list of its acls as a json object. Following gives an example:
- *
- * // Literal patterns:
- * /kafka-acl/Topic/topic-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
- * /kafka-acl/Cluster/kafka-cluster => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
- *
- * // Prefixed patterns:
- * /kafka-acl-extended/PREFIXED/Group/group-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
- *
- * Acl change events are also stored under two paths:
- *   • [[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl-changes'.
- *     The format is a UTF8 string in the form: <resource-type>:<resource-name>
- *   • All other patterns are stored under '/kafka-acl-extended-changes'
- *     The format is JSON, as defined by [[kafka.zk.ExtendedAclChangeEvent]]
              - */ -sealed trait ZkAclStore { - val patternType: PatternType - val aclPath: String - - def path(resourceType: ResourceType): String = s"$aclPath/${SecurityUtils.resourceTypeName(resourceType)}" - - def path(resourceType: ResourceType, resourceName: String): String = s"$aclPath/${SecurityUtils.resourceTypeName(resourceType)}/$resourceName" - - def changeStore: ZkAclChangeStore -} - -object ZkAclStore { - private val storesByType: Map[PatternType, ZkAclStore] = PatternType.values - .filter(_.isSpecific) - .map(patternType => (patternType, create(patternType))) - .toMap - - val stores: Iterable[ZkAclStore] = storesByType.values - - val securePaths: Iterable[String] = stores - .flatMap(store => Set(store.aclPath, store.changeStore.aclChangePath)) - - def apply(patternType: PatternType): ZkAclStore = { - storesByType.get(patternType) match { - case Some(store) => store - case None => throw new KafkaException(s"Invalid pattern type: $patternType") - } - } - - private def create(patternType: PatternType) = { - patternType match { - case PatternType.LITERAL => LiteralAclStore - case _ => new ExtendedAclStore(patternType) - } - } -} - -object LiteralAclStore extends ZkAclStore { - val patternType: PatternType = PatternType.LITERAL - val aclPath: String = "/kafka-acl" - - def changeStore: ZkAclChangeStore = LiteralAclChangeStore -} - -class ExtendedAclStore(val patternType: PatternType) extends ZkAclStore { - if (patternType == PatternType.LITERAL) - throw new IllegalArgumentException("Literal pattern types are not supported") - - val aclPath: String = s"${ExtendedAclZNode.path}/${patternType.name.toLowerCase}" - - def changeStore: ZkAclChangeStore = ExtendedAclChangeStore -} - -object ExtendedAclZNode { - def path = "/kafka-acl-extended" -} - -trait AclChangeNotificationHandler { - def processNotification(resource: ResourcePattern): Unit -} - -trait AclChangeSubscription extends AutoCloseable { - def close(): Unit -} - -case class AclChangeNode(path: String, bytes: Array[Byte]) - -sealed trait ZkAclChangeStore { - val aclChangePath: String - def createPath: String = s"$aclChangePath/${ZkAclChangeStore.SequenceNumberPrefix}" - - def decode(bytes: Array[Byte]): ResourcePattern - - protected def encode(resource: ResourcePattern): Array[Byte] - - def createChangeNode(resource: ResourcePattern): AclChangeNode = AclChangeNode(createPath, encode(resource)) - - def createListener(handler: AclChangeNotificationHandler, zkClient: KafkaZkClient): AclChangeSubscription = { - val rawHandler: NotificationHandler = (bytes: Array[Byte]) => handler.processNotification(decode(bytes)) - - val aclChangeListener = new ZkNodeChangeNotificationListener( - zkClient, aclChangePath, ZkAclChangeStore.SequenceNumberPrefix, rawHandler) - - aclChangeListener.init() - - () => aclChangeListener.close() - } -} - -object ZkAclChangeStore { - val stores: Iterable[ZkAclChangeStore] = List(LiteralAclChangeStore, ExtendedAclChangeStore) - - def SequenceNumberPrefix = "acl_changes_" -} - -case object LiteralAclChangeStore extends ZkAclChangeStore { - val name = "LiteralAclChangeStore" - val aclChangePath: String = "/kafka-acl-changes" - - def encode(resource: ResourcePattern): Array[Byte] = { - if (resource.patternType != PatternType.LITERAL) - throw new IllegalArgumentException("Only literal resource patterns can be encoded") - - val legacyName = resource.resourceType.toString + AclEntry.RESOURCE_SEPARATOR + resource.name - legacyName.getBytes(UTF_8) - } - - def decode(bytes: Array[Byte]): ResourcePattern = { - val 
string = new String(bytes, UTF_8) - string.split(AclEntry.RESOURCE_SEPARATOR, 2) match { - case Array(resourceType, resourceName, _*) => new ResourcePattern(ResourceType.fromString(resourceType), resourceName, PatternType.LITERAL) - case _ => throw new IllegalArgumentException("expected a string in format ResourceType:ResourceName but got " + string) - } - } -} - -case object ExtendedAclChangeStore extends ZkAclChangeStore { - val name = "ExtendedAclChangeStore" - val aclChangePath: String = "/kafka-acl-extended-changes" - - def encode(resource: ResourcePattern): Array[Byte] = { - if (resource.patternType == PatternType.LITERAL) - throw new IllegalArgumentException("Literal pattern types are not supported") - - Json.encodeAsBytes(ExtendedAclChangeEvent( - ExtendedAclChangeEvent.currentVersion, - resource.resourceType.name, - resource.name, - resource.patternType.name)) - } - - def decode(bytes: Array[Byte]): ResourcePattern = { - val changeEvent = Json.parseBytesAs[ExtendedAclChangeEvent](bytes) match { - case Right(event) => event - case Left(e) => throw new IllegalArgumentException("Failed to parse ACL change event", e) - } - - changeEvent.toResource match { - case Success(r) => r - case Failure(e) => throw new IllegalArgumentException("Failed to convert ACL change event to resource", e) - } - } -} - -object ResourceZNode { - def path(resource: ResourcePattern): String = ZkAclStore(resource.patternType).path(resource.resourceType, resource.name) - - def encode(acls: Set[AclEntry]): Array[Byte] = Json.encodeAsBytes(AclEntry.toJsonCompatibleMap(acls.asJava)) - def decode(bytes: Array[Byte], stat: Stat): ZkData.VersionedAcls = ZkData.VersionedAcls(AclEntry.fromBytes(bytes).asScala.toSet, stat.getVersion) -} - -object ExtendedAclChangeEvent { - val currentVersion: Int = 1 -} - -case class ExtendedAclChangeEvent(@BeanProperty @JsonProperty("version") version: Int, - @BeanProperty @JsonProperty("resourceType") resourceType: String, - @BeanProperty @JsonProperty("name") name: String, - @BeanProperty @JsonProperty("patternType") patternType: String) { - if (version > ExtendedAclChangeEvent.currentVersion) - throw new UnsupportedVersionException(s"Acl change event received for unsupported version: $version") - - def toResource: Try[ResourcePattern] = { - for { - resType <- Try(ResourceType.fromString(resourceType)) - patType <- Try(PatternType.fromString(patternType)) - resource = new ResourcePattern(resType, name, patType) - } yield resource - } -} - -object ClusterZNode { - def path = "/cluster" -} - -object ClusterIdZNode { - def path = s"${ClusterZNode.path}/id" - - def toJson(id: String): Array[Byte] = { - Json.encodeAsBytes(Map("version" -> "1", "id" -> id).asJava) - } - - def fromJson(clusterIdJson: Array[Byte]): String = { - Json.parseBytes(clusterIdJson).map(_.asJsonObject("id").to[String]).getOrElse { - throw new KafkaException(s"Failed to parse the cluster id json ${clusterIdJson.mkString("Array(", ", ", ")")}") - } - } -} - -object BrokerSequenceIdZNode { - def path = s"${BrokersZNode.path}/seqid" -} - -object ProducerIdBlockZNode { - val CurrentVersion: Long = 1L - - def path = "/latest_producer_id_block" - - def generateProducerIdBlockJson(producerIdBlock: ProducerIdsBlock): Array[Byte] = { - Json.encodeAsBytes(Map("version" -> CurrentVersion, - "broker" -> producerIdBlock.assignedBrokerId, - "block_start" -> producerIdBlock.firstProducerId.toString, - "block_end" -> producerIdBlock.lastProducerId.toString).asJava - ) - } - - def parseProducerIdBlockData(jsonData: Array[Byte]): 
ProducerIdsBlock = { - val jsonDataAsString = jsonData.map(_.toChar).mkString - try { - Json.parseBytes(jsonData).map(_.asJsonObject).flatMap { js => - val brokerId = js("broker").to[Int] - val blockStart = js("block_start").to[String].toLong - val blockEnd = js("block_end").to[String].toLong - Some(new ProducerIdsBlock(brokerId, blockStart, Math.toIntExact(blockEnd - blockStart + 1))) - }.getOrElse(throw new KafkaException(s"Failed to parse the producerId block json $jsonDataAsString")) - } catch { - case e: java.lang.NumberFormatException => - // this should never happen: the written data has exceeded long type limit - throw new KafkaException(s"Read jason data $jsonDataAsString contains producerIds that have exceeded long type limit", e) - } - } -} - -object DelegationTokenAuthZNode { - def path = "/delegation_token" -} - -object DelegationTokenChangeNotificationZNode { - def path = s"${DelegationTokenAuthZNode.path}/token_changes" -} - -object DelegationTokenChangeNotificationSequenceZNode { - val SequenceNumberPrefix = "token_change_" - def createPath = s"${DelegationTokenChangeNotificationZNode.path}/$SequenceNumberPrefix" - def deletePath(sequenceNode: String) = s"${DelegationTokenChangeNotificationZNode.path}/$sequenceNode" - def encode(tokenId : String): Array[Byte] = tokenId.getBytes(UTF_8) - def decode(bytes: Array[Byte]): String = new String(bytes, UTF_8) -} - -object DelegationTokensZNode { - def path = s"${DelegationTokenAuthZNode.path}/tokens" -} - -object DelegationTokenInfoZNode { - def path(tokenId: String) = s"${DelegationTokensZNode.path}/$tokenId" - def encode(tokenInfo: TokenInformation): Array[Byte] = - Json.encodeAsBytes(DelegationTokenManagerZk.toJsonCompatibleMap(tokenInfo).asJava) - def decode(bytes: Array[Byte]): Option[TokenInformation] = DelegationTokenManagerZk.fromBytes(bytes) -} - -/** - * Represents the status of the FeatureZNode. - * - * Enabled -> This status means the feature versioning system (KIP-584) is enabled, and, the - * finalized features stored in the FeatureZNode are active. This status is written by - * the controller to the FeatureZNode only when the broker IBP config is greater than - * or equal to IBP_2_7_IV0. - * - * Disabled -> This status means the feature versioning system (KIP-584) is disabled, and, the - * the finalized features stored in the FeatureZNode is not relevant. This status is - * written by the controller to the FeatureZNode only when the broker IBP config - * is less than IBP_2_7_IV0. - */ -sealed trait FeatureZNodeStatus { - def id: Int -} - -object FeatureZNodeStatus { - case object Disabled extends FeatureZNodeStatus { - val id: Int = 0 - } - - case object Enabled extends FeatureZNodeStatus { - val id: Int = 1 - } - - def withNameOpt(id: Int): Option[FeatureZNodeStatus] = { - id match { - case Disabled.id => Some(Disabled) - case Enabled.id => Some(Enabled) - case _ => Option.empty - } - } -} - -/** - * Represents the contents of the ZK node containing finalized feature information. 
- * - * @param version the version of ZK node, we removed min_version_level in version 2 - * @param status the status of the ZK node - * @param features the cluster-wide finalized features - */ -case class FeatureZNode(version: Int, status: FeatureZNodeStatus, features: Map[String, Short]) { -} - -object FeatureZNode { - private val VersionKey = "version" - private val StatusKey = "status" - private val FeaturesKey = "features" - private val V1MinVersionKey = "min_version_level" - private val V1MaxVersionKey = "max_version_level" - - // V1 contains 'version', 'status' and 'features' keys. - val V1 = 1 - // V2 removes min_version_level - val V2 = 2 - - /** - * - Create a feature info with v1 json format if if the metadataVersion is before 3.2.0 - * - Create a feature znode with v2 json format if the metadataVersion is 3.2.1 or above. - */ - def apply(metadataVersion: MetadataVersion, status: FeatureZNodeStatus, features: Map[String, Short]): FeatureZNode = { - val version = if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_3_IV0)) { - V2 - } else { - V1 - } - FeatureZNode(version, status, features) - } - - def path = "/feature" - - def asJavaMap(scalaMap: Map[String, Map[String, Short]]): util.Map[String, util.Map[String, java.lang.Short]] = { - scalaMap - .map { - case(featureName, versionInfo) => featureName -> versionInfo.map { - case(label, version) => label -> java.lang.Short.valueOf(version) - }.asJava - }.asJava - } - - /** - * Encodes a FeatureZNode to JSON. - * - * @param featureZNode FeatureZNode to be encoded - * - * @return JSON representation of the FeatureZNode, as an Array[Byte] - */ - def encode(featureZNode: FeatureZNode): Array[Byte] = { - val features = if (featureZNode.version == V1) { - asJavaMap(featureZNode.features.map{ - case (feature, version) => feature -> Map(V1MaxVersionKey -> version, V1MinVersionKey -> version) - }) - } else { - asJavaMap(featureZNode.features.map{ - case (feature, version) => feature -> Map(V1MaxVersionKey -> version) - }) - } - val jsonMap = collection.mutable.Map( - VersionKey -> featureZNode.version, - StatusKey -> featureZNode.status.id, - FeaturesKey -> features) - Json.encodeAsBytes(jsonMap.asJava) - } - - /** - * Decodes the contents of the feature ZK node from Array[Byte] to a FeatureZNode. - * - * @param jsonBytes the contents of the feature ZK node - * - * @return the FeatureZNode created from jsonBytes - * - * @throws IllegalArgumentException if the Array[Byte] can not be decoded. 
- */ - def decode(jsonBytes: Array[Byte]): FeatureZNode = { - Json.tryParseBytes(jsonBytes) match { - case Right(js) => - val featureInfo = js.asJsonObject - val version = featureInfo(VersionKey).to[Int] - if (version < V1 || version > V2) { - throw new IllegalArgumentException(s"Unsupported version: $version of feature information: " + - s"${new String(jsonBytes, UTF_8)}") - } - - val statusInt = featureInfo - .get(StatusKey) - .flatMap(_.to[Option[Int]]) - if (statusInt.isEmpty) { - throw new IllegalArgumentException("Status can not be absent in feature information: " + - s"${new String(jsonBytes, UTF_8)}") - } - val status = FeatureZNodeStatus.withNameOpt(statusInt.get) - if (status.isEmpty) { - throw new IllegalArgumentException( - s"Malformed status: $statusInt found in feature information: ${new String(jsonBytes, UTF_8)}") - } - - val finalizedFeatures = decodeFeature(version, featureInfo, jsonBytes) - FeatureZNode(version, status.get, finalizedFeatures) - case Left(e) => - throw new IllegalArgumentException(s"Failed to parse feature information: " + - s"${new String(jsonBytes, UTF_8)}", e) - } - } - - private def decodeFeature(version: Int, featureInfo: JsonObject, jsonBytes: Array[Byte]): Map[String, Short] = { - val featuresMap = featureInfo - .get(FeaturesKey) - .flatMap(_.to[Option[Map[String, Map[String, Int]]]]) - - if (featuresMap.isEmpty) { - throw new IllegalArgumentException("Features map can not be absent in: " + - s"${new String(jsonBytes, UTF_8)}") - } - featuresMap.get.map { - case (featureName, versionInfo) => - if (version == V1 && !versionInfo.contains(V1MinVersionKey)) { - throw new IllegalArgumentException(s"$V1MinVersionKey absent in [$versionInfo]") - } - if (!versionInfo.contains(V1MaxVersionKey)) { - throw new IllegalArgumentException(s"$V1MaxVersionKey absent in [$versionInfo]") - } - - val minValueOpt = versionInfo.get(V1MinVersionKey) - val maxValue = versionInfo(V1MaxVersionKey) - - if (version == V1 && (minValueOpt.get < 1 || maxValue < minValueOpt.get)) { - throw new IllegalArgumentException(s"Expected minValue >= 1, maxValue >= 1 and maxValue >= minValue, but received minValue: ${minValueOpt.get}, maxValue: $maxValue") - } - if (maxValue < 1) { - throw new IllegalArgumentException(s"Expected maxValue >= 1, but received maxValue: $maxValue") - } - featureName -> maxValue.toShort - } - } -} - -object MigrationZNode { - val path = "/migration" - - def encode(migration: ZkMigrationLeadershipState): Array[Byte] = { - val jsonMap = Map( - "version" -> 0, - "kraft_controller_id" -> migration.kraftControllerId(), - "kraft_controller_epoch" -> migration.kraftControllerEpoch(), - "kraft_metadata_offset" -> migration.kraftMetadataOffset(), - "kraft_metadata_epoch" -> migration.kraftMetadataEpoch() - ) - Json.encodeAsBytes(jsonMap.asJava) - } - - def decode(bytes: Array[Byte], zkVersion: Int, modifyTimeMs: Long): ZkMigrationLeadershipState = { - val jsonDataAsString = bytes.map(_.toChar).mkString - Json.parseBytes(bytes).map(_.asJsonObject).flatMap { js => - val version = js("version").to[Int] - if (version != 0) { - throw new KafkaException(s"Encountered unknown version $version when parsing migration json $jsonDataAsString") - } - val controllerId = js("kraft_controller_id").to[Int] - val controllerEpoch = js("kraft_controller_epoch").to[Int] - val metadataOffset = js("kraft_metadata_offset").to[Long] - val metadataEpoch = js("kraft_metadata_epoch").to[Int] - Some(new ZkMigrationLeadershipState( - controllerId, - controllerEpoch, - metadataOffset, - 
metadataEpoch, - modifyTimeMs, - zkVersion, - ZkMigrationLeadershipState.EMPTY.zkControllerEpoch(), - ZkMigrationLeadershipState.EMPTY.zkControllerEpochZkVersion())) - }.getOrElse(throw new KafkaException(s"Failed to parse the migration json $jsonDataAsString")) - } -} - -object ZkData { - - case class VersionedAcls(acls: Set[AclEntry], zkVersion: Int) { - def exists: Boolean = zkVersion != ZkVersion.UnknownVersion - } - - - // Important: it is necessary to add any new top level Zookeeper path to the Seq - val SecureRootPaths: Seq[String] = Seq(AdminZNode.path, - BrokersZNode.path, - ClusterZNode.path, - ConfigZNode.path, - ControllerZNode.path, - ControllerEpochZNode.path, - IsrChangeNotificationZNode.path, - ProducerIdBlockZNode.path, - LogDirEventNotificationZNode.path, - DelegationTokenAuthZNode.path, - ExtendedAclZNode.path, - MigrationZNode.path, - FeatureZNode.path) ++ ZkAclStore.securePaths - - // These are persistent ZK paths that should exist on kafka broker startup. - val PersistentZkPaths: Seq[String] = Seq( - ConsumerPathZNode.path, // old consumer path - BrokerIdsZNode.path, - TopicsZNode.path, - ConfigEntityChangeNotificationZNode.path, - DeleteTopicsZNode.path, - BrokerSequenceIdZNode.path, - IsrChangeNotificationZNode.path, - ProducerIdBlockZNode.path, - LogDirEventNotificationZNode.path - ) ++ ConfigType.ALL.asScala.map(ConfigEntityTypeZNode.path) - - val SensitiveRootPaths: Seq[String] = Seq( - ConfigEntityTypeZNode.path(ConfigType.USER), - ConfigEntityTypeZNode.path(ConfigType.BROKER), - DelegationTokensZNode.path - ) - - def sensitivePath(path: String): Boolean = { - path != null && SensitiveRootPaths.exists(path.startsWith) - } - - def defaultAcls(isSecure: Boolean, path: String): Seq[ACL] = { - //Old Consumer path is kept open as different consumers will write under this node. - if (!ConsumerPathZNode.path.equals(path) && isSecure) { - val acls = new ArrayBuffer[ACL] - acls ++= ZooDefs.Ids.CREATOR_ALL_ACL.asScala - if (!sensitivePath(path)) - acls ++= ZooDefs.Ids.READ_ACL_UNSAFE.asScala - acls - } else ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala - } -} diff --git a/core/src/main/scala/kafka/zk/ZkSecurityMigratorUtils.scala b/core/src/main/scala/kafka/zk/ZkSecurityMigratorUtils.scala deleted file mode 100644 index 31a7ba2907379..0000000000000 --- a/core/src/main/scala/kafka/zk/ZkSecurityMigratorUtils.scala +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.zk - -import org.apache.zookeeper.ZooKeeper - -/** - * This class should only be used in ZkSecurityMigrator tool. - * This class will be removed after we migrate ZkSecurityMigrator away from ZK's asynchronous API. 
- * @param kafkaZkClient - */ -class ZkSecurityMigratorUtils(val kafkaZkClient: KafkaZkClient) { - - def currentZooKeeper: ZooKeeper = kafkaZkClient.currentZooKeeper - -} diff --git a/core/src/main/scala/kafka/zookeeper/ZooKeeperClient.scala b/core/src/main/scala/kafka/zookeeper/ZooKeeperClient.scala deleted file mode 100755 index 3f77acf3dbb62..0000000000000 --- a/core/src/main/scala/kafka/zookeeper/ZooKeeperClient.scala +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.zookeeper - -import java.util.Locale -import java.util.concurrent.locks.{ReentrantLock, ReentrantReadWriteLock} -import java.util.concurrent._ -import java.util.{List => JList} -import kafka.utils.CoreUtils.{inLock, inReadLock, inWriteLock} -import kafka.utils.Logging -import kafka.zookeeper.ZooKeeperClient._ -import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.util.KafkaScheduler -import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.zookeeper.AsyncCallback.{Children2Callback, DataCallback, StatCallback} -import org.apache.zookeeper.KeeperException.Code -import org.apache.zookeeper.Watcher.Event.{EventType, KeeperState} -import org.apache.zookeeper.ZooKeeper.States -import org.apache.zookeeper.data.{ACL, Stat} -import org.apache.zookeeper._ -import org.apache.zookeeper.client.ZKClientConfig - -import scala.jdk.CollectionConverters._ -import scala.collection.{Seq, mutable} - -object ZooKeeperClient { - val RetryBackoffMs = 1000 -} - -/** - * A ZooKeeper client that encourages pipelined requests. - * - * @param connectString comma separated host:port pairs, each corresponding to a zk server - * @param sessionTimeoutMs session timeout in milliseconds - * @param connectionTimeoutMs connection timeout in milliseconds - * @param maxInFlightRequests maximum number of unacknowledged requests the client will send before blocking. 
- * @param clientConfig ZooKeeper client configuration, for TLS configs if desired - * @param name name of the client instance - */ -class ZooKeeperClient(connectString: String, - sessionTimeoutMs: Int, - connectionTimeoutMs: Int, - maxInFlightRequests: Int, - time: Time, - metricGroup: String, - metricType: String, - private[zookeeper] val clientConfig: ZKClientConfig, - name: String) extends Logging { - - private val metricsGroup: KafkaMetricsGroup = new KafkaMetricsGroup(metricGroup, metricType) - - this.logIdent = s"[ZooKeeperClient $name] " - private val initializationLock = new ReentrantReadWriteLock() - private val isConnectedOrExpiredLock = new ReentrantLock() - private val isConnectedOrExpiredCondition = isConnectedOrExpiredLock.newCondition() - private val zNodeChangeHandlers = new ConcurrentHashMap[String, ZNodeChangeHandler]().asScala - private val zNodeChildChangeHandlers = new ConcurrentHashMap[String, ZNodeChildChangeHandler]().asScala - private val inFlightRequests = new Semaphore(maxInFlightRequests) - private val stateChangeHandlers = new ConcurrentHashMap[String, StateChangeHandler]().asScala - private[zookeeper] val reinitializeScheduler = new KafkaScheduler(1, true, s"zk-client-${threadPrefix}reinit-") - private var isFirstConnectionEstablished = false - - private val metricNames = mutable.Set[String]() - - // The state map has to be created before creating ZooKeeper since it's needed in the ZooKeeper callback. - private val stateToMeterMap = { - import KeeperState._ - val stateToEventTypeMap = Map( - Disconnected -> "Disconnects", - SyncConnected -> "SyncConnects", - AuthFailed -> "AuthFailures", - ConnectedReadOnly -> "ReadOnlyConnects", - SaslAuthenticated -> "SaslAuthentications", - Expired -> "Expires" - ) - stateToEventTypeMap.map { case (state, eventType) => - val name = s"ZooKeeper${eventType}PerSec" - metricNames += name - state -> metricsGroup.newMeter(name, eventType.toLowerCase(Locale.ROOT), TimeUnit.SECONDS) - } - } - - info(s"Initializing a new session to $connectString.") - // Fail-fast if there's an error during construction (so don't call initialize, which retries forever) - @volatile private var zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher, - clientConfig) - - metricsGroup.newGauge("SessionState", () => connectionState.toString) - - metricNames += "SessionState" - - reinitializeScheduler.startup() - try waitUntilConnected(connectionTimeoutMs, TimeUnit.MILLISECONDS) - catch { - case e: Throwable => - close() - throw e - } - - /** - * Return the state of the ZooKeeper connection. - */ - def connectionState: States = zooKeeper.getState - - /** - * Send a request and wait for its response. See handle(Seq[AsyncRequest]) for details. - * - * @param request a single request to send and wait on. - * @return an instance of the response with the specific type (e.g. CreateRequest -> CreateResponse). - */ - def handleRequest[Req <: AsyncRequest](request: Req): Req#Response = { - handleRequests(Seq(request)).head - } - - /** - * Send a pipelined sequence of requests and wait for all of their responses. - * - * The watch flag on each outgoing request will be set if we've already registered a handler for the - * path associated with the request. - * - * @param requests a sequence of requests to send and wait on. - * @return the responses for the requests. If all requests have the same type, the responses will have the respective - * response type (e.g. Seq[CreateRequest] -> Seq[CreateResponse]). 
Otherwise, the most specific common supertype - * will be used (e.g. Seq[AsyncRequest] -> Seq[AsyncResponse]). - */ - def handleRequests[Req <: AsyncRequest](requests: Seq[Req]): Seq[Req#Response] = { - if (requests.isEmpty) - Seq.empty - else { - val countDownLatch = new CountDownLatch(requests.size) - val responseQueue = new ArrayBlockingQueue[Req#Response](requests.size) - - requests.foreach { request => - inFlightRequests.acquire() - try { - inReadLock(initializationLock) { - send(request) { response => - responseQueue.add(response) - inFlightRequests.release() - countDownLatch.countDown() - } - } - } catch { - case e: Throwable => - inFlightRequests.release() - throw e - } - } - countDownLatch.await() - responseQueue.asScala.toBuffer - } - } - - // Visibility to override for testing - private[zookeeper] def send[Req <: AsyncRequest](request: Req)(processResponse: Req#Response => Unit): Unit = { - // Safe to cast as we always create a response of the right type - def callback(response: AsyncResponse): Unit = processResponse(response.asInstanceOf[Req#Response]) - - def responseMetadata(sendTimeMs: Long) = ResponseMetadata(sendTimeMs, receivedTimeMs = time.hiResClockMs()) - - val sendTimeMs = time.hiResClockMs() - - // Cast to AsyncRequest to workaround a scalac bug that results in an false exhaustiveness warning - // with -Xlint:strict-unsealed-patmat - (request: AsyncRequest) match { - case ExistsRequest(path, ctx) => - zooKeeper.exists(path, shouldWatch(request), new StatCallback { - def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit = - callback(ExistsResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs))) - }, ctx.orNull) - case GetDataRequest(path, ctx) => - zooKeeper.getData(path, shouldWatch(request), new DataCallback { - def processResult(rc: Int, path: String, ctx: Any, data: Array[Byte], stat: Stat): Unit = - callback(GetDataResponse(Code.get(rc), path, Option(ctx), data, stat, responseMetadata(sendTimeMs))) - }, ctx.orNull) - case GetChildrenRequest(path, _, ctx) => - zooKeeper.getChildren(path, shouldWatch(request), new Children2Callback { - def processResult(rc: Int, path: String, ctx: Any, children: JList[String], stat: Stat): Unit = - callback(GetChildrenResponse(Code.get(rc), path, Option(ctx), Option(children).map(_.asScala).getOrElse(Seq.empty), - stat, responseMetadata(sendTimeMs))) - }, ctx.orNull) - case CreateRequest(path, data, acl, createMode, ctx) => - zooKeeper.create(path, data, acl.asJava, createMode, - (rc, path, ctx, name) => - callback(CreateResponse(Code.get(rc), path, Option(ctx), name, responseMetadata(sendTimeMs))), - ctx.orNull) - case SetDataRequest(path, data, version, ctx) => - zooKeeper.setData(path, data, version, - (rc, path, ctx, stat) => - callback(SetDataResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs))), - ctx.orNull) - case DeleteRequest(path, version, ctx) => - zooKeeper.delete(path, version, - (rc, path, ctx) => callback(DeleteResponse(Code.get(rc), path, Option(ctx), responseMetadata(sendTimeMs))), - ctx.orNull) - case GetAclRequest(path, ctx) => - zooKeeper.getACL(path, null, - (rc, path, ctx, acl, stat) => - callback(GetAclResponse(Code.get(rc), path, Option(ctx), Option(acl).map(_.asScala).getOrElse(Seq.empty), - stat, responseMetadata(sendTimeMs))), - ctx.orNull) - case SetAclRequest(path, acl, version, ctx) => - zooKeeper.setACL(path, acl.asJava, version, - (rc, path, ctx, stat) => - callback(SetAclResponse(Code.get(rc), path, Option(ctx), stat, 
responseMetadata(sendTimeMs))), - ctx.orNull) - case MultiRequest(zkOps, ctx) => - def toZkOpResult(opResults: JList[OpResult]): Seq[ZkOpResult] = - Option(opResults).map(results => zkOps.zip(results.asScala).map { case (zkOp, result) => - ZkOpResult(zkOp, result) - }).orNull - zooKeeper.multi(zkOps.map(_.toZookeeperOp).asJava, - (rc, path, ctx, opResults) => - callback(MultiResponse(Code.get(rc), path, Option(ctx), toZkOpResult(opResults), responseMetadata(sendTimeMs))), - ctx.orNull) - } - } - - /** - * Wait indefinitely until the underlying zookeeper client to reaches the CONNECTED state. - * @throws ZooKeeperClientAuthFailedException if the authentication failed either before or while waiting for connection. - * @throws ZooKeeperClientExpiredException if the session expired either before or while waiting for connection. - */ - def waitUntilConnected(): Unit = inLock(isConnectedOrExpiredLock) { - waitUntilConnected(Long.MaxValue, TimeUnit.MILLISECONDS) - } - - private def waitUntilConnected(timeout: Long, timeUnit: TimeUnit): Unit = { - info("Waiting until connected.") - var nanos = timeUnit.toNanos(timeout) - inLock(isConnectedOrExpiredLock) { - var state = connectionState - while (!state.isConnected && state.isAlive) { - if (nanos <= 0) { - throw new ZooKeeperClientTimeoutException(s"Timed out waiting for connection while in state: $state") - } - nanos = isConnectedOrExpiredCondition.awaitNanos(nanos) - state = connectionState - } - if (state == States.AUTH_FAILED) { - throw new ZooKeeperClientAuthFailedException("Auth failed either before or while waiting for connection") - } else if (state == States.CLOSED) { - throw new ZooKeeperClientExpiredException("Session expired either before or while waiting for connection") - } - isFirstConnectionEstablished = true - } - info("Connected.") - } - - // If this method is changed, the documentation for registerZNodeChangeHandler and/or registerZNodeChildChangeHandler - // may need to be updated. - private def shouldWatch(request: AsyncRequest): Boolean = request match { - case GetChildrenRequest(_, registerWatch, _) => registerWatch && zNodeChildChangeHandlers.contains(request.path) - case _: ExistsRequest | _: GetDataRequest => zNodeChangeHandlers.contains(request.path) - case _ => throw new IllegalArgumentException(s"Request $request is not watchable") - } - - /** - * Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher. - * - * The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest]) - * with either a GetDataRequest or ExistsRequest. - * - * NOTE: zookeeper only allows registration to a nonexistent znode with ExistsRequest. - * - * @param zNodeChangeHandler the handler to register - */ - def registerZNodeChangeHandler(zNodeChangeHandler: ZNodeChangeHandler): Unit = { - zNodeChangeHandlers.put(zNodeChangeHandler.path, zNodeChangeHandler) - } - - /** - * Unregister the handler from ZooKeeperClient. This is just a local operation. - * @param path the path of the handler to unregister - */ - def unregisterZNodeChangeHandler(path: String): Unit = { - zNodeChangeHandlers.remove(path) - } - - /** - * Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher. - * - * The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest]) with a GetChildrenRequest. 
- * - * @param zNodeChildChangeHandler the handler to register - */ - def registerZNodeChildChangeHandler(zNodeChildChangeHandler: ZNodeChildChangeHandler): Unit = { - zNodeChildChangeHandlers.put(zNodeChildChangeHandler.path, zNodeChildChangeHandler) - } - - /** - * Unregister the handler from ZooKeeperClient. This is just a local operation. - * @param path the path of the handler to unregister - */ - def unregisterZNodeChildChangeHandler(path: String): Unit = { - zNodeChildChangeHandlers.remove(path) - } - - /** - * @param stateChangeHandler - */ - def registerStateChangeHandler(stateChangeHandler: StateChangeHandler): Unit = inReadLock(initializationLock) { - if (stateChangeHandler != null) - stateChangeHandlers.put(stateChangeHandler.name, stateChangeHandler) - } - - /** - * - * @param name - */ - def unregisterStateChangeHandler(name: String): Unit = inReadLock(initializationLock) { - stateChangeHandlers.remove(name) - } - - def close(): Unit = { - info("Closing.") - - // Shutdown scheduler outside of lock to avoid deadlock if scheduler - // is waiting for lock to process session expiry. Close expiry thread - // first to ensure that new clients are not created during close(). - reinitializeScheduler.shutdown() - - inWriteLock(initializationLock) { - zNodeChangeHandlers.clear() - zNodeChildChangeHandlers.clear() - stateChangeHandlers.clear() - zooKeeper.close() - metricNames.foreach(metricsGroup.removeMetric) - } - info("Closed.") - } - - def sessionId: Long = inReadLock(initializationLock) { - zooKeeper.getSessionId - } - - // Only for testing - private[kafka] def currentZooKeeper: ZooKeeper = inReadLock(initializationLock) { - zooKeeper - } - - private def reinitialize(): Unit = { - // Initialization callbacks are invoked outside of the lock to avoid deadlock potential since their completion - // may require additional Zookeeper requests, which will block to acquire the initialization lock - stateChangeHandlers.values.foreach(callBeforeInitializingSession) - - inWriteLock(initializationLock) { - if (!connectionState.isAlive) { - zooKeeper.close() - info(s"Initializing a new session to $connectString.") - // retry forever until ZooKeeper can be instantiated - var connected = false - while (!connected) { - try { - zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher, clientConfig) - connected = true - } catch { - case e: Exception => - info("Error when recreating ZooKeeper, retrying after a short sleep", e) - Thread.sleep(RetryBackoffMs) - } - } - } - } - - stateChangeHandlers.values.foreach(callAfterInitializingSession) - } - - /** - * Close the zookeeper client to force session reinitialization. This is visible for testing only. 
- */ - private[zookeeper] def forceReinitialize(): Unit = { - zooKeeper.close() - reinitialize() - } - - private def callBeforeInitializingSession(handler: StateChangeHandler): Unit = { - try { - handler.beforeInitializingSession() - } catch { - case t: Throwable => - error(s"Uncaught error in handler ${handler.name}", t) - } - } - - private def callAfterInitializingSession(handler: StateChangeHandler): Unit = { - try { - handler.afterInitializingSession() - } catch { - case t: Throwable => - error(s"Uncaught error in handler ${handler.name}", t) - } - } - - // Visibility for testing - private[zookeeper] def scheduleReinitialize(name: String, message: String, delayMs: Long): Unit = { - reinitializeScheduler.scheduleOnce(name, () => { - info(message) - reinitialize() - }, delayMs) - } - - private def threadPrefix: String = name.replaceAll("\\s", "") + "-" - - // package level visibility for testing only - private[zookeeper] object ZooKeeperClientWatcher extends Watcher { - override def process(event: WatchedEvent): Unit = { - debug(s"Received event: $event") - Option(event.getPath) match { - case None => - val state = event.getState - stateToMeterMap.get(state).foreach(_.mark()) - inLock(isConnectedOrExpiredLock) { - isConnectedOrExpiredCondition.signalAll() - } - if (state == KeeperState.AuthFailed) { - error(s"Auth failed, initialized=$isFirstConnectionEstablished connectionState=$connectionState") - stateChangeHandlers.values.foreach(_.onAuthFailure()) - - // If this is during initial startup, we fail fast. Otherwise, schedule retry. - val initialized = inLock(isConnectedOrExpiredLock) { - isFirstConnectionEstablished - } - if (initialized && !connectionState.isAlive) - scheduleReinitialize("auth-failed", "Reinitializing due to auth failure.", RetryBackoffMs) - } else if (state == KeeperState.Expired) { - scheduleReinitialize("session-expired", "Session expired.", delayMs = 0L) - } - case Some(path) => - (event.getType: @unchecked) match { - case EventType.NodeChildrenChanged => zNodeChildChangeHandlers.get(path).foreach(_.handleChildChange()) - case EventType.NodeCreated => zNodeChangeHandlers.get(path).foreach(_.handleCreation()) - case EventType.NodeDeleted => zNodeChangeHandlers.get(path).foreach(_.handleDeletion()) - case EventType.NodeDataChanged => zNodeChangeHandlers.get(path).foreach(_.handleDataChange()) - } - } - } - } -} - -trait StateChangeHandler { - val name: String - def beforeInitializingSession(): Unit = {} - def afterInitializingSession(): Unit = {} - def onAuthFailure(): Unit = {} -} - -trait ZNodeChangeHandler { - val path: String - def handleCreation(): Unit = {} - def handleDeletion(): Unit = {} - def handleDataChange(): Unit = {} -} - -trait ZNodeChildChangeHandler { - val path: String - def handleChildChange(): Unit = {} -} - -// Thin wrapper for zookeeper.Op -sealed trait ZkOp { - def toZookeeperOp: Op -} - -case class CreateOp(path: String, data: Array[Byte], acl: Seq[ACL], createMode: CreateMode) extends ZkOp { - override def toZookeeperOp: Op = Op.create(path, data, acl.asJava, createMode) -} - -case class DeleteOp(path: String, version: Int) extends ZkOp { - override def toZookeeperOp: Op = Op.delete(path, version) -} - -case class SetDataOp(path: String, data: Array[Byte], version: Int) extends ZkOp { - override def toZookeeperOp: Op = Op.setData(path, data, version) -} - -case class CheckOp(path: String, version: Int) extends ZkOp { - override def toZookeeperOp: Op = Op.check(path, version) -} - -case class ZkOpResult(zkOp: ZkOp, rawOpResult: OpResult) - 
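For context on the API being removed in this file: callers drive the client by constructing the request case classes defined just below and handing them to handleRequest/handleRequests, and the ZkOp wrappers above are folded into a single atomic MultiRequest. A minimal sketch of that calling pattern, for reference only; the connection settings, metric group/type, client name, and znode paths here are illustrative and not taken from this patch:

    import kafka.zookeeper._
    import org.apache.kafka.common.utils.Time
    import org.apache.zookeeper.{CreateMode, ZooDefs}
    import org.apache.zookeeper.client.ZKClientConfig
    import scala.jdk.CollectionConverters._

    object PipelinedZkExample {
      def main(args: Array[String]): Unit = {
        // Constructor arguments follow the signature deleted above; the concrete
        // values (host, timeouts, metric group/type, client name) are made up.
        val client = new ZooKeeperClient(
          "localhost:2181",         // connectString
          30000,                    // sessionTimeoutMs
          10000,                    // connectionTimeoutMs
          10,                       // maxInFlightRequests
          Time.SYSTEM,
          "kafka.server",           // metricGroup
          "ZooKeeperClientExample", // metricType
          new ZKClientConfig,
          "example")
        try {
          // Single request: create a znode and throw if the result code is not OK.
          val acl = ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala.toSeq
          client.handleRequest(
            CreateRequest("/example", Array.emptyByteArray, acl, CreateMode.PERSISTENT)
          ).maybeThrow()

          // Pipelined batch: all reads are sent before any response is awaited;
          // handleRequests blocks until every response has arrived.
          val responses = client.handleRequests(
            Seq("/example", "/cluster/id").map(path => GetDataRequest(path)))
          responses.foreach(r => println(s"${r.path} -> ${r.resultCode}"))

          // MultiRequest wraps the ZkOp helpers into one atomic ZooKeeper multi() call.
          client.handleRequest(MultiRequest(Seq(
            CheckOp("/example", version = 0),
            SetDataOp("/example", "payload".getBytes("UTF-8"), version = 0)))).maybeThrow()
        } finally client.close()
      }
    }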
-sealed trait AsyncRequest { - /** - * This type member allows us to define methods that take requests and return responses with the correct types. - * See ``ZooKeeperClient.handleRequests`` for example. - */ - type Response <: AsyncResponse - def path: String - def ctx: Option[Any] -} - -case class CreateRequest(path: String, data: Array[Byte], acl: Seq[ACL], createMode: CreateMode, - ctx: Option[Any] = None) extends AsyncRequest { - type Response = CreateResponse -} - -case class DeleteRequest(path: String, version: Int, ctx: Option[Any] = None) extends AsyncRequest { - type Response = DeleteResponse -} - -case class ExistsRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest { - type Response = ExistsResponse -} - -case class GetDataRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest { - type Response = GetDataResponse -} - -case class SetDataRequest(path: String, data: Array[Byte], version: Int, ctx: Option[Any] = None) extends AsyncRequest { - type Response = SetDataResponse -} - -case class GetAclRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest { - type Response = GetAclResponse -} - -case class SetAclRequest(path: String, acl: Seq[ACL], version: Int, ctx: Option[Any] = None) extends AsyncRequest { - type Response = SetAclResponse -} - -case class GetChildrenRequest(path: String, registerWatch: Boolean, ctx: Option[Any] = None) extends AsyncRequest { - type Response = GetChildrenResponse -} - -case class MultiRequest(zkOps: Seq[ZkOp], ctx: Option[Any] = None) extends AsyncRequest { - type Response = MultiResponse - - override def path: String = null -} - - -sealed abstract class AsyncResponse { - def resultCode: Code - def path: String - def ctx: Option[Any] - - /** Return None if the result code is OK and KeeperException otherwise. */ - def resultException: Option[KeeperException] = - if (resultCode == Code.OK) None else Some(KeeperException.create(resultCode, path)) - - /** - * Throw KeeperException if the result code is not OK. 
- */ - def maybeThrow(): Unit = { - if (resultCode != Code.OK) - throw KeeperException.create(resultCode, path) - } - - def metadata: ResponseMetadata -} - -case class ResponseMetadata(sendTimeMs: Long, receivedTimeMs: Long) { - def responseTimeMs: Long = receivedTimeMs - sendTimeMs -} - -case class CreateResponse(resultCode: Code, path: String, ctx: Option[Any], name: String, - metadata: ResponseMetadata) extends AsyncResponse -case class DeleteResponse(resultCode: Code, path: String, ctx: Option[Any], - metadata: ResponseMetadata) extends AsyncResponse -case class ExistsResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class GetDataResponse(resultCode: Code, path: String, ctx: Option[Any], data: Array[Byte], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class SetDataResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class GetAclResponse(resultCode: Code, path: String, ctx: Option[Any], acl: Seq[ACL], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class SetAclResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class GetChildrenResponse(resultCode: Code, path: String, ctx: Option[Any], children: Seq[String], stat: Stat, - metadata: ResponseMetadata) extends AsyncResponse -case class MultiResponse(resultCode: Code, path: String, ctx: Option[Any], zkOpResults: Seq[ZkOpResult], - metadata: ResponseMetadata) extends AsyncResponse - -class ZooKeeperClientException(message: String) extends RuntimeException(message) -class ZooKeeperClientExpiredException(message: String) extends ZooKeeperClientException(message) -class ZooKeeperClientAuthFailedException(message: String) extends ZooKeeperClientException(message) -class ZooKeeperClientTimeoutException(message: String) extends ZooKeeperClientException(message) diff --git a/core/src/main/scala/org/apache/zookeeper/ZooKeeperMainWithTlsSupportForKafka.scala b/core/src/main/scala/org/apache/zookeeper/ZooKeeperMainWithTlsSupportForKafka.scala deleted file mode 100644 index cd748dc9f163b..0000000000000 --- a/core/src/main/scala/org/apache/zookeeper/ZooKeeperMainWithTlsSupportForKafka.scala +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.zookeeper - -import kafka.admin.ZkSecurityMigrator -import org.apache.zookeeper.admin.ZooKeeperAdmin -import org.apache.zookeeper.cli.CommandNotFoundException -import org.apache.zookeeper.cli.MalformedCommandException -import org.apache.zookeeper.client.ZKClientConfig - -import scala.jdk.CollectionConverters._ - -object ZooKeeperMainWithTlsSupportForKafka { - private val zkTlsConfigFileOption = "-zk-tls-config-file" - def main(args: Array[String]): Unit = { - val zkTlsConfigFileIndex = args.indexOf(zkTlsConfigFileOption) - val zooKeeperMain: ZooKeeperMain = - if (zkTlsConfigFileIndex < 0) - // no TLS config, so just pass args directly - new ZooKeeperMainWithTlsSupportForKafka(args, None) - else if (zkTlsConfigFileIndex == args.length - 1) - throw new IllegalArgumentException(s"Error: no filename provided with option $zkTlsConfigFileOption") - else - // found TLS config, so instantiate it and pass args without the two TLS config-related arguments - new ZooKeeperMainWithTlsSupportForKafka( - args.slice(0, zkTlsConfigFileIndex) ++ args.slice(zkTlsConfigFileIndex + 2, args.length), - Some(ZkSecurityMigrator.createZkClientConfigFromFile(args(zkTlsConfigFileIndex + 1)))) - // The run method of ZooKeeperMain is package-private, - // therefore this code unfortunately must reside in the same org.apache.zookeeper package. - zooKeeperMain.run() - } -} - -class ZooKeeperMainWithTlsSupportForKafka(args: Array[String], val zkClientConfig: Option[ZKClientConfig]) - extends ZooKeeperMain(args) with Watcher { - - override def processZKCmd (co: ZooKeeperMain.MyCommandOptions): Boolean = { - // Unfortunately the usage() method is static, so it can't be overridden. - // This method is where usage() gets called. We don't cover all possible calls - // to usage() -- we would have to implement the entire method to do that -- but - // the short implementation below covers most cases. - val args = co.getArgArray - val cmd = co.getCommand - if (args.length < 1) { - kafkaTlsUsage() - throw new MalformedCommandException("No command entered") - } - - if (!ZooKeeperMain.commandMap.containsKey(cmd)) { - kafkaTlsUsage() - throw new CommandNotFoundException(s"Command not found $cmd") - } - super.processZKCmd(co) - } - - private def kafkaTlsUsage(): Unit = { - System.err.println("ZooKeeper -server host:port [-zk-tls-config-file ] cmd args") - ZooKeeperMain.commandMap.keySet.asScala.toList.sorted.foreach(cmd => - System.err.println(s"\t$cmd ${ZooKeeperMain.commandMap.get(cmd)}")) - } - - override def connectToZK(newHost: String): Unit = { - // ZooKeeperAdmin has no constructor that supports passing in both readOnly and ZkClientConfig, - // and readOnly ends up being set to false when passing in a ZkClientConfig instance; - // therefore it is currently not possible for us to construct a ZooKeeperAdmin instance with - // both an explicit ZkClientConfig instance and a readOnly value of true. 
- val readOnlyRequested = cl.getOption("readonly") != null - if (readOnlyRequested && zkClientConfig.isDefined) - throw new IllegalArgumentException( - s"read-only mode (-r) is not supported with an explicit TLS config (${ZooKeeperMainWithTlsSupportForKafka.zkTlsConfigFileOption})") - if (zk != null && zk.getState.isAlive) zk.close() - host = newHost - zk = if (zkClientConfig.isDefined) - new ZooKeeperAdmin(host, cl.getOption("timeout").toInt, this, zkClientConfig.get) - else - new ZooKeeperAdmin(host, cl.getOption("timeout").toInt, this, readOnlyRequested) - } - - override def process(event: WatchedEvent): Unit = { - if (getPrintWatches) { - ZooKeeperMain.printMessage("WATCHER::") - ZooKeeperMain.printMessage(event.toString) - } - } -} diff --git a/core/src/test/java/kafka/admin/AdminFenceProducersTest.java b/core/src/test/java/kafka/admin/AdminFenceProducersTest.java index aec4be5c1c29b..7b0ac1445d421 100644 --- a/core/src/test/java/kafka/admin/AdminFenceProducersTest.java +++ b/core/src/test/java/kafka/admin/AdminFenceProducersTest.java @@ -20,14 +20,13 @@ import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.FenceProducersOptions; -import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.InvalidProducerEpochException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.serialization.ByteArraySerializer; import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterInstance; import org.apache.kafka.common.test.api.ClusterTest; @@ -40,7 +39,8 @@ import org.junit.jupiter.api.extension.ExtendWith; import java.util.Collections; -import java.util.Properties; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.ExecutionException; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -48,11 +48,11 @@ import static org.junit.jupiter.api.Assertions.assertTrue; @ClusterTestDefaults(serverProperties = { - @ClusterConfigProperty(key = ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, value = "false"), - @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), - @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), - @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, value = "1"), - @ClusterConfigProperty(key = TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, value = "2000") + @ClusterConfigProperty(key = ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, value = "false"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, value = "2000") }) @ExtendWith(ClusterTestExtensions.class) public class 
AdminFenceProducersTest { @@ -66,23 +66,17 @@ public class AdminFenceProducersTest { this.clusterInstance = clusterInstance; } - private KafkaProducer createProducer() { - Properties config = new Properties(); - config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); - config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, TXN_ID); - config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "2000"); - config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); - config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); - - return new KafkaProducer<>(config); + private Producer createProducer() { + return clusterInstance.producer(Map.of(ProducerConfig.TRANSACTIONAL_ID_CONFIG, TXN_ID, + ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "2000")); } @ClusterTest void testFenceAfterProducerCommit() throws Exception { clusterInstance.createTopic(TOPIC_NAME, 1, (short) 1); - try (KafkaProducer producer = createProducer(); - Admin adminClient = clusterInstance.createAdminClient()) { + try (Producer producer = createProducer(); + Admin adminClient = clusterInstance.admin()) { producer.initTransactions(); producer.beginTransaction(); producer.send(RECORD).get(); @@ -93,20 +87,25 @@ void testFenceAfterProducerCommit() throws Exception { producer.beginTransaction(); ExecutionException exceptionDuringSend = assertThrows( ExecutionException.class, - () -> producer.send(RECORD).get(), "expected ProducerFencedException" + () -> producer.send(RECORD).get(), "expected InvalidProducerEpochException" ); - assertInstanceOf(ProducerFencedException.class, exceptionDuringSend.getCause()); - assertThrows(ProducerFencedException.class, producer::commitTransaction); + // In Transaction V2, the ProducerFencedException will be converted to InvalidProducerEpochException when + // coordinator handles AddPartitionRequest. + assertInstanceOf(InvalidProducerEpochException.class, exceptionDuringSend.getCause()); + + // InvalidProducerEpochException is treated as fatal error. The commitTransaction will return this last + // fatal error. 
+ assertThrows(InvalidProducerEpochException.class, producer::commitTransaction); } } @ClusterTest void testFenceProducerTimeoutMs() { - Properties config = new Properties(); + Map config = new HashMap<>(); config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + INCORRECT_BROKER_PORT); - try (Admin adminClient = clusterInstance.createAdminClient(config)) { + try (Admin adminClient = clusterInstance.admin(config)) { ExecutionException exception = assertThrows( ExecutionException.class, () -> adminClient.fenceProducers(Collections.singletonList(TXN_ID), new FenceProducersOptions().timeoutMs(0)).all().get()); @@ -118,8 +117,8 @@ void testFenceProducerTimeoutMs() { void testFenceBeforeProducerCommit() throws Exception { clusterInstance.createTopic(TOPIC_NAME, 1, (short) 1); - try (KafkaProducer producer = createProducer(); - Admin adminClient = clusterInstance.createAdminClient()) { + try (Producer producer = createProducer(); + Admin adminClient = clusterInstance.admin()) { producer.initTransactions(); producer.beginTransaction(); diff --git a/core/src/test/java/kafka/admin/ClientTelemetryTest.java b/core/src/test/java/kafka/admin/ClientTelemetryTest.java index f9dc1a4330076..f701d3f38450e 100644 --- a/core/src/test/java/kafka/admin/ClientTelemetryTest.java +++ b/core/src/test/java/kafka/admin/ClientTelemetryTest.java @@ -75,7 +75,7 @@ public class ClientTelemetryTest { types = Type.KRAFT, brokers = 3, serverProperties = { - @ClusterConfigProperty(key = METRIC_REPORTER_CLASSES_CONFIG, value = "kafka.admin.ClientTelemetryTest$GetIdClientTelemetry"), + @ClusterConfigProperty(key = METRIC_REPORTER_CLASSES_CONFIG, value = "kafka.admin.ClientTelemetryTest$GetIdClientTelemetry"), }) public void testClientInstanceId(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException { Map configs = new HashMap<>(); @@ -132,7 +132,7 @@ public void testClientInstanceId(ClusterInstance clusterInstance) throws Interru public void testIntervalMsParser(ClusterInstance clusterInstance) { List alterOpts = asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--alter", "--entity-type", "client-metrics", "--entity-name", "test", "--add-config", "interval.ms=bbb"); - try (Admin client = clusterInstance.createAdminClient()) { + try (Admin client = clusterInstance.admin()) { ConfigCommand.ConfigCommandOptions addOpts = new ConfigCommand.ConfigCommandOptions(toArray(alterOpts)); Throwable e = assertThrows(ExecutionException.class, () -> ConfigCommand.alterConfig(client, addOpts)); diff --git a/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java index 767c76b9820c6..b46344f4bea36 100644 --- a/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java +++ b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java @@ -17,16 +17,25 @@ package kafka.admin; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientTestUtils; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.test.api.ClusterConfigProperty; import org.apache.kafka.common.test.api.ClusterInstance; 
import org.apache.kafka.common.test.api.ClusterTest; import org.apache.kafka.common.test.api.ClusterTestExtensions; import org.apache.kafka.common.test.api.Type; import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mockito; import java.io.ByteArrayOutputStream; import java.io.PrintStream; @@ -51,14 +60,15 @@ import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG; import static org.apache.kafka.coordinator.group.GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG; import static org.apache.kafka.coordinator.group.GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG; -import static org.apache.kafka.security.PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG; import static org.apache.kafka.server.config.ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG; import static org.apache.kafka.server.config.ServerConfigs.MESSAGE_MAX_BYTES_CONFIG; import static org.apache.kafka.server.config.ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; @ExtendWith(value = ClusterTestExtensions.class) public class ConfigCommandIntegrationTest { @@ -86,7 +96,7 @@ public ConfigCommandIntegrationTest(ClusterInstance cluster) { @ClusterTest public void testExitWithNonZeroStatusOnUpdatingUnallowedConfig() { assertNonZeroStatusExit(Stream.concat(quorumArgs(), Stream.of( - "--entity-name", cluster.isKRaftTest() ? 
"0" : "1", + "--entity-name", "0", "--entity-type", "brokers", "--alter", "--add-config", "security.inter.broker.protocol=PLAINTEXT")), @@ -141,7 +151,7 @@ public void testNullStatusOnKraftCommandAlterClientMetrics() { public void testDynamicBrokerConfigUpdateUsingKraft() throws Exception { List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { // Add config alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "110000"), alterOpts); alterAndVerifyConfig(client, Optional.empty(), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "120000"), alterOpts); @@ -158,11 +168,11 @@ public void testDynamicBrokerConfigUpdateUsingKraft() throws Exception { singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts); // Per-broker config configured at default cluster-level should fail assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.empty(), + () -> alterConfigWithAdmin(client, Optional.empty(), singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts)); deleteAndVerifyConfigValue(client, defaultBrokerId, singleton("listener.name.internal.ssl.keystore.location"), false, alterOpts); - alterConfigWithKraft(client, Optional.of(defaultBrokerId), + alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap("listener.name.external.ssl.keystore.password", "secret"), alterOpts); // Password config update with encoder secret should succeed and encoded password must be stored in ZK @@ -170,11 +180,10 @@ public void testDynamicBrokerConfigUpdateUsingKraft() throws Exception { configs.put("listener.name.external.ssl.keystore.password", "secret"); configs.put("log.cleaner.threads", "2"); // Password encoder configs - configs.put(PASSWORD_ENCODER_SECRET_CONFIG, "encoder-secret"); // Password config update at default cluster-level should fail assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), configs, alterOpts)); + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), configs, alterOpts)); } } @@ -190,7 +199,7 @@ public void testGroupConfigUpdateUsingKraft() throws Exception { } private void verifyGroupConfigUpdate(List alterOpts) throws Exception { - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { // Add config Map configs = new HashMap<>(); configs.put(CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"); @@ -203,7 +212,7 @@ private void verifyGroupConfigUpdate(List alterOpts) throws Exception { deleteAndVerifyGroupConfigValue(client, defaultGroupName, configs, alterOpts); // Unknown config configured should fail - assertThrows(ExecutionException.class, () -> alterConfigWithKraft(client, singletonMap("unknown.config", "20000"), alterOpts)); + assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts)); } } @@ -220,7 +229,7 @@ public void testClientMetricsConfigUpdate() throws Exception { } private void verifyClientMetricsConfigUpdate(List alterOpts) throws Exception { - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { // Add config Map configs = new HashMap<>(); configs.put("metrics", "org.apache.kafka.producer."); @@ -233,7 +242,7 @@ private void verifyClientMetricsConfigUpdate(List alterOpts) throws Exce deleteAndVerifyClientMetricsConfigValue(client, 
defaultClientMetricsName, configs, alterOpts); // Unknown config configured should fail - assertThrows(ExecutionException.class, () -> alterConfigWithKraft(client, singletonMap("unknown.config", "20000"), alterOpts)); + assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts)); } } @@ -241,15 +250,15 @@ private void verifyClientMetricsConfigUpdate(List alterOpts) throws Exce public void testAlterReadOnlyConfigInKRaftThenShouldFail() { List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap(AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false"), alterOpts)); assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap(AUTO_LEADER_REBALANCE_ENABLE_CONFIG, "false"), alterOpts)); assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap("broker.id", "1"), alterOpts)); } } @@ -258,7 +267,7 @@ public void testAlterReadOnlyConfigInKRaftThenShouldFail() { public void testUpdateClusterWideConfigInKRaftThenShouldSuccessful() throws Exception { List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap("log.flush.interval.messages", "100"), alterOpts); alterAndVerifyConfig(client, Optional.of(defaultBrokerId), @@ -273,12 +282,12 @@ public void testUpdatePerBrokerConfigWithListenerNameInKRaftThenShouldSuccessful List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); String listenerName = "listener.name.internal."; - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(listenerName + "ssl.truststore.type", "PKCS12"), alterOpts); alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(listenerName + "ssl.truststore.location", "/temp/test.jks"), alterOpts); - alterConfigWithKraft(client, Optional.of(defaultBrokerId), + alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap(listenerName + "ssl.truststore.password", "password"), alterOpts); verifyConfigSecretValue(client, Optional.of(defaultBrokerId), singleton(listenerName + "ssl.truststore.password")); @@ -289,19 +298,121 @@ public void testUpdatePerBrokerConfigWithListenerNameInKRaftThenShouldSuccessful public void testUpdatePerBrokerConfigInKRaftThenShouldFail() { List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); - try (Admin client = cluster.createAdminClient()) { + try (Admin client = cluster.admin()) { assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap(SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"), alterOpts)); assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, 
Optional.of(defaultBrokerId), singletonMap(SSL_TRUSTSTORE_LOCATION_CONFIG, "/temp/test.jks"), alterOpts)); assertThrows(ExecutionException.class, - () -> alterConfigWithKraft(client, Optional.of(defaultBrokerId), + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), singletonMap(SSL_TRUSTSTORE_PASSWORD_CONFIG, "password"), alterOpts)); } } + @ClusterTest + public void testUpdateInvalidBrokerConfigs() { + updateAndCheckInvalidBrokerConfig(Optional.empty()); + updateAndCheckInvalidBrokerConfig(Optional.of(cluster.anyBrokerSocketServer().config().brokerId() + "")); + } + + private void updateAndCheckInvalidBrokerConfig(Optional brokerIdOrDefault) { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + try (Admin client = cluster.admin()) { + alterConfigWithAdmin(client, brokerIdOrDefault, Collections.singletonMap("invalid", "2"), alterOpts); + + Stream describeCommand = Stream.concat( + Stream.concat( + Stream.of("--bootstrap-server", cluster.bootstrapServers()), + Stream.of(entityOp(brokerIdOrDefault).toArray(new String[0]))), + Stream.of("--entity-type", "brokers", "--describe")); + String describeResult = captureStandardStream(false, run(describeCommand)); + + // We will treat unknown config as sensitive + assertTrue(describeResult.contains("sensitive=true"), describeResult); + // Sensitive config values will not be returned + assertTrue(describeResult.contains("invalid=null"), describeResult); + } + } + + @ClusterTest + public void testUpdateInvalidTopicConfigs() throws ExecutionException, InterruptedException { + List alterOpts = asList("--bootstrap-server", cluster.bootstrapServers(), "--entity-type", "topics", "--alter"); + try (Admin client = cluster.admin()) { + client.createTopics(Collections.singletonList(new NewTopic("test-config-topic", 1, (short) 1))).all().get(); + assertInstanceOf( + InvalidConfigurationException.class, + assertThrows( + ExecutionException.class, + () -> ConfigCommand.alterConfig( + client, + new ConfigCommand.ConfigCommandOptions( + toArray(alterOpts, + asList("--add-config", "invalid=2", "--entity-type", "topics", "--entity-name", "test-config-topic")))) + ).getCause() + ); + } + } + + // Test case from KAFKA-13788 + @ClusterTest(serverProperties = { + // Must be greater than 1MB per cleaner thread; set to 2M+2 so that we can set 2 cleaner threads. + @ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154"), + }) + public void testUpdateBrokerConfigNotAffectedByInvalidConfig() { + try (Admin client = cluster.admin()) { + ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions( + toArray(asList("--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threadzz=2", + "--entity-type", "brokers", + "--entity-default")))); + + ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions( + toArray(asList("--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threads=2", + "--entity-type", "brokers", + "--entity-default")))); + kafka.utils.TestUtils.waitUntilTrue( + () -> cluster.brokerSocketServers().stream().allMatch(broker -> broker.config().getInt("log.cleaner.threads") == 2), + () -> "Timeout waiting for broker config to propagate", + org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS, + 100L); + } + } + + @ClusterTest( + // Must be greater than 1MB per cleaner thread; set to 2M+2 so that we can set 2 cleaner threads.
+ serverProperties = {@ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154")}, + // ZK code has been removed; use KRaft and Mockito to mock this situation + metadataVersion = MetadataVersion.IBP_3_3_IV0 + ) + public void testUnsupportedVersionException() { + try (Admin client = cluster.admin()) { + Admin spyAdmin = Mockito.spy(client); + + AlterConfigsResult mockResult = AdminClientTestUtils.alterConfigsResult( + new ConfigResource(ConfigResource.Type.BROKER, ""), new UnsupportedVersionException("simulated error")); + Mockito.doReturn(mockResult).when(spyAdmin) + .incrementalAlterConfigs(any(java.util.Map.class), any(AlterConfigsOptions.class)); + assertEquals( + "The INCREMENTAL_ALTER_CONFIGS API is not supported by the cluster. The API is supported starting from version 2.3.0. You may want to use an older version of this tool to interact with your cluster, or upgrade your brokers to version 2.3.0 or newer to avoid this error.", + assertThrows(UnsupportedVersionException.class, () -> { + ConfigCommand.alterConfig(spyAdmin, new ConfigCommand.ConfigCommandOptions( + toArray(asList( + "--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threads=2", + "--entity-type", "brokers", + "--entity-default")))); + }).getMessage() + ); + Mockito.verify(spyAdmin).incrementalAlterConfigs(any(java.util.Map.class), any(AlterConfigsOptions.class)); + } + } + private void assertNonZeroStatusExit(Stream args, Consumer checkErrOut) { AtomicReference exitStatus = new AtomicReference<>(); Exit.setExitProcedure((status, __) -> { @@ -334,7 +445,7 @@ private void alterAndVerifyConfig(Admin client, Optional brokerId, Map config, List alterOpts) throws Exception { - alterConfigWithKraft(client, brokerId, config, alterOpts); + alterConfigWithAdmin(client, brokerId, config, alterOpts); verifyConfig(client, brokerId, config); } @@ -342,7 +453,7 @@ private void alterAndVerifyGroupConfig(Admin client, String groupName, Map config, List alterOpts) throws Exception { - alterConfigWithKraft(client, config, alterOpts); + alterConfigWithAdmin(client, config, alterOpts); verifyGroupConfig(client, groupName, config); } @@ -350,11 +461,11 @@ private void alterAndVerifyClientMetricsConfig(Admin client, String clientMetricsName, Map config, List alterOpts) throws Exception { - alterConfigWithKraft(client, config, alterOpts); + alterConfigWithAdmin(client, config, alterOpts); verifyClientMetricsConfig(client, clientMetricsName, config); } - private void alterConfigWithKraft(Admin client, Optional resourceName, Map config, List alterOpts) { + private void alterConfigWithAdmin(Admin client, Optional resourceName, Map config, List alterOpts) { String configStr = transferConfigMapToString(config); List bootstrapOpts = quorumArgs().collect(Collectors.toList()); ConfigCommand.ConfigCommandOptions addOpts = @@ -366,7 +477,7 @@ private void alterConfigWithKraft(Admin client, Optional resourceName, M ConfigCommand.alterConfig(client, addOpts); } - private void alterConfigWithKraft(Admin client, Map config, List alterOpts) { + private void alterConfigWithAdmin(Admin client, Map config, List alterOpts) { String configStr = transferConfigMapToString(config); List bootstrapOpts = quorumArgs().collect(Collectors.toList()); ConfigCommand.ConfigCommandOptions addOpts = diff --git a/core/src/test/java/kafka/admin/ConfigCommandTest.java b/core/src/test/java/kafka/admin/ConfigCommandTest.java index 6d3cba6d246c3..10c24111e4757 100644 ---
a/core/src/test/java/kafka/admin/ConfigCommandTest.java +++ b/core/src/test/java/kafka/admin/ConfigCommandTest.java @@ -16,10 +16,6 @@ */ package kafka.admin; -import kafka.cluster.Broker; -import kafka.zk.AdminZkClient; -import kafka.zk.KafkaZkClient; - import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AlterClientQuotasOptions; import org.apache.kafka.clients.admin.AlterClientQuotasResult; @@ -44,12 +40,8 @@ import org.apache.kafka.common.quota.ClientQuotaEntity; import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.quota.ClientQuotaFilterComponent; -import org.apache.kafka.common.security.scram.ScramCredential; -import org.apache.kafka.common.security.scram.internals.ScramCredentialUtils; import org.apache.kafka.common.utils.Exit; -import org.apache.kafka.common.utils.Sanitizer; import org.apache.kafka.server.config.ConfigType; -import org.apache.kafka.server.config.ZooKeeperInternals; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; @@ -59,7 +51,6 @@ import java.io.File; import java.io.IOException; import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -75,7 +66,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -87,17 +77,12 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class ConfigCommandTest { - private static final String ZK_CONNECT = "localhost:2181"; - private static final DummyAdminZkClient DUMMY_ADMIN_ZK_CLIENT = new DummyAdminZkClient(null); - - private static final List ZOOKEEPER_BOOTSTRAP = Arrays.asList("--zookeeper", ZK_CONNECT); private static final List BROKER_BOOTSTRAP = Arrays.asList("--bootstrap-server", "localhost:9092"); private static final List CONTROLLER_BOOTSTRAP = Arrays.asList("--bootstrap-controller", "localhost:9093"); @@ -106,34 +91,6 @@ public void shouldExitWithNonZeroStatusOnArgError() { assertNonZeroStatusExit("--blah"); } - @Test - public void shouldExitWithNonZeroStatusOnZkCommandWithTopicsEntity() { - assertNonZeroStatusExit(toArray(ZOOKEEPER_BOOTSTRAP, Arrays.asList( - "--entity-type", "topics", - "--describe"))); - } - - @Test - public void shouldExitWithNonZeroStatusOnZkCommandWithClientsEntity() { - assertNonZeroStatusExit(toArray(ZOOKEEPER_BOOTSTRAP, Arrays.asList( - "--entity-type", "clients", - "--describe"))); - } - - @Test - public void shouldExitWithNonZeroStatusOnZkCommandWithIpsEntity() { - assertNonZeroStatusExit(toArray(ZOOKEEPER_BOOTSTRAP, Arrays.asList( - "--entity-type", "ips", - "--describe"))); - } - - @Test - public void shouldExitWithNonZeroStatusOnZkCommandWithGroupsEntity() { - assertNonZeroStatusExit(toArray(ZOOKEEPER_BOOTSTRAP, Arrays.asList( - "--entity-type", "groups", - "--describe"))); - } - @Test public void shouldExitWithNonZeroStatusAlterUserQuotaWithoutEntityName() { assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, Arrays.asList( @@ -155,15 +112,6 @@ public void 
shouldExitWithNonZeroStatusIfBothBootstrapServerAndBootstrapControll "--describe", "--broker-defaults"))); } - @Test - public void shouldExitWithNonZeroStatusOnBrokerCommandWithZkTlsConfigFile() { - assertNonZeroStatusExit( - "--bootstrap-server", "invalid host", - "--entity-type", "users", - "--zk-tls-config-file", "zk_tls_config.properties", - "--describe"); - } - public static void assertNonZeroStatusExit(String... args) { AtomicReference exitStatus = new AtomicReference<>(); Exit.setExitProcedure((status, __) -> { @@ -183,11 +131,6 @@ public static void assertNonZeroStatusExit(String... args) { assertEquals(1, exitStatus.get()); } - @Test - public void shouldFailParseArgumentsForClientsEntityTypeUsingZookeeper() { - assertThrows(IllegalArgumentException.class, () -> testArgumentParse(ZOOKEEPER_BOOTSTRAP, "clients")); - } - @Test public void shouldParseArgumentsForClientsEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "clients"); @@ -198,11 +141,6 @@ public void shouldParseArgumentsForClientsEntityTypeWithControllerBootstrap() { testArgumentParse(CONTROLLER_BOOTSTRAP, "clients"); } - @Test - public void shouldParseArgumentsForUsersEntityTypeUsingZookeeper() { - testArgumentParse(ZOOKEEPER_BOOTSTRAP, "users"); - } - @Test public void shouldParseArgumentsForUsersEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "users"); @@ -213,11 +151,6 @@ public void shouldParseArgumentsForUsersEntityTypeWithControllerBootstrap() { testArgumentParse(CONTROLLER_BOOTSTRAP, "users"); } - @Test - public void shouldFailParseArgumentsForTopicsEntityTypeUsingZookeeper() { - assertThrows(IllegalArgumentException.class, () -> testArgumentParse(ZOOKEEPER_BOOTSTRAP, "topics")); - } - @Test public void shouldParseArgumentsForTopicsEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "topics"); @@ -228,11 +161,6 @@ public void shouldParseArgumentsForTopicsEntityTypeWithControllerBootstrap() { testArgumentParse(CONTROLLER_BOOTSTRAP, "topics"); } - @Test - public void shouldParseArgumentsForBrokersEntityTypeUsingZookeeper() { - testArgumentParse(ZOOKEEPER_BOOTSTRAP, "brokers"); - } - @Test public void shouldParseArgumentsForBrokersEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "brokers"); @@ -253,11 +181,6 @@ public void shouldParseArgumentsForBrokerLoggersEntityTypeWithControllerBootstra testArgumentParse(CONTROLLER_BOOTSTRAP, "broker-loggers"); } - @Test - public void shouldFailParseArgumentsForIpEntityTypeUsingZookeeper() { - assertThrows(IllegalArgumentException.class, () -> testArgumentParse(ZOOKEEPER_BOOTSTRAP, "ips")); - } - @Test public void shouldParseArgumentsForIpEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "ips"); @@ -268,11 +191,6 @@ public void shouldParseArgumentsForIpEntityTypeWithControllerBootstrap() { testArgumentParse(CONTROLLER_BOOTSTRAP, "ips"); } - @Test - public void shouldFailParseArgumentsForGroupEntityTypeUsingZookeeper() { - assertThrows(IllegalArgumentException.class, () -> testArgumentParse(ZOOKEEPER_BOOTSTRAP, "groups")); - } - @Test public void shouldParseArgumentsForGroupEntityTypeWithBrokerBootstrap() { testArgumentParse(BROKER_BOOTSTRAP, "groups"); @@ -502,64 +420,42 @@ public void testParseConfigsToBeAddedForAddConfigFile() throws IOException { assertEquals("[[1, 2], [3, 4]]", addedProps.getProperty("nested")); } - public void testExpectedEntityTypeNames(List expectedTypes, List expectedNames, List connectOpts, String...args) { + public void testExpectedEntityTypeNames(List 
expectedTypes, List expectedNames, List connectOpts, String... args) { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList(connectOpts.get(0), connectOpts.get(1), "--describe"), Arrays.asList(args))); createOpts.checkArgs(); assertEquals(createOpts.entityTypes().toSeq(), seq(expectedTypes)); assertEquals(createOpts.entityNames().toSeq(), seq(expectedNames)); } - public void doTestOptionEntityTypeNames(boolean zkConfig) { - List connectOpts = zkConfig - ? Arrays.asList("--zookeeper", ZK_CONNECT) - : Arrays.asList("--bootstrap-server", "localhost:9092"); - - // zookeeper config only supports "users" and "brokers" entity type - if (!zkConfig) { - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--entity-type", "topics", "--entity-name", "A"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--entity-name", "1.2.3.4", "--entity-type", "ips"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.singletonList("A"), connectOpts, "--entity-type", "client-metrics", "--entity-name", "A"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--entity-type", "groups", "--entity-name", "A"); - testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("A", ""), connectOpts, - "--entity-type", "users", "--entity-type", "clients", "--entity-name", "A", "--entity-default"); - testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("", "B"), connectOpts, - "--entity-default", "--entity-name", "B", "--entity-type", "users", "--entity-type", "clients"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--topic", "A"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--ip", "1.2.3.4"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--group", "A"); - testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", "A"), connectOpts, "--client", "B", "--user", "A"); - testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", ""), connectOpts, "--client", "B", "--user-defaults"); - testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Collections.singletonList("A"), connectOpts, - "--entity-type", "clients", "--entity-type", "users", "--entity-name", "A"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.emptyList(), connectOpts, "--entity-type", "topics"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.emptyList(), connectOpts, "--entity-type", "ips"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.emptyList(), connectOpts, "--entity-type", "groups"); - testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.emptyList(), connectOpts, "--entity-type", "client-metrics"); - } - + @Test + public void testOptionEntityTypeNames() { + List connectOpts = Arrays.asList("--bootstrap-server", "localhost:9092"); + + 
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--entity-type", "topics", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--entity-name", "1.2.3.4", "--entity-type", "ips"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.singletonList("A"), connectOpts, "--entity-type", "client-metrics", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--entity-type", "groups", "--entity-name", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("A", ""), connectOpts, + "--entity-type", "users", "--entity-type", "clients", "--entity-name", "A", "--entity-default"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("", "B"), connectOpts, + "--entity-default", "--entity-name", "B", "--entity-type", "users", "--entity-type", "clients"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--topic", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--ip", "1.2.3.4"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--group", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", "A"), connectOpts, "--client", "B", "--user", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", ""), connectOpts, "--client", "B", "--user-defaults"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Collections.singletonList("A"), connectOpts, + "--entity-type", "clients", "--entity-type", "users", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.emptyList(), connectOpts, "--entity-type", "topics"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.emptyList(), connectOpts, "--entity-type", "ips"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.emptyList(), connectOpts, "--entity-type", "groups"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.emptyList(), connectOpts, "--entity-type", "client-metrics"); testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.singletonList("0"), connectOpts, "--entity-name", "0", "--entity-type", "brokers"); testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.singletonList("0"), connectOpts, "--broker", "0"); testExpectedEntityTypeNames(Collections.singletonList(ConfigType.USER), Collections.emptyList(), connectOpts, "--entity-type", "users"); testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.emptyList(), connectOpts, "--entity-type", "brokers"); } - @Test - public void testOptionEntityTypeNamesUsingZookeeper() { - doTestOptionEntityTypeNames(true); - } - - @Test - public void testOptionEntityTypeNames() { - doTestOptionEntityTypeNames(false); - } - - @Test - public void shouldFailIfUnrecognisedEntityTypeUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts 
= new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "client", "--entity-type", "not-recognised", "--alter", "--add-config", "a=b,c=d"}); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldFailIfUnrecognisedEntityType() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", @@ -567,13 +463,6 @@ public void shouldFailIfUnrecognisedEntityType() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); } - @Test - public void shouldFailIfBrokerEntityTypeIsNotAnIntegerUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "A", "--entity-type", "brokers", "--alter", "--add-config", "a=b,c=d"}); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldFailIfBrokerEntityTypeIsNotAnInteger() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", @@ -581,13 +470,6 @@ public void shouldFailIfBrokerEntityTypeIsNotAnInteger() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); } - @Test - public void shouldFailIfShortBrokerEntityTypeIsNotAnIntegerUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--broker", "A", "--alter", "--add-config", "a=b,c=d"}); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldFailIfShortBrokerEntityTypeIsNotAnInteger() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", @@ -595,13 +477,6 @@ public void shouldFailIfShortBrokerEntityTypeIsNotAnInteger() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); } - @Test - public void shouldFailIfMixedEntityTypeFlagsUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "A", "--entity-type", "users", "--client", "B", "--describe"}); - assertThrows(IllegalArgumentException.class, createOpts::checkArgs); - } - @Test public void shouldFailIfMixedEntityTypeFlags() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", @@ -616,13 +491,6 @@ public void shouldFailIfInvalidHost() { assertThrows(IllegalArgumentException.class, createOpts::checkArgs); } - @Test - public void shouldFailIfInvalidHostUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "A,B", "--entity-type", "ips", "--describe"}); - assertThrows(IllegalArgumentException.class, createOpts::checkArgs); - } - @Test public void shouldFailIfUnresolvableHost() { 
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", @@ -630,69 +498,6 @@ public void shouldFailIfUnresolvableHost() { assertThrows(IllegalArgumentException.class, createOpts::checkArgs); } - @Test - public void shouldFailIfUnresolvableHostUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "RFC2606.invalid", "--entity-type", "ips", "--describe"}); - assertThrows(IllegalArgumentException.class, createOpts::checkArgs); - } - - @Test - public void shouldAddClientConfigUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "my-client-id", - "--entity-type", "clients", - "--alter", - "--add-config", "a=b,c=d"}); - - KafkaZkClient zkClient = mock(KafkaZkClient.class); - when(zkClient.getEntityConfigs(anyString(), anyString())).thenReturn(new Properties()); - - class TestAdminZkClient extends AdminZkClient { - public TestAdminZkClient(KafkaZkClient zkClient) { - super(zkClient, scala.None$.empty()); - } - - @Override - public void changeClientIdConfig(String clientId, Properties configChange) { - assertEquals("my-client-id", clientId); - assertEquals("b", configChange.get("a")); - assertEquals("d", configChange.get("c")); - } - } - - // Changing USER configs don't use `KafkaZkClient` so it safe to pass `null`. - ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient)); - } - - @Test - public void shouldAddIpConfigsUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--zookeeper", ZK_CONNECT, - "--entity-name", "1.2.3.4", - "--entity-type", "ips", - "--alter", - "--add-config", "a=b,c=d"}); - - KafkaZkClient zkClient = mock(KafkaZkClient.class); - when(zkClient.getEntityConfigs(anyString(), anyString())).thenReturn(new Properties()); - - class TestAdminZkClient extends AdminZkClient { - public TestAdminZkClient(KafkaZkClient zkClient) { - super(zkClient, scala.None$.empty()); - } - - @Override - public void changeIpConfig(String ip, Properties configChange) { - assertEquals("1.2.3.4", ip); - assertEquals("b", configChange.get("a")); - assertEquals("d", configChange.get("c")); - } - } - - // Changing USER configs don't use `KafkaZkClient` so it safe to pass `null`. 
- ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient)); - } - private Entry, Map> argsAndExpectedEntity(Optional entityName, String entityType) { String command; switch (entityType) { @@ -983,27 +788,6 @@ public void shouldNotDescribeUserScramCredentialsWithEntityDefaultUsingBootstrap verifyUserScramCredentialsNotDescribed(defaultUserOpt); } - @Test - public void shouldAddTopicConfigUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "my-topic", - "--entity-type", "topics", - "--alter", - "--add-config", "a=b,c=d")); - - KafkaZkClient zkClient = mock(KafkaZkClient.class); - when(zkClient.getEntityConfigs(anyString(), anyString())).thenReturn(new Properties()); - - ConfigCommand.alterConfigWithZk(null, createOpts, new AdminZkClient(zkClient, scala.None$.empty()) { - @Override - public void changeTopicConfig(String topic, Properties configChange) { - assertEquals("my-topic", topic); - assertEquals("b", configChange.get("a")); - assertEquals("d", configChange.get("c")); - } - }); - } - @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldAlterTopicConfig(boolean file) { @@ -1114,64 +898,6 @@ public synchronized DescribeConfigsResult describeConfigs(Collection ConfigCommand.alterConfigWithZk(mockZkClient, alterOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - - @Test - public void shouldNotAllowDescribeBrokerWhileBrokerUpUsingZookeeper() { - ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "1", - "--entity-type", "brokers", - "--describe")); - - KafkaZkClient mockZkClient = mock(KafkaZkClient.class); - Broker mockBroker = mock(Broker.class); - when(mockZkClient.getBroker(1)).thenReturn(scala.Option.apply(mockBroker)); - - assertThrows(IllegalArgumentException.class, - () -> ConfigCommand.describeConfigWithZk(mockZkClient, describeOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - - @Test - public void shouldSupportDescribeBrokerBeforeBrokerUpUsingZookeeper() { - ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "1", - "--entity-type", "brokers", - "--describe")); - - class TestAdminZkClient extends AdminZkClient { - public TestAdminZkClient(KafkaZkClient zkClient) { - super(zkClient, scala.None$.empty()); - } - - @Override - public Properties fetchEntityConfig(String rootEntityType, String sanitizedEntityName) { - assertEquals("brokers", rootEntityType); - assertEquals("1", sanitizedEntityName); - - return new Properties(); - } - } - - KafkaZkClient mockZkClient = mock(KafkaZkClient.class); - when(mockZkClient.getBroker(1)).thenReturn(scala.None$.empty()); - - ConfigCommand.describeConfigWithZk(mockZkClient, describeOpts, new TestAdminZkClient(null)); - } - @Test public void shouldAddBrokerLoggerConfig() { Node node = new Node(1, "localhost", 9092); @@ -1182,16 +908,6 @@ public void shouldAddBrokerLoggerConfig() { )); } - @Test - public void testNoSpecifiedEntityOptionWithDescribeBrokersInZKIsAllowed() { - String[] optsList = new String[]{"--zookeeper", ZK_CONNECT, - "--entity-type", ConfigType.BROKER, - "--describe" - }; - - new ConfigCommand.ConfigCommandOptions(optsList).checkArgs(); - } - @Test public void testNoSpecifiedEntityOptionWithDescribeBrokersInBootstrapServerIsAllowed() { String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", @@ -1224,17 +940,6 
@@ public void testDescribeAllTopicConfig() { new ConfigCommand.ConfigCommandOptions(optsList).checkArgs(); } - @Test - public void testDescribeAllBrokerConfigBootstrapServerRequired() { - String[] optsList = new String[]{"--zookeeper", ZK_CONNECT, - "--entity-type", ConfigType.BROKER, - "--entity-name", "1", - "--describe", - "--all"}; - - assertThrows(IllegalArgumentException.class, () -> new ConfigCommand.ConfigCommandOptions(optsList).checkArgs()); - } - @Test public void testEntityDefaultOptionWithDescribeBrokerLoggerIsNotAllowed() { String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", @@ -1312,15 +1017,14 @@ public synchronized DescribeConfigsResult describeConfigs(Collection configs, AlterConfigsOptions options) { + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { assertEquals(1, configs.size()); - Map.Entry entry = configs.entrySet().iterator().next(); + Map.Entry> entry = configs.entrySet().iterator().next(); ConfigResource res = entry.getKey(); - Config config = entry.getValue(); + Collection config = entry.getValue(); assertEquals(ConfigResource.Type.BROKER, res.type()); - config.entries().forEach(e -> brokerConfigs.put(e.name(), e.value())); + config.forEach(e -> brokerConfigs.put(e.configEntry().name(), e.configEntry().value())); return alterResult; } }; @@ -1410,9 +1114,9 @@ public synchronized AlterConfigsResult incrementalAlterConfigs(Map expectedConfigOps = Arrays.asList( - new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", "DEBUG"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", ""), AlterConfigOp.OpType.DELETE), - new AlterConfigOp(new ConfigEntry("kafka.server.KafkaApi", ""), AlterConfigOp.OpType.DELETE) + new AlterConfigOp(new ConfigEntry("kafka.server.KafkaApi", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", "DEBUG"), AlterConfigOp.OpType.SET) ); assertEquals(expectedConfigOps.size(), alterConfigOps.size()); Iterator alterConfigOpsIter = alterConfigOps.iterator(); @@ -1428,44 +1132,6 @@ public synchronized AlterConfigsResult incrementalAlterConfigs(Map ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldNotUpdateBrokerConfigIfMalformedEntityName() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", @@ -1476,16 +1142,6 @@ public void shouldNotUpdateBrokerConfigIfMalformedEntityName() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); } - @Test - public void shouldNotUpdateBrokerConfigIfMalformedConfigUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "1", - "--entity-type", "brokers", - "--alter", - "--add-config", "a==")); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldNotUpdateBrokerConfigIfMalformedConfig() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", @@ -1496,16 +1152,6 @@ public void shouldNotUpdateBrokerConfigIfMalformedConfig() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, 
"localhost", 9092)), createOpts)); } - @Test - public void shouldNotUpdateBrokerConfigIfMalformedBracketConfigUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "1", - "--entity-type", "brokers", - "--alter", - "--add-config", "a=[b,c,d=e")); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldNotUpdateBrokerConfigIfMalformedBracketConfig() { ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", @@ -1516,16 +1162,6 @@ public void shouldNotUpdateBrokerConfigIfMalformedBracketConfig() { assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); } - @Test - public void shouldNotUpdateConfigIfNonExistingConfigIsDeletedUsingZookeeper() { - ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "my-topic", - "--entity-type", "topics", - "--alter", - "--delete-config", "missing_config1, missing_config2")); - assertThrows(InvalidConfigurationException.class, () -> ConfigCommand.alterConfigWithZk(null, createOpts, DUMMY_ADMIN_ZK_CLIENT)); - } - @Test public void shouldNotUpdateConfigIfNonExistingConfigIsDeleted() { String resourceName = "my-topic"; @@ -1558,308 +1194,6 @@ public synchronized DescribeConfigsResult describeConfigs(Collection brokers, Properties configChange) { - assertEquals("f", configChange.get("e")); - assertEquals(1, configChange.size()); - } - } - - KafkaZkClient mockZkClient = mock(KafkaZkClient.class); - Broker mockBroker = mock(Broker.class); - when(mockZkClient.getBroker(1)).thenReturn(scala.Option.apply(mockBroker)); - - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(mockZkClient, createOpts, new TestAdminZkClient(null))); - } - - private ConfigCommand.ConfigCommandOptions createOpts(String user, String config) { - return new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", user, - "--entity-type", "users", - "--alter", - "--add-config", config)); - } - - private ConfigCommand.ConfigCommandOptions deleteOpts(String user, String mechanism) { - return new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", user, - "--entity-type", "users", - "--alter", - "--delete-config", mechanism)); - } - - @Test - public void testScramCredentials() { - Map credentials = new HashMap<>(); - class CredentialChange extends AdminZkClient { - private final String user; - private final Set mechanisms; - private final int iterations; - - public CredentialChange(String user, Set mechanisms, int iterations) { - super(null, scala.None$.empty()); - this.user = user; - this.mechanisms = mechanisms; - this.iterations = iterations; - } - - @Override - public Properties fetchEntityConfig(String entityType, String entityName) { - return credentials.getOrDefault(entityName, new Properties()); - } - - @Override - public void changeUserOrUserClientIdConfig(String sanitizedEntityName, Properties configChange, boolean isUserClientId) { - assertEquals(user, sanitizedEntityName); - assertEquals(mechanisms, configChange.keySet()); - for (String mechanism : mechanisms) { - String value = configChange.getProperty(mechanism); - assertEquals(-1, 
value.indexOf("password=")); - ScramCredential scramCredential = ScramCredentialUtils.credentialFromString(value); - if (iterations != scramCredential.iterations()) - System.out.println("CredentialChange.changeUserOrUserClientIdConfig"); - assertEquals(iterations, scramCredential.iterations()); - credentials.put(user, configChange); - } - } - } - ConfigCommand.ConfigCommandOptions optsA = createOpts("userA", "SCRAM-SHA-256=[iterations=8192,password=abc, def]"); - ConfigCommand.alterConfigWithZk(null, optsA, new CredentialChange("userA", Collections.singleton("SCRAM-SHA-256"), 8192)); - ConfigCommand.ConfigCommandOptions optsB = createOpts("userB", "SCRAM-SHA-256=[iterations=4096,password=abc, def],SCRAM-SHA-512=[password=1234=abc]"); - ConfigCommand.alterConfigWithZk(null, optsB, new CredentialChange("userB", new HashSet<>(Arrays.asList("SCRAM-SHA-256", "SCRAM-SHA-512")), 4096)); - - ConfigCommand.ConfigCommandOptions del256 = deleteOpts("userB", "SCRAM-SHA-256"); - ConfigCommand.alterConfigWithZk(null, del256, new CredentialChange("userB", Collections.singleton("SCRAM-SHA-512"), 4096)); - ConfigCommand.ConfigCommandOptions del512 = deleteOpts("userB", "SCRAM-SHA-512"); - ConfigCommand.alterConfigWithZk(null, del512, new CredentialChange("userB", Collections.emptySet(), 4096)); - } - - @Test - public void testQuotaConfigEntityUsingZookeeperNotAllowed() { - assertThrows(IllegalArgumentException.class, () -> doTestQuotaConfigEntity(true)); - } - - private List connectOpts; - - private ConfigCommand.ConfigCommandOptions createOpts(String entityType, Optional entityName, List otherArgs) { - List optArray = Arrays.asList(connectOpts.get(0), connectOpts.get(1), "--entity-type", entityType); - List nameArray = entityName - .map(s -> Arrays.asList("--entity-name", s)) - .orElse(Collections.emptyList()); - return new ConfigCommand.ConfigCommandOptions(toArray(optArray, nameArray, otherArgs)); - } - - private void checkEntity(String entityType, Optional entityName, String expectedEntityName, List otherArgs) { - ConfigCommand.ConfigCommandOptions opts = createOpts(entityType, entityName, otherArgs); - opts.checkArgs(); - ConfigCommand.ConfigEntity entity = ConfigCommand.parseEntity(opts); - assertEquals(entityType, entity.root().entityType()); - assertEquals(expectedEntityName, entity.fullSanitizedName()); - } - - private void checkInvalidArgs(String entityType, Optional entityName, List otherArgs) { - ConfigCommand.ConfigCommandOptions opts = createOpts(entityType, entityName, otherArgs); - assertThrows(IllegalArgumentException.class, opts::checkArgs); - } - - private void checkInvalidEntity(String entityType, Optional entityName, List otherArgs) { - ConfigCommand.ConfigCommandOptions opts = createOpts(entityType, entityName, otherArgs); - opts.checkArgs(); - assertThrows(IllegalArgumentException.class, () -> ConfigCommand.parseEntity(opts)); - } - - public void doTestQuotaConfigEntity(boolean zkConfig) { - connectOpts = zkConfig - ? 
Arrays.asList("--zookeeper", ZK_CONNECT) - : Arrays.asList("--bootstrap-server", "localhost:9092"); - - List describeOpts = Collections.singletonList("--describe"); - List alterOpts = Arrays.asList("--alter", "--add-config", "a=b,c=d"); - - // quota - String clientId = "client-1"; - for (List opts: Arrays.asList(describeOpts, alterOpts)) { - checkEntity("clients", Optional.of(clientId), clientId, opts); - checkEntity("clients", Optional.of(""), ZooKeeperInternals.DEFAULT_STRING, opts); - } - checkEntity("clients", Optional.empty(), "", describeOpts); - checkInvalidArgs("clients", Optional.empty(), alterOpts); - - // quota - String principal = "CN=ConfigCommandTest,O=Apache,L="; - String sanitizedPrincipal = Sanitizer.sanitize(principal); - assertEquals(-1, sanitizedPrincipal.indexOf('=')); - assertEquals(principal, Sanitizer.desanitize(sanitizedPrincipal)); - for (List opts: Arrays.asList(describeOpts, alterOpts)) { - checkEntity("users", Optional.of(principal), sanitizedPrincipal, opts); - checkEntity("users", Optional.of(""), ZooKeeperInternals.DEFAULT_STRING, opts); - } - checkEntity("users", Optional.empty(), "", describeOpts); - checkInvalidArgs("users", Optional.empty(), alterOpts); - - // quota - String userClient = sanitizedPrincipal + "/clients/" + clientId; - Function> clientIdOpts = name -> Arrays.asList("--entity-type", "clients", "--entity-name", name); - for (List opts : Arrays.asList(describeOpts, alterOpts)) { - checkEntity("users", Optional.of(principal), userClient, concat(opts, clientIdOpts.apply(clientId))); - checkEntity("users", Optional.of(principal), sanitizedPrincipal + "/clients/" + ZooKeeperInternals.DEFAULT_STRING, concat(opts, clientIdOpts.apply(""))); - checkEntity("users", Optional.of(""), ZooKeeperInternals.DEFAULT_STRING + "/clients/" + clientId, concat(describeOpts, clientIdOpts.apply(clientId))); - checkEntity("users", Optional.of(""), ZooKeeperInternals.DEFAULT_STRING + "/clients/" + ZooKeeperInternals.DEFAULT_STRING, concat(opts, clientIdOpts.apply(""))); - } - checkEntity("users", Optional.of(principal), sanitizedPrincipal + "/clients", concat(describeOpts, Arrays.asList("--entity-type", "clients"))); - // Both user and client-id must be provided for alter - checkInvalidEntity("users", Optional.of(principal), concat(alterOpts, Arrays.asList("--entity-type", "clients"))); - checkInvalidEntity("users", Optional.empty(), concat(alterOpts, clientIdOpts.apply(clientId))); - checkInvalidArgs("users", Optional.empty(), concat(alterOpts, Arrays.asList("--entity-type", "clients"))); - } - - @Test - public void testQuotaConfigEntity() { - doTestQuotaConfigEntity(false); - } - - @Test - public void testUserClientQuotaOptsUsingZookeeperNotAllowed() { - assertThrows(IllegalArgumentException.class, () -> doTestUserClientQuotaOpts(true)); - } - - private void checkEntity(String expectedEntityType, String expectedEntityName, String...args) { - ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts, Arrays.asList(args))); - opts.checkArgs(); - ConfigCommand.ConfigEntity entity = ConfigCommand.parseEntity(opts); - assertEquals(expectedEntityType, entity.root().entityType()); - assertEquals(expectedEntityName, entity.fullSanitizedName()); - } - - private void doTestUserClientQuotaOpts(boolean zkConfig) { - connectOpts = zkConfig - ? 
Arrays.asList("--zookeeper", ZK_CONNECT) - : Arrays.asList("--bootstrap-server", "localhost:9092"); - - // is a valid user principal and client-id (can be handled with URL-encoding), - checkEntity("users", Sanitizer.sanitize(""), - "--entity-type", "users", "--entity-name", "", - "--alter", "--add-config", "a=b,c=d"); - checkEntity("clients", Sanitizer.sanitize(""), - "--entity-type", "clients", "--entity-name", "", - "--alter", "--add-config", "a=b,c=d"); - - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1", - "--entity-type", "users", "--entity-name", "CN=user1", "--entity-type", "clients", "--entity-name", "client1", - "--alter", "--add-config", "a=b,c=d"); - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1", - "--entity-name", "CN=user1", "--entity-type", "users", "--entity-name", "client1", "--entity-type", "clients", - "--alter", "--add-config", "a=b,c=d"); - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1", - "--entity-type", "clients", "--entity-name", "client1", "--entity-type", "users", "--entity-name", "CN=user1", - "--alter", "--add-config", "a=b,c=d"); - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1", - "--entity-name", "client1", "--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users", - "--alter", "--add-config", "a=b,c=d"); - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients", - "--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users", - "--describe"); - checkEntity("users", "/clients", - "--entity-type", "clients", "--entity-type", "users", - "--describe"); - checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/" + Sanitizer.sanitize("client1?@%"), - "--entity-name", "client1?@%", "--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users", - "--alter", "--add-config", "a=b,c=d"); - } - - @Test - public void testUserClientQuotaOpts() { - doTestUserClientQuotaOpts(false); - } - - private final KafkaZkClient zkClient = mock(KafkaZkClient.class); - - public void checkEntities(List opts, Map> expectedFetches, List expectedEntityNames) { - ConfigCommand.ConfigEntity entity = ConfigCommand.parseEntity(new ConfigCommand.ConfigCommandOptions(toArray(opts, Collections.singletonList("--describe")))); - expectedFetches.forEach((name, values) -> - when(zkClient.getAllEntitiesWithConfig(name)).thenReturn(seq(values))); - Seq entities0 = entity.getAllEntities(zkClient); - List entities = new ArrayList<>(); - entities0.foreach(e -> { - entities.add(e); - return null; - }); - assertEquals( - expectedEntityNames, - entities.stream().map(ConfigCommand.ConfigEntity::fullSanitizedName).collect(Collectors.toList())); - } - - @Test - public void testQuotaDescribeEntities() { - String clientId = "a-client"; - String principal = "CN=ConfigCommandTest.testQuotaDescribeEntities , O=Apache, L="; - String sanitizedPrincipal = Sanitizer.sanitize(principal); - String userClient = sanitizedPrincipal + "/clients/" + clientId; - - List opts = Arrays.asList("--entity-type", "clients", "--entity-name", clientId); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList(clientId)); - - opts = Arrays.asList("--entity-type", "clients", "--entity-default"); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList("")); - - opts = Arrays.asList("--entity-type", "clients"); - checkEntities(opts, Collections.singletonMap("clients", Collections.singletonList(clientId)), 
Collections.singletonList(clientId)); - - opts = Arrays.asList("--entity-type", "users", "--entity-name", principal); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList(sanitizedPrincipal)); - - opts = Arrays.asList("--entity-type", "users", "--entity-default"); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList("")); - - opts = Arrays.asList("--entity-type", "users"); - checkEntities(opts, Collections.singletonMap("users", Arrays.asList("", sanitizedPrincipal)), Arrays.asList("", sanitizedPrincipal)); - - opts = Arrays.asList("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients", "--entity-name", clientId); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList(userClient)); - - opts = Arrays.asList("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients", "--entity-default"); - checkEntities(opts, Collections.emptyMap(), Collections.singletonList(sanitizedPrincipal + "/clients/")); - - opts = Arrays.asList("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients"); - checkEntities(opts, - Collections.singletonMap("users/" + sanitizedPrincipal + "/clients", Collections.singletonList("client-4")), - Collections.singletonList(sanitizedPrincipal + "/clients/client-4")); - - opts = Arrays.asList("--entity-type", "users", "--entity-default", "--entity-type", "clients"); - checkEntities(opts, - Collections.singletonMap("users//clients", Collections.singletonList("client-5")), - Collections.singletonList("/clients/client-5")); - - opts = Arrays.asList("--entity-type", "users", "--entity-type", "clients"); - Map> userMap = Collections.singletonMap("users/" + sanitizedPrincipal + "/clients", Collections.singletonList("client-2")); - Map> defaultUserMap = Collections.singletonMap("users//clients", Collections.singletonList("client-3")); - checkEntities(opts, - concat(Collections.singletonMap("users", Arrays.asList("", sanitizedPrincipal)), defaultUserMap, userMap), - Arrays.asList("/clients/client-3", sanitizedPrincipal + "/clients/client-2")); - } - @Test public void shouldAlterClientMetricsConfig() { Node node = new Node(1, "localhost", 9092); @@ -1913,9 +1247,9 @@ public synchronized AlterConfigsResult incrementalAlterConfigs(Map expectedConfigOps = Arrays.asList( + new AlterConfigOp(new ConfigEntry("interval.ms", ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry("match", "client_software_name=kafka.python,client_software_version=1\\.2\\..*"), AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry("metrics", "org.apache.kafka.consumer."), AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry("interval.ms", ""), AlterConfigOp.OpType.DELETE) + new AlterConfigOp(new ConfigEntry("metrics", "org.apache.kafka.consumer."), AlterConfigOp.OpType.SET) ); assertEquals(expectedConfigOps.size(), alterConfigOps.size()); Iterator alterConfigOpsIter = alterConfigOps.iterator(); @@ -1972,67 +1306,6 @@ public void shouldNotAlterClientMetricsConfigWithoutEntityName() { assertEquals("An entity name must be specified with --alter of client-metrics", exception.getMessage()); } - @Test - public void shouldNotSupportAlterClientMetricsWithZookeeperArg() { - ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "sub", - "--entity-type", "client-metrics", - "--alter", - "--add-config", "interval.ms=1000")); - - IllegalArgumentException exception = 
assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - // Test for the --client-metrics alias - alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--client-metrics", "sub", - "--alter", - "--add-config", "interval.ms=1000")); - - exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - } - - @Test - public void shouldNotSupportDescribeClientMetricsWithZookeeperArg() { - ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "sub", - "--entity-type", "client-metrics", - "--describe")); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, describeOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - // Test for the --client-metrics alias - describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--client-metrics", "sub", - "--describe")); - - exception = assertThrows(IllegalArgumentException.class, describeOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - } - - @Test - public void shouldNotSupportAlterClientMetricsWithZookeeper() { - ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "sub", - "--entity-type", "client-metrics", - "--alter", - "--add-config", "interval.ms=1000")); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - // Test for the --client-metrics alias - alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--client-metrics", "sub", - "--alter", - "--add-config", "interval.ms=1000")); - - exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type client-metrics, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - } - @Test public void shouldAlterGroupConfig() { Node node = new Node(1, "localhost", 9092); @@ -2084,8 +1357,8 @@ public synchronized AlterConfigsResult incrementalAlterConfigs(Map expectedConfigOps = Arrays.asList( - new AlterConfigOp(new ConfigEntry("consumer.heartbeat.interval.ms", "6000"), AlterConfigOp.OpType.SET), - new AlterConfigOp(new ConfigEntry("consumer.session.timeout.ms", ""), AlterConfigOp.OpType.DELETE) + new AlterConfigOp(new ConfigEntry("consumer.session.timeout.ms", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("consumer.heartbeat.interval.ms", "6000"), AlterConfigOp.OpType.SET) ); assertEquals(expectedConfigOps.size(), alterConfigOps.size()); Iterator alterConfigOpsIter = alterConfigOps.iterator(); @@ -2146,70 +1419,6 @@ public void shouldNotAlterGroupConfigWithoutEntityName() { assertEquals("An entity name must 
be specified with --alter of groups", exception.getMessage()); } - @Test - public void shouldNotSupportAlterGroupConfigWithZookeeperArg() { - ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "group", - "--entity-type", "groups", - "--alter", - "--add-config", "consumer.heartbeat.interval.ms=6000")); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type groups, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - // Test for the --group alias - alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--group", "group", - "--alter", - "--add-config", "consumer.heartbeat.interval.ms=6000")); - - exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); - assertEquals("Invalid entity type groups, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - } - - @Test - public void shouldNotSupportDescribeGroupConfigWithZookeeperArg() { - ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "group", - "--entity-type", "groups", - "--describe")); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, describeOpts::checkArgs); - assertEquals("Invalid entity type groups, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - // Test for the --group alias - describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--group", "group", - "--describe")); - - exception = assertThrows(IllegalArgumentException.class, describeOpts::checkArgs); - assertEquals("Invalid entity type groups, the entity type must be one of users, brokers with a --zookeeper argument", exception.getMessage()); - - } - - @Test - public void shouldNotSupportAlterGroupConfigWithZookeeper() { - ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--entity-name", "group", - "--entity-type", "groups", - "--alter", - "--add-config", "consumer.heartbeat.interval.ms=6000")); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, alterOpts, DUMMY_ADMIN_ZK_CLIENT)); - assertEquals("groups is not a known entityType. Should be one of List(topics, clients, users, brokers, ips)", exception.getMessage()); - - // Test for the --group alias - ConfigCommand.ConfigCommandOptions alterOptsUsingAlias = new ConfigCommand.ConfigCommandOptions(toArray("--zookeeper", ZK_CONNECT, - "--group", "group", - "--alter", - "--add-config", "consumer.heartbeat.interval.ms=6000")); - - exception = assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfigWithZk(null, alterOptsUsingAlias, DUMMY_ADMIN_ZK_CLIENT)); - assertEquals("groups is not a known entityType. Should be one of List(topics, clients, users, brokers, ips)", exception.getMessage()); - - } - public static String[] toArray(String... first) { return first; } @@ -2225,7 +1434,7 @@ public static List concat(List... lists) { } @SafeVarargs - public static Map concat(Map...maps) { + public static Map concat(Map... 
maps) { Map res = new HashMap<>(); Stream.of(maps) .map(Map::entrySet) @@ -2234,32 +1443,6 @@ public static Map concat(Map...maps) { return res; } - static class DummyAdminZkClient extends AdminZkClient { - public DummyAdminZkClient(KafkaZkClient zkClient) { - super(zkClient, scala.None$.empty()); - } - - @Override - public void changeBrokerConfig(Seq brokers, Properties configs) { - } - - @Override - public Properties fetchEntityConfig(String rootEntityType, String sanitizedEntityName) { - return new Properties(); - } - - @Override - public void changeClientIdConfig(String sanitizedClientId, Properties configs) { - } - - @Override - public void changeUserOrUserClientIdConfig(String sanitizedEntityName, Properties configs, boolean isUserClientId) { - } - - @Override - public void changeTopicConfig(String topic, Properties configs) { - } - } static class DummyAdminClient extends MockAdminClient { public DummyAdminClient(Node node) { @@ -2276,11 +1459,6 @@ public synchronized AlterConfigsResult incrementalAlterConfigs(Map configs, AlterConfigsOptions options) { - return mock(AlterConfigsResult.class); - } - @Override public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) { return mock(DescribeClientQuotasResult.class); diff --git a/core/src/test/java/kafka/admin/DeleteTopicTest.java b/core/src/test/java/kafka/admin/DeleteTopicTest.java index 76173378e0510..be87e086f7f5a 100644 --- a/core/src/test/java/kafka/admin/DeleteTopicTest.java +++ b/core/src/test/java/kafka/admin/DeleteTopicTest.java @@ -39,7 +39,6 @@ import org.apache.kafka.common.test.api.Type; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.metadata.BrokerState; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.RequestLocal; import org.apache.kafka.server.config.ServerConfigs; import org.apache.kafka.storage.internals.log.AppendOrigin; @@ -57,6 +56,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import scala.Option; import scala.jdk.javaapi.OptionConverters; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; @@ -79,7 +79,7 @@ public class DeleteTopicTest { @ClusterTest public void testDeleteTopicWithAllAliveReplicas(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); cluster.waitForTopic(DEFAULT_TOPIC, 0); @@ -88,7 +88,7 @@ public void testDeleteTopicWithAllAliveReplicas(ClusterInstance cluster) throws @ClusterTest public void testResumeDeleteTopicWithRecoveredFollower(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); int leaderId = waitUtilLeaderIsKnown(cluster.brokers(), topicPartition); @@ -111,7 +111,7 @@ public void testResumeDeleteTopicWithRecoveredFollower(ClusterInstance cluster) @ClusterTest(brokers = 4) public void testPartitionReassignmentDuringDeleteTopic(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, 
expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); Map servers = findPartitionHostingBrokers(cluster.brokers()); @@ -137,7 +137,7 @@ public void testPartitionReassignmentDuringDeleteTopic(ClusterInstance cluster) @ClusterTest(brokers = 4) public void testIncreasePartitionCountDuringDeleteTopic(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); Map partitionHostingBrokers = findPartitionHostingBrokers(cluster.brokers()); @@ -165,7 +165,7 @@ public void testIncreasePartitionCountDuringDeleteTopic(ClusterInstance cluster) @ClusterTest public void testDeleteTopicDuringAddPartition(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); int leaderId = waitUtilLeaderIsKnown(cluster.brokers(), new TopicPartition(DEFAULT_TOPIC, 0)); TopicPartition newTopicPartition = new TopicPartition(DEFAULT_TOPIC, 1); @@ -190,7 +190,7 @@ public void testDeleteTopicDuringAddPartition(ClusterInstance cluster) throws Ex @ClusterTest public void testAddPartitionDuringDeleteTopic(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); // partitions to be added to the topic later TopicPartition newTopicPartition = new TopicPartition(DEFAULT_TOPIC, 1); @@ -204,7 +204,7 @@ public void testAddPartitionDuringDeleteTopic(ClusterInstance cluster) throws Ex @ClusterTest public void testRecreateTopicAfterDeletion(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); @@ -216,7 +216,7 @@ public void testRecreateTopicAfterDeletion(ClusterInstance cluster) throws Excep } @ClusterTest public void testDeleteNonExistingTopic(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); String topic = "test2"; @@ -239,11 +239,10 @@ public void testDeleteNonExistingTopic(ClusterInstance cluster) throws Exception @ClusterTest(serverProperties = { @ClusterConfigProperty(key = "log.cleaner.enable", value = "true"), @ClusterConfigProperty(key = "log.cleanup.policy", value = "compact"), - @ClusterConfigProperty(key = "log.segment.bytes", value = "100"), @ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "1048577") }) public void testDeleteTopicWithCleaner(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new 
TopicPartition(DEFAULT_TOPIC, 0); // for simplicity, we are validating cleaner offsets on a single broker @@ -252,6 +251,8 @@ public void testDeleteTopicWithCleaner(ClusterInstance cluster) throws Exception "Replicas for topic test not created."); UnifiedLog log = server.logManager().getLog(topicPartition, false).get(); writeDups(100, 3, log); + // force roll the segment so that cleaner can work on it + server.logManager().getLog(topicPartition, false).get().roll(Option.empty()); // wait for cleaner to clean server.logManager().cleaner().awaitCleaned(topicPartition, 0, 60000); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); @@ -262,7 +263,7 @@ public void testDeleteTopicWithCleaner(ClusterInstance cluster) throws Exception @ClusterTest public void testDeleteTopicAlreadyMarkedAsDeleted(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); @@ -282,7 +283,7 @@ public void testDeleteTopicAlreadyMarkedAsDeleted(ClusterInstance cluster) throw @ClusterTest(controllers = 1, serverProperties = {@ClusterConfigProperty(key = ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, value = "false")}) public void testDisableDeleteTopic(ClusterInstance cluster) throws Exception { - try (Admin admin = cluster.createAdminClient()) { + try (Admin admin = cluster.admin()) { admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); TestUtils.waitForCondition(() -> { @@ -374,7 +375,6 @@ private List writeDups(int numKeys, int numDups, UnifiedLog log) { ), 0, AppendOrigin.CLIENT, - MetadataVersion.LATEST_PRODUCTION, RequestLocal.noCaching(), VerificationGuard.SENTINEL ); @@ -384,4 +384,4 @@ private List writeDups(int numKeys, int numDups, UnifiedLog log) { } return result; } -} \ No newline at end of file +} diff --git a/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java b/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java index 55db18893f8f7..697dda07363a7 100644 --- a/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java +++ b/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java @@ -62,10 +62,10 @@ public ConfigCommandResult(String stdout, OptionalInt exitStatus) { } } - private ConfigCommandResult runConfigCommandViaBroker(String...args) { + private ConfigCommandResult runConfigCommandViaBroker(String... args) { AtomicReference exitStatus = new AtomicReference<>(OptionalInt.empty()); Exit.setExitProcedure((status, __) -> { - exitStatus.set(OptionalInt.of((Integer) status)); + exitStatus.set(OptionalInt.of(status)); throw new RuntimeException(); }); diff --git a/core/src/test/java/kafka/clients/consumer/AsyncKafkaConsumerIntegrationTest.java b/core/src/test/java/kafka/clients/consumer/AsyncKafkaConsumerIntegrationTest.java deleted file mode 100644 index ff851145a21ef..0000000000000 --- a/core/src/test/java/kafka/clients/consumer/AsyncKafkaConsumerIntegrationTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.clients.consumer; - -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.GroupProtocol; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.test.TestUtils; -import org.apache.kafka.common.test.api.ClusterConfigProperty; -import org.apache.kafka.common.test.api.ClusterInstance; -import org.apache.kafka.common.test.api.ClusterTest; -import org.apache.kafka.common.test.api.ClusterTestExtensions; -import org.apache.kafka.common.test.api.ClusterTests; - -import org.junit.jupiter.api.extension.ExtendWith; - -import java.time.Duration; -import java.util.Collections; -import java.util.Map; - -@ExtendWith(ClusterTestExtensions.class) -public class AsyncKafkaConsumerIntegrationTest { - - @ClusterTests({ - @ClusterTest(serverProperties = { - @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - @ClusterConfigProperty(key = "group.coordinator.new.enable", value = "false") - }), - @ClusterTest(serverProperties = { - @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic") - }) - }) - public void testAsyncConsumerWithOldGroupCoordinator(ClusterInstance clusterInstance) throws Exception { - String topic = "test-topic"; - clusterInstance.createTopic(topic, 1, (short) 1); - try (KafkaConsumer consumer = new KafkaConsumer<>(Map.of( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(), - ConsumerConfig.GROUP_ID_CONFIG, "test-group", - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), - ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name()))) { - consumer.subscribe(Collections.singletonList(topic)); - TestUtils.waitForCondition(() -> { - try { - consumer.poll(Duration.ofMillis(1000)); - return false; - } catch (UnsupportedVersionException e) { - return e.getMessage().contains("The cluster doesn't yet support the new consumer group protocol. 
" + - "Set group.protocol=classic to revert to the classic protocol until the cluster is upgraded."); - } - }, "Should get UnsupportedVersionException and how to revert to classic protocol"); - } - } -} diff --git a/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java b/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java new file mode 100644 index 0000000000000..542b9c48d0d7b --- /dev/null +++ b/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java @@ -0,0 +1,245 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.clients.consumer; + +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.GroupProtocol; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.TestUtils; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestExtensions; +import org.apache.kafka.common.test.api.ClusterTests; +import org.apache.kafka.common.test.api.Type; + +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.Duration; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ExtendWith(ClusterTestExtensions.class) +public class ConsumerIntegrationTest { + + @ClusterTests({ + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "group.coordinator.new.enable", value = "false") + }), + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = 
"offsets.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic") + }) + }) + public void testAsyncConsumerWithOldGroupCoordinator(ClusterInstance clusterInstance) throws Exception { + String topic = "test-topic"; + clusterInstance.createTopic(topic, 1, (short) 1); + try (KafkaConsumer consumer = new KafkaConsumer<>(Map.of( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(), + ConsumerConfig.GROUP_ID_CONFIG, "test-group", + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name()))) { + consumer.subscribe(Collections.singletonList(topic)); + TestUtils.waitForCondition(() -> { + try { + consumer.poll(Duration.ofMillis(1000)); + return false; + } catch (UnsupportedVersionException e) { + return e.getMessage().equals(AbstractHeartbeatRequestManager.CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG); + } + }, "Should get UnsupportedVersionException and how to revert to classic protocol"); + } + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsAfterFailedListenerWithGroupProtocolClassic(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsAfterFailedListener(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsAfterFailedListenerWithGroupProtocolConsumer(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsAfterFailedListener(clusterInstance, GroupProtocol.CONSUMER); + } + + private static void testFetchPartitionsAfterFailedListener(ClusterInstance clusterInstance, GroupProtocol groupProtocol) + throws InterruptedException { + var topic = "topic"; + try (var producer = clusterInstance.producer(Map.of( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class))) { + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + } + + try (var consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()))) { + consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() { + private int count = 0; + @Override + public void onPartitionsRevoked(Collection partitions) { + } + + @Override + public void onPartitionsAssigned(Collection partitions) { + count++; + if (count == 1) throw new IllegalArgumentException("temporary error"); + } + }); + + TestUtils.waitForCondition(() -> consumer.poll(Duration.ofSeconds(1)).count() == 1, + 5000, + "failed to poll data"); + } + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsWithAlwaysFailedListenerWithGroupProtocolClassic(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsWithAlwaysFailedListener(clusterInstance, GroupProtocol.CLASSIC); + } + + 
@ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsWithAlwaysFailedListenerWithGroupProtocolConsumer(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsWithAlwaysFailedListener(clusterInstance, GroupProtocol.CONSUMER); + } + + private static void testFetchPartitionsWithAlwaysFailedListener(ClusterInstance clusterInstance, GroupProtocol groupProtocol) + throws InterruptedException { + var topic = "topic"; + try (var producer = clusterInstance.producer(Map.of( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class))) { + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + } + + try (var consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()))) { + consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() { + @Override + public void onPartitionsRevoked(Collection partitions) { + } + + @Override + public void onPartitionsAssigned(Collection partitions) { + throw new IllegalArgumentException("always failed"); + } + }); + + long startTimeMillis = System.currentTimeMillis(); + long currentTimeMillis = System.currentTimeMillis(); + while (currentTimeMillis < startTimeMillis + 3000) { + currentTimeMillis = System.currentTimeMillis(); + try { + // In the async consumer, there is a possibility that the ConsumerRebalanceListenerCallbackCompletedEvent + // has not yet reached the application thread. And a poll operation might still succeed, but it + // should not return any records since none of the assigned topic partitions are marked as fetchable. 
+ assertEquals(0, consumer.poll(Duration.ofSeconds(1)).count()); + } catch (KafkaException ex) { + assertEquals("User rebalance callback throws an error", ex.getMessage()); + } + Thread.sleep(300); + } + } + } + + @ClusterTest(types = {Type.KRAFT}, brokers = 3) + public void testLeaderEpoch(ClusterInstance clusterInstance) throws Exception { + String topic = "test-topic"; + clusterInstance.createTopic(topic, 1, (short) 2); + var msgNum = 10; + sendMsg(clusterInstance, topic, msgNum); + + try (var consumer = clusterInstance.consumer()) { + TopicPartition targetTopicPartition = new TopicPartition(topic, 0); + List<TopicPartition> topicPartitions = List.of(targetTopicPartition); + consumer.assign(topicPartitions); + consumer.seekToBeginning(List.of(targetTopicPartition)); + + int consumed = 0; + while (consumed < msgNum) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(1000)); + for (ConsumerRecord record : records) { + assertTrue(record.leaderEpoch().isPresent()); + assertEquals(0, record.leaderEpoch().get()); + } + consumed += records.count(); + } + + // make the leader epoch increment by shutting down the leader broker + clusterInstance.shutdownBroker(clusterInstance.getLeaderBrokerId(targetTopicPartition)); + + sendMsg(clusterInstance, topic, msgNum); + + consumed = 0; + while (consumed < msgNum) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(1000)); + for (ConsumerRecord record : records) { + assertTrue(record.leaderEpoch().isPresent()); + assertEquals(1, record.leaderEpoch().get()); + } + consumed += records.count(); + } + } + } + + private void sendMsg(ClusterInstance clusterInstance, String topic, int sendMsgNum) { + try (var producer = clusterInstance.producer(Map.of( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class, + ProducerConfig.ACKS_CONFIG, "-1"))) { + for (int i = 0; i < sendMsgNum; i++) { + producer.send(new ProducerRecord<>(topic, ("key_" + i), ("value_" + i))); + } + producer.flush(); + } + } +} diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 7bd77ba105261..b956c12380694 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -36,7 +36,6 @@ import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.server.common.OffsetAndEpoch; @@ -165,12 +164,16 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; @@ -225,13 +228,14 @@ public
class RemoteLogManagerTest { private UnifiedLog mockLog = mock(UnifiedLog.class); private final MockScheduler scheduler = new MockScheduler(time); + private final Properties brokerConfig = kafka.utils.TestUtils.createDummyBrokerConfig(); @BeforeEach void setUp() throws Exception { checkpoint = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)); topicIds.put(leaderTopicIdPartition.topicPartition().topic(), leaderTopicIdPartition.topicId()); topicIds.put(followerTopicIdPartition.topicPartition().topic(), followerTopicIdPartition.topicId()); - Properties props = kafka.utils.TestUtils.createDummyBrokerConfig(); + Properties props = brokerConfig; props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); appendRLMConfig(props); @@ -259,6 +263,7 @@ long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { return 0L; } }; + doReturn(true).when(remoteLogMetadataManager).isReady(any(TopicIdPartition.class)); } @AfterEach @@ -274,7 +279,7 @@ void tearDown() { void testGetLeaderEpochCheckpoint() { checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); assertEquals(totalEpochEntries, remoteLogManager.getLeaderEpochEntries(mockLog, 0, 300)); List epochEntries = remoteLogManager.getLeaderEpochEntries(mockLog, 100, 200); @@ -290,7 +295,7 @@ void testFindHighestRemoteOffsetOnEmptyRemoteStorage() throws RemoteStorageExce ); checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog); assertEquals(new OffsetAndEpoch(-1L, -1), offsetAndEpoch); @@ -304,7 +309,7 @@ void testFindHighestRemoteOffset() throws RemoteStorageException { ); checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { Integer epoch = ans.getArgument(1, Integer.class); @@ -327,7 +332,7 @@ void testFindHighestRemoteOffsetWithUncleanLeaderElection() throws RemoteStorage ); checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { Integer epoch = ans.getArgument(1, Integer.class); @@ -346,7 +351,7 @@ void testRemoteLogMetadataManagerWithUserDefinedConfigs() { String key = "key"; String configPrefix = "config.prefix"; Properties props = new Properties(); - props.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + props.putAll(brokerConfig); 
props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP, configPrefix); props.put(configPrefix + key, "world"); props.put("remote.log.metadata.y", "z"); @@ -363,7 +368,7 @@ void testRemoteStorageManagerWithUserDefinedConfigs() { String key = "key"; String configPrefix = "config.prefix"; Properties props = new Properties(); - props.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + props.putAll(brokerConfig); props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP, configPrefix); props.put(configPrefix + key, "world"); props.put("remote.storage.manager.y", "z"); @@ -395,7 +400,7 @@ void testRemoteLogMetadataManagerWithEndpointConfig() { @Test void testRemoteLogMetadataManagerWithEndpointConfigOverridden() throws IOException { Properties props = new Properties(); - props.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + props.putAll(brokerConfig); // override common security.protocol by adding "RLMM prefix" and "remote log metadata common client prefix" props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "security.protocol", "SSL"); appendRLMConfig(props); @@ -496,7 +501,7 @@ private void assertCopyExpectedLogSegmentsToRemote(long oldSegmentStartOffset, // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); File tempFile = TestUtils.tempFile(); @@ -610,7 +615,7 @@ void testCustomMetadataSizeExceedsLimit() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); File tempFile = TestUtils.tempFile(); @@ -702,7 +707,7 @@ void testFailedCopyShouldDeleteTheDanglingSegment() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); File tempFile = TestUtils.tempFile(); @@ -792,7 +797,7 @@ void testRemoteLogManagerTasksAvgIdlePercentAndMetadataCountMetrics() throws Exc // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); File tempFile = TestUtils.tempFile(); @@ -911,7 +916,7 @@ void testRemoteLogTaskUpdateRemoteLogSegmentMetadataAfterLogDirChanged() throws // leader epoch 
preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) .thenReturn(Optional.of(0L)) .thenReturn(Optional.of(nextSegmentStartOffset - 1)); @@ -990,7 +995,7 @@ void testRemoteLogTaskUpdateRemoteLogSegmentMetadataAfterLogDirChanged() throws // simulate altering log dir completes, and the new partition leader changes to the same broker in different log dir (dir2) mockLog = mock(UnifiedLog.class); when(mockLog.parentDir()).thenReturn("dir2"); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.config()).thenReturn(logConfig); when(mockLog.logEndOffset()).thenReturn(500L); @@ -1026,7 +1031,7 @@ void testRemoteLogManagerRemoteMetrics() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); File tempFile = TestUtils.tempFile(); @@ -1190,7 +1195,7 @@ void testMetricsUpdateOnCopyLogSegmentsFailure() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); File tempFile = TestUtils.tempFile(); @@ -1265,7 +1270,7 @@ void testRLMTaskDoesNotUploadSegmentsWhenRemoteLogMetadataManagerIsNotInitialize // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); // Throw a retryable exception so indicate that the remote log metadata manager is not initialized yet when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) @@ -1435,7 +1440,7 @@ public void testFetchNextSegmentWithTxnIndex() throws RemoteStorageException { public void testFindNextSegmentWithTxnIndex() throws RemoteStorageException { checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) .thenReturn(Optional.of(0L)); @@ -1466,7 +1471,7 @@ public void testFindNextSegmentWithTxnIndex() throws RemoteStorageException { public void testFindNextSegmentWithTxnIndexTraversesNextEpoch() throws RemoteStorageException { checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - 
when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) .thenReturn(Optional.of(0L)); @@ -1677,7 +1682,6 @@ private void doTestFindOffsetByTimestamp(long ts, long startOffset, int targetLe remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); } - @Flaky("KAFKA-17779") @Test void testFetchOffsetByTimestampWithTieredStorageDoesNotFetchIndexWhenExistsLocally() throws Exception { TopicPartition tp = new TopicPartition("sample", 0); @@ -1692,7 +1696,7 @@ void testFetchOffsetByTimestampWithTieredStorageDoesNotFetchIndexWhenExistsLocal epochEntries.add(new EpochEntry(5, 200L)); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); long timestamp = time.milliseconds(); RemoteLogSegmentMetadata metadata0 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(tpId, Uuid.randomUuid()), @@ -2144,21 +2148,22 @@ public void testStopPartitionsWithDeletion() throws RemoteStorageException { Set partitions = new HashSet<>(); partitions.add(new StopPartition(leaderTopicIdPartition.topicPartition(), true, true, true)); partitions.add(new StopPartition(followerTopicIdPartition.topicPartition(), true, true, true)); - remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), - Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); - assertNotNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); - assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); - assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition))) - .thenReturn(listRemoteLogSegmentMetadata(leaderTopicIdPartition, 5, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); + .thenAnswer(invocation -> listRemoteLogSegmentMetadata(leaderTopicIdPartition, 5, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); when(remoteLogMetadataManager.listRemoteLogSegments(eq(followerTopicIdPartition))) - .thenReturn(listRemoteLogSegmentMetadata(followerTopicIdPartition, 3, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); + .thenAnswer(invocation -> listRemoteLogSegmentMetadata(followerTopicIdPartition, 3, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); CompletableFuture dummyFuture = new CompletableFuture<>(); dummyFuture.complete(null); when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any())) .thenReturn(dummyFuture); + remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), + Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + assertNotNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); + remoteLogManager.stopPartitions(partitions, errorHandler); assertNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); assertNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); @@ -2182,7 +2187,7 @@ public void testFindLogStartOffset() throws 
RemoteStorageException, IOException checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); long timestamp = time.milliseconds(); int segmentSize = 1024; @@ -2220,7 +2225,7 @@ public void testFindLogStartOffsetFallbackToLocalLogStartOffsetWhenRemoteIsEmpty checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.localLogStartOffset()).thenReturn(250L); when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) .thenReturn(Collections.emptyIterator()); @@ -2245,7 +2250,7 @@ public void testLogStartOffsetUpdatedOnStartup() throws RemoteStorageException, checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); RemoteLogSegmentMetadata metadata = mock(RemoteLogSegmentMetadata.class); when(metadata.startOffset()).thenReturn(600L); @@ -2345,7 +2350,7 @@ long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { // leader epoch preparation checkpoint.write(Collections.singletonList(epochEntry0)); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); // create 2 log segments, with 0 and 150 as log start offset LogSegment oldSegment = mock(LogSegment.class); @@ -2450,7 +2455,7 @@ public void testDeletionOnRetentionBreachedSegments(long retentionSize, List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); when(mockLog.logEndOffset()).thenReturn(200L); @@ -2502,7 +2507,7 @@ public void testDeletionOnOverlappingRetentionBreachedSegments(long retentionSiz List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); when(mockLog.logEndOffset()).thenReturn(200L); @@ -2570,7 +2575,7 @@ public void testRemoteDeleteLagsOnRetentionBreachedSegments(long retentionSize, List epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); when(mockLog.logEndOffset()).thenReturn(200L); @@ 
-2617,7 +2622,7 @@ public void testRemoteLogSizeRetentionShouldFilterOutCopySegmentStartState() List<EpochEntry> epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); when(mockLog.logEndOffset()).thenReturn(2000L); @@ -2711,7 +2716,7 @@ public void testDeleteRetentionMsBeingCancelledBeforeSecondDelete() throws Remot checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); Map logProps = new HashMap<>(); logProps.put("retention.bytes", -1L); @@ -2759,6 +2764,35 @@ public void testDeleteRetentionMsBeingCancelledBeforeSecondDelete() throws Remot verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(1)); } + @Test + public void testDeleteRetentionMsBiggerThanTimeMs() throws RemoteStorageException, ExecutionException, InterruptedException { + // add 1 month to the current time to avoid a flaky test + LogConfig mockLogConfig = new LogConfig(Map.of("retention.ms", time.milliseconds() + 24 * 30 * 60 * 60 * 1000L)); + when(mockLog.config()).thenReturn(mockLogConfig); + + RemoteLogManager.RLMExpirationTask leaderTask = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List<EpochEntry> epochEntries = Collections.singletonList(epochEntry0); + + List<RemoteLogSegmentMetadata> metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + assertDoesNotThrow(leaderTask::cleanupExpiredRemoteLogSegments); + + verify(remoteStorageManager, never()).deleteLogSegmentData(any()); + } + @ParameterizedTest(name = "testFailedDeleteExpiredSegments retentionSize={0} retentionMs={1}") @CsvSource(value = {"0, -1", "-1, 0"}) public void testFailedDeleteExpiredSegments(long retentionSize, @@ -2772,7 +2806,7 @@ public void testFailedDeleteExpiredSegments(long retentionSize, List<EpochEntry> epochEntries = Collections.singletonList(epochEntry0); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); when(mockLog.logEndOffset()).thenReturn(200L); @@ -2842,7 +2876,7 @@ public void testDeleteLogSegmentDueToRetentionSizeBreach(int segmentCount, long localLogStartOffset = (long) segmentCount * recordsPerSegment; long logEndOffset = ((long) segmentCount * recordsPerSegment) + 1; - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); +
when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.localLogStartOffset()).thenReturn(localLogStartOffset); when(mockLog.logEndOffset()).thenReturn(logEndOffset); when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(localLogSegmentsSize); @@ -2880,7 +2914,7 @@ public void testDeleteLogSegmentDueToRetentionTimeBreach(int segmentCount, long localLogStartOffset = (long) segmentCount * recordsPerSegment; long logEndOffset = ((long) segmentCount * recordsPerSegment) + 1; - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.localLogStartOffset()).thenReturn(localLogStartOffset); when(mockLog.logEndOffset()).thenReturn(logEndOffset); when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(localLogSegmentsSize); @@ -2967,7 +3001,7 @@ public RemoteLogMetadataManager createRemoteLogMetadataManager() { checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); Map logProps = new HashMap<>(); logProps.put("retention.bytes", -1L); @@ -3085,7 +3119,7 @@ public void testReadForMissingFirstBatchInRemote() throws RemoteStorageException when(remoteStorageManager.fetchLogSegment(any(RemoteLogSegmentMetadata.class), anyInt())) .thenAnswer(a -> fileInputStream); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); int fetchOffset = 0; int fetchMaxBytes = 10; @@ -3115,21 +3149,25 @@ public RemoteLogMetadataManager createRemoteLogMetadataManager() { return remoteLogMetadataManager; } + @Override public Optional fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, int epochForOffset, long offset) { return Optional.of(segmentMetadata); } + @Override public Optional findNextSegmentMetadata(RemoteLogSegmentMetadata segmentMetadata, - Option leaderEpochFileCacheOption) { + LeaderEpochFileCache leaderEpochFileCacheOption) { return Optional.empty(); } + @Override int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { return 1; } // This is the key scenario that we are testing here + @Override EnrichedRecordBatch findFirstBatch(RemoteLogInputStream remoteLogInputStream, long offset) { return new EnrichedRecordBatch(null, 0); } @@ -3155,7 +3193,7 @@ public void testReadForFirstBatchMoreThanMaxFetchBytes(boolean minOneMessage) th when(remoteStorageManager.fetchLogSegment(any(RemoteLogSegmentMetadata.class), anyInt())) .thenAnswer(a -> fileInputStream); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); int fetchOffset = 0; int fetchMaxBytes = 10; @@ -3230,7 +3268,7 @@ public void testReadForFirstBatchInLogCompaction() throws RemoteStorageException RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); LeaderEpochFileCache cache = mock(LeaderEpochFileCache.class); when(cache.epochForOffset(anyLong())).thenReturn(OptionalInt.of(1)); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); int fetchOffset = 0; int fetchMaxBytes = 10; @@ -3302,7 +3340,7 @@ int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, l @Test public void testCopyQuotaManagerConfig() { Properties defaultProps = new Properties(); - defaultProps.put("zookeeper.connect", 
kafka.utils.TestUtils.MockZkConnect()); + defaultProps.putAll(brokerConfig); appendRLMConfig(defaultProps); KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps); RLMQuotaManagerConfig defaultConfig = RemoteLogManager.copyQuotaManagerConfig(defaultRlmConfig.remoteLogManagerConfig()); @@ -3311,7 +3349,7 @@ public void testCopyQuotaManagerConfig() { assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds()); Properties customProps = new Properties(); - customProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + customProps.putAll(brokerConfig); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP, 100); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM_PROP, 31); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1); @@ -3327,7 +3365,7 @@ public void testCopyQuotaManagerConfig() { @Test public void testFetchQuotaManagerConfig() { Properties defaultProps = new Properties(); - defaultProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + defaultProps.putAll(brokerConfig); appendRLMConfig(defaultProps); KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps); @@ -3337,7 +3375,7 @@ public void testFetchQuotaManagerConfig() { assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds()); Properties customProps = new Properties(); - customProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect()); + customProps.putAll(brokerConfig); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP, 100); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM_PROP, 31); customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1); @@ -3435,7 +3473,7 @@ private RemoteLogManager.RLMCopyTask setupRLMTask(boolean quotaExceeded) throws // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(mockLog.parentDir()).thenReturn("dir1"); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); @@ -3498,7 +3536,7 @@ public void testCopyThrottling() throws Exception { // leader epoch preparation checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); // create 3 log segments @@ -3597,7 +3635,7 @@ public void testTierLagResetsToZeroOnBecomingFollower() { public void testRemoteReadFetchDataInfo() throws RemoteStorageException, IOException { checkpoint.write(totalEpochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); - when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.leaderEpochCache()).thenReturn(cache); when(remoteLogMetadataManager.remoteLogSegmentMetadata(eq(leaderTopicIdPartition), anyInt(), anyLong())) .thenAnswer(ans -> 
{ long offset = ans.getArgument(2); @@ -3643,6 +3681,69 @@ int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, l assertEquals(273, fetchDataInfo.fetchOffsetMetadata.relativePositionInSegment); } + @Test + public void testRLMOpsWhenMetadataIsNotReady() throws InterruptedException, IOException { + // Recreate a remoteLogManager with default REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP (default value is 30000). + // The value in setup function is 100 which is too small. If the case can't run two verifyNoMoreInteractions in + // 100ms, the test will fail. + remoteLogManager.close(); + clearInvocations(remoteLogMetadataManager, remoteStorageManager); + Properties props = brokerConfig; + props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); + props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "30000"); + appendRLMConfig(props); + config = KafkaConfig.fromProps(props); + + remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> currentLogStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + public RLMQuotaManager createRLMCopyQuotaManager() { + return rlmCopyQuotaManager; + } + public Duration quotaTimeout() { + return Duration.ofMillis(100); + } + @Override + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { + return 0L; + } + }; + + CountDownLatch latch = new CountDownLatch(3); // there are 3 RLMTasks, so setting the count to 3 + when(remoteLogMetadataManager.isReady(any(TopicIdPartition.class))) + .thenAnswer(ans -> { + latch.countDown(); + return false; + }); + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), + Collections.singleton(mockPartition(followerTopicIdPartition)), + topicIds + ); + assertNotNull(remoteLogManager.rlmCopyTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); + + // Once the partitions are assigned to the broker either as leader (or) follower in RLM#onLeadershipChange, + // then it should have called the `isReady` method for each of the partitions. Otherwise, the test will fail. 
+ latch.await(5, TimeUnit.SECONDS); + verify(remoteLogMetadataManager).configure(anyMap()); + verify(remoteLogMetadataManager).onPartitionLeadershipChanges(anySet(), anySet()); + verify(remoteLogMetadataManager, atLeastOnce()).isReady(eq(leaderTopicIdPartition)); + verify(remoteLogMetadataManager, atLeastOnce()).isReady(eq(followerTopicIdPartition)); + verifyNoMoreInteractions(remoteLogMetadataManager); + verify(remoteStorageManager).configure(anyMap()); + verifyNoMoreInteractions(remoteStorageManager); + } + private void appendRecordsToFile(File file, int nRecords, int nRecordsPerBatch) throws IOException { byte magic = RecordBatch.CURRENT_MAGIC_VALUE; Compression compression = Compression.NONE; diff --git a/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java b/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java index ce027f8f91510..9737ed72a9bc3 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java @@ -16,7 +16,6 @@ */ package kafka.log.remote; -import kafka.log.AsyncOffsetReadFutureHolder; import kafka.utils.TestUtils; import org.apache.kafka.common.TopicPartition; @@ -28,7 +27,9 @@ import org.apache.kafka.server.util.MockTime; import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder; import org.apache.kafka.storage.internals.log.LogDirFailureChannel; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.junit.jupiter.api.AfterEach; @@ -49,7 +50,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import scala.Option; -import scala.util.Either; import static org.apache.kafka.common.record.FileRecords.TimestampAndOffset; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -81,23 +81,23 @@ void tearDown() throws IOException { @Test public void testReadRemoteLog() throws Exception { - AsyncOffsetReadFutureHolder>> asyncOffsetReadFutureHolder = + AsyncOffsetReadFutureHolder asyncOffsetReadFutureHolder = rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty); asyncOffsetReadFutureHolder.taskFuture().get(1, TimeUnit.SECONDS); assertTrue(asyncOffsetReadFutureHolder.taskFuture().isDone()); - Either> result = asyncOffsetReadFutureHolder.taskFuture().get(); - assertFalse(result.isLeft()); - assertTrue(result.isRight()); - assertEquals(Option.apply(new TimestampAndOffset(100L, 90L, Optional.of(3))), - result.right().get()); + OffsetResultHolder.FileRecordsOrError result = asyncOffsetReadFutureHolder.taskFuture().get(); + assertFalse(result.hasException()); + assertTrue(result.hasTimestampAndOffset()); + assertEquals(new TimestampAndOffset(100L, 90L, Optional.of(3)), + result.timestampAndOffset().get()); } @Test public void testTaskQueueFullAndCancelTask() throws Exception { rlm.pause(); - List>>> holderList = new ArrayList<>(); + List> holderList = new ArrayList<>(); // Task queue size is 1 and number of threads is 2, so it can accept at-most 3 items for (int i = 0; i < 3; i++) { holderList.add(rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty)); @@ -111,7 +111,7 @@ public void testTaskQueueFullAndCancelTask() throws Exception { holderList.get(2).jobFuture().cancel(false); rlm.resume(); - for (AsyncOffsetReadFutureHolder>> holder : 
holderList) { + for (AsyncOffsetReadFutureHolder holder : holderList) { if (!holder.jobFuture().isCancelled()) { holder.taskFuture().get(1, TimeUnit.SECONDS); } @@ -133,13 +133,13 @@ public Optional findOffsetByTimestamp(TopicPartition tp, throw exception; } }) { - AsyncOffsetReadFutureHolder>> futureHolder + AsyncOffsetReadFutureHolder futureHolder = rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty); futureHolder.taskFuture().get(1, TimeUnit.SECONDS); assertTrue(futureHolder.taskFuture().isDone()); - assertTrue(futureHolder.taskFuture().get().isLeft()); - assertEquals(exception, futureHolder.taskFuture().get().left().get()); + assertTrue(futureHolder.taskFuture().get().hasException()); + assertEquals(exception, futureHolder.taskFuture().get().exception().get()); } } diff --git a/core/src/test/java/kafka/security/JaasModule.java b/core/src/test/java/kafka/security/JaasModule.java index 46527b186095b..b4901cc7933f5 100644 --- a/core/src/test/java/kafka/security/JaasModule.java +++ b/core/src/test/java/kafka/security/JaasModule.java @@ -23,15 +23,6 @@ import java.util.stream.Collectors; public class JaasModule { - public static JaasModule zkDigestModule(boolean debug, Map entries) { - String name = "org.apache.zookeeper.server.auth.DigestLoginModule"; - return new JaasModule( - name, - debug, - entries - ); - } - public static JaasModule krb5LoginModule(boolean useKeyTab, boolean storeKey, String keyTab, String principal, boolean debug, Optional serviceName, boolean isIbmSecurity) { String name = isIbmSecurity ? "com.ibm.security.auth.module.Krb5LoginModule" : "com.sun.security.auth.module.Krb5LoginModule"; diff --git a/core/src/test/java/kafka/security/JaasTestUtils.java b/core/src/test/java/kafka/security/JaasTestUtils.java index 73e8a7245bc3a..201a43313d6c5 100644 --- a/core/src/test/java/kafka/security/JaasTestUtils.java +++ b/core/src/test/java/kafka/security/JaasTestUtils.java @@ -31,7 +31,6 @@ import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -72,12 +71,6 @@ public String toString() { private static final boolean IS_IBM_SECURITY = Java.isIbmJdk() && !Java.isIbmJdkSemeru(); - private static final String ZK_SERVER_CONTEXT_NAME = "Server"; - private static final String ZK_CLIENT_CONTEXT_NAME = "Client"; - private static final String ZK_USER_SUPER_PASSWD = "adminpasswd"; - private static final String ZK_USER = "fpj"; - private static final String ZK_USER_PASSWORD = "fpjsecret"; - public static final String KAFKA_SERVER_CONTEXT_NAME = "KafkaServer"; public static final String KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME = "kafka"; private static final String KAFKA_SERVER_PRINCIPAL = KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME + "/localhost@EXAMPLE.COM"; @@ -172,20 +165,6 @@ public static String tokenClientLoginModule(String tokenId, String password) { return JaasModule.scramLoginModule(tokenId, password, false, tokenProps).toString(); } - public static List zkSections() { - Map zkServerEntries = new HashMap<>(); - zkServerEntries.put("user_super", ZK_USER_SUPER_PASSWD); - zkServerEntries.put("user_" + ZK_USER, ZK_USER_PASSWORD); - JaasSection zkServerSection = new JaasSection(ZK_SERVER_CONTEXT_NAME, Collections.singletonList(JaasModule.zkDigestModule(false, zkServerEntries))); - - Map zkClientEntries = new HashMap<>(); - zkClientEntries.put("username", ZK_USER); - zkClientEntries.put("password", ZK_USER_PASSWORD); - JaasSection 
zkClientSection = new JaasSection(ZK_CLIENT_CONTEXT_NAME, Collections.singletonList(JaasModule.zkDigestModule(false, zkClientEntries))); - - return Arrays.asList(zkServerSection, zkClientSection); - } - public static JaasSection kafkaServerSection(String contextName, List mechanisms, Optional keytabLocation) { List modules = new ArrayList<>(); for (String mechanism : mechanisms) { diff --git a/core/src/test/java/kafka/security/minikdc/MiniKdc.java b/core/src/test/java/kafka/security/minikdc/MiniKdc.java index 8c2e90f11a6ae..93cc1c3108251 100644 --- a/core/src/test/java/kafka/security/minikdc/MiniKdc.java +++ b/core/src/test/java/kafka/security/minikdc/MiniKdc.java @@ -443,7 +443,7 @@ private void writeKrb5Conf() throws IOException { reader.lines().forEach(line -> stringBuilder.append(line).append("{3}")); } String output = MessageFormat.format(stringBuilder.toString(), realm, host, String.valueOf(port), System.lineSeparator()); - Files.write(krb5conf.toPath(), output.getBytes(StandardCharsets.UTF_8)); + Files.writeString(krb5conf.toPath(), output); } private void refreshJvmKerberosConfig() throws ClassNotFoundException, NoSuchMethodException, InvocationTargetException, IllegalAccessException { diff --git a/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java b/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java index 1462e1c7f64a7..75cd070b93d30 100644 --- a/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java +++ b/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java @@ -311,16 +311,16 @@ private static List> translatePartitionInfoToNodeIdList(List partitionMetadataFile = Optional.ofNullable( - raftInstance.getUnderlying().brokers().get(0).logManager() + cluster.brokers().get(0).logManager() .getLog(new TopicPartition("foo", 0), false).get() .partitionMetadataFile().getOrElse(null)); assertTrue(partitionMetadataFile.isPresent()); - raftInstance.getUnderlying().brokers().get(0).shutdown(); - try (Admin admin = cluster.createAdminClient()) { + cluster.brokers().get(0).shutdown(); + try (Admin admin = cluster.admin()) { TestUtils.waitForCondition(() -> { List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) .topicNameValues().get("foo").get().partitions(); @@ -93,10 +89,10 @@ public void testRestartBrokerNoErrorIfMissingPartitionMetadata() throws IOExcept // delete partition.metadata file here to simulate the scenario that partition.metadata not flush to disk yet partitionMetadataFile.get().delete(); assertFalse(partitionMetadataFile.get().exists()); - raftInstance.getUnderlying().brokers().get(0).startup(); + cluster.brokers().get(0).startup(); // make sure there is no error during load logs - assertDoesNotThrow(() -> raftInstance.getUnderlying().fatalFaultHandler().maybeRethrowFirstException()); - try (Admin admin = cluster.createAdminClient()) { + assertTrue(cluster.firstFatalException().isEmpty()); + try (Admin admin = cluster.admin()) { TestUtils.waitForCondition(() -> { List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) .topicNameValues().get("foo").get().partitions(); diff --git a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java index 6d13001505837..539c11b7c7eb7 100644 --- a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java +++ b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java @@ -150,6 +150,13 @@ public void 
testRemoveAndAddSameController() throws Exception { }); Uuid dirId = cluster.nodes().controllerNodes().get(3000).metadataDirectoryId(); admin.removeRaftVoter(3000, dirId).all().get(); + TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { + Map voters = findVoterDirs(admin); + assertEquals(new HashSet<>(Arrays.asList(3001, 3002, 3003)), voters.keySet()); + for (int replicaId : new int[] {3001, 3002, 3003}) { + assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); + } + }); admin.addRaftVoter( 3000, dirId, diff --git a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java index 643d9d333f541..e48e830175bdd 100644 --- a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java +++ b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java @@ -59,6 +59,7 @@ import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataProvenance; import org.apache.kafka.metadata.LeaderRecoveryState; +import org.apache.kafka.network.SocketServerConfigs; import org.apache.kafka.network.metrics.RequestChannelMetrics; import org.apache.kafka.raft.QuorumConfig; import org.apache.kafka.server.authorizer.Action; @@ -534,7 +535,6 @@ KafkaConfig createKafkaDefaultConfig() { int brokerId = 1; Properties properties = TestUtils.createBrokerConfig( brokerId, - "", true, true, TestUtils.RandomPort(), @@ -559,7 +559,8 @@ KafkaConfig createKafkaDefaultConfig() { int voterId = brokerId + 1; properties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, voterId + "@localhost:9093"); properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL"); - TestUtils.setIbpAndMessageFormatVersions(properties, MetadataVersion.latestProduction()); + properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,SSL:SSL"); + TestUtils.setIbpVersion(properties, MetadataVersion.latestProduction()); return new KafkaConfig(properties); } } \ No newline at end of file diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index 0c7b488f18020..03ad745848aea 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -18,6 +18,7 @@ import kafka.cluster.Partition; import kafka.server.LogReadResult; +import kafka.server.QuotaFactory; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; @@ -26,18 +27,22 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.record.Records; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.server.purgatory.DelayedOperationKey; import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; +import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; +import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; import 
org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogOffsetSnapshot; @@ -50,10 +55,17 @@ import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import scala.Tuple2; +import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; @@ -65,6 +77,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -75,6 +88,7 @@ public class DelayedShareFetchTest { private static final int MAX_WAIT_MS = 5000; + private static final int BATCH_SIZE = 500; private static final int MAX_FETCH_RECORDS = 100; private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, @@ -113,13 +127,13 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .build()); @@ -127,6 +141,8 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit assertFalse(delayedShareFetch.tryComplete()); assertFalse(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -150,31 +166,34 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData( + ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 
MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(any(), anyInt(), any())).thenReturn( + when(sp0.acquire(any(), anyInt(), anyInt(), any())).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // We are testing the case when the share partition is getting fetched for the first time, so for the first time // the fetchOffsetMetadata will return empty. Post the readFromLog call, the fetchOffsetMetadata will be // populated for the share partition, which has 1 as the positional difference, so it doesn't satisfy the minBytes(2). - when(sp0.fetchOffsetMetadata()) + when(sp0.fetchOffsetMetadata(anyLong())) .thenReturn(Optional.empty()) .thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, 1); mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + BiConsumer exceptionHandler = mockExceptionHandler(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) + .withExceptionHandler(exceptionHandler) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); assertFalse(delayedShareFetch.isCompleted()); @@ -182,6 +201,9 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { assertFalse(delayedShareFetch.tryComplete()); assertFalse(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); } @Test @@ -205,27 +227,29 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData( + ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(any(), anyInt(), any())).thenReturn( + when(sp0.acquire(any(), anyInt(), anyInt(), any())).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // We are testing the case when the share partition has been fetched before, hence we are mocking positionDiff // functionality to give the file position difference as 1 byte, so it doesn't satisfy the minBytes(2). 
LogOffsetMetadata hwmOffsetMetadata = mock(LogOffsetMetadata.class); when(hwmOffsetMetadata.positionDiff(any())).thenReturn(1); - when(sp0.fetchOffsetMetadata()).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(mock(LogOffsetMetadata.class))); mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); + BiConsumer exceptionHandler = mockExceptionHandler(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) + .withExceptionHandler(exceptionHandler) .build()); assertFalse(delayedShareFetch.isCompleted()); @@ -233,6 +257,9 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { assertFalse(delayedShareFetch.tryComplete()); assertFalse(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); } @Test @@ -256,21 +283,22 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - when(sp0.fetchOffsetMetadata()).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); assertFalse(delayedShareFetch.isCompleted()); @@ -278,6 +306,8 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { assertTrue(delayedShareFetch.tryComplete()); assertTrue(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -301,25 +331,29 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), 
partitionMaxBytes, MAX_FETCH_RECORDS); + CompletableFuture> future = new CompletableFuture<>(); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); // Since no partition could be acquired, the future should be empty and replicaManager.readFromLog should not be called. - assertEquals(0, shareFetchData.future().join().size()); + assertEquals(0, future.join().size()); Mockito.verify(replicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); assertTrue(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -343,18 +377,19 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); @@ -365,8 +400,10 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { Mockito.verify(sp0, times(1)).nextFetchOffset(); Mockito.verify(sp1, times(0)).nextFetchOffset(); assertTrue(delayedShareFetch.isCompleted()); - assertTrue(shareFetchData.future().isDone()); + assertTrue(shareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -384,14 +421,14 @@ public void testToCompleteAnAlreadyCompletedFuture() { sharePartitions.put(tp0, sp0); CompletableFuture> future = new CompletableFuture<>(); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, 
Uuid.randomUuid().toString(), - future, partitionMaxBytes, MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) .build()); @@ -402,7 +439,9 @@ public void testToCompleteAnAlreadyCompletedFuture() { assertTrue(delayedShareFetch.isCompleted()); // Verifying that the first forceComplete calls acquirablePartitions method in DelayedShareFetch. Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); - assertEquals(0, shareFetchData.future().join().size()); + assertEquals(0, future.join().size()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); // Force completing the share fetch request for the second time should hit the future completion check and not // proceed ahead in the function. @@ -411,6 +450,8 @@ public void testToCompleteAnAlreadyCompletedFuture() { // Verifying that the second forceComplete does not call acquirablePartitions method in DelayedShareFetch. Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -438,8 +479,8 @@ public void testForceCompleteTriggersDelayedActionsQueue() { sharePartitions1.put(tp1, sp1); sharePartitions1.put(tp2, sp2); - ShareFetchData shareFetchData1 = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes1, MAX_FETCH_RECORDS); + ShareFetch shareFetch1 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes1, BATCH_SIZE, MAX_FETCH_RECORDS); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, replicaManager.localBrokerId(), @@ -450,7 +491,7 @@ public void testForceCompleteTriggersDelayedActionsQueue() { partitionMaxBytes1.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); DelayedShareFetch delayedShareFetch1 = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData1) + .withShareFetchData(shareFetch1) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions1) .build(); @@ -460,13 +501,15 @@ public void testForceCompleteTriggersDelayedActionsQueue() { delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch1, delayedShareFetchWatchKeys); assertEquals(2, delayedShareFetchPurgatory.watched()); - assertFalse(shareFetchData1.future().isDone()); + assertFalse(shareFetch1.isCompleted()); + assertTrue(delayedShareFetch1.lock().tryLock()); + delayedShareFetch1.lock().unlock(); Map partitionMaxBytes2 = new HashMap<>(); partitionMaxBytes2.put(tp1, PARTITION_MAX_BYTES); partitionMaxBytes2.put(tp2, PARTITION_MAX_BYTES); - ShareFetchData shareFetchData2 = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes2, MAX_FETCH_RECORDS); + ShareFetch shareFetch2 = new 
ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes2, BATCH_SIZE, MAX_FETCH_RECORDS); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -476,28 +519,31 @@ public void testForceCompleteTriggersDelayedActionsQueue() { sharePartitions2.put(tp2, sp2); DelayedShareFetch delayedShareFetch2 = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData2) + .withShareFetchData(shareFetch2) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions2) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); // sp1 can be acquired now when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); - when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // when forceComplete is called for delayedShareFetch2, since tp1 is common in between delayed share fetch // requests, it should add a "check and complete" action for request key tp1 on the purgatory. delayedShareFetch2.forceComplete(); assertTrue(delayedShareFetch2.isCompleted()); - assertTrue(shareFetchData2.future().isDone()); + assertTrue(shareFetch2.isCompleted()); Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); assertFalse(delayedShareFetch1.isCompleted()); Mockito.verify(replicaManager, times(1)).addToActionQueue(any()); Mockito.verify(replicaManager, times(0)).tryCompleteActions(); Mockito.verify(delayedShareFetch2, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch2.lock().tryLock()); + delayedShareFetch2.lock().unlock(); } @Test @@ -518,32 +564,38 @@ public void testCombineLogReadResponse() { sharePartitions.put(tp1, sp1); CompletableFuture> future = new CompletableFuture<>(); - ShareFetchData shareFetchData = new ShareFetchData( + ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, partitionMaxBytes, MAX_FETCH_RECORDS); + future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build(); - Map topicPartitionData = new HashMap<>(); - topicPartitionData.put(tp0, mock(FetchRequest.PartitionData.class)); - topicPartitionData.put(tp1, mock(FetchRequest.PartitionData.class)); + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); + topicPartitionData.put(tp0, 0L); + topicPartitionData.put(tp1, 0L); // Case 1 - logReadResponse contains tp0. 
- Map logReadResponse = Collections.singletonMap( - tp0, mock(LogReadResult.class)); + LinkedHashMap logReadResponse = new LinkedHashMap<>(); + LogReadResult logReadResult = mock(LogReadResult.class); + Records records = mock(Records.class); + when(records.sizeInBytes()).thenReturn(2); + FetchDataInfo fetchDataInfo = new FetchDataInfo(mock(LogOffsetMetadata.class), records); + when(logReadResult.info()).thenReturn(fetchDataInfo); + logReadResponse.put(tp0, logReadResult); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - Map combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); + LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); assertEquals(combinedLogReadResponse.get(tp0), logReadResponse.get(tp0)); // Case 2 - logReadResponse contains tp0 and tp1. - logReadResponse = new HashMap<>(); + logReadResponse = new LinkedHashMap<>(); logReadResponse.put(tp0, mock(LogReadResult.class)); logReadResponse.put(tp1, mock(LogReadResult.class)); combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); @@ -568,13 +620,13 @@ public void testExceptionInMinBytesCalculation() { LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); - ShareFetchData shareFetchData = new ShareFetchData( + ShareFetch shareFetch = new ShareFetch( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); - when(sp0.acquire(any(), anyInt(), any())).thenReturn( + when(sp0.acquire(any(), anyInt(), anyInt(), any())).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -583,20 +635,39 @@ public void testExceptionInMinBytesCalculation() { when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(partition); when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenThrow(new RuntimeException("Exception thrown")); + BiConsumer exceptionHandler = mockExceptionHandler(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) + .withExceptionHandler(exceptionHandler) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build()); + + // Try complete should return false as the share partition has errored out. + assertFalse(delayedShareFetch.tryComplete()); + // Fetch should remain pending and should be completed on request timeout. assertFalse(delayedShareFetch.isCompleted()); + // The request should be errored out as topic partition should get added as erroneous. 
+ assertTrue(shareFetch.errorInAllPartitions()); - // Since minBytes calculation throws an exception and returns true, tryComplete should return true. - assertTrue(delayedShareFetch.tryComplete()); + Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); + Mockito.verify(replicaManager, times(1)).readFromLog( + any(), any(), any(ReplicaQuota.class), anyBoolean()); + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + + // Force complete the request as it's still pending. Return false from the share partition lock acquire. + when(sp0.maybeAcquireFetchLock()).thenReturn(false); + assertTrue(delayedShareFetch.forceComplete()); assertTrue(delayedShareFetch.isCompleted()); - Mockito.verify(replicaManager, times(2)).readFromLog( + + // Read from log and release partition locks should not be called as the request is errored out. + Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - // releasePartitionLocks will be called twice, once from tryComplete and then from onComplete. - Mockito.verify(delayedShareFetch, times(2)).releasePartitionLocks(any()); + Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); } @Test @@ -615,11 +686,11 @@ public void testLocksReleasedForCompletedFetch() { doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), BATCH_SIZE, MAX_FETCH_RECORDS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions1) .withReplicaManager(replicaManager) .build(); @@ -629,6 +700,8 @@ public void testLocksReleasedForCompletedFetch() { assertFalse(spy.tryComplete()); Mockito.verify(sp0, times(1)).releaseFetchLock(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -643,16 +716,336 @@ public void testLocksReleasedAcquireException() { LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), MAX_FETCH_RECORDS); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), BATCH_SIZE, MAX_FETCH_RECORDS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .build(); assertFalse(delayedShareFetch.tryComplete()); Mockito.verify(sp0, times(1)).releaseFetchLock(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + } + + @Test + public void 
testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { + String groupId = "grp"; + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + SharePartition sp0 = mock(SharePartition.class); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp0.canAcquireRecords()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + CompletableFuture> future = new CompletableFuture<>(); + + ShareFetch shareFetch = new ShareFetch( + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), + future, partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); + + // partitionMaxBytesStrategy.maxBytes() function throws an exception + PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); + when(partitionMaxBytesStrategy.maxBytes(anyInt(), any(), anyInt())).thenThrow(new IllegalArgumentException("Exception thrown")); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withExceptionHandler(mockExceptionHandler()) + .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) + .build()); + + assertFalse(delayedShareFetch.isCompleted()); + assertTrue(delayedShareFetch.tryComplete()); + assertTrue(delayedShareFetch.isCompleted()); + // releasePartitionLocks is called twice - first time from tryComplete and second time from onComplete + Mockito.verify(delayedShareFetch, times(2)).releasePartitionLocks(any()); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); + + assertTrue(future.isDone()); + assertFalse(future.isCompletedExceptionally()); + Map partitionDataMap = future.join(); + assertEquals(1, partitionDataMap.size()); + assertTrue(partitionDataMap.containsKey(tp0)); + assertEquals("Exception thrown", partitionDataMap.get(tp0).errorMessage()); + } + + @Test + public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirable() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + String groupId = "grp"; + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); + TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 4)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + SharePartition sp3 = mock(SharePartition.class); + SharePartition sp4 = mock(SharePartition.class); + + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp3, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp4, PARTITION_MAX_BYTES); + + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + 
when(sp2.maybeAcquireFetchLock()).thenReturn(true); + when(sp3.maybeAcquireFetchLock()).thenReturn(true); + when(sp4.maybeAcquireFetchLock()).thenReturn(true); + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(true); + when(sp3.canAcquireRecords()).thenReturn(true); + when(sp4.canAcquireRecords()).thenReturn(true); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + sharePartitions.put(tp3, sp3); + sharePartitions.put(tp4, sp4); + + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); + + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp2.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp3.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp4.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + + // All 5 partitions are acquirable. 
+ doAnswer(invocation -> buildLogReadResult(sharePartitions.keySet())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp3.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp4.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp2, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp3, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp4, 1); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .build()); + + assertTrue(delayedShareFetch.tryComplete()); + assertTrue(delayedShareFetch.isCompleted()); + + // Since all partitions are acquirable, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 5) + int expectedPartitionMaxBytes = 1024 * 1024 / 5; + LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); + sharePartitions.keySet().forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, + new FetchRequest.PartitionData( + topicIdPartition.topicId(), + 0, + 0, + expectedPartitionMaxBytes, + Optional.empty() + ))); + + Mockito.verify(replicaManager, times(1)).readFromLog( + shareFetch.fetchParams(), + CollectionConverters.asScala( + sharePartitions.keySet().stream().map(topicIdPartition -> + new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) + ), + QuotaFactory.UNBOUNDED_QUOTA, + true); + } + + @Test + public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirable() { + ReplicaManager replicaManager = mock(ReplicaManager.class); + String groupId = "grp"; + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); + TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 4)); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + SharePartition sp3 = mock(SharePartition.class); + SharePartition sp4 = mock(SharePartition.class); + + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp3, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp4, PARTITION_MAX_BYTES); + + 
when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + when(sp2.maybeAcquireFetchLock()).thenReturn(false); + when(sp3.maybeAcquireFetchLock()).thenReturn(true); + when(sp4.maybeAcquireFetchLock()).thenReturn(false); + when(sp0.canAcquireRecords()).thenReturn(true); + when(sp1.canAcquireRecords()).thenReturn(true); + when(sp2.canAcquireRecords()).thenReturn(false); + when(sp3.canAcquireRecords()).thenReturn(false); + when(sp4.canAcquireRecords()).thenReturn(false); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + sharePartitions.put(tp3, sp3); + sharePartitions.put(tp4, sp4); + + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); + + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + + // Only 2 out of 5 partitions are acquirable. + Set acquirableTopicPartitions = new LinkedHashSet<>(); + acquirableTopicPartitions.add(tp0); + acquirableTopicPartitions.add(tp1); + doAnswer(invocation -> buildLogReadResult(acquirableTopicPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); + + DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withSharePartitions(sharePartitions) + .withReplicaManager(replicaManager) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .build()); + + assertTrue(delayedShareFetch.tryComplete()); + assertTrue(delayedShareFetch.isCompleted()); + + // Since only 2 partitions are acquirable, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 
2) + int expectedPartitionMaxBytes = 1024 * 1024 / 2; + LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); + acquirableTopicPartitions.forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, + new FetchRequest.PartitionData( + topicIdPartition.topicId(), + 0, + 0, + expectedPartitionMaxBytes, + Optional.empty() + ))); + + Mockito.verify(replicaManager, times(1)).readFromLog( + shareFetch.fetchParams(), + CollectionConverters.asScala( + acquirableTopicPartitions.stream().map(topicIdPartition -> + new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) + ), + QuotaFactory.UNBOUNDED_QUOTA, + true); + } + + @Test + public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { + String groupId = "grp"; + ReplicaManager replicaManager = mock(ReplicaManager.class); + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + sharePartitions.put(tp1, sp1); + sharePartitions.put(tp2, sp2); + + ShareFetch shareFetch = new ShareFetch( + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, MAX_FETCH_RECORDS); + + DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() + .withShareFetchData(shareFetch) + .withReplicaManager(replicaManager) + .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) + .build(); + + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); + topicPartitionData.put(tp0, 0L); + topicPartitionData.put(tp1, 0L); + topicPartitionData.put(tp2, 0L); + + // Existing fetched data already contains tp0. + LinkedHashMap logReadResponse = new LinkedHashMap<>(); + LogReadResult logReadResult = mock(LogReadResult.class); + Records records = mock(Records.class); + when(records.sizeInBytes()).thenReturn(2); + FetchDataInfo fetchDataInfo = new FetchDataInfo(mock(LogOffsetMetadata.class), records); + when(logReadResult.info()).thenReturn(fetchDataInfo); + logReadResponse.put(tp0, logReadResult); + + Set fetchableTopicPartitions = new LinkedHashSet<>(); + fetchableTopicPartitions.add(tp1); + fetchableTopicPartitions.add(tp2); + // We will be doing replica manager fetch only for tp1 and tp2. 
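To make the comment above concrete: the doAnswer stub on the next line only feeds readFromLog results for tp1 and tp2, yet combineLogReadResponse keeps tp0's already-fetched data and, per the expectation further down, divides the uniform budget by all three entries in topicPartitionData (1024 * 1024 / 3). A rough sketch of that merge, with assumed generic names rather than the real method signature:

    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.Set;
    import java.util.function.Function;

    final class CombineResponseSketch {
        // Sketch only: keep what was already read and fetch just the missing partitions.
        static <K, V> LinkedHashMap<K, V> combine(Set<K> allPartitions,
                                                  LinkedHashMap<K, V> existingResponse,
                                                  Function<Set<K>, LinkedHashMap<K, V>> fetchMissing) {
            Set<K> missing = new LinkedHashSet<>(allPartitions);
            missing.removeAll(existingResponse.keySet());
            LinkedHashMap<K, V> combined = new LinkedHashMap<>(existingResponse);
            combined.putAll(fetchMissing.apply(missing));
            return combined;
        }
    }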
+ doAnswer(invocation -> buildLogReadResult(fetchableTopicPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); + + assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); + // Since only 2 partitions are fetchable but the third one has already been fetched, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 3) + int expectedPartitionMaxBytes = 1024 * 1024 / 3; + LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); + fetchableTopicPartitions.forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, + new FetchRequest.PartitionData( + topicIdPartition.topicId(), + 0, + 0, + expectedPartitionMaxBytes, + Optional.empty() + ))); + + Mockito.verify(replicaManager, times(1)).readFromLog( + shareFetch.fetchParams(), + CollectionConverters.asScala( + fetchableTopicPartitions.stream().map(topicIdPartition -> + new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) + ), + QuotaFactory.UNBOUNDED_QUOTA, + true); } static void mockTopicIdPartitionToReturnDataEqualToMinBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, int minBytes) { @@ -674,14 +1067,20 @@ private void mockTopicIdPartitionFetchBytes(ReplicaManager replicaManager, Topic when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition())).thenReturn(partition); } + @SuppressWarnings("unchecked") + private static BiConsumer mockExceptionHandler() { + return mock(BiConsumer.class); + } + static class DelayedShareFetchBuilder { - ShareFetchData shareFetchData = mock(ShareFetchData.class); + ShareFetch shareFetch = mock(ShareFetch.class); private ReplicaManager replicaManager = mock(ReplicaManager.class); - private final SharePartitionManager sharePartitionManager = mock(SharePartitionManager.class); + private BiConsumer exceptionHandler = mockExceptionHandler(); private LinkedHashMap sharePartitions = mock(LinkedHashMap.class); + private PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); - DelayedShareFetchBuilder withShareFetchData(ShareFetchData shareFetchData) { - this.shareFetchData = shareFetchData; + DelayedShareFetchBuilder withShareFetchData(ShareFetch shareFetch) { + this.shareFetch = shareFetch; return this; } @@ -690,21 +1089,32 @@ DelayedShareFetchBuilder withReplicaManager(ReplicaManager replicaManager) { return this; } + DelayedShareFetchBuilder withExceptionHandler(BiConsumer exceptionHandler) { + this.exceptionHandler = exceptionHandler; + return this; + } + DelayedShareFetchBuilder withSharePartitions(LinkedHashMap sharePartitions) { this.sharePartitions = sharePartitions; return this; } + DelayedShareFetchBuilder withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy partitionMaxBytesStrategy) { + this.partitionMaxBytesStrategy = partitionMaxBytesStrategy; + return this; + } + public static DelayedShareFetchBuilder builder() { return new DelayedShareFetchBuilder(); } public DelayedShareFetch build() { return new DelayedShareFetch( - shareFetchData, + shareFetch, replicaManager, - sharePartitionManager, - sharePartitions); + exceptionHandler, + sharePartitions, + partitionMaxBytesStrategy); } } } diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index 
6ff0f90bc49d9..fef78916e3518 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -16,13 +16,13 @@ */ package kafka.server.share; -import kafka.log.OffsetResultHolder; import kafka.server.ReplicaManager; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; @@ -30,11 +30,13 @@ import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.FetchRequest; +import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.server.storage.log.FetchPartitionData; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -47,8 +49,7 @@ import java.util.OptionalInt; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; - -import scala.Option; +import java.util.function.BiConsumer; import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -59,8 +60,8 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @@ -70,6 +71,10 @@ public class ShareFetchUtilsTest { private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); + private static final int BATCH_SIZE = 500; + private static final BiConsumer EXCEPTION_HANDLER = (key, exception) -> { + // No-op + }; @Test public void testProcessFetchResponse() { @@ -87,22 +92,19 @@ public void testProcessFetchResponse() { when(sp0.nextFetchOffset()).thenReturn((long) 3); when(sp1.nextFetchOffset()).thenReturn((long) 3); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1))); - doNothing().when(sp1).updateCacheAndOffsets(any(Long.class)); - 
doNothing().when(sp0).updateCacheAndOffsets(any(Long.class)); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), partitionMaxBytes, 100); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -124,7 +126,7 @@ public void testProcessFetchResponse() { records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); Map resultData = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData, sharePartitions, mock(ReplicaManager.class)); + ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, mock(ReplicaManager.class), EXCEPTION_HANDLER); assertEquals(2, resultData.size()); assertTrue(resultData.containsKey(tp0)); @@ -157,18 +159,15 @@ public void testProcessFetchResponseWithEmptyRecords() { when(sp0.nextFetchOffset()).thenReturn((long) 3); when(sp1.nextFetchOffset()).thenReturn((long) 3); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); - when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); - - doNothing().when(sp1).updateCacheAndOffsets(any(Long.class)); - doNothing().when(sp0).updateCacheAndOffsets(any(Long.class)); + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), partitionMaxBytes, 100); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100); Map responseData = new HashMap<>(); responseData.put(tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, @@ -178,7 +177,7 @@ public void testProcessFetchResponseWithEmptyRecords() { MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); Map resultData = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData, sharePartitions, mock(ReplicaManager.class)); + ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, mock(ReplicaManager.class), EXCEPTION_HANDLER); assertEquals(2, resultData.size()); assertTrue(resultData.containsKey(tp0)); @@ -209,30 +208,27 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, 100); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100); ReplicaManager replicaManager = mock(ReplicaManager.class); // Mock the replicaManager.fetchOffsetForTimestamp method to return a timestamp and offset for the topic partition. 
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(100L, 1L, Optional.empty()); - doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); + doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); when(sp0.nextFetchOffset()).thenReturn((long) 0, (long) 5); when(sp1.nextFetchOffset()).thenReturn((long) 4, (long) 4); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.empty(), ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + when(sp1.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1)), ShareAcquiredRecords.empty()); - doNothing().when(sp1).updateCacheAndOffsets(any(Long.class)); - doNothing().when(sp0).updateCacheAndOffsets(any(Long.class)); - MemoryRecords records1 = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), @@ -247,7 +243,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); Map resultData1 = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData1, sharePartitions, replicaManager); + ShareFetchUtils.processFetchResponse(shareFetch, responseData1, sharePartitions, replicaManager, EXCEPTION_HANDLER); assertEquals(2, resultData1.size()); assertTrue(resultData1.containsKey(tp0)); @@ -276,7 +272,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); Map resultData2 = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData2, sharePartitions, replicaManager); + ShareFetchUtils.processFetchResponse(shareFetch, responseData2, sharePartitions, replicaManager, EXCEPTION_HANDLER); assertEquals(2, resultData2.size()); assertTrue(resultData2.containsKey(tp0)); @@ -303,16 +299,15 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); - ShareFetchData shareFetchData = new ShareFetchData(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), partitionMaxBytes, 100); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 100); ReplicaManager replicaManager = mock(ReplicaManager.class); // Mock the replicaManager.fetchOffsetForTimestamp method to return a timestamp and offset for the topic partition. 
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(100L, 1L, Optional.empty()); - doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); - when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); - doNothing().when(sp0).updateCacheAndOffsets(any(Long.class)); + doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -327,7 +322,7 @@ tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, OptionalInt.empty(), false)); Map resultData = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData, sharePartitions, replicaManager); + ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, replicaManager, EXCEPTION_HANDLER); assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); @@ -342,7 +337,7 @@ tp0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - resultData = ShareFetchUtils.processFetchResponse(shareFetchData, responseData, sharePartitions, replicaManager); + resultData = ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, replicaManager, EXCEPTION_HANDLER); assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); @@ -376,15 +371,8 @@ public void testProcessFetchResponseWithMaxFetchRecords() { Uuid memberId = Uuid.randomUuid(); // Set max fetch records to 10 - ShareFetchData shareFetchData = new ShareFetchData( - new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0, - 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), - groupId, memberId.toString(), new CompletableFuture<>(), partitionMaxBytes, 10); - - ReplicaManager replicaManager = mock(ReplicaManager.class); - // Mock the replicaManager.fetchOffsetForTimestamp method to return a timestamp and offset for the topic partition. 
- FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(100L, 1L, Optional.empty()); - doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); + ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId.toString(), + new CompletableFuture<>(), partitionMaxBytes, BATCH_SIZE, 10); MemoryRecords records1 = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -399,10 +387,10 @@ public void testProcessFetchResponseWithMaxFetchRecords() { records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false); - when(sp0.acquire(memberId.toString(), 10, fetchPartitionData1)).thenReturn( + when(sp0.acquire(memberId.toString(), BATCH_SIZE, 10, fetchPartitionData1)).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(1).setDeliveryCount((short) 1))); - when(sp1.acquire(memberId.toString(), 8, fetchPartitionData2)).thenReturn( + when(sp1.acquire(memberId.toString(), BATCH_SIZE, 8, fetchPartitionData2)).thenReturn( ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1))); @@ -413,7 +401,8 @@ public void testProcessFetchResponseWithMaxFetchRecords() { responseData1.put(tp1, fetchPartitionData2); Map resultData1 = - ShareFetchUtils.processFetchResponse(shareFetchData, responseData1, sharePartitions, replicaManager); + ShareFetchUtils.processFetchResponse(shareFetch, responseData1, sharePartitions, + mock(ReplicaManager.class), EXCEPTION_HANDLER); assertEquals(2, resultData1.size()); assertTrue(resultData1.containsKey(tp0)); @@ -429,4 +418,40 @@ public void testProcessFetchResponseWithMaxFetchRecords() { assertEquals(100, resultData1.get(tp1).acquiredRecords().get(0).firstOffset()); assertEquals(103, resultData1.get(tp1).acquiredRecords().get(0).lastOffset()); } + + @Test + @SuppressWarnings("unchecked") + public void testProcessFetchResponseWithOffsetFetchException() { + SharePartition sp0 = Mockito.mock(SharePartition.class); + when(sp0.leaderEpoch()).thenReturn(1); + + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); + sharePartitions.put(tp0, sp0); + + ShareFetch shareFetch = mock(ShareFetch.class); + when(shareFetch.groupId()).thenReturn("grp"); + ReplicaManager replicaManager = mock(ReplicaManager.class); + + // Mock the replicaManager.fetchOffsetForTimestamp method to throw exception. + Throwable exception = new FencedLeaderEpochException("Fenced exception"); + doThrow(exception).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); + when(sp0.acquire(anyString(), anyInt(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); + + // When no records are acquired from share partition. 
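A note on the handler plumbing this test exercises (the Collections.singletonMap built just below supplies the empty-records response for tp0): Mockito cannot create a parameterized BiConsumer mock without an unchecked conversion, which is why the test method carries @SuppressWarnings("unchecked") and why DelayedShareFetchTest wraps the same pattern in a small mockExceptionHandler() helper. A sketch of that helper, assuming the SharePartitionKey/Throwable handler type used throughout these tests:

    // Sketch of the mocked exception handler used by these tests; the raw
    // mock(BiConsumer.class) call is what the @SuppressWarnings("unchecked") covers.
    @SuppressWarnings("unchecked")
    static BiConsumer<SharePartitionKey, Throwable> mockExceptionHandler() {
        return (BiConsumer<SharePartitionKey, Throwable>) mock(BiConsumer.class);
    }

The verifications at the end of the test then assert that this handler is invoked exactly once with new SharePartitionKey("grp", tp0) and the thrown FencedLeaderEpochException, and that shareFetch.addErroneous(tp0, exception) is recorded instead of any acquired records.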
+ Map responseData = Collections.singletonMap( + tp0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, + MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + BiConsumer exceptionHandler = mock(BiConsumer.class); + Map resultData = + ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, + replicaManager, exceptionHandler); + + assertTrue(resultData.isEmpty()); + Mockito.verify(shareFetch, times(1)).addErroneous(tp0, exception); + Mockito.verify(exceptionHandler, times(1)).accept(new SharePartitionKey("grp", tp0), exception); + Mockito.verify(sp0, times(0)).updateCacheAndOffsets(any(Long.class)); + } } diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index 46abf04b0a643..afbd9bc9658dd 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -17,10 +17,10 @@ package kafka.server.share; import kafka.cluster.Partition; -import kafka.log.OffsetResultHolder; import kafka.server.LogReadResult; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; +import kafka.server.share.SharePartitionManager.SharePartitionListener; import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.common.MetricName; @@ -64,8 +64,9 @@ import org.apache.kafka.server.share.context.ShareSessionContext; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; +import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.fetch.ShareFetchData; +import org.apache.kafka.server.share.fetch.ShareFetch; import org.apache.kafka.server.share.persister.NoOpShareStatePersister; import org.apache.kafka.server.share.persister.Persister; import org.apache.kafka.server.share.session.ShareSession; @@ -80,6 +81,7 @@ import org.apache.kafka.server.util.timer.Timer; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -123,6 +125,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.atMost; @@ -131,6 +134,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @Timeout(120) @@ -143,6 +147,7 @@ public class SharePartitionManagerTest { private static final short MAX_FETCH_RECORDS = 500; private static final int DELAYED_SHARE_FETCH_MAX_WAIT_MS = 2000; private static final int DELAYED_SHARE_FETCH_TIMEOUT_MS = 3000; + private static final int BATCH_SIZE = 500; private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, DELAYED_SHARE_FETCH_MAX_WAIT_MS, 1, 1024 * 1024, 
FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); @@ -223,7 +228,7 @@ public void testNewContextReturnsFinalContextWithRequestData() { ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // shareFetchData is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. + // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. // New context should be created successfully Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), new ShareFetchRequest.SharePartitionData(tpId1, 0)); @@ -257,7 +262,7 @@ public void testNewContextReturnsFinalContextError() { ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // shareFetchData is not empty and the maxBytes of topic partition is not 0, which means this is trying to fetch on a Final request. + // shareFetch is not empty and the maxBytes of topic partition is not 0, which means this is trying to fetch on a Final request. // New context should throw an error Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), new ShareFetchRequest.SharePartitionData(tpId1, PARTITION_MAX_BYTES)); @@ -1068,15 +1073,18 @@ public void testMultipleSequentialShareFetches() { doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_SIZE, + partitionMaxBytes); Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_SIZE, + partitionMaxBytes); Mockito.verify(mockReplicaManager, times(2)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, BATCH_SIZE, + partitionMaxBytes); Mockito.verify(mockReplicaManager, times(3)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -1180,7 +1188,8 @@ public void testMultipleConcurrentShareFetches() throws InterruptedException { try { for (int i = 0; i != threadCount; ++i) { executorService.submit(() -> { - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); }); // We are blocking the main thread at an interval of 10 threads so that the currently running executorService threads can complete. 
if (i % 10 == 0) @@ -1225,7 +1234,8 @@ public void testReplicaManagerFetchShouldNotProceed() { .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Map result = future.join(); @@ -1255,7 +1265,8 @@ public void testReplicaManagerFetchShouldProceed() { doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, BATCH_SIZE, + partitionMaxBytes); // Since the nextFetchOffset does not point to endOffset + 1, i.e. some of the records in the cachedState are AVAILABLE, // even though the maxInFlightMessages limit is exceeded, replicaManager.readFromLog should be called Mockito.verify(mockReplicaManager, times(1)).readFromLog( @@ -1325,9 +1336,11 @@ public void testReleaseSessionSuccess() { assertEquals(Errors.NONE.code(), result.get(tp1).errorCode()); assertEquals(2, result.get(tp2).partitionIndex()); assertEquals(Errors.INVALID_RECORD_STATE.code(), result.get(tp2).errorCode()); + assertEquals("Unable to release acquired records for the batch", result.get(tp2).errorMessage()); // tp3 was not a part of partitionCacheMap. assertEquals(4, result.get(tp3).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp3).errorCode()); + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp3).errorMessage()); } @Test @@ -1582,6 +1595,7 @@ public void testAcknowledgeIncorrectGroupId() { assertTrue(result.containsKey(tp)); assertEquals(0, result.get(tp).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode()); + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage()); } @Test @@ -1612,6 +1626,7 @@ public void testAcknowledgeIncorrectMemberId() { assertTrue(result.containsKey(tp)); assertEquals(0, result.get(tp).partitionIndex()); assertEquals(Errors.INVALID_REQUEST.code(), result.get(tp).errorCode()); + assertEquals("Member is not the owner of batch record", result.get(tp).errorMessage()); } @Test @@ -1634,6 +1649,7 @@ public void testAcknowledgeEmptyPartitionCacheMap() { assertTrue(result.containsKey(tp)); assertEquals(3, result.get(tp).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode()); + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage()); } @Test @@ -1665,19 +1681,20 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - ShareFetchData shareFetchData = new ShareFetchData( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - partitionMaxBytes, - 100); + ShareFetch shareFetch = new ShareFetch( + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + BATCH_SIZE, + 100); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", 
mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - when(sp1.fetchOffsetMetadata()).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 2); // Initially you cannot acquire records for both sp1 and sp2. @@ -1700,9 +1717,10 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { sharePartitions.put(tp2, sp2); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build(); delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys); @@ -1727,6 +1745,8 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { Mockito.verify(sp1, times(1)).nextFetchOffset(); Mockito.verify(sp2, times(0)).nextFetchOffset(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -1765,13 +1785,14 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); - ShareFetchData shareFetchData = new ShareFetchData( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - partitionMaxBytes, - 100); + ShareFetch shareFetch = new ShareFetch( + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + BATCH_SIZE, + 100); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), @@ -1801,7 +1822,7 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { sharePartitions.put(tp3, sp3); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) .build(); @@ -1825,6 +1846,8 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { Mockito.verify(sp1, times(0)).nextFetchOffset(); Mockito.verify(sp2, times(0)).nextFetchOffset(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -1861,19 +1884,20 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - ShareFetchData shareFetchData = new ShareFetchData( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - partitionMaxBytes, - 100); + ShareFetch shareFetch = new ShareFetch( + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + BATCH_SIZE, + 100); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); 
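Several tests in this file, including the two acknowledge tests above, now finish by checking that the DelayedShareFetch released its internal lock once the purgatory entry completed: if tryLock() succeeds, nothing is still holding the lock, and the test releases it again immediately. A minimal illustration of that idiom, assuming lock() exposes a java.util.concurrent.locks.Lock as the calls here suggest:

    import java.util.concurrent.locks.Lock;

    final class LockReleaseCheckSketch {
        // Sketch only: returns true when the lock is currently free; the probe releases it right away.
        static boolean lockIsFree(Lock lock) {
            if (lock.tryLock()) {
                lock.unlock();
                return true;
            }
            return false;
        }
    }

The release-session test whose setup continues below ends with the same pair of assertions.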
mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - when(sp1.fetchOffsetMetadata()).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); // Initially you cannot acquire records for both sp1 and sp2. @@ -1897,9 +1921,10 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { sharePartitions.put(tp2, sp2); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) + .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build(); delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys); @@ -1923,6 +1948,8 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { Mockito.verify(sp1, times(1)).nextFetchOffset(); Mockito.verify(sp2, times(0)).nextFetchOffset(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -1965,13 +1992,14 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); - ShareFetchData shareFetchData = new ShareFetchData( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - partitionMaxBytes, - 100); + ShareFetch shareFetch = new ShareFetch( + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + BATCH_SIZE, + 100); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), @@ -2002,7 +2030,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { sharePartitions.put(tp3, sp3); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetchData) + .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) .build(); @@ -2025,6 +2053,8 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { Mockito.verify(sp1, times(0)).nextFetchOffset(); Mockito.verify(sp2, times(0)).nextFetchOffset(); + assertTrue(delayedShareFetch.lock().tryLock()); + delayedShareFetch.lock().unlock(); } @Test @@ -2053,7 +2083,8 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); // Verify that the fetch request is completed. TestUtils.waitForCondition( future::isDone, @@ -2063,10 +2094,77 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti // Verify that replica manager fetch is not called. Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); + assertFalse(pendingInitializationFuture.isDone()); // Complete the pending initialization future. 
pendingInitializationFuture.complete(null); } + @Test + public void testDelayedInitializationShouldCompleteFetchRequest() throws Exception { + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); + Uuid fooId = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); + + SharePartition sp0 = mock(SharePartition.class); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + + // Keep the 2 initialization futures pending and 1 completed with leader not available exception. + CompletableFuture pendingInitializationFuture1 = new CompletableFuture<>(); + CompletableFuture pendingInitializationFuture2 = new CompletableFuture<>(); + when(sp0.maybeInitialize()). + thenReturn(pendingInitializationFuture1) + .thenReturn(pendingInitializationFuture2) + .thenReturn(CompletableFuture.failedFuture(new LeaderNotAvailableException("Leader not available"))); + + DelayedOperationPurgatory shareFetchPurgatorySpy = spy(new DelayedOperationPurgatory<>( + "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true)); + mockReplicaManagerDelayedShareFetch(mockReplicaManager, shareFetchPurgatorySpy); + + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).withReplicaManager(mockReplicaManager).withTimer(mockTimer) + .build(); + + // Send 3 requests for share fetch for same share partition. + CompletableFuture> future1 = + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); + + CompletableFuture> future2 = + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); + + CompletableFuture> future3 = + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); + + Mockito.verify(sp0, times(3)).maybeInitialize(); + Mockito.verify(mockReplicaManager, times(3)).addDelayedShareFetchRequest(any(), any()); + Mockito.verify(shareFetchPurgatorySpy, times(3)).tryCompleteElseWatch(any(), any()); + Mockito.verify(shareFetchPurgatorySpy, times(0)).checkAndComplete(any()); + + // All 3 requests should be pending. + assertFalse(future1.isDone()); + assertFalse(future2.isDone()); + assertFalse(future3.isDone()); + + // Complete one pending initialization future. + pendingInitializationFuture1.complete(null); + Mockito.verify(mockReplicaManager, times(1)).completeDelayedShareFetchRequest(any()); + Mockito.verify(shareFetchPurgatorySpy, times(1)).checkAndComplete(any()); + + pendingInitializationFuture2.complete(null); + Mockito.verify(mockReplicaManager, times(2)).completeDelayedShareFetchRequest(any()); + Mockito.verify(shareFetchPurgatorySpy, times(2)).checkAndComplete(any()); + + // Verify that replica manager fetch is not called. + Mockito.verify(mockReplicaManager, times(0)).readFromLog( + any(), any(), any(ReplicaQuota.class), anyBoolean()); + } + @Test public void testSharePartitionInitializationExceptions() throws Exception { String groupId = "grp"; @@ -2091,7 +2189,8 @@ public void testSharePartitionInitializationExceptions() throws Exception { // Return LeaderNotAvailableException to simulate initialization failure. 
when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new LeaderNotAvailableException("Leader not available"))); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2100,77 +2199,90 @@ public void testSharePartitionInitializationExceptions() throws Exception { // between SharePartitionManager and SharePartition to retry the request as SharePartition is not yet ready. assertFalse(future.isCompletedExceptionally()); assertTrue(future.join().isEmpty()); + Mockito.verify(sp0, times(0)).markFenced(); // Verify that the share partition is still in the cache on LeaderNotAvailableException. assertEquals(1, partitionCacheMap.size()); // Return IllegalStateException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new IllegalStateException("Illegal state"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Illegal state"); + Mockito.verify(sp0, times(1)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return CoordinatorNotAvailableException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new CoordinatorNotAvailableException("Coordinator not available"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.COORDINATOR_NOT_AVAILABLE, "Coordinator not available"); + Mockito.verify(sp0, times(2)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return InvalidRequestException to simulate initialization failure. 
when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new InvalidRequestException("Invalid request"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.INVALID_REQUEST, "Invalid request"); + Mockito.verify(sp0, times(3)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return FencedStateEpochException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new FencedStateEpochException("Fenced state epoch"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced state epoch"); + Mockito.verify(sp0, times(4)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return NotLeaderOrFollowerException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new NotLeaderOrFollowerException("Not leader or follower"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Not leader or follower"); + Mockito.verify(sp0, times(5)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return RuntimeException to simulate initialization failure. 
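(The RuntimeException stub immediately below is the last of these simulated failures.) Taken together, the new markFenced verifications pin down the expected handling of a failed maybeInitialize(): non-retriable failures fence the share partition and evict it from the cache, while the retriable LeaderNotAvailableException at the start of the test leaves it cached and unfenced. A hedged sketch of that flow, with assumed local names and error unwrapping omitted, based only on what these assertions check:

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import org.apache.kafka.common.errors.LeaderNotAvailableException;

    final class InitializationFailureSketch {
        // Sketch only: mirrors the behaviour the markFenced/cache assertions in this test expect.
        static void handleInitResult(CompletableFuture<Void> init,
                                     SharePartition sharePartition,
                                     SharePartitionKey key,
                                     Map<SharePartitionKey, SharePartition> cache) {
            init.whenComplete((ignored, throwable) -> {
                if (throwable == null || throwable instanceof LeaderNotAvailableException) {
                    return; // success, or retriable: keep the cached SharePartition and do not fence it
                }
                sharePartition.markFenced(); // Mockito.verify(sp0).markFenced() counts these calls
                cache.remove(key);           // the test asserts partitionCacheMap ends up empty
                // the share fetch future is then completed with the error mapped from `throwable`
            });
        }
    }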
when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new RuntimeException("Runtime exception"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Runtime exception"); + Mockito.verify(sp0, times(6)).markFenced(); assertTrue(partitionCacheMap.isEmpty()); } @@ -2192,7 +2304,8 @@ public void testShareFetchProcessingExceptions() throws Exception { .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2225,7 +2338,8 @@ public void testSharePartitionInitializationFailure() throws Exception { // Validate when exception is thrown. CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2234,7 +2348,8 @@ public void testSharePartitionInitializationFailure() throws Exception { assertTrue(partitionCacheMap.isEmpty()); // Validate when partition is not leader. - future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2247,18 +2362,25 @@ public void testSharePartitionInitializationFailure() throws Exception { public void testSharePartitionPartialInitializationFailure() throws Exception { String groupId = "grp"; Uuid memberId1 = Uuid.randomUuid(); + // For tp0, share partition instantiation will fail. TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + // For tp1, share fetch should succeed. TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1)); - Map partitionMaxBytes = Map.of(tp0, PARTITION_MAX_BYTES, tp1, PARTITION_MAX_BYTES); - - // Mark partition1 as not the leader. - Partition partition1 = mock(Partition.class); - when(partition1.isLeader()).thenReturn(false); - + // For tp2, share partition initialization will fail. + TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2)); + Map partitionMaxBytes = Map.of( + tp0, PARTITION_MAX_BYTES, + tp1, PARTITION_MAX_BYTES, + tp2, PARTITION_MAX_BYTES); + + // Mark partition0 as not the leader. + Partition partition0 = mock(Partition.class); + when(partition0.isLeader()).thenReturn(false); ReplicaManager replicaManager = mock(ReplicaManager.class); when(replicaManager.getPartitionOrException(any())) - .thenReturn(partition1); + .thenReturn(partition0); + // Mock share partition for tp1, so it can succeed. 
SharePartition sp1 = mock(SharePartition.class); Map partitionCacheMap = new HashMap<>(); partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); @@ -2266,13 +2388,18 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.acquire(anyString(), anyInt(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); + when(sp1.acquire(anyString(), anyInt(), anyInt(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); + + // Fail initialization for tp2. + SharePartition sp2 = mock(SharePartition.class); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + when(sp2.maybeInitialize()).thenReturn(CompletableFuture.failedFuture(new FencedStateEpochException("Fenced state epoch"))); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, replicaManager.localBrokerId(), DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(replicaManager, delayedShareFetchPurgatory); - when(sp1.fetchOffsetMetadata()).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); + when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); @@ -2284,16 +2411,22 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { // Validate when exception is thrown. CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); assertTrue(future.isDone()); assertFalse(future.isCompletedExceptionally()); Map partitionDataMap = future.get(); - // For now only 1 successful partition is included, this will be fixed in subsequents PRs. - assertEquals(1, partitionDataMap.size()); + assertEquals(3, partitionDataMap.size()); + assertTrue(partitionDataMap.containsKey(tp0)); + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code(), partitionDataMap.get(tp0).errorCode()); assertTrue(partitionDataMap.containsKey(tp1)); assertEquals(Errors.NONE.code(), partitionDataMap.get(tp1).errorCode()); + assertTrue(partitionDataMap.containsKey(tp2)); + assertEquals(Errors.FENCED_STATE_EPOCH.code(), partitionDataMap.get(tp2).errorCode()); + assertEquals("Fenced state epoch", partitionDataMap.get(tp2).errorMessage()); + Mockito.verify(replicaManager, times(0)).completeDelayedShareFetchRequest(any()); Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); } @@ -2326,7 +2459,8 @@ public void testReplicaManagerFetchException() { .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Exception"); // Verify that the share partition is still in the cache on exception. 
assertEquals(1, partitionCacheMap.size()); @@ -2334,7 +2468,8 @@ public void testReplicaManagerFetchException() { // Throw NotLeaderOrFollowerException from replica manager fetch which should evict instance from the cache. doThrow(new NotLeaderOrFollowerException("Leader exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Leader exception"); assertTrue(partitionCacheMap.isEmpty()); } @@ -2380,7 +2515,8 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced exception"); // Verify that tp1 is still in the cache on exception. assertEquals(1, partitionCacheMap.size()); @@ -2394,11 +2530,105 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache. doThrow(new FencedStateEpochException("Fenced exception again")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, + BATCH_SIZE, partitionMaxBytes); validateShareFetchFutureException(future, List.of(tp0, tp1), Errors.FENCED_STATE_EPOCH, "Fenced exception again"); assertTrue(partitionCacheMap.isEmpty()); } + @Test + public void testListenerRegistration() { + String groupId = "grp"; + Uuid memberId = Uuid.randomUuid(); + + TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + + ReplicaManager mockReplicaManager = mock(ReplicaManager.class); + Partition partition = mockPartition(); + when(mockReplicaManager.getPartitionOrException(Mockito.any())).thenReturn(partition); + + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withReplicaManager(mockReplicaManager) + .withTimer(mockTimer) + .build(); + + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, BATCH_SIZE, + partitionMaxBytes); + // Validate that the listener is registered. 
+ verify(mockReplicaManager, times(2)).maybeAddListener(any(), any()); + } + + @Test + public void testSharePartitionListenerOnFailed() { + SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", + new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); + Map partitionCacheMap = new HashMap<>(); + ReplicaManager mockReplicaManager = mock(ReplicaManager.class); + + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onFailed); + } + + @Test + public void testSharePartitionListenerOnDeleted() { + SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", + new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); + Map partitionCacheMap = new HashMap<>(); + ReplicaManager mockReplicaManager = mock(ReplicaManager.class); + + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onDeleted); + } + + @Test + public void testSharePartitionListenerOnBecomingFollower() { + SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", + new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); + Map partitionCacheMap = new HashMap<>(); + ReplicaManager mockReplicaManager = mock(ReplicaManager.class); + + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onBecomingFollower); + } + + private void testSharePartitionListener( + SharePartitionKey sharePartitionKey, + Map partitionCacheMap, + ReplicaManager mockReplicaManager, + Consumer listenerConsumer + ) { + // Add another share partition to the cache. + TopicPartition tp = new TopicPartition("foo", 1); + TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); + SharePartitionKey spk = new SharePartitionKey("grp", tpId); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + partitionCacheMap.put(sharePartitionKey, sp0); + partitionCacheMap.put(spk, sp1); + + // Invoke listener for first share partition. + listenerConsumer.accept(sharePartitionKey.topicIdPartition().topicPartition()); + + // Validate that the share partition is removed from the cache. + assertEquals(1, partitionCacheMap.size()); + assertFalse(partitionCacheMap.containsKey(sharePartitionKey)); + verify(sp0, times(1)).markFenced(); + verify(mockReplicaManager, times(1)).removeListener(any(), any()); + + // Invoke listener for non-matching share partition. + listenerConsumer.accept(tp); + // The non-matching share partition should not be removed as the listener is attached to a different topic partition. + assertEquals(1, partitionCacheMap.size()); + verify(sp1, times(0)).markFenced(); + // Verify the remove listener is not called for the second share partition. 
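// A minimal sketch of the callback behaviour these listener tests pin down, reconstructed
// only from the assertions in this shared helper (the production SharePartitionListener is
// not part of this excerpt, so the method and field names below are illustrative):
//
//   private void handlePartitionChange(TopicPartition notifiedPartition) {
//       // Only react to the partition this listener was registered for.
//       if (!sharePartitionKey.topicIdPartition().topicPartition().equals(notifiedPartition))
//           return;
//       // Fence the cached share partition, drop it from the cache and deregister the listener.
//       SharePartition sharePartition = partitionCacheMap.remove(sharePartitionKey);
//       if (sharePartition != null)
//           sharePartition.markFenced();
//       replicaManager.removeListener(notifiedPartition, this);
//   }
//
// onFailed, onDeleted and onBecomingFollower are expected to funnel into one such helper,
// which is why the three tests above can share testSharePartitionListener via method references.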
+ verify(mockReplicaManager, times(1)).removeListener(any(), any()); + } + private ShareFetchResponseData.PartitionData noErrorShareFetchResponse() { return new ShareFetchResponseData.PartitionData().setPartitionIndex(0); } @@ -2493,7 +2723,7 @@ private void validateShareFetchFutureException(CompletableFuture(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionAllData( + 0, PartitionFactory.DEFAULT_STATE_EPOCH, + PartitionFactory.UNINITIALIZED_START_OFFSET, + PartitionFactory.DEFAULT_ERROR_CODE, + PartitionFactory.DEFAULT_ERR_MESSAGE, + Collections.emptyList()))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + + GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); + GroupConfig groupConfig = Mockito.mock(GroupConfig.class); + Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); + + // Since the timestamp() of duration based strategy is not deterministic, we need to mock the ShareGroupAutoOffsetResetStrategy. + // mock: final ShareGroupAutoOffsetResetStrategy resetStrategy = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H"); + final ShareGroupAutoOffsetResetStrategy resetStrategy = Mockito.mock(ShareGroupAutoOffsetResetStrategy.class); + final long expectedTimestamp = MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1); + Mockito.when(resetStrategy.type()).thenReturn(ShareGroupAutoOffsetResetStrategy.StrategyType.BY_DURATION); + Mockito.when(resetStrategy.timestamp()).thenReturn(expectedTimestamp); + + Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(resetStrategy); + + ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); + + FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1), 15L, Optional.empty()); + Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). 
+ when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) + .withGroupConfigManager(groupConfigManager) + .withReplicaManager(replicaManager) + .build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); + + // replicaManager.fetchOffsetForTimestamp should be called with the (current time - 1 hour) + Mockito.verify(replicaManager).fetchOffsetForTimestamp( + Mockito.any(TopicPartition.class), + Mockito.eq(expectedTimestamp), + Mockito.any(), + Mockito.any(), + Mockito.anyBoolean() + ); + + assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); + assertEquals(15, sharePartition.startOffset()); + assertEquals(15, sharePartition.endOffset()); + assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch()); + } + @Test public void testMaybeInitializeDefaultStartEpochGroupConfigNotPresent() { Persister persister = Mockito.mock(Persister.class); @@ -317,7 +376,7 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigNotPresent() { ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 15L, Optional.empty()); - Mockito.doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())). + Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); SharePartition sharePartition = SharePartitionBuilder.builder() @@ -406,7 +465,7 @@ public void testMaybeInitializeFetchOffsetForEarliestTimestampThrowsError() { GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); GroupConfig groupConfig = Mockito.mock(GroupConfig.class); Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); - Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(GroupConfig.ShareGroupAutoOffsetReset.EARLIEST); + Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(ShareGroupAutoOffsetResetStrategy.EARLIEST); ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); @@ -435,6 +494,59 @@ public void testMaybeInitializeFetchOffsetForEarliestTimestampThrowsError() { assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } + @Test + public void testMaybeInitializeFetchOffsetForByDurationThrowsError() { + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionAllData( + 0, PartitionFactory.DEFAULT_STATE_EPOCH, + PartitionFactory.UNINITIALIZED_START_OFFSET, + PartitionFactory.DEFAULT_ERROR_CODE, + PartitionFactory.DEFAULT_ERR_MESSAGE, + Collections.emptyList()))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + + GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); + GroupConfig groupConfig = 
Mockito.mock(GroupConfig.class); + Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); + + // We need to mock the ShareGroupAutoOffsetResetStrategy as the timestamp() of duration based strategy is not deterministic. + // final ShareGroupAutoOffsetResetStrategy resetStrategy = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H"); + final ShareGroupAutoOffsetResetStrategy resetStrategy = Mockito.mock(ShareGroupAutoOffsetResetStrategy.class); + final long expectedTimestamp = MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1); + Mockito.when(groupConfig.shareAutoOffsetReset()).thenReturn(resetStrategy); + + Mockito.when(resetStrategy.type()).thenReturn(ShareGroupAutoOffsetResetStrategy.StrategyType.BY_DURATION); + Mockito.when(resetStrategy.timestamp()).thenReturn(expectedTimestamp); + + ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); + + Mockito.when(replicaManager.fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean())) + .thenThrow(new RuntimeException("fetch offsets exception")); + + SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) + .withGroupConfigManager(groupConfigManager) + .withReplicaManager(replicaManager) + .build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + + Mockito.verify(replicaManager).fetchOffsetForTimestamp( + Mockito.any(TopicPartition.class), + Mockito.eq(expectedTimestamp), + Mockito.any(), + Mockito.any(), + Mockito.anyBoolean() + ); + + assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); + } + @Test public void testMaybeInitializeSharePartitionAgain() { Persister persister = Mockito.mock(Persister.class); @@ -490,11 +602,8 @@ public void testMaybeInitializeSharePartitionAgainConcurrentRequests() throws In if (!executorService.awaitTermination(30, TimeUnit.MILLISECONDS)) executorService.shutdown(); } - - for (CompletableFuture result : results) { - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); - } + assertTrue(results.stream().allMatch(CompletableFuture::isDone)); + assertFalse(results.stream().allMatch(CompletableFuture::isCompletedExceptionally)); assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); // Verify the persister read state is called only once. @@ -707,7 +816,7 @@ public void testMaybeInitializeWithNoOpShareStatePersister() { ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); - Mockito.doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())). + Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); SharePartition sharePartition = SharePartitionBuilder.builder().withReplicaManager(replicaManager).build(); @@ -771,24 +880,20 @@ public void testMaybeInitializeWithReadException() { Persister persister = Mockito.mock(Persister.class); // Complete the future exceptionally for read state. 
Mockito.when(persister.readState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Read exception"))); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build(); - CompletableFuture result = sharePartition.maybeInitialize(); + CompletableFuture result = sharePartition1.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); assertFutureThrows(result, RuntimeException.class); - assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); + assertEquals(SharePartitionState.FAILED, sharePartition1.partitionState()); persister = Mockito.mock(Persister.class); // Throw exception for read state. Mockito.when(persister.readState(Mockito.any())).thenThrow(new RuntimeException("Read exception")); - sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build(); - result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(result, RuntimeException.class); - assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); + assertThrows(RuntimeException.class, sharePartition2::maybeInitialize); } @Test @@ -798,6 +903,7 @@ public void testAcquireSingleRecord() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -821,6 +927,7 @@ public void testAcquireMultipleRecords() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -844,6 +951,7 @@ public void testAcquireWithMaxFetchRecords() { MemoryRecords records = memoryRecords(5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, 10, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -864,6 +972,7 @@ public void testAcquireWithMaxFetchRecords() { records = memoryRecords(25); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, 10, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -897,6 +1006,7 @@ public void testAcquireWithMultipleBatchesAndMaxFetchRecords() { // Acquire 10 records. 
List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, 10, new FetchPartitionData(Errors.NONE, 20, 10, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -922,6 +1032,7 @@ public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -934,6 +1045,7 @@ public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { records = memoryRecords(10, 0); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -951,6 +1063,7 @@ public void testAcquireSameBatchAgain() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -961,6 +1074,7 @@ public void testAcquireSameBatchAgain() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -974,6 +1088,7 @@ public void testAcquireSameBatchAgain() { MemoryRecords subsetRecords = memoryRecords(2, 10); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -991,6 +1106,7 @@ public void testAcquireWithEmptyFetchRecords() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1000,6 +1116,201 @@ public void testAcquireWithEmptyFetchRecords() { assertEquals(0, sharePartition.nextFetchOffset()); } + @Test + public void testAcquireWithBatchSizeAndSingleBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Single batch has more records than batch size. Hence, only a single batch exceeding the batch size + // should be acquired. 
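// Aside on the two limits exercised by the batch-size tests in this block, inferred from
// their expectations rather than from the acquire() implementation itself: the batch-size
// argument only shapes how acquired records are grouped into cached batches and does not
// split a record batch that was written as one unit (batch size 2 over a single 5-record
// batch still yields one acquired batch [0..4]), while max fetch records caps the total
// number of records acquired per call, also at record-batch granularity (a cap of 10 over
// leading batches of 5 and 15 records still acquires all 20 before ignoring the rest).
// The call shape being driven is roughly:
//
//   ShareAcquiredRecords acquired = sharePartition.acquire(
//       MEMBER_ID,
//       batchSize,          // preferred grouping of acquired records
//       maxFetchRecords,    // upper bound on records acquired in this call
//       fetchPartitionData);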
+ MemoryRecords records = memoryRecords(5); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 2 /* Batch size */, + 10, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); + + assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); + } + + @Test + public void testAcquireWithBatchSizeAndMultipleBatches() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Create 4 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 2).close(); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 7, 15).close(); + memoryRecordsBuilder(buffer, 6, 22).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 5 /* Batch size */, + 100, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 26 /* Gap of 3 records will also be added to first batch */); + + // Fetch expected records from 4 batches, but change the first expected record to include gap offsets. 
+ List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); + expectedAcquiredRecords.remove(0); + expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(2, 9, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(28, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().containsKey(2L)); + assertTrue(sharePartition.cachedState().containsKey(10L)); + assertTrue(sharePartition.cachedState().containsKey(15L)); + assertTrue(sharePartition.cachedState().containsKey(22L)); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(22L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(2L).batchDeliveryCount()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + assertEquals(1, sharePartition.cachedState().get(22L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(2L).offsetState()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); + assertNull(sharePartition.cachedState().get(15L).offsetState()); + assertNull(sharePartition.cachedState().get(22L).offsetState()); + } + + @Test + public void testAcquireWithBatchSizeAndMaxFetchRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Create 3 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 0).close(); + memoryRecordsBuilder(buffer, 15, 5).close(); + memoryRecordsBuilder(buffer, 15, 20).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 2 /* Batch size */, + 10, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 20); + + List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); + // The last batch should be ignored as it exceeds the max fetch records. 
+ expectedAcquiredRecords.remove(2); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(20, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); + assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); + assertEquals(19, sharePartition.cachedState().get(5L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); + } + + @Test + public void testAcquireSingleBatchWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.updateCacheAndOffsets(8L); + + MemoryRecords records = memoryRecords(10, 5); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 5 /* Batch size */, + 100, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 7 /* Acquisition of records starts post endOffset */); + + // Fetch expected single batch, but change the first offset as per endOffset. + assertArrayEquals(expectedAcquiredRecord(8, 14, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().containsKey(8L)); + } + + @Test + public void testAcquireWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.updateCacheAndOffsets(4L); + + // Create 2 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 8, 2).close(); + memoryRecordsBuilder(buffer, 7, 10).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 5 /* Batch size */, + 100, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 13 /* Acquisition of records starts post endOffset */); + + // Fetch expected records from 2 batches, but change the first batch's first offset as per endOffset. 
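// Worked expectation for this case: updateCacheAndOffsets(4L) has already moved the share
// partition's end offset to 4 and the fetch returns batches [2..9] and [10..16], so
// acquisition starts at offset 4. The first acquired batch is trimmed to [4..9] while
// [10..16] is acquired unchanged, giving 6 + 7 = 13 acquired records, cached batches keyed
// at 4 and 10, and a next fetch offset of 17.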
+ List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); + expectedAcquiredRecords.remove(0); + expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(4, 9, 1)); + + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + assertEquals(17, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().containsKey(4L)); + assertTrue(sharePartition.cachedState().containsKey(10L)); + } + + @Test + public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.updateCacheAndOffsets(12L); + + // Create 2 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 8, 2).close(); + memoryRecordsBuilder(buffer, 7, 10).close(); + buffer.flip(); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 5 /* Batch size */, + 100, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5 /* Acquisition of records starts post endOffset */); + + // First batch should be skipped and fetch should result a single batch (second batch), but + // change the first offset of acquired batch as per endOffset. + assertArrayEquals(expectedAcquiredRecord(12, 16, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(17, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertTrue(sharePartition.cachedState().containsKey(12L)); + } + @Test public void testNextFetchOffsetInitialState() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); @@ -1011,6 +1322,7 @@ public void testNextFetchOffsetWithCachedStateAcquired() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1033,6 +1345,7 @@ public void testNextFetchOffsetWithFindAndCachedState() { assertTrue(sharePartition.findNextFetchOffset()); sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1051,6 +1364,7 @@ public void testCanAcquireRecordsWithCachedDataAndLimitNotReached() { SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightMessages(6).build(); sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1066,6 +1380,7 @@ public void testCanAcquireRecordsWithCachedDataAndLimitReached() { .build(); sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1078,7 +1393,7 @@ public void testMaybeAcquireAndReleaseFetchLock() { ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); 
FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); - Mockito.doReturn(new OffsetResultHolder(Option.apply(timestampAndOffset), Option.empty())). + Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); SharePartition sharePartition = SharePartitionBuilder.builder().withReplicaManager(replicaManager).build(); @@ -1102,6 +1417,7 @@ public void testAcknowledgeSingleRecordBatch() { // Another batch is acquired because if there is only 1 batch, and it is acknowledged, the batch will be removed from cachedState List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1111,6 +1427,7 @@ public void testAcknowledgeSingleRecordBatch() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1138,6 +1455,7 @@ public void testAcknowledgeMultipleRecordBatch() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1167,6 +1485,7 @@ public void testAcknowledgeMultipleRecordBatchWithGapOffsets() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1177,6 +1496,7 @@ public void testAcknowledgeMultipleRecordBatchWithGapOffsets() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1234,6 +1554,7 @@ public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1244,6 +1565,7 @@ public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1303,6 +1625,7 @@ public void testAcknowledgeOutOfRangeCachedData() { MemoryRecords records = memoryRecords(5, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1328,6 +1651,7 @@ public void testAcknowledgeOutOfRangeCachedDataFirstBatch() { MemoryRecords records = memoryRecords(5, 
0); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1339,6 +1663,7 @@ public void testAcknowledgeOutOfRangeCachedDataFirstBatch() { records = memoryRecords(5, 20); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1359,6 +1684,7 @@ public void testAcknowledgeOutOfRangeCachedDataFirstBatch() { records = memoryRecords(6, 5); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1379,6 +1705,7 @@ public void testAcknowledgeWithAnotherMember() { MemoryRecords records = memoryRecords(5, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1402,6 +1729,7 @@ public void testAcknowledgeWhenOffsetNotAcquired() { MemoryRecords records = memoryRecords(5, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1428,6 +1756,7 @@ public void testAcknowledgeWhenOffsetNotAcquired() { // Re-acquire the same batch and then acknowledge subset with ACCEPT type. 
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1457,6 +1786,7 @@ public void testAcknowledgeRollbackWithFullBatchError() { MemoryRecords records3 = memoryRecords(5, 15); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1466,6 +1796,7 @@ public void testAcknowledgeRollbackWithFullBatchError() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1475,6 +1806,7 @@ public void testAcknowledgeRollbackWithFullBatchError() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1510,6 +1842,7 @@ public void testAcknowledgeRollbackWithSubsetError() { MemoryRecords records3 = memoryRecords(5, 15); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1519,6 +1852,7 @@ public void testAcknowledgeRollbackWithSubsetError() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1528,6 +1862,7 @@ public void testAcknowledgeRollbackWithSubsetError() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1564,6 +1899,7 @@ public void testAcquireReleasedRecord() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1593,6 +1929,7 @@ public void testAcquireReleasedRecord() { // Send the same fetch request batch again but only 2 offsets should come as acquired. 
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1616,6 +1953,7 @@ public void testAcquireReleasedRecordMultipleBatches() { List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1626,6 +1964,7 @@ public void testAcquireReleasedRecordMultipleBatches() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1636,6 +1975,7 @@ public void testAcquireReleasedRecordMultipleBatches() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1646,6 +1986,7 @@ public void testAcquireReleasedRecordMultipleBatches() { acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records4, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1701,6 +2042,7 @@ public void testAcquireReleasedRecordMultipleBatches() { // Send next batch from offset 12, only 3 records should be acquired. acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1713,6 +2055,7 @@ public void testAcquireReleasedRecordMultipleBatches() { // next fetch offset should not move. acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1725,6 +2068,7 @@ public void testAcquireReleasedRecordMultipleBatches() { MemoryRecords subsetRecords = memoryRecords(2, 17); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1739,6 +2083,7 @@ public void testAcquireReleasedRecordMultipleBatches() { subsetRecords = memoryRecords(1, 28); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1752,6 +2097,7 @@ public void testAcquireReleasedRecordMultipleBatches() { // offset should move. 
acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, + BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), @@ -1771,7 +2117,7 @@ public void testAcquisitionLockForAcquiringSingleRecord() throws InterruptedExce .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(1), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(1), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1795,7 +2141,7 @@ public void testAcquisitionLockForAcquiringMultipleRecords() throws InterruptedE .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1820,7 +2166,7 @@ public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1828,7 +2174,7 @@ public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch assertEquals(1, sharePartition.timer().size()); // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1855,7 +2201,7 @@ public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedEx .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1871,7 +2217,7 @@ public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedEx () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); // Acquire the same batch again. 
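// Context for the acquisition-lock tests in this region, inferred from the assertions and
// test names rather than from the production code: every successful acquire arms a
// per-batch acquisition-lock timeout task (hence the batchAcquisitionLockTimeoutTask and
// timer().size() checks), and once ACQUISITION_LOCK_TIMEOUT_MS elapses without an
// acknowledgement the same offsets are expected to become acquirable again with an
// incremented delivery count, which is what the re-acquire below and the later
// max-delivery-count tests build on.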
- sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1884,7 +2230,7 @@ public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedEx public void testAcquisitionLockOnAcknowledgingSingleRecordBatch() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1918,7 +2264,7 @@ public void testAcquisitionLockOnAcknowledgingSingleRecordBatch() throws Interru public void testAcquisitionLockOnAcknowledgingMultipleRecordBatch() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -1959,19 +2305,19 @@ public void testAcquisitionLockOnAcknowledgingMultipleRecordBatchWithGapOffsets( MemoryRecords records2 = recordsBuilder.build(); MemoryRecords records3 = memoryRecords(2, 1); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records3, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); assertEquals(1, sharePartition.timer().size()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); assertEquals(2, sharePartition.timer().size()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); @@ -2009,7 +2355,7 @@ public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws Interrupted .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(8, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(8, 10), 
Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2027,7 +2373,7 @@ public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws Interrupted () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); // Acquire subset of records again. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(3, 12), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(3, 12), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2088,14 +2434,14 @@ public void testAcquisitionLockOnAcknowledgingMultipleSubsetRecordBatchWithGapOf recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); assertEquals(1, sharePartition.timer().size()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2181,11 +2527,11 @@ public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws Inter // Adding memoryRecords(10, 0) in the sharePartition to make sure that SPSO doesn't move forward when delivery count of records2 // exceed the max delivery count. 
- sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2203,7 +2549,7 @@ public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws Inter DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2232,7 +2578,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedE .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2249,7 +2595,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedE DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(5, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(5, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2309,7 +2655,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2325,7 +2671,7 @@ public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2350,7 +2696,7 @@ public void testAcknowledgeAfterAcquisitionLockTimeout() throws InterruptedExcep .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + 
sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2390,7 +2736,7 @@ public void testAcquisitionLockAfterDifferentAcknowledges() throws InterruptedEx .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2457,7 +2803,7 @@ public void testAcquisitionLockOnBatchWithWriteShareGroupStateFailure() throws I PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(1, sharePartition.timer().size()); @@ -2490,7 +2836,7 @@ public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(1, sharePartition.timer().size()); @@ -2532,7 +2878,7 @@ public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws public void testReleaseSingleRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2551,7 +2897,7 @@ public void testReleaseSingleRecordBatch() { public void testReleaseMultipleRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2574,15 +2920,15 @@ public void testReleaseMultipleAcknowledgedRecordBatch() { // Untracked gap of 3 offsets from 7-9. 
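// "Gap" offsets in these tests are offsets that lie inside the acquired range but have no
// record in the fetched batches (here 7-9, just before records2 at offsets 10-18), for
// example offsets removed by compaction. The batch-size tests earlier show the assumed
// handling: the enclosing acquired batch simply spans the gap (batch [2..6] plus the gap
// 7-9 was acquired as [2..9], and the gap counted towards the 26 expected records), so
// acknowledgements and releases line up with acquired-batch boundaries rather than with
// only the records that were actually fetched.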
MemoryRecords records2 = memoryRecords(9, 10); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records0, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records0, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2614,11 +2960,11 @@ public void testReleaseAcknowledgedMultipleSubsetRecordBatch() { recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2670,10 +3016,10 @@ public void testReleaseAcquiredRecordsWithAnotherMember() { recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledging over subset of second batch with subset of gap offsets. 
@@ -2743,10 +3089,10 @@ public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledging over subset of second batch with subset of gap offsets. @@ -2822,7 +3168,7 @@ public void testReleaseAcquiredRecordsForEmptyCachedData() { @Test public void testReleaseAcquiredRecordsAfterDifferentAcknowledges() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); sharePartition.acknowledge(MEMBER_ID, @@ -2852,19 +3198,19 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquire .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(10, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(10, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); MemoryRecords records2 = memoryRecords(5, 10); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 2)))); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2891,15 +3237,15 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquire // Third fetch request with 5 records starting from offset 20.
MemoryRecords records3 = memoryRecords(5, 20); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2910,16 +3256,16 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquire ))); // Send next batch from offset 13, only 2 records should be acquired. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Send next batch from offset 15, only 2 records should be acquired. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2967,15 +3313,15 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { // Third fetch request with 5 records starting from offset 20. MemoryRecords records3 = memoryRecords(5, 20); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -2987,16 +3333,16 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { ))); // Send next batch from offset 13, only 2 records should be acquired. 
- sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Send next batch from offset 15, only 2 records should be acquired. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3012,7 +3358,7 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { public void testReleaseAcquiredRecordsSubsetWithAnotherMember() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3052,7 +3398,7 @@ public void testReleaseBatchWithWriteShareGroupStateFailure() { PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3082,7 +3428,7 @@ public void testReleaseOffsetWithWriteShareGroupStateFailure() { PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3120,7 +3466,7 @@ public void testReleaseOffsetWithWriteShareGroupStateFailure() { public void testAcquisitionLockOnReleasingMultipleRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3155,11 +3501,11 @@ public void 
testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchW recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3236,25 +3582,25 @@ public void testLsoMovementOnInitializationSharePartition() { public void testLsoMovementForArchivingBatches() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 12), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 17), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 22), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 27), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 32), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3304,10 +3650,10 @@ public void testLsoMovementForArchivingBatches() { public void testLsoMovementForArchivingOffsets() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + 
sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3356,10 +3702,10 @@ public void testLsoMovementForArchivingOffsets() { public void testLsoMovementForArchivingOffsetsWithStartAndEndBatchesNotFullMatches() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3396,10 +3742,10 @@ public void testLsoMovementForArchivingOffsetsWithStartAndEndBatchesNotFullMatch public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatches() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3436,10 +3782,10 @@ public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatches() { public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostAcceptAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3486,9 +3832,9 @@ public void 
testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostA public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostReleaseAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // LSO is at 4. @@ -3534,9 +3880,9 @@ public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostR public void testLsoMovementToEndOffset() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledge with RELEASE action. @@ -3568,9 +3914,9 @@ public void testLsoMovementToEndOffset() { public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledge with RELEASE action. 
@@ -3603,9 +3949,9 @@ public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() { public void testLsoMovementAheadOfEndOffsetPostAcknowledgment() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledge with RELEASE action. @@ -3637,9 +3983,9 @@ public void testLsoMovementAheadOfEndOffsetPostAcknowledgment() { public void testLsoMovementAheadOfEndOffset() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // LSO is at 14. @@ -3667,11 +4013,11 @@ public void testLsoMovementWithGapsInCachedStateMap() { // Gap of 15-19. MemoryRecords records3 = memoryRecords(5, 20); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records3, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records3, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // LSO is at 18. @@ -3700,9 +4046,9 @@ public void testLsoMovementWithGapsInCachedStateMapAndAcknowledgedBatch() { // Gap of 7-9. 
MemoryRecords records2 = memoryRecords(5, 10); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Acknowledge with RELEASE action. @@ -3735,10 +4081,10 @@ public void testLsoMovementPostGapsInAcknowledgments() { recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); sharePartition.acknowledge(MEMBER_ID, Arrays.asList( @@ -3776,25 +4122,25 @@ public void testLsoMovementPostGapsInAcknowledgments() { public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new 
FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3863,10 +4209,10 @@ public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToStartOfBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3894,10 +4240,10 @@ public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToStartOfBat public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToMiddleOfBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -3934,25 +4280,25 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovement() throws .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), 
OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4019,10 +4365,10 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToStartOf .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4051,10 +4397,10 @@ public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToMiddleO .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4135,10 +4481,10 @@ public void testScheduleAcquisitionLockTimeoutValueUpdatesSuccessfully() { public void 
testAcknowledgeBatchAndOffsetPostLsoMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4194,13 +4540,13 @@ public void testAcknowledgeBatchAndOffsetPostLsoMovement() { public void testAcknowledgeBatchPostLsoMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4256,7 +4602,7 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws In .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4279,7 +4625,7 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws In DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4309,7 +4655,7 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOff .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 1), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 1), Optional.empty(), 
OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4332,11 +4678,11 @@ public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOff DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(3, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(3, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4453,12 +4799,20 @@ public void testWriteShareGroupStateWithInvalidTopicsData() { public void testWriteShareGroupStateWithWriteException() { Persister persister = Mockito.mock(Persister.class); mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build(); Mockito.when(persister.writeState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Write exception"))); - CompletableFuture writeResult = sharePartition.writeShareGroupState(anyList()); + CompletableFuture writeResult = sharePartition1.writeShareGroupState(anyList()); assertTrue(writeResult.isCompletedExceptionally()); assertFutureThrows(writeResult, IllegalStateException.class); + + persister = Mockito.mock(Persister.class); + // Throw exception for write state. 
+ mockPersisterReadStateMethod(persister); + SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build(); + + Mockito.when(persister.writeState(Mockito.any())).thenThrow(new RuntimeException("Write exception")); + assertThrows(RuntimeException.class, () -> sharePartition2.writeShareGroupState(anyList())); } @Test @@ -4584,7 +4938,7 @@ public void testWriteShareGroupStateWithNoOpShareStatePersister() { public void testMaybeUpdateCachedStateWhenAcknowledgementTypeAccept() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4606,7 +4960,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementTypeAccept() { public void testMaybeUpdateCachedStateWhenAcknowledgementTypeReject() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4628,7 +4982,7 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementTypeReject() { public void testMaybeUpdateCachedStateWhenAcknowledgementTypeRelease() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4654,12 +5008,12 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchS .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4683,12 +5037,12 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntire .withMaxInflightMessages(20) .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, 
MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4713,12 +5067,12 @@ public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() { .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4746,12 +5100,12 @@ public void testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4772,17 +5126,17 @@ public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() .withState(SharePartitionState.ACTIVE) .build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 20), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 20), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 40), + 
sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 40), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); @@ -4796,7 +5150,7 @@ public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() assertEquals(59, sharePartition.endOffset()); assertEquals(60, sharePartition.nextFetchOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 60), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 60), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); @@ -4812,7 +5166,7 @@ public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() assertEquals(79, sharePartition.endOffset()); assertEquals(80, sharePartition.nextFetchOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 80), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 80), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4827,7 +5181,7 @@ public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() assertEquals(180, sharePartition.endOffset()); assertEquals(180, sharePartition.nextFetchOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 180), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 180), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4846,7 +5200,7 @@ public void testCanAcquireRecordsReturnsTrue() { assertEquals(0, sharePartition.startOffset()); assertEquals(0, sharePartition.endOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4862,13 +5216,13 @@ public void testCanAcquireRecordsChangeResponsePostAcknowledgement() { assertEquals(0, sharePartition.startOffset()); assertEquals(0, sharePartition.endOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertTrue(sharePartition.canAcquireRecords()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4888,7 +5242,7 @@ public void testCanAcquireRecordsChangeResponsePostAcknowledgement() { public void 
testCanAcquireRecordsAfterReleaseAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4896,7 +5250,7 @@ public void testCanAcquireRecordsAfterReleaseAcknowledgement() { assertEquals(0, sharePartition.startOffset()); assertEquals(149, sharePartition.endOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4918,7 +5272,7 @@ public void testCanAcquireRecordsAfterReleaseAcknowledgement() { public void testCanAcquireRecordsAfterArchiveAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4926,7 +5280,7 @@ public void testCanAcquireRecordsAfterArchiveAcknowledgement() { assertEquals(0, sharePartition.startOffset()); assertEquals(149, sharePartition.endOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4947,7 +5301,7 @@ public void testCanAcquireRecordsAfterArchiveAcknowledgement() { public void testCanAcquireRecordsAfterAcceptAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -4955,7 +5309,7 @@ public void testCanAcquireRecordsAfterAcceptAcknowledgement() { assertEquals(0, sharePartition.startOffset()); assertEquals(149, sharePartition.endOffset()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.canAcquireRecords()); @@ -4987,7 +5341,7 @@ public void testAcknowledgeBatchWithWriteShareGroupStateFailure() { PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), 
Errors.UNKNOWN_TOPIC_OR_PARTITION.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5018,7 +5372,7 @@ public void testAcknowledgeOffsetWithWriteShareGroupStateFailure() { PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5048,7 +5402,7 @@ public void testAcknowledgeOffsetWithWriteShareGroupStateFailure() { public void testAcknowledgeSubsetWithAnotherMember() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5066,15 +5420,15 @@ public void testAcknowledgeSubsetWithAnotherMember() { public void testAcknowledgeWithAnotherMemberRollbackBatchError() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5103,15 +5457,15 @@ public void testAcknowledgeWithAnotherMemberRollbackBatchError() { public void testAcknowledgeWithAnotherMemberRollbackSubsetError() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 
0, memoryRecords(5, 5), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5144,14 +5498,14 @@ public void testMaxDeliveryCountLimitExceededForRecordBatch() { .build(); MemoryRecords records = memoryRecords(10, 5); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 2)))); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5176,11 +5530,11 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubset() { // Second fetch request with 5 records starting from offset 15. MemoryRecords records2 = memoryRecords(5, 15); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5190,12 +5544,12 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubset() { new ShareAcknowledgementBatch(17, 19, Collections.singletonList((byte) 1))))); // Send next batch from offset 13, only 2 records should be acquired. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); // Send next batch from offset 15, only 2 records should be acquired. 
- sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5218,7 +5572,7 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAndCachedStateNotCl // First fetch request with 5 records starting from offset 0. MemoryRecords records1 = memoryRecords(5, 0); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5226,7 +5580,7 @@ public void testMaxDeliveryCountLimitExceededForRecordsSubsetAndCachedStateNotCl new ShareAcknowledgementBatch(0, 1, Collections.singletonList((byte) 2))))); // Send next batch from offset 0, only 2 records should be acquired. - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(2, 0), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(2, 0), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5252,14 +5606,14 @@ public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() { String memberId1 = "memberId-1"; String memberId2 = "memberId-2"; - sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertFalse(sharePartition.findNextFetchOffset()); assertEquals(10, sharePartition.nextFetchOffset()); - sharePartition.acquire(memberId2, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 10), + sharePartition.acquire(memberId2, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5272,7 +5626,7 @@ public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() { assertTrue(sharePartition.findNextFetchOffset()); assertEquals(5, sharePartition.nextFetchOffset()); - sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5291,7 +5645,7 @@ public void testNextFetchOffsetWithMultipleConsumers() { String memberId1 = MEMBER_ID; String memberId2 = "member-2"; - sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(3, sharePartition.nextFetchOffset()); @@ -5300,12 +5654,12 @@ public void testNextFetchOffsetWithMultipleConsumers() { new ShareAcknowledgementBatch(0, 2, Collections.singletonList((byte) 2)))); assertEquals(0, 
sharePartition.nextFetchOffset()); - sharePartition.acquire(memberId2, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), + sharePartition.acquire(memberId2, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(0, sharePartition.nextFetchOffset()); - sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(5, sharePartition.nextFetchOffset()); @@ -5321,7 +5675,7 @@ public void testNumberOfWriteCallsOnUpdates() { .withState(SharePartitionState.ACTIVE) .build()); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(5, 2), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(5, 2), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5341,11 +5695,11 @@ public void testReacquireSubsetWithAnotherMember() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); MemoryRecords records1 = memoryRecords(5, 5); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(12, 10), + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(12, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); @@ -5356,13 +5710,13 @@ public void testReacquireSubsetWithAnotherMember() { new ShareAcknowledgementBatch(17, 20, Collections.singletonList((byte) 2)))); // Reacquire with another member. - sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(10, sharePartition.nextFetchOffset()); // Reacquire with another member. 
- sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 10), + sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 10), Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); assertEquals(17, sharePartition.nextFetchOffset()); @@ -5419,6 +5773,78 @@ public void testMaybeInitializeWhenReadStateRpcReturnsZeroAvailableRecords() { assertEquals(734, sharePartition.endOffset()); } + @Test + public void testAcquireWithWriteShareGroupStateDelay() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) + .withState(SharePartitionState.ACTIVE) + .build(); + + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true only after a delay. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + + CompletableFuture future = new CompletableFuture<>(); + // The persister.writeState RPC will not complete immediately, so the commit for acknowledged offsets won't happen yet. + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + List acknowledgementBatches = new ArrayList<>(); + acknowledgementBatches.add(new ShareAcknowledgementBatch(2, 3, Collections.singletonList((byte) 2))); + acknowledgementBatches.add(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2))); + // Acknowledge offsets 2-3 and 5-9 with RELEASE acknowledgement type. + sharePartition.acknowledge(MEMBER_ID, acknowledgementBatches); + + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + + // Even though offsets 2-3 and 5-9 are in AVAILABLE state, they won't be acquired since they are still in transition from ACQUIRED + // to AVAILABLE state: the write state RPC has not completed yet, so the commit hasn't happened yet.
+ sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + + // persister.writeState RPC will complete now. This is going to commit all the acknowledged batches. Hence, their + // rollBack state will become null and they will be available for acquire again. + future.complete(writeShareGroupStateResult); + sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + } + private List fetchAcquiredRecords(ShareAcquiredRecords shareAcquiredRecords, int expectedOffsetCount) { assertNotNull(shareAcquiredRecords); assertEquals(expectedOffsetCount, shareAcquiredRecords.count()); @@ -5534,7 +5960,8 @@ public static SharePartitionBuilder builder() { public SharePartition build() { return new SharePartition(GROUP_ID, TOPIC_ID_PARTITION, 0, maxInflightMessages, maxDeliveryCount, - defaultAcquisitionLockTimeoutMs, mockTimer, MOCK_TIME, persister, replicaManager, groupConfigManager, state); + defaultAcquisitionLockTimeoutMs, mockTimer, MOCK_TIME, persister, replicaManager, groupConfigManager, + state, Mockito.mock(SharePartitionListener.class)); } } } diff --git a/core/src/test/java/kafka/test/api/ShareConsumerTest.java b/core/src/test/java/kafka/test/api/ShareConsumerTest.java index b7d127eb429a3..237fe34bbe701 100644 --- a/core/src/test/java/kafka/test/api/ShareConsumerTest.java +++ b/core/src/test/java/kafka/test/api/ShareConsumerTest.java @@ -29,6 +29,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.KafkaShareConsumer; import 
org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; @@ -39,10 +40,12 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.serialization.ByteArrayDeserializer; @@ -51,16 +54,16 @@ import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.test.KafkaClusterTestKit; import org.apache.kafka.common.test.TestKitNodes; +import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.coordinator.group.GroupConfig; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; import java.util.ArrayList; @@ -76,15 +79,14 @@ import java.util.Properties; import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; @@ -104,17 +106,12 @@ public class ShareConsumerTest { private final TopicPartition tp = new TopicPartition("topic", 0); private final TopicPartition tp2 = new TopicPartition("topic2", 0); private final TopicPartition warmupTp = new TopicPartition("warmup", 0); - private static final String DEFAULT_STATE_PERSISTER = "org.apache.kafka.server.share.persister.DefaultStatePersister"; - private static final String NO_OP_PERSISTER = "org.apache.kafka.server.share.persister.NoOpShareStatePersister"; + private List sgsTopicPartitions; private Admin adminClient; @BeforeEach public void createCluster(TestInfo testInfo) throws Exception { - String persisterClassName = NO_OP_PERSISTER; - if (testInfo.getDisplayName().contains(".persister=")) { - persisterClassName = testInfo.getDisplayName().split("=")[1]; - } cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder() .setNumBrokerNodes(1) @@ -124,10 +121,10 @@ public void createCluster(TestInfo testInfo) throws Exception { .setConfigProp("group.coordinator.rebalance.protocols", "classic,consumer,share") .setConfigProp("group.share.enable", "true") .setConfigProp("group.share.partition.max.record.locks", "10000") - 
.setConfigProp("group.share.persister.class.name", persisterClassName) .setConfigProp("group.share.record.lock.duration.ms", "15000") .setConfigProp("offsets.topic.replication.factor", "1") .setConfigProp("share.coordinator.state.topic.min.isr", "1") + .setConfigProp("share.coordinator.state.topic.num.partitions", "3") .setConfigProp("share.coordinator.state.topic.replication.factor", "1") .setConfigProp("transaction.state.log.min.isr", "1") .setConfigProp("transaction.state.log.replication.factor", "1") @@ -140,6 +137,9 @@ public void createCluster(TestInfo testInfo) throws Exception { createTopic("topic"); createTopic("topic2"); adminClient = createAdminClient(); + sgsTopicPartitions = IntStream.range(0, 3) + .mapToObj(part -> new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, part)) + .toList(); warmup(); } @@ -149,271 +149,263 @@ public void destroyCluster() throws Exception { cluster.close(); } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testPollNoSubscribeFails(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - assertEquals(Collections.emptySet(), shareConsumer.subscription()); - // "Consumer is not subscribed to any topics." - assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); - shareConsumer.close(); + @Test + public void testPollNoSubscribeFails() { + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribeAndPollNoRecords(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - Set subscription = Collections.singleton(tp.topic()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); + @Test + public void testSubscribeAndPollNoRecords() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.close(); - assertEquals(0, records.count()); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribePollUnsubscribe(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - Set subscription = Collections.singleton(tp.topic()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void 
testSubscribePollUnsubscribe() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.unsubscribe(); - assertEquals(Collections.emptySet(), shareConsumer.subscription()); - shareConsumer.close(); - assertEquals(0, records.count()); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribePollSubscribe(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - Set subscription = Collections.singleton(tp.topic()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.unsubscribe(); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testSubscribePollSubscribe() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); - records = shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.close(); - assertEquals(0, records.count()); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribeUnsubscribePollFails(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - Set subscription = Collections.singleton(tp.topic()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testSubscribeUnsubscribePollFails() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.unsubscribe(); - assertEquals(Collections.emptySet(), shareConsumer.subscription()); - // "Consumer is not subscribed to any topics." 
- assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); - shareConsumer.close(); - assertEquals(0, records.count()); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribeSubscribeEmptyPollFails(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - Set subscription = Collections.singleton(tp.topic()); - shareConsumer.subscribe(subscription); - assertEquals(subscription, shareConsumer.subscription()); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.unsubscribe(); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); // due to leader epoch in read + } + } + + @Test + public void testSubscribeSubscribeEmptyPollFails() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.subscribe(Collections.emptySet()); - assertEquals(Collections.emptySet(), shareConsumer.subscription()); - // "Consumer is not subscribed to any topics." - assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); - shareConsumer.close(); - assertEquals(0, records.count()); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscriptionAndPoll(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.subscribe(Collections.emptySet()); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." 
+ assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); // due to leader epoch in read + } + } + + @Test + public void testSubscriptionAndPoll() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscriptionAndPollMultiple(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testSubscriptionAndPollMultiple() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgementSentOnSubscriptionChange(String persister) throws ExecutionException, InterruptedException { - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - ProducerRecord record2 = new ProducerRecord<>(tp2.topic(), tp2.partition(), null, "key".getBytes(), "value".getBytes()); - producer.send(record2).get(); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record 
= new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + @Test + public void testAcknowledgementSentOnSubscriptionChange() throws ExecutionException, InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); - shareConsumer.subscribe(Collections.singletonList(tp2.topic())); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + ProducerRecord record2 = new ProducerRecord<>(tp2.topic(), tp2.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record2).get(); + producer.flush(); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - // Waiting for heartbeat to propagate the subscription change. - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records from the updated subscription"); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - producer.send(record2).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); - //Starting the 3rd poll to invoke the callback - shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.subscribe(Collections.singletonList(tp2.topic())); - // Verifying if the callback was invoked for the partitions in the old subscription. - assertTrue(partitionExceptionMap.containsKey(tp)); - assertNull(partitionExceptionMap.get(tp)); + // Waiting for heartbeat to propagate the subscription change. + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp) && partitionExceptionMap.containsKey(tp2); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records from the updated subscription"); - producer.close(); - shareConsumer.close(); + // Verifying if the callback was invoked without exceptions for the partitions for both topics. 
+ assertNull(partitionExceptionMap.get(tp)); + assertNull(partitionExceptionMap.get(tp2)); + verifyShareGroupStateTopicRecordsProduced(); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgementCommitCallbackSuccessfulAcknowledgement(String persister) { - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - + @Test + public void testAcknowledgementCommitCallbackSuccessfulAcknowledgement() throws Exception { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + + producer.send(record); + producer.flush(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - // Now in the second poll, we implicitly acknowledge the record received in the first poll. - // We get back the acknowledgment error code after the second poll. - // When we start the 3rd poll, the acknowledgment commit callback is invoked. - shareConsumer.poll(Duration.ofMillis(1000)); - shareConsumer.poll(Duration.ofMillis(1000)); - - // We expect null exception as the acknowledgment error code is null. 
- assertTrue(partitionExceptionMap.containsKey(tp)); - assertNull(partitionExceptionMap.get(tp)); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgementCommitCallbackOnClose(String persister) { - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive call to callback"); + + // We expect null exception as the acknowledgment error code is null. + assertNull(partitionExceptionMap.get(tp)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testAcknowledgementCommitCallbackOnClose() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - - // Now in the second poll, we implicitly acknowledge the record received in the first poll. - // We get back the acknowledgement error code asynchronously after the second poll. - // The acknowledgement commit callback is invoked in close. - shareConsumer.poll(Duration.ofMillis(1000)); - shareConsumer.close(); - - // We expect null exception as the acknowledgment error code is null. 
- assertTrue(partitionExceptionMap.containsKey(tp)); - assertNull(partitionExceptionMap.get(tp)); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgementCommitCallbackInvalidRecordStateException(String persister) throws Exception { - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + + // Now in the second poll, we implicitly acknowledge the record received in the first poll. + // We get back the acknowledgement error code asynchronously after the second poll. + // The acknowledgement commit callback is invoked in close. + shareConsumer.poll(Duration.ofMillis(1000)); + shareConsumer.close(); + + // We expect null exception as the acknowledgment error code is null. + assertTrue(partitionExceptionMap.containsKey(tp)); + assertNull(partitionExceptionMap.get(tp)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Flaky("KAFKA-18033") + @Test + public void testAcknowledgementCommitCallbackInvalidRecordStateException() throws Exception { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - // Waiting until the acquisition lock expires. - Thread.sleep(20000); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - // Now in the second poll, we implicitly acknowledge the record received in the first poll. - // We get back the acknowledgment error code after the second poll. - // When we start the 3rd poll, the acknowledgment commit callback is invoked. 
- records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); - records = shareConsumer.poll(Duration.ofMillis(200)); - assertEquals(0, records.count()); + // Waiting until the acquisition lock expires. + Thread.sleep(20000); - // As we tried to acknowledge a record after the acquisition lock expired, - // we wil get an InvalidRecordStateException. - assertInstanceOf(InvalidRecordStateException.class, partitionExceptionMap.get(tp)); - shareConsumer.close(); - producer.close(); + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp) && partitionExceptionMap.get(tp) instanceof InvalidRecordStateException; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to be notified by InvalidRecordStateException"); + } } - private static class TestableAcknowledgeCommitCallback implements AcknowledgementCommitCallback { + private static class TestableAcknowledgementCommitCallback implements AcknowledgementCommitCallback { private final Map> partitionOffsetsMap; private final Map partitionExceptionMap; - public TestableAcknowledgeCommitCallback(Map> partitionOffsetsMap, - Map partitionExceptionMap) { + public TestableAcknowledgementCommitCallback(Map> partitionOffsetsMap, + Map partitionExceptionMap) { this.partitionOffsetsMap = partitionOffsetsMap; this.partitionExceptionMap = partitionExceptionMap; } @@ -434,555 +426,575 @@ public void onComplete(Map> offsetsMap, Exception ex } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testHeaders(String persister) { - int numRecords = 1; - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - record.headers().add("headerKey", "headerValue".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + @Test + public void testHeaders() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + int numRecords = 1; + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + record.headers().add("headerKey", "headerValue".getBytes()); + producer.send(record); + producer.flush(); - List> records = consumeRecords(shareConsumer, numRecords); - assertEquals(numRecords, records.size()); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - for (ConsumerRecord consumerRecord : records) { - Header header = consumerRecord.headers().lastHeader("headerKey"); - if (header != null) - assertEquals("headerValue", new String(header.value())); + List> records = consumeRecords(shareConsumer, numRecords); + assertEquals(numRecords, records.size()); + + for (ConsumerRecord consumerRecord : records) { + Header header = consumerRecord.headers().lastHeader("headerKey"); + if (header != null) + assertEquals("headerValue", new String(header.value())); + } + 
verifyShareGroupStateTopicRecordsProduced(); } - shareConsumer.close(); - producer.close(); } private void testHeadersSerializeDeserialize(Serializer serializer, Deserializer deserializer) { - int numRecords = 1; - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), serializer); + KafkaShareConsumer shareConsumer = createShareConsumer(deserializer, new ByteArrayDeserializer(), "group1")) { - KafkaProducer producer = createProducer(new ByteArraySerializer(), serializer); - producer.send(record); + int numRecords = 1; + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - KafkaShareConsumer shareConsumer = createShareConsumer(deserializer, new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - List> records = consumeRecords(shareConsumer, numRecords); - assertEquals(numRecords, records.size()); - shareConsumer.close(); - producer.close(); + List> records = consumeRecords(shareConsumer, numRecords); + assertEquals(numRecords, records.size()); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testHeadersSerializerDeserializer(String persister) { + @Test + public void testHeadersSerializerDeserializer() { testHeadersSerializeDeserialize(new BaseConsumerTest.SerializerImpl(), new BaseConsumerTest.DeserializerImpl()); + verifyShareGroupStateTopicRecordsProduced(); } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testMaxPollRecords(String persister) { - int maxPollRecords = 2; + @Test + public void testMaxPollRecords() { int numRecords = 10000; + int maxPollRecords = 2; - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - long startingTimestamp = System.currentTimeMillis(); - produceMessagesWithTimestamp(numRecords, startingTimestamp); + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), + "group1", Collections.singletonMap(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords)))) { + + long startingTimestamp = System.currentTimeMillis(); + produceMessagesWithTimestamp(numRecords, startingTimestamp); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + List> records = consumeRecords(shareConsumer, numRecords); + long i = 0L; + for (ConsumerRecord record : records) { + assertEquals(tp.topic(), record.topic()); + assertEquals(tp.partition(), record.partition()); + assertEquals(TimestampType.CREATE_TIME, record.timestampType()); + assertEquals(startingTimestamp + i, record.timestamp()); + assertEquals("key " + i, new String(record.key())); + assertEquals("value " + i, new String(record.value())); + // this is true only because K and V are byte arrays + assertEquals(("key " + i).length(), record.serializedKeySize()); + assertEquals(("value " + i).length(), record.serializedValueSize()); + + i++; + } + verifyShareGroupStateTopicRecordsProduced(); + } + } - 
KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), - "group1", Collections.singletonMap(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords))); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + @Test + public void testControlRecordsSkipped() throws Exception { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer transactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer(), "T1"); + KafkaProducer nonTransactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - List> records = consumeRecords(shareConsumer, numRecords); - long i = 0L; - for (ConsumerRecord record : records) { - assertEquals(tp.topic(), record.topic()); - assertEquals(tp.partition(), record.partition()); - assertEquals(TimestampType.CREATE_TIME, record.timestampType()); - assertEquals(startingTimestamp + i, record.timestamp()); - assertEquals("key " + i, new String(record.key())); - assertEquals("value " + i, new String(record.value())); - // this is true only because K and V are byte arrays - assertEquals(("key " + i).length(), record.serializedKeySize()); - assertEquals(("value " + i).length(), record.serializedValueSize()); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - i++; - } - shareConsumer.close(); - producer.close(); - } + transactionalProducer.initTransactions(); + transactionalProducer.beginTransaction(); + RecordMetadata transactional1 = transactionalProducer.send(record).get(); - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testControlRecordsSkipped(String persister) throws Exception { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + RecordMetadata nonTransactional1 = nonTransactionalProducer.send(record).get(); - KafkaProducer transactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer(), "T1"); - transactionalProducer.initTransactions(); - transactionalProducer.beginTransaction(); - RecordMetadata transactional1 = transactionalProducer.send(record).get(); + transactionalProducer.commitTransaction(); - KafkaProducer nonTransactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - RecordMetadata nonTransactional1 = nonTransactionalProducer.send(record).get(); + transactionalProducer.beginTransaction(); + RecordMetadata transactional2 = transactionalProducer.send(record).get(); + transactionalProducer.abortTransaction(); - transactionalProducer.commitTransaction(); + RecordMetadata nonTransactional2 = nonTransactionalProducer.send(record).get(); - transactionalProducer.beginTransaction(); - RecordMetadata transactional2 = transactionalProducer.send(record).get(); - transactionalProducer.abortTransaction(); + transactionalProducer.close(); + nonTransactionalProducer.close(); - RecordMetadata nonTransactional2 = nonTransactionalProducer.send(record).get(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - transactionalProducer.close(); - nonTransactionalProducer.close(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(4, records.count()); + 
assertEquals(transactional1.offset(), records.records(tp).get(0).offset()); + assertEquals(nonTransactional1.offset(), records.records(tp).get(1).offset()); + assertEquals(transactional2.offset(), records.records(tp).get(2).offset()); + assertEquals(nonTransactional2.offset(), records.records(tp).get(3).offset()); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); + // There will be control records on the topic-partition, so the offsets of the non-control records + // are not 0, 1, 2, 3. Just assert that the offset of the final one is not 3. + assertNotEquals(3, nonTransactional2.offset()); + + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(4, records.count()); - assertEquals(transactional1.offset(), records.records(tp).get(0).offset()); - assertEquals(nonTransactional1.offset(), records.records(tp).get(1).offset()); - assertEquals(transactional2.offset(), records.records(tp).get(2).offset()); - assertEquals(nonTransactional2.offset(), records.records(tp).get(3).offset()); - - // There will be control records on the topic-partition, so the offsets of the non-control records - // are not 0, 1, 2, 3. Just assert that the offset of the final one is not 3. - assertNotEquals(3, nonTransactional2.offset()); - - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - shareConsumer.close(); - transactionalProducer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeSuccess(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + @Test + public void testExplicitAcknowledgeSuccess() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(shareConsumer::acknowledge); - producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeCommitSuccess(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + 
KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(shareConsumer::acknowledge); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgeCommitSuccess() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(shareConsumer::acknowledge); - producer.send(record); - Map> result = shareConsumer.commitSync(); - assertEquals(1, result.size()); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgementCommitAsync(String persister) throws InterruptedException { - ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record1); - producer.send(record2); - producer.send(record3); - - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); - shareConsumer2.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(shareConsumer::acknowledge); + producer.send(record); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgementCommitAsync() throws InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), 
new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + Map> partitionOffsetsMap1 = new HashMap<>(); + Map partitionExceptionMap1 = new HashMap<>(); + shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); + + ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); + Iterator> iterator = records.iterator(); + + // Acknowledging 2 out of the 3 records received via commitAsync. + ConsumerRecord firstRecord = iterator.next(); + ConsumerRecord secondRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + assertEquals(1L, secondRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + shareConsumer1.acknowledge(secondRecord); + shareConsumer1.commitAsync(); + + // The 3rd record should be reassigned to 2nd consumer when it polls, kept higher wait time + // as time out for locks is 15 secs. + TestUtils.waitForCondition(() -> { + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(1000)); + return records2.count() == 1 && records2.iterator().next().offset() == 2L; + }, 30000, 100L, () -> "Didn't receive timed out record"); - Map> partitionOffsetsMap1 = new HashMap<>(); - Map partitionExceptionMap1 = new HashMap<>(); - shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); - - ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(3, records.count()); - Iterator> iterator = records.iterator(); - - // Acknowledging 2 out of the 3 records received via commitAsync. - ConsumerRecord firstRecord = iterator.next(); - ConsumerRecord secondRecord = iterator.next(); - assertEquals(0L, firstRecord.offset()); - assertEquals(1L, secondRecord.offset()); - - shareConsumer1.acknowledge(firstRecord); - shareConsumer1.acknowledge(secondRecord); - shareConsumer1.commitAsync(); - - // The 3rd record should be reassigned to 2nd consumer when it polls, kept higher wait time - // as time out for locks is 15 secs. - TestUtils.waitForCondition(() -> { - ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(200)); - return records2.count() == 1 && records2.iterator().next().offset() == 2L; - }, 30000, 100L, () -> "Didn't receive timed out record"); - - assertFalse(partitionExceptionMap1.containsKey(tp)); - // The callback will receive the acknowledgement responses asynchronously after the next poll. 
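        // TestableAcknowledgementCommitCallback is a helper defined elsewhere in this test class
        // (outside this hunk). As a rough sketch only, assuming the KIP-932
        // AcknowledgementCommitCallback interface with
        // onComplete(Map<TopicIdPartition, Set<Long>>, Exception), it could record the results
        // roughly like this (java.util imports assumed):
        //
        //     private static class TestableAcknowledgementCommitCallback implements AcknowledgementCommitCallback {
        //         private final Map<TopicPartition, Set<Long>> offsetsMap;
        //         private final Map<TopicPartition, Exception> exceptionMap;
        //
        //         TestableAcknowledgementCommitCallback(Map<TopicPartition, Set<Long>> offsetsMap,
        //                                               Map<TopicPartition, Exception> exceptionMap) {
        //             this.offsetsMap = offsetsMap;
        //             this.exceptionMap = exceptionMap;
        //         }
        //
        //         @Override
        //         public void onComplete(Map<TopicIdPartition, Set<Long>> offsets, Exception exception) {
        //             // Key the results by TopicPartition so the test can look them up with 'tp'.
        //             offsets.forEach((tip, acked) ->
        //                 offsetsMap.computeIfAbsent(tip.topicPartition(), k -> new HashSet<>()).addAll(acked));
        //             offsets.keySet().forEach(tip -> exceptionMap.put(tip.topicPartition(), exception));
        //         }
        //     }
        //
        // The maps passed in are the partitionOffsetsMap/partitionExceptionMap pairs that the
        // assertions below inspect after the callback has fired.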
- shareConsumer1.poll(Duration.ofMillis(500)); - - shareConsumer1.close(); - shareConsumer2.close(); - producer.close(); - - assertTrue(partitionExceptionMap1.containsKey(tp)); - assertNull(partitionExceptionMap1.get(tp)); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgementCommitAsyncPartialBatch(String persister) { - ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record1); - producer.send(record2); - producer.send(record3); - - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); + assertFalse(partitionExceptionMap1.containsKey(tp)); + + // The callback will receive the acknowledgement responses asynchronously after the next poll. + TestUtils.waitForCondition(() -> { + shareConsumer1.poll(Duration.ofMillis(1000)); + return partitionExceptionMap1.containsKey(tp); + }, 30000, 100L, () -> "Didn't receive call to callback"); + + assertNull(partitionExceptionMap1.get(tp)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgementCommitAsyncPartialBatch() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - - ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(3, records.count()); - Iterator> iterator = records.iterator(); - - // Acknowledging 2 out of the 3 records received via commitAsync. - ConsumerRecord firstRecord = iterator.next(); - ConsumerRecord secondRecord = iterator.next(); - assertEquals(0L, firstRecord.offset()); - assertEquals(1L, secondRecord.offset()); - - shareConsumer1.acknowledge(firstRecord); - shareConsumer1.acknowledge(secondRecord); - shareConsumer1.commitAsync(); - - // The 3rd record should be re-presented to the consumer when it polls again. 
- records = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - iterator = records.iterator(); - firstRecord = iterator.next(); - assertEquals(2L, firstRecord.offset()); - - // And poll again without acknowledging - the callback will receive the acknowledgement responses too - records = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - iterator = records.iterator(); - firstRecord = iterator.next(); - assertEquals(2L, firstRecord.offset()); - - shareConsumer1.acknowledge(firstRecord); - - // The callback will receive the acknowledgement responses after polling. The callback is - // called on entry to the poll method or during close. The commit is being performed asynchronously, so - // we can only rely on the completion once the consumer has closed because that waits for the response. - shareConsumer1.poll(Duration.ofMillis(500)); - - shareConsumer1.close(); - producer.close(); - - assertTrue(partitionExceptionMap.containsKey(tp)); - assertNull(partitionExceptionMap.get(tp)); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeReleasePollAccept(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + + ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); + Iterator> iterator = records.iterator(); + + // Acknowledging 2 out of the 3 records received via commitAsync. + ConsumerRecord firstRecord = iterator.next(); + ConsumerRecord secondRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + assertEquals(1L, secondRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + shareConsumer1.acknowledge(secondRecord); + shareConsumer1.commitAsync(); + + // The 3rd record should be re-presented to the consumer when it polls again. + records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + iterator = records.iterator(); + firstRecord = iterator.next(); + assertEquals(2L, firstRecord.offset()); + + // And poll again without acknowledging - the callback will receive the acknowledgement responses too + records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + iterator = records.iterator(); + firstRecord = iterator.next(); + assertEquals(2L, firstRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + + // The callback will receive the acknowledgement responses after polling. The callback is + // called on entry to the poll method or during close. The commit is being performed asynchronously, so + // we can only rely on the completion once the consumer has closed because that waits for the response. 
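        // Two completion patterns are used for commitAsync in this class: either keep polling
        // inside TestUtils.waitForCondition until the callback has been invoked (as in the
        // previous test), or, as here, poll once more and rely on close() to flush the
        // outstanding acknowledgements, since close() waits for the commit response before the
        // callback results are asserted.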
+ shareConsumer1.poll(Duration.ofMillis(500)); + + shareConsumer1.close(); + + assertTrue(partitionExceptionMap.containsKey(tp)); + assertNull(partitionExceptionMap.get(tp)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgeReleasePollAccept() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeReleaseAccept(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgeReleaseAccept() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); - records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeReleaseClose(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - 
producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgeReleaseClose() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testExplicitAcknowledgeThrowsNotInBatch(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testExplicitAcknowledgeThrowsNotInBatch() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - ConsumerRecord consumedRecord = records.records(tp).get(0); - shareConsumer.acknowledge(consumedRecord); - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, 
DEFAULT_STATE_PERSISTER}) - public void testImplicitAcknowledgeFailsExplicit(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + ConsumerRecord consumedRecord = records.records(tp).get(0); + shareConsumer.acknowledge(consumedRecord); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testImplicitAcknowledgeFailsExplicit() { alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - ConsumerRecord consumedRecord = records.records(tp).get(0); - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testImplicitAcknowledgeCommitSync(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + ConsumerRecord consumedRecord = records.records(tp).get(0); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testImplicitAcknowledgeCommitSync() { 
alterShareAutoOffsetReset("group1", "earliest"); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - Map> result = shareConsumer.commitSync(); - assertEquals(1, result.size()); - result = shareConsumer.commitSync(); - assertEquals(0, result.size()); - records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testImplicitAcknowledgementCommitAsync(String persister) throws InterruptedException { - ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record1); - producer.send(record2); - producer.send(record3); - - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + result = shareConsumer.commitSync(); + assertEquals(0, result.size()); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testImplicitAcknowledgementCommitAsync() throws InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); - Map> partitionOffsetsMap1 = new HashMap<>(); - Map partitionExceptionMap1 = new HashMap<>(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); + Map> partitionOffsetsMap1 = new HashMap<>(); + Map partitionExceptionMap1 = new HashMap<>(); - ConsumerRecords records = 
shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(3, records.count()); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); - // Implicitly acknowledging all the records received. - shareConsumer.commitAsync(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); - assertFalse(partitionExceptionMap1.containsKey(tp)); - // The callback will receive the acknowledgement responses after the next poll. - TestUtils.waitForCondition(() -> { - shareConsumer.poll(Duration.ofMillis(1000)); - return partitionExceptionMap1.containsKey(tp); - }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Acknowledgement commit callback did not receive the response yet"); + // Implicitly acknowledging all the records received. + shareConsumer.commitAsync(); - assertNull(partitionExceptionMap1.get(tp)); + assertFalse(partitionExceptionMap1.containsKey(tp)); + // The callback will receive the acknowledgement responses after the next poll. + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(1000)); + return partitionExceptionMap1.containsKey(tp); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Acknowledgement commit callback did not receive the response yet"); - shareConsumer.close(); - producer.close(); + assertNull(partitionExceptionMap1.get(tp)); + verifyShareGroupStateTopicRecordsProduced(); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testFetchRecordLargerThanMaxPartitionFetchBytes(String persister) throws Exception { + @Test + public void testFetchRecordLargerThanMaxPartitionFetchBytes() throws Exception { int maxPartitionFetchBytes = 10000; - ProducerRecord smallRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - ProducerRecord bigRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), new byte[maxPartitionFetchBytes]); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(smallRecord).get(); - producer.send(bigRecord).get(); - - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), - "group1", Collections.singletonMap(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(maxPartitionFetchBytes))); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), + "group1", Collections.singletonMap(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(maxPartitionFetchBytes)))) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } + ProducerRecord smallRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord bigRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), new byte[maxPartitionFetchBytes]); + producer.send(smallRecord).get(); + producer.send(bigRecord).get(); - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - 
public void testMultipleConsumersWithDifferentGroupIds(String persister) throws InterruptedException { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(2, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } - KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2"); - shareConsumer2.subscribe(Collections.singleton(tp.topic())); + @Test + public void testMultipleConsumersWithDifferentGroupIds() throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); alterShareAutoOffsetReset("group2", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2")) { - // producing 3 records to the topic - producer.send(record); - producer.send(record); - producer.send(record); - // Both the consumers should read all the messages, because they are part of different share groups (both have different group IDs) - AtomicInteger shareConsumer1Records = new AtomicInteger(); - AtomicInteger shareConsumer2Records = new AtomicInteger(); - TestUtils.waitForCondition(() -> { - int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); - int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); - return records1 == 3 && records2 == 3; - }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers"); - - producer.send(record); - producer.send(record); - - shareConsumer1Records.set(0); - TestUtils.waitForCondition(() -> shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()) == 2, - DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer 1"); - - producer.send(record); - producer.send(record); - producer.send(record); - - shareConsumer1Records.set(0); - shareConsumer2Records.set(0); - TestUtils.waitForCondition(() -> { - int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); - int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); - return records1 == 3 && records2 == 5; - }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers for the last batch"); - - shareConsumer1.close(); - shareConsumer2.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testMultipleConsumersInGroupSequentialConsumption(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = 
createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); - KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer2.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); - int totalMessages = 2000; - for (int i = 0; i < totalMessages; i++) { + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + // producing 3 records to the topic producer.send(record); - } + producer.send(record); + producer.send(record); + producer.flush(); - int consumer1MessageCount = 0; - int consumer2MessageCount = 0; + // Both the consumers should read all the messages, because they are part of different share groups (both have different group IDs) + AtomicInteger shareConsumer1Records = new AtomicInteger(); + AtomicInteger shareConsumer2Records = new AtomicInteger(); + TestUtils.waitForCondition(() -> { + int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); + int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); + return records1 == 3 && records2 == 3; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers"); - int maxRetries = 10; - int retries = 0; - while (retries < maxRetries) { - ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(2000)); - consumer1MessageCount += records1.count(); - ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(2000)); - consumer2MessageCount += records2.count(); - if (records1.count() + records2.count() == 0) - break; - retries++; + producer.send(record); + producer.send(record); + + shareConsumer1Records.set(0); + TestUtils.waitForCondition(() -> shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()) == 2, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer 1"); + + producer.send(record); + producer.send(record); + producer.send(record); + + shareConsumer1Records.set(0); + shareConsumer2Records.set(0); + TestUtils.waitForCondition(() -> { + int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); + int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); + return records1 == 3 && records2 == 5; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers for the last batch"); + verifyShareGroupStateTopicRecordsProduced(); } + } + + @Test + public void testMultipleConsumersInGroupSequentialConsumption() { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + 
shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + int totalMessages = 2000; + for (int i = 0; i < totalMessages; i++) { + producer.send(record); + } + producer.flush(); + + int consumer1MessageCount = 0; + int consumer2MessageCount = 0; + + int maxRetries = 10; + int retries = 0; + while (retries < maxRetries) { + ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(2000)); + consumer1MessageCount += records1.count(); + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(2000)); + consumer2MessageCount += records2.count(); + if (records1.count() + records2.count() == 0) + break; + retries++; + } - assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); - shareConsumer1.close(); - shareConsumer2.close(); - producer.close(); + assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testMultipleConsumersInGroupConcurrentConsumption(String persister) { + @Flaky("KAFKA-18033") + @Test + public void testMultipleConsumersInGroupConcurrentConsumption() + throws InterruptedException, ExecutionException, TimeoutException { AtomicInteger totalMessagesConsumed = new AtomicInteger(0); int consumerCount = 4; @@ -990,56 +1002,38 @@ public void testMultipleConsumersInGroupConcurrentConsumption(String persister) int messagesPerProducer = 5000; String groupId = "group1"; - - ExecutorService producerExecutorService = Executors.newFixedThreadPool(producerCount); - ExecutorService consumerExecutorService = Executors.newFixedThreadPool(consumerCount); - - // This consumer is created to register the share group id with the groupCoordinator - // so that the config share.auto.offset.reset can be altered for this group - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId); alterShareAutoOffsetReset(groupId, "earliest"); + List> producerFutures = new ArrayList<>(); for (int i = 0; i < producerCount; i++) { - producerExecutorService.submit(() -> produceMessages(messagesPerProducer)); + producerFutures.add(CompletableFuture.runAsync(() -> produceMessages(messagesPerProducer))); } - ConcurrentLinkedQueue> futures = new ConcurrentLinkedQueue<>(); int maxBytes = 100000; + List> consumerFutures = new ArrayList<>(); for (int i = 0; i < consumerCount; i++) { final int consumerNumber = i + 1; - consumerExecutorService.submit(() -> { - CompletableFuture future = new CompletableFuture<>(); - futures.add(future); - consumeMessages(totalMessagesConsumed, producerCount * messagesPerProducer, groupId, consumerNumber, 30, true, future, Optional.of(maxBytes)); - }); + consumerFutures.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumed, + producerCount * messagesPerProducer, groupId, consumerNumber, + 30, true, maxBytes))); } - producerExecutorService.shutdown(); - consumerExecutorService.shutdown(); + CompletableFuture.allOf(producerFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + CompletableFuture.allOf(consumerFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); - try { - assertTrue(producerExecutorService.awaitTermination(60, TimeUnit.SECONDS)); // Wait for all producer threads to complete - assertTrue(consumerExecutorService.awaitTermination(60, TimeUnit.SECONDS)); // Wait for all consumer threads to complete - int totalResult = 0; - for 
(CompletableFuture future : futures) { - totalResult += future.get(); - } - assertEquals(producerCount * messagesPerProducer, totalMessagesConsumed.get()); - assertEquals(producerCount * messagesPerProducer, totalResult); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); - } + int totalResult = consumerFutures.stream().mapToInt(CompletableFuture::join).sum(); + assertEquals(producerCount * messagesPerProducer, totalResult); } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testMultipleConsumersInMultipleGroupsConcurrentConsumption(String persister) { + @Test + public void testMultipleConsumersInMultipleGroupsConcurrentConsumption() + throws ExecutionException, InterruptedException, TimeoutException { AtomicInteger totalMessagesConsumedGroup1 = new AtomicInteger(0); AtomicInteger totalMessagesConsumedGroup2 = new AtomicInteger(0); AtomicInteger totalMessagesConsumedGroup3 = new AtomicInteger(0); int producerCount = 4; - int consumerCount = 2; int messagesPerProducer = 2000; final int totalMessagesSent = producerCount * messagesPerProducer; @@ -1047,159 +1041,104 @@ public void testMultipleConsumersInMultipleGroupsConcurrentConsumption(String pe String groupId2 = "group2"; String groupId3 = "group3"; - // These consumers are created to register the share group ids with the groupCoordinator - // so that the config share.auto.offset.reset can be altered for these groups - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId1); alterShareAutoOffsetReset(groupId1, "earliest"); - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId2); alterShareAutoOffsetReset(groupId2, "earliest"); - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId3); alterShareAutoOffsetReset(groupId3, "earliest"); - ExecutorService producerExecutorService = Executors.newFixedThreadPool(producerCount); - ExecutorService shareGroupExecutorService1 = Executors.newFixedThreadPool(consumerCount); - ExecutorService shareGroupExecutorService2 = Executors.newFixedThreadPool(consumerCount); - ExecutorService shareGroupExecutorService3 = Executors.newFixedThreadPool(consumerCount); - - CountDownLatch startSignal = new CountDownLatch(producerCount); - - ConcurrentLinkedQueue> producerFutures = new ConcurrentLinkedQueue<>(); - + List> producerFutures = new ArrayList<>(); for (int i = 0; i < producerCount; i++) { - producerExecutorService.submit(() -> { - CompletableFuture future = produceMessages(messagesPerProducer); - producerFutures.add(future); - startSignal.countDown(); - }); + producerFutures.add(CompletableFuture.supplyAsync(() -> produceMessages(messagesPerProducer))); } - - ConcurrentLinkedQueue> futures1 = new ConcurrentLinkedQueue<>(); - ConcurrentLinkedQueue> futures2 = new ConcurrentLinkedQueue<>(); - ConcurrentLinkedQueue> futures3 = new ConcurrentLinkedQueue<>(); - // Wait for the producers to run - try { - boolean signalled = startSignal.await(15, TimeUnit.SECONDS); - assertTrue(signalled); - } catch (InterruptedException e) { - fail("Exception awaiting start signal"); - } + assertDoesNotThrow(() -> CompletableFuture.allOf(producerFutures.toArray(CompletableFuture[]::new)) + .get(15, TimeUnit.SECONDS), "Exception awaiting produceMessages"); + int actualMessageSent = producerFutures.stream().mapToInt(CompletableFuture::join).sum(); - int maxBytes = 100000; + List> consumeMessagesFutures1 = new 
ArrayList<>(); + List> consumeMessagesFutures2 = new ArrayList<>(); + List> consumeMessagesFutures3 = new ArrayList<>(); - for (int i = 0; i < consumerCount; i++) { + int maxBytes = 100000; + for (int i = 0; i < 2; i++) { final int consumerNumber = i + 1; - shareGroupExecutorService1.submit(() -> { - CompletableFuture future = new CompletableFuture<>(); - futures1.add(future); - consumeMessages(totalMessagesConsumedGroup1, totalMessagesSent, "group1", consumerNumber, 100, true, future, Optional.of(maxBytes)); - }); - shareGroupExecutorService2.submit(() -> { - CompletableFuture future = new CompletableFuture<>(); - futures2.add(future); - consumeMessages(totalMessagesConsumedGroup2, totalMessagesSent, "group2", consumerNumber, 100, true, future, Optional.of(maxBytes)); - }); - shareGroupExecutorService3.submit(() -> { - CompletableFuture future = new CompletableFuture<>(); - futures3.add(future); - consumeMessages(totalMessagesConsumedGroup3, totalMessagesSent, "group3", consumerNumber, 100, true, future, Optional.of(maxBytes)); - }); + consumeMessagesFutures1.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup1, totalMessagesSent, + "group1", consumerNumber, 100, true, maxBytes))); + + consumeMessagesFutures2.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup2, totalMessagesSent, + "group2", consumerNumber, 100, true, maxBytes))); + + consumeMessagesFutures3.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup3, totalMessagesSent, + "group3", consumerNumber, 100, true, maxBytes))); } - producerExecutorService.shutdown(); - shareGroupExecutorService1.shutdown(); - shareGroupExecutorService2.shutdown(); - shareGroupExecutorService3.shutdown(); - try { - shareGroupExecutorService1.awaitTermination(120, TimeUnit.SECONDS); // Wait for all consumer threads for group 1 to complete - shareGroupExecutorService2.awaitTermination(120, TimeUnit.SECONDS); // Wait for all consumer threads for group 2 to complete - shareGroupExecutorService3.awaitTermination(120, TimeUnit.SECONDS); // Wait for all consumer threads for group 3 to complete - int totalResult1 = 0; - for (CompletableFuture future : futures1) { - totalResult1 += future.get(); - } + CompletableFuture.allOf(Stream.of(consumeMessagesFutures1.stream(), consumeMessagesFutures2.stream(), + consumeMessagesFutures3.stream()).flatMap(Function.identity()).toArray(CompletableFuture[]::new)) + .get(120, TimeUnit.SECONDS); - int totalResult2 = 0; - for (CompletableFuture future : futures2) { - totalResult2 += future.get(); - } + int totalResult1 = consumeMessagesFutures1.stream().mapToInt(CompletableFuture::join).sum(); + int totalResult2 = consumeMessagesFutures2.stream().mapToInt(CompletableFuture::join).sum(); + int totalResult3 = consumeMessagesFutures3.stream().mapToInt(CompletableFuture::join).sum(); - int totalResult3 = 0; - for (CompletableFuture future : futures3) { - totalResult3 += future.get(); - } + assertEquals(totalMessagesSent, totalResult1); + assertEquals(totalMessagesSent, totalResult2); + assertEquals(totalMessagesSent, totalResult3); + assertEquals(totalMessagesSent, actualMessageSent); + verifyShareGroupStateTopicRecordsProduced(); + } - assertEquals(totalMessagesSent, totalMessagesConsumedGroup1.get()); - assertEquals(totalMessagesSent, totalMessagesConsumedGroup2.get()); - assertEquals(totalMessagesSent, totalMessagesConsumedGroup3.get()); - assertEquals(totalMessagesSent, totalResult1); - assertEquals(totalMessagesSent, 
totalResult2); - assertEquals(totalMessagesSent, totalResult3); + @Test + public void testConsumerCloseInGroupSequential() { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - int actualMessagesSent = 0; - try { - producerExecutorService.awaitTermination(60, TimeUnit.SECONDS); // Wait for all producer threads to complete + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); - for (CompletableFuture future : producerFutures) { - actualMessagesSent += future.get(); - } - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); + int totalMessages = 1500; + for (int i = 0; i < totalMessages; i++) { + producer.send(record); } - assertEquals(totalMessagesSent, actualMessagesSent); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); - } - } + producer.close(); - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testConsumerCloseInGroupSequential(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); - KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer2.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); + int consumer1MessageCount = 0; + int consumer2MessageCount = 0; - int totalMessages = 1500; - for (int i = 0; i < totalMessages; i++) { - producer.send(record); + // Poll three times to receive records. The second poll acknowledges the records + // from the first poll, and so on. The third poll's records are not acknowledged + // because the consumer is closed, which makes the broker release the records fetched. 
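        // In the polls below, consumer1MessageCount intentionally excludes the third poll's
        // records: that batch is never acknowledged and is released by the broker when
        // shareConsumer1 closes, so it is redelivered to shareConsumer2. That is why the two
        // counts still sum to totalMessages in the final assertion.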
+ ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + consumer1MessageCount += records1.count(); + int consumer1MessageCountA = records1.count(); + records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + consumer1MessageCount += records1.count(); + int consumer1MessageCountB = records1.count(); + records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + int consumer1MessageCountC = records1.count(); + assertEquals(totalMessages, consumer1MessageCountA + consumer1MessageCountB + consumer1MessageCountC); + shareConsumer1.close(); + + int maxRetries = 10; + int retries = 0; + while (consumer1MessageCount + consumer2MessageCount < totalMessages && retries < maxRetries) { + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(5000)); + consumer2MessageCount += records2.count(); + retries++; + } + shareConsumer2.close(); + assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); + verifyShareGroupStateTopicRecordsProduced(); } - producer.close(); - - int consumer1MessageCount = 0; - int consumer2MessageCount = 0; - - // Poll three times to receive records. The second poll acknowledges the records - // from the first poll, and so on. The third poll's records are not acknowledged - // because the consumer is closed, which makes the broker release the records fetched. - ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - consumer1MessageCount += records1.count(); - int consumer1MessageCountA = records1.count(); - records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - consumer1MessageCount += records1.count(); - int consumer1MessageCountB = records1.count(); - records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - int consumer1MessageCountC = records1.count(); - assertEquals(totalMessages, consumer1MessageCountA + consumer1MessageCountB + consumer1MessageCountC); - shareConsumer1.close(); - - int maxRetries = 10; - int retries = 0; - while (consumer1MessageCount + consumer2MessageCount < totalMessages && retries < maxRetries) { - ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(5000)); - consumer2MessageCount += records2.count(); - retries++; - } - shareConsumer2.close(); - assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testMultipleConsumersInGroupFailureConcurrentConsumption(String persister) { + } + + @Test + public void testMultipleConsumersInGroupFailureConcurrentConsumption() + throws InterruptedException, ExecutionException, TimeoutException { AtomicInteger totalMessagesConsumed = new AtomicInteger(0); int consumerCount = 4; @@ -1208,143 +1147,131 @@ public void testMultipleConsumersInGroupFailureConcurrentConsumption(String pers String groupId = "group1"; - // This consumer is created to register the share group id with the groupCoordinator - // so that the config share.auto.offset.reset can be altered for this group - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId); alterShareAutoOffsetReset(groupId, "earliest"); - ExecutorService consumerExecutorService = Executors.newFixedThreadPool(consumerCount); - ExecutorService producerExecutorService = Executors.newFixedThreadPool(producerCount); - + List> produceMessageFutures = new ArrayList<>(); for (int i = 0; i < producerCount; i++) { - Runnable task = () -> produceMessages(messagesPerProducer); - 
producerExecutorService.submit(task); + produceMessageFutures.add(CompletableFuture.runAsync(() -> produceMessages(messagesPerProducer))); } - ConcurrentLinkedQueue> futuresSuccess = new ConcurrentLinkedQueue<>(); - - CountDownLatch startSignal = new CountDownLatch(1); - int maxBytes = 1000000; - consumerExecutorService.submit(() -> { - // The "failing" consumer polls but immediately closes, which releases the records for the other consumers - CompletableFuture future = new CompletableFuture<>(); - AtomicInteger failedMessagesConsumed = new AtomicInteger(0); - consumeMessages(failedMessagesConsumed, producerCount * messagesPerProducer, groupId, 0, 1, false, future); - startSignal.countDown(); - }); + // The "failing" consumer polls but immediately closes, which releases the records for the other consumers + CompletableFuture failedMessagesConsumedFuture = CompletableFuture.supplyAsync( + () -> consumeMessages(new AtomicInteger(0), producerCount * messagesPerProducer, groupId, + 0, 1, false)); // Wait for the failed consumer to run - try { - boolean signalled = startSignal.await(15, TimeUnit.SECONDS); - assertTrue(signalled); - } catch (InterruptedException e) { - fail("Exception awaiting start signal"); - } + assertDoesNotThrow(() -> failedMessagesConsumedFuture.get(15, TimeUnit.SECONDS), + "Exception awaiting consumeMessages"); + List> consumeMessagesFutures = new ArrayList<>(); for (int i = 0; i < consumerCount; i++) { final int consumerNumber = i + 1; - consumerExecutorService.submit(() -> { - CompletableFuture future = new CompletableFuture<>(); - futuresSuccess.add(future); - consumeMessages(totalMessagesConsumed, producerCount * messagesPerProducer, groupId, consumerNumber, 40, true, future, Optional.of(maxBytes)); - }); + consumeMessagesFutures.add(CompletableFuture.supplyAsync( + () -> consumeMessages(totalMessagesConsumed, producerCount * messagesPerProducer, + groupId, consumerNumber, 40, true, maxBytes))); } - producerExecutorService.shutdown(); - consumerExecutorService.shutdown(); - try { - producerExecutorService.awaitTermination(60, TimeUnit.SECONDS); // Wait for all producer threads to complete - consumerExecutorService.awaitTermination(60, TimeUnit.SECONDS); // Wait for all consumer threads to complete - int totalSuccessResult = 0; - for (CompletableFuture future : futuresSuccess) { - totalSuccessResult += future.get(); - } - assertEquals(producerCount * messagesPerProducer, totalMessagesConsumed.get()); - assertEquals(producerCount * messagesPerProducer, totalSuccessResult); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); - } - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcquisitionLockTimeoutOnConsumer(String persister) throws InterruptedException { - ProducerRecord producerRecord1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, - "key_1".getBytes(), "value_1".getBytes()); - ProducerRecord producerRecord2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, - "key_2".getBytes(), "value_2".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer1.subscribe(Collections.singleton(tp.topic())); - alterShareAutoOffsetReset("group1", "earliest"); - producer.send(producerRecord1); + 
CompletableFuture.allOf(produceMessageFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + CompletableFuture.allOf(consumeMessagesFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); - // Poll two times to receive records. The first poll puts the acquisition lock and fetches the record. - // Since, we are only sending one record and acquisition lock hasn't timed out, the second poll only acknowledges the - // record from the first poll and no more fetch. - ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(1, records1.count()); - assertEquals("key_1", new String(records1.iterator().next().key())); - assertEquals("value_1", new String(records1.iterator().next().value())); - ConsumerRecords records2 = shareConsumer1.poll(Duration.ofMillis(500)); - assertEquals(0, records2.count()); + int totalSuccessResult = consumeMessagesFutures.stream().mapToInt(CompletableFuture::join).sum(); + assertEquals(producerCount * messagesPerProducer, totalSuccessResult); + verifyShareGroupStateTopicRecordsProduced(); + } - producer.send(producerRecord2); + @Test + public void testAcquisitionLockTimeoutOnConsumer() throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - // Poll three times. The first poll puts the acquisition lock and fetches the record. Before the second poll, - // acquisition lock times out and hence the consumer needs to fetch the record again. Since, the acquisition lock - // hasn't timed out before the third poll, the third poll only acknowledges the record from the second poll and no more fetch. - records1 = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(1, records1.count()); - assertEquals("key_2", new String(records1.iterator().next().key())); - assertEquals("value_2", new String(records1.iterator().next().value())); + ProducerRecord producerRecord1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key_1".getBytes(), "value_1".getBytes()); + ProducerRecord producerRecord2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key_2".getBytes(), "value_2".getBytes()); + shareConsumer.subscribe(Set.of(tp.topic())); - // Allowing acquisition lock to expire. - Thread.sleep(20000); + // Produce a first record which is consumed and acknowledged normally. + producer.send(producerRecord1); + producer.flush(); - records2 = shareConsumer1.poll(Duration.ofMillis(5000)); - assertEquals(1, records2.count()); - assertEquals("key_2", new String(records2.iterator().next().key())); - assertEquals("value_2", new String(records2.iterator().next().value())); - ConsumerRecords records3 = shareConsumer1.poll(Duration.ofMillis(500)); - assertEquals(0, records3.count()); + // Poll twice to receive records. The first poll fetches the record and starts the acquisition lock timer. + // Since, we are only sending one record and the acquisition lock hasn't timed out, the second poll only + // acknowledges the record from the first poll and does not fetch any more records. 
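// A minimal sketch (not taken from this patch) of the implicit acknowledgement flow the comments
// above and below describe, reusing this class's createShareConsumer helper and the tp field; the
// group id and timeouts are illustrative. Each poll acquires records and starts their acquisition
// lock timer; the next poll (or an explicit commitSync) acknowledges the previously returned records.
try (KafkaShareConsumer<byte[], byte[]> sketchConsumer =
         createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "sketch-group")) {
    sketchConsumer.subscribe(Set.of(tp.topic()));
    ConsumerRecords<byte[], byte[]> first = sketchConsumer.poll(Duration.ofMillis(5000));  // acquires records, lock timer starts
    ConsumerRecords<byte[], byte[]> second = sketchConsumer.poll(Duration.ofMillis(500));  // implicitly acknowledges 'first'
    sketchConsumer.commitSync(Duration.ofMillis(10000));                                   // acknowledges 'second' before close
}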
+ ConsumerRecords consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + ConsumerRecord consumerRecord = consumerRecords.records(tp).get(0); + assertEquals("key_1", new String(consumerRecord.key())); + assertEquals("value_1", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(1000)); + assertEquals(0, consumerRecords.count()); + + // Produce a second record which is fetched, but not acknowledged before it times out. The record will + // be released automatically by the broker. It is then fetched again and acknowledged normally. + producer.send(producerRecord2); + producer.flush(); - producer.close(); - shareConsumer1.close(); + // Poll three more times. The first poll fetches the second record and starts the acquisition lock timer. + // Before the second poll, acquisition lock times out and hence the consumer needs to fetch the record again. + // The acquisition lock doesn't time out between the second and third polls, so the third poll only acknowledges + // the record from the second poll and does not fetch any more records. + consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + consumerRecord = consumerRecords.records(tp).get(0); + assertEquals("key_2", new String(consumerRecord.key())); + assertEquals("value_2", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + // Allow the acquisition lock to time out. + Thread.sleep(20000); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + consumerRecord = consumerRecords.records(tp).get(0); + // By checking the key and value before the count, we get a bit more information if too many records are returned. + // This test has been observed to fail very occasionally because of this. + assertEquals("key_2", new String(consumerRecord.key())); + assertEquals("value_2", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(1000)); + assertEquals(0, consumerRecords.count()); + verifyShareGroupStateTopicRecordsProduced(); + } } /** * Test to verify that the acknowledgement commit callback cannot invoke methods of KafkaShareConsumer. 
- * The exception thrown is verified in {@link TestableAcknowledgeCommitCallbackWithShareConsumer} + * The exception thrown is verified in {@link TestableAcknowledgementCommitCallbackWithShareConsumer} */ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgeCommitCallbackCallsShareConsumerDisallowed(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + @Test + public void testAcknowledgementCommitCallbackCallsShareConsumerDisallowed() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallbackWithShareConsumer<>(shareConsumer)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWithShareConsumer<>(shareConsumer)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - // The acknowledgment commit callback will try to call a method of KafkaShareConsumer - shareConsumer.poll(Duration.ofMillis(5000)); - // The second poll sends the acknowledgments implicitly. - // The acknowledgement commit callback will be called and the exception is thrown. - // This is verified inside the onComplete() method implementation. - shareConsumer.poll(Duration.ofMillis(500)); - shareConsumer.close(); - producer.close(); + // The acknowledgment commit callback will try to call a method of KafkaShareConsumer + shareConsumer.poll(Duration.ofMillis(5000)); + // The second poll sends the acknowledgements implicitly. + // The acknowledgement commit callback will be called and the exception is thrown. + // This is verified inside the onComplete() method implementation. + shareConsumer.poll(Duration.ofMillis(500)); + verifyShareGroupStateTopicRecordsProduced(); + } } - private class TestableAcknowledgeCommitCallbackWithShareConsumer implements AcknowledgementCommitCallback { + private class TestableAcknowledgementCommitCallbackWithShareConsumer implements AcknowledgementCommitCallback { private final KafkaShareConsumer shareConsumer; - TestableAcknowledgeCommitCallbackWithShareConsumer(KafkaShareConsumer shareConsumer) { + TestableAcknowledgementCommitCallbackWithShareConsumer(KafkaShareConsumer shareConsumer) { this.shareConsumer = shareConsumer; } @@ -1361,33 +1288,45 @@ public void onComplete(Map> offsetsMap, Exception ex * Test to verify that the acknowledgement commit callback can invoke KafkaShareConsumer.wakeup() and it * wakes up the enclosing poll. 
*/ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgeCommitCallbackCallsShareConsumerWakeup(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + @Test + public void testAcknowledgementCommitCallbackCallsShareConsumerWakeup() throws InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + // The acknowledgment commit callback will try to call a method of KafkaShareConsumer + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWakeup<>(shareConsumer)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - // The acknowledgment commit callback will try to call a method of KafkaShareConsumer - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallbackWakeup<>(shareConsumer)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); - shareConsumer.poll(Duration.ofMillis(5000)); - // The second poll sends the acknowledgments implicitly. - shareConsumer.poll(Duration.ofMillis(1000)); - // Till now acknowledgement commit callback has not been called, so no exception thrown yet. - // On 3rd poll, the acknowledgement commit callback will be called and the exception is thrown. - assertThrows(WakeupException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); - shareConsumer.close(); - producer.close(); + // The second poll sends the acknowledgments implicitly. + shareConsumer.poll(Duration.ofMillis(2000)); + + // Till now acknowledgement commit callback has not been called, so no exception thrown yet. + // On 3rd poll, the acknowledgement commit callback will be called and the exception is thrown. 
+ AtomicBoolean exceptionThrown = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + try { + shareConsumer.poll(Duration.ofMillis(500)); + } catch (org.apache.kafka.common.errors.WakeupException e) { + exceptionThrown.set(true); + } + return exceptionThrown.get(); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive expected exception"); + verifyShareGroupStateTopicRecordsProduced(); + } } - private static class TestableAcknowledgeCommitCallbackWakeup implements AcknowledgementCommitCallback { + private static class TestableAcknowledgementCommitCallbackWakeup implements AcknowledgementCommitCallback { private final KafkaShareConsumer shareConsumer; - TestableAcknowledgeCommitCallbackWakeup(KafkaShareConsumer shareConsumer) { + TestableAcknowledgementCommitCallbackWakeup(KafkaShareConsumer shareConsumer) { this.shareConsumer = shareConsumer; } @@ -1401,34 +1340,39 @@ public void onComplete(Map> offsetsMap, Exception ex * Test to verify that the acknowledgement commit callback can throw an exception, and it is propagated * to the caller of poll(). */ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testAcknowledgeCommitCallbackThrowsException(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + @Test + public void testAcknowledgementCommitCallbackThrowsException() throws InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallbackThrows<>()); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - - shareConsumer.poll(Duration.ofMillis(5000)); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - // The second poll sends the acknowledgments implicitly. - shareConsumer.poll(Duration.ofMillis(1000)); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackThrows<>()); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - // On the third poll, the acknowledgement commit callback will be called and the exception is thrown. 
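// A minimal sketch (illustration only, not from this patch): the acknowledgement commit callback is
// invoked from within a later poll(), so failures, or anything the callback throws, surface from that
// poll() rather than at the point the acknowledgement was sent, which is why these tests keep polling
// until the expected exception appears. Assuming the single-method AcknowledgementCommitCallback
// interface used above, a lambda can simply record the outcome for a later assertion.
AtomicBoolean ackFailed = new AtomicBoolean(false);
shareConsumer.setAcknowledgementCommitCallback((offsets, exception) -> {
    if (exception != null) {
        ackFailed.set(true);   // checked after a subsequent poll()
    }
});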
- assertThrows(org.apache.kafka.common.errors.OutOfOrderSequenceException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); - shareConsumer.close(); - producer.close(); + AtomicBoolean exceptionThrown = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + try { + shareConsumer.poll(Duration.ofMillis(500)); + } catch (org.apache.kafka.common.errors.OutOfOrderSequenceException e) { + exceptionThrown.set(true); + } + return exceptionThrown.get(); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive expected exception"); + verifyShareGroupStateTopicRecordsProduced(); + } } - private static class TestableAcknowledgeCommitCallbackThrows implements AcknowledgementCommitCallback { + private static class TestableAcknowledgementCommitCallbackThrows implements AcknowledgementCommitCallback { @Override public void onComplete(Map> offsetsMap, Exception exception) { - throw new org.apache.kafka.common.errors.OutOfOrderSequenceException("Exception thrown in TestableAcknowledgeCommitCallbackThrows.onComplete"); + throw new org.apache.kafka.common.errors.OutOfOrderSequenceException("Exception thrown in TestableAcknowledgementCommitCallbackThrows.onComplete"); } } @@ -1436,354 +1380,375 @@ public void onComplete(Map> offsetsMap, Exception ex * Test to verify that calling Thread.interrupt() before KafkaShareConsumer.poll(Duration) * causes it to throw InterruptException */ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testPollThrowsInterruptExceptionIfInterrupted(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + @Test + public void testPollThrowsInterruptExceptionIfInterrupted() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - // interrupt the thread and call poll - try { - Thread.currentThread().interrupt(); - assertThrows(InterruptException.class, () -> shareConsumer.poll(Duration.ZERO)); - } finally { - // clear interrupted state again since this thread may be reused by JUnit - Thread.interrupted(); - } + shareConsumer.subscribe(Collections.singleton(tp.topic())); - assertDoesNotThrow(() -> shareConsumer.poll(Duration.ZERO)); - shareConsumer.close(); + // interrupt the thread and call poll + try { + Thread.currentThread().interrupt(); + assertThrows(InterruptException.class, () -> shareConsumer.poll(Duration.ZERO)); + } finally { + // clear interrupted state again since this thread may be reused by JUnit + Thread.interrupted(); + } + + assertDoesNotThrow(() -> shareConsumer.poll(Duration.ZERO), "Failed to consume records"); + } } /** * Test to verify that InvalidTopicException is thrown if the consumer subscribes * to an invalid topic. 
*/ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscribeOnInvalidTopicThrowsInvalidTopicException(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton("topic abc")); + @Test + public void testSubscribeOnInvalidTopicThrowsInvalidTopicException() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + shareConsumer.subscribe(Collections.singleton("topic abc")); - // The exception depends upon a metadata response which arrives asynchronously. If the delay is - // too short, the poll might return before the error is known. - assertThrows(InvalidTopicException.class, () -> shareConsumer.poll(Duration.ofMillis(10000))); - shareConsumer.close(); + // The exception depends upon a metadata response which arrives asynchronously. If the delay is + // too short, the poll might return before the error is known. + assertThrows(InvalidTopicException.class, () -> shareConsumer.poll(Duration.ofMillis(10000))); + } } /** * Test to ensure that a wakeup when records are buffered doesn't prevent the records * being returned on the next poll. */ - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testWakeupWithFetchedRecordsAvailable(String persister) { - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - producer.send(record); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); + @Test + public void testWakeupWithFetchedRecordsAvailable() { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - shareConsumer.wakeup(); - assertThrows(WakeupException.class, () -> shareConsumer.poll(Duration.ZERO)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); + shareConsumer.wakeup(); + assertThrows(WakeupException.class, () -> shareConsumer.poll(Duration.ZERO)); - shareConsumer.close(); - producer.close(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscriptionFollowedByTopicCreation(String persister) throws InterruptedException { - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - KafkaShareConsumer shareConsumer = createShareConsumer(new 
ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - String topic = "foo"; - shareConsumer.subscribe(Collections.singleton(topic)); + @Test + public void testSubscriptionFollowedByTopicCreation() throws InterruptedException { alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - // Topic is created post creation of share consumer and subscription - createTopic(topic); + String topic = "foo"; + shareConsumer.subscribe(Collections.singleton(topic)); - ProducerRecord record = new ProducerRecord<>(topic, 0, null, "key".getBytes(), "value".getBytes()); - producer.send(record); + // Topic is created post creation of share consumer and subscription + createTopic(topic); + + ProducerRecord record = new ProducerRecord<>(topic, 0, null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer, metadata sync failed"); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer, metadata sync failed"); - producer.send(record); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); + producer.send(record); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } } - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testSubscriptionAndPollFollowedByTopicDeletion(String persister) throws InterruptedException, ExecutionException { + @Test + public void testSubscriptionAndPollFollowedByTopicDeletion() throws InterruptedException, ExecutionException { String topic1 = "bar"; String topic2 = "baz"; createTopic(topic1); createTopic(topic2); - ProducerRecord recordTopic1 = new ProducerRecord<>(topic1, 0, null, "key".getBytes(), "value".getBytes()); - ProducerRecord recordTopic2 = new ProducerRecord<>(topic2, 0, null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - // Consumer subscribes to the topics -> bar and baz. 
- shareConsumer.subscribe(Arrays.asList(topic1, topic2)); alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { - producer.send(recordTopic1).get(); - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + ProducerRecord recordTopic1 = new ProducerRecord<>(topic1, 0, null, "key".getBytes(), "value".getBytes()); + ProducerRecord recordTopic2 = new ProducerRecord<>(topic2, 0, null, "key".getBytes(), "value".getBytes()); - producer.send(recordTopic2); - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + // Consumer subscribes to the topics -> bar and baz. + shareConsumer.subscribe(Arrays.asList(topic1, topic2)); - // Topic bar is deleted, hence poll should not give any results. - deleteTopic(topic1); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); - assertEquals(0, records.count()); + producer.send(recordTopic1).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); - producer.send(recordTopic2); - // Poll should give the record from the non-deleted topic baz. - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + producer.send(recordTopic2).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); - producer.send(recordTopic2); - TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, - DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); - shareConsumer.close(); - producer.close(); - } + // Topic bar is deleted, hence poll should not give any results. + deleteTopic(topic1); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testLsoMovementByRecordsDeletion(String persister) { - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - ProducerRecord record = new ProducerRecord<>(tp.topic(), 0, null, "key".getBytes(), "value".getBytes()); + producer.send(recordTopic2).get(); + // Poll should give the record from the non-deleted topic baz. 
+ TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + producer.send(recordTopic2).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testLsoMovementByRecordsDeletion() { String groupId = "group1"; - // This consumer is created to register the share group id with the groupCoordinator - // so that the config share.auto.offset.reset can be altered for this group - createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId); alterShareAutoOffsetReset(groupId, "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { - // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. - try { + ProducerRecord record = new ProducerRecord<>(tp.topic(), 0, null, "key".getBytes(), "value".getBytes()); + + // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. for (int i = 0; i < 10; i++) { - producer.send(record).get(); + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); } - } catch (Exception e) { - fail("Failed to send records: " + e); - } - // We delete records before offset 5, so the LSO should move to 5. - adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); + // We delete records before offset 5, so the LSO should move to 5. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); - AtomicInteger totalMessagesConsumed = new AtomicInteger(0); - CompletableFuture future = new CompletableFuture<>(); - consumeMessages(totalMessagesConsumed, 5, groupId, 1, 10, true, future); - // The records returned belong to offsets 5-9. - assertEquals(5, totalMessagesConsumed.get()); - try { - assertEquals(5, future.get()); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); - } + int messageCount = consumeMessages(new AtomicInteger(0), 5, groupId, 1, 10, true); + // The records returned belong to offsets 5-9. + assertEquals(5, messageCount); - // We write 5 records to the topic, so they would be written from offsets 10-14 on the topic. - try { + // We write 5 records to the topic, so they would be written from offsets 10-14 on the topic. for (int i = 0; i < 5; i++) { - producer.send(record).get(); + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); } - } catch (Exception e) { - fail("Failed to send records: " + e); - } - // We delete records before offset 14, so the LSO should move to 14. - adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(14L))); + // We delete records before offset 14, so the LSO should move to 14. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(14L))); - totalMessagesConsumed = new AtomicInteger(0); - future = new CompletableFuture<>(); - consumeMessages(totalMessagesConsumed, 1, groupId, 1, 10, true, future); - // The record returned belong to offset 14. 
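// A minimal sketch (not from this patch): the log start offset movement caused by deleteRecords()
// can also be observed directly through the Admin API, assuming the corresponding admin imports
// (OffsetSpec, ListOffsetsResult) are available in this class. OffsetSpec.earliest() reports the
// current log start offset, so after deleteRecords(...beforeOffset(5L)) it should be 5.
long logStartOffset = assertDoesNotThrow(() ->
    adminClient.listOffsets(Map.of(tp, OffsetSpec.earliest())).partitionResult(tp).get().offset());
assertEquals(5L, logStartOffset);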
- assertEquals(1, totalMessagesConsumed.get()); - try { - assertEquals(1, future.get()); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); + int consumeMessagesCount = consumeMessages(new AtomicInteger(0), 1, groupId, 1, 10, true); + // The record returned belong to offset 14. + assertEquals(1, consumeMessagesCount); + + // We delete records before offset 15, so the LSO should move to 15 and now no records should be returned. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(15L))); + + messageCount = consumeMessages(new AtomicInteger(0), 0, groupId, 1, 5, true); + assertEquals(0, messageCount); + verifyShareGroupStateTopicRecordsProduced(); } + } - // We delete records before offset 15, so the LSO should move to 15 and now no records should be returned. - adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(15L))); + @Test + public void testShareAutoOffsetResetDefaultValue() { + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { - totalMessagesConsumed = new AtomicInteger(0); - future = new CompletableFuture<>(); - consumeMessages(totalMessagesConsumed, 0, groupId, 1, 5, true, future); - assertEquals(0, totalMessagesConsumed.get()); - try { - assertEquals(0, future.get()); - } catch (Exception e) { - fail("Exception occurred : " + e.getMessage()); - } - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testShareAutoOffsetResetDefaultValue(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - // Producing a record. - producer.send(record); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - // No records should be consumed because share.auto.offset.reset has a default of "latest". Since the record - // was produced before share partition was initialized (which happens after the first share fetch request - // in the poll method), the start offset would be the latest offset, i.e. 1 (the next offset after the already - // present 0th record) - assertEquals(0, records.count()); - // Producing another record. 
- producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - // Now the next record should be consumed successfully - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testShareAutoOffsetResetEarliest(String persister) { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - // Changing the value of share.auto.offset.reset value to "earliest" + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. + producer.send(record); + producer.flush(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + // No records should be consumed because share.auto.offset.reset has a default of "latest". Since the record + // was produced before share partition was initialized (which happens after the first share fetch request + // in the poll method), the start offset would be the latest offset, i.e. 1 (the next offset after the already + // present 0th record) + assertEquals(0, records.count()); + // Producing another record. + producer.send(record); + producer.flush(); + records = shareConsumer.poll(Duration.ofMillis(5000)); + // Now the next record should be consumed successfully + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testShareAutoOffsetResetEarliest() { alterShareAutoOffsetReset("group1", "earliest"); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - // Producing a record. - producer.send(record); - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); - // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume - // all messages present on the partition - assertEquals(1, records.count()); - // Producing another record. - producer.send(record); - records = shareConsumer.poll(Duration.ofMillis(5000)); - // The next records should also be consumed successfully - assertEquals(1, records.count()); - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testShareAutoOffsetResetEarliestAfterLsoMovement(String persister) throws Exception { - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - // Changing the value of share.auto.offset.reset value to "earliest" + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. 
+ producer.send(record); + producer.flush(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume + // all messages present on the partition + assertEquals(1, records.count()); + // Producing another record. + producer.send(record); + producer.flush(); + records = shareConsumer.poll(Duration.ofMillis(5000)); + // The next records should also be consumed successfully + assertEquals(1, records.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testShareAutoOffsetResetEarliestAfterLsoMovement() { alterShareAutoOffsetReset("group1", "earliest"); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. - try { + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. for (int i = 0; i < 10; i++) { - producer.send(record).get(); + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); } - } catch (Exception e) { - fail("Failed to send records: " + e); - } - // We delete records before offset 5, so the LSO should move to 5. - adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); + // We delete records before offset 5, so the LSO should move to 5. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); - AtomicInteger totalMessagesConsumed = new AtomicInteger(0); - CompletableFuture future = new CompletableFuture<>(); - consumeMessages(totalMessagesConsumed, 5, "group1", 1, 10, true, future); - // The records returned belong to offsets 5-9. - assertEquals(5, totalMessagesConsumed.get()); - assertEquals(5, future.get()); - - shareConsumer.close(); - producer.close(); - } - - @ParameterizedTest(name = "{displayName}.persister={0}") - @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) - public void testShareAutoOffsetResetMultipleGroupsWithDifferentValue(String persister) { - KafkaShareConsumer shareConsumerEarliest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); - shareConsumerEarliest.subscribe(Collections.singleton(tp.topic())); - // Changing the value of share.auto.offset.reset value to "earliest" for group1 - alterShareAutoOffsetReset("group1", "earliest"); + int consumedMessageCount = consumeMessages(new AtomicInteger(0), 5, "group1", 1, 10, true); + // The records returned belong to offsets 5-9. 
+ assertEquals(5, consumedMessageCount); + verifyShareGroupStateTopicRecordsProduced(); + } + } - KafkaShareConsumer shareConsumerLatest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2"); - shareConsumerLatest.subscribe(Collections.singleton(tp.topic())); - // Changing the value of share.auto.offset.reset value to "latest" for group2 + @Test + public void testShareAutoOffsetResetMultipleGroupsWithDifferentValue() { + alterShareAutoOffsetReset("group1", "earliest"); alterShareAutoOffsetReset("group2", "latest"); - ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - // Producing a record. - producer.send(record); - ConsumerRecords records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); - // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume - // all messages present on the partition - assertEquals(1, records1.count()); - - ConsumerRecords records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); - // Since the value for share.auto.offset.reset has been altered to "latest", the consumer should not consume - // any message - assertEquals(0, records2.count()); - - // Producing another record. - producer.send(record); - - records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); - // The next record should also be consumed successfully by group1 - assertEquals(1, records1.count()); - - records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); - // The next record should also be consumed successfully by group2 - assertEquals(1, records2.count()); - - shareConsumerEarliest.close(); - shareConsumerLatest.close(); - producer.close(); - } - - private CompletableFuture produceMessages(int messageCount) { - CompletableFuture future = new CompletableFuture<>(); - Future[] recordFutures = new Future[messageCount]; - int messagesSent = 0; + try (KafkaShareConsumer shareConsumerEarliest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumerLatest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumerEarliest.subscribe(Collections.singleton(tp.topic())); + + shareConsumerLatest.subscribe(Collections.singleton(tp.topic())); + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. + producer.send(record); + producer.flush(); + ConsumerRecords records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume + // all messages present on the partition + assertEquals(1, records1.count()); + + ConsumerRecords records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "latest", the consumer should not consume + // any message + assertEquals(0, records2.count()); + + // Producing another record. 
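// A minimal sketch (illustration, not taken from this patch) of what the alterShareAutoOffsetReset
// helper used throughout these tests amounts to: share.auto.offset.reset is a group-level config,
// altered via Admin.incrementalAlterConfigs with a GROUP ConfigResource. Besides "earliest" and
// "latest", the tests that follow use the "by_duration:<ISO-8601 duration>" form, e.g. "by_duration:PT1H".
ConfigResource groupResource = new ConfigResource(ConfigResource.Type.GROUP, "group1");
AlterConfigOp setReset = new AlterConfigOp(
    new ConfigEntry(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "earliest"), AlterConfigOp.OpType.SET);
assertDoesNotThrow(() -> adminClient.incrementalAlterConfigs(Map.of(groupResource, List.of(setReset)))
    .all().get(60, TimeUnit.SECONDS), "Failed to alter group config");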
+ producer.send(record); + + records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); + // The next record should also be consumed successfully by group1 + assertEquals(1, records1.count()); + + records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); + // The next record should also be consumed successfully by group2 + assertEquals(1, records2.count()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testShareAutoOffsetResetByDuration() throws Exception { + // Set auto offset reset to 1 hour before current time + alterShareAutoOffsetReset("group1", "by_duration:PT1H"); + + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + long currentTime = System.currentTimeMillis(); + long twoHoursAgo = currentTime - TimeUnit.HOURS.toMillis(2); + long thirtyMinsAgo = currentTime - TimeUnit.MINUTES.toMillis(30); + + // Produce messages with different timestamps + ProducerRecord oldRecord = new ProducerRecord<>(tp.topic(), tp.partition(), + twoHoursAgo, "old_key".getBytes(), "old_value".getBytes()); + ProducerRecord recentRecord = new ProducerRecord<>(tp.topic(), tp.partition(), + thirtyMinsAgo, "recent_key".getBytes(), "recent_value".getBytes()); + ProducerRecord currentRecord = new ProducerRecord<>(tp.topic(), tp.partition(), + currentTime, "current_key".getBytes(), "current_value".getBytes()); + + producer.send(oldRecord).get(); + producer.send(recentRecord).get(); + producer.send(currentRecord).get(); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + // Should only receive messages from last hour (recent and current) + List> records = consumeRecords(shareConsumer, 2); + assertEquals(2, records.size()); + + // Verify timestamps and order + assertEquals(thirtyMinsAgo, records.get(0).timestamp()); + assertEquals("recent_key", new String(records.get(0).key())); + assertEquals(currentTime, records.get(1).timestamp()); + assertEquals("current_key", new String(records.get(1).key())); + } + + // Set the auto offset reset to 3 hours before current time + // so the consumer should consume all messages (3 records) + alterShareAutoOffsetReset("group2", "by_duration:PT3H"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + List> records = consumeRecords(shareConsumer, 3); + assertEquals(3, records.size()); + verifyShareGroupStateTopicRecordsProduced(); + } + } + + @Test + public void testShareAutoOffsetResetByDurationInvalidFormat() throws Exception { + // Test invalid duration format + ConfigResource configResource = new ConfigResource(ConfigResource.Type.GROUP, "group1"); + Map> alterEntries = new HashMap<>(); + + // Test invalid duration format + alterEntries.put(configResource, List.of(new AlterConfigOp(new ConfigEntry( + GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:1h"), AlterConfigOp.OpType.SET))); + ExecutionException e1 = assertThrows(ExecutionException.class, () -> + adminClient.incrementalAlterConfigs(alterEntries).all().get()); + assertInstanceOf(InvalidConfigurationException.class, e1.getCause()); + + // Test negative duration + alterEntries.put(configResource, List.of(new AlterConfigOp(new 
ConfigEntry( + GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:-PT1H"), AlterConfigOp.OpType.SET))); + ExecutionException e2 = assertThrows(ExecutionException.class, () -> + adminClient.incrementalAlterConfigs(alterEntries).all().get()); + assertInstanceOf(InvalidConfigurationException.class, e2.getCause()); + } + + private int produceMessages(int messageCount) { try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); - for (int i = 0; i < messageCount; i++) { - recordFutures[i] = producer.send(record); - } - for (int i = 0; i < messageCount; i++) { - try { - recordFutures[i].get(); - messagesSent++; - } catch (Exception e) { - fail("Failed to send record: " + e); - } - } - } finally { - future.complete(messagesSent); + IntStream.range(0, messageCount).forEach(__ -> producer.send(record)); + producer.flush(); } - return future; + return messageCount; } private void produceMessagesWithTimestamp(int messageCount, long startingTimestamp) { @@ -1797,48 +1762,57 @@ private void produceMessagesWithTimestamp(int messageCount, long startingTimesta } } - private void consumeMessages(AtomicInteger totalMessagesConsumed, + private int consumeMessages(AtomicInteger totalMessagesConsumed, int totalMessages, String groupId, int consumerNumber, int maxPolls, - boolean commit, - CompletableFuture future) { - consumeMessages(totalMessagesConsumed, totalMessages, groupId, consumerNumber, maxPolls, commit, future, Optional.empty()); + boolean commit) { + return assertDoesNotThrow(() -> { + try (KafkaShareConsumer shareConsumer = createShareConsumer( + new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId)) { + shareConsumer.subscribe(Collections.singleton(tp.topic())); + return consumeMessages(shareConsumer, totalMessagesConsumed, totalMessages, consumerNumber, maxPolls, commit); + } + }, "Consumer " + consumerNumber + " failed with exception"); } - private void consumeMessages(AtomicInteger totalMessagesConsumed, + private int consumeMessages(AtomicInteger totalMessagesConsumed, int totalMessages, String groupId, int consumerNumber, int maxPolls, boolean commit, - CompletableFuture future, - Optional maxFetchBytes) { - KafkaShareConsumer shareConsumer; - Map> partitionOffsetsMap = new HashMap<>(); - Map partitionExceptionMap = new HashMap<>(); - if (maxFetchBytes.isPresent()) { - shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId, - Collections.singletonMap(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(maxFetchBytes.get()))); - } else { - shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId); - } - shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgeCommitCallback(partitionOffsetsMap, partitionExceptionMap)); - shareConsumer.subscribe(Collections.singleton(tp.topic())); - int messagesConsumed = 0; - int retries = 0; - try { + int maxFetchBytes) { + return assertDoesNotThrow(() -> { + try (KafkaShareConsumer shareConsumer = createShareConsumer( + new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId, + Map.of(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxFetchBytes))) { + shareConsumer.subscribe(Collections.singleton(tp.topic())); + return consumeMessages(shareConsumer, totalMessagesConsumed, totalMessages, consumerNumber, maxPolls, commit); + } + }, "Consumer " + 
consumerNumber + " failed with exception"); + } + + private int consumeMessages(KafkaShareConsumer consumer, + AtomicInteger totalMessagesConsumed, + int totalMessages, + int consumerNumber, + int maxPolls, + boolean commit) { + return assertDoesNotThrow(() -> { + int messagesConsumed = 0; + int retries = 0; if (totalMessages > 0) { while (totalMessagesConsumed.get() < totalMessages && retries < maxPolls) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(2000)); + ConsumerRecords records = consumer.poll(Duration.ofMillis(2000)); messagesConsumed += records.count(); totalMessagesConsumed.addAndGet(records.count()); retries++; } } else { while (retries < maxPolls) { - ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(2000)); + ConsumerRecords records = consumer.poll(Duration.ofMillis(2000)); messagesConsumed += records.count(); totalMessagesConsumed.addAndGet(records.count()); retries++; @@ -1847,14 +1821,10 @@ private void consumeMessages(AtomicInteger totalMessagesConsumed, if (commit) { // Complete acknowledgement of the records - shareConsumer.commitSync(Duration.ofMillis(10000)); + consumer.commitSync(Duration.ofMillis(10000)); } - } catch (Exception e) { - fail("Consumer " + consumerNumber + " failed with exception: " + e); - } finally { - shareConsumer.close(); - future.complete(messagesConsumed); - } + return messagesConsumed; + }, "Consumer " + consumerNumber + " failed with exception"); } private List> consumeRecords(KafkaShareConsumer consumer, @@ -1865,29 +1835,27 @@ private List> consumeRecords(KafkaShareConsumer records = consumer.poll(Duration.ofMillis(100)); records.forEach(accumulatedRecords::add); long currentTimeMs = System.currentTimeMillis(); - if (currentTimeMs - startTimeMs > 60000) { - fail("Timed out before consuming expected records."); - } + assertFalse(currentTimeMs - startTimeMs > 60000, "Timed out before consuming expected records."); } return accumulatedRecords; } private void createTopic(String topicName) { Properties props = cluster.clientProperties(); - try (Admin admin = Admin.create(props)) { - admin.createTopics(Collections.singleton(new NewTopic(topicName, 1, (short) 1))).all().get(); - } catch (Exception e) { - fail("Failed to create topic"); - } + assertDoesNotThrow(() -> { + try (Admin admin = Admin.create(props)) { + admin.createTopics(Collections.singleton(new NewTopic(topicName, 1, (short) 1))).all().get(); + } + }, "Failed to create topic"); } private void deleteTopic(String topicName) { Properties props = cluster.clientProperties(); - try (Admin admin = Admin.create(props)) { - admin.deleteTopics(Collections.singleton(topicName)).all().get(); - } catch (Exception e) { - fail("Failed to create topic"); - } + assertDoesNotThrow(() -> { + try (Admin admin = Admin.create(props)) { + admin.deleteTopics(Collections.singleton(topicName)).all().get(); + } + }, "Failed to delete topic"); } private Admin createAdminClient() { @@ -1927,24 +1895,55 @@ private KafkaShareConsumer createShareConsumer(Deserializer keyD return new KafkaShareConsumer<>(props, keyDeserializer, valueDeserializer); } - private void warmup() throws InterruptedException, ExecutionException, TimeoutException { + private void warmup() throws InterruptedException { createTopic(warmupTp.topic()); - TestUtils.waitForCondition(() -> - !cluster.brokers().get(0).metadataCache().getAliveBrokerNodes(new ListenerName("EXTERNAL")).isEmpty(), - DEFAULT_MAX_WAIT_MS, 100L, () -> "cache not up yet"); + waitForMetadataCache(); ProducerRecord record = new 
ProducerRecord<>(warmupTp.topic(), warmupTp.partition(), null, "key".getBytes(), "value".getBytes()); - KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); - KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "warmupgroup1"); Set subscription = Collections.singleton(warmupTp.topic()); - try { - producer.send(record).get(15000, TimeUnit.MILLISECONDS); + alterShareAutoOffsetReset("warmupgroup1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "warmupgroup1")) { + + producer.send(record); + producer.flush(); + shareConsumer.subscribe(subscription); - alterShareAutoOffsetReset("warmupgroup1", "earliest"); TestUtils.waitForCondition( () -> shareConsumer.poll(Duration.ofMillis(5000)).count() == 1, 30000, 200L, () -> "warmup record not received"); - } finally { - producer.close(); - shareConsumer.close(); + } + } + + private void waitForMetadataCache() throws InterruptedException { + TestUtils.waitForCondition(() -> + !cluster.brokers().get(0).metadataCache().getAliveBrokerNodes(new ListenerName("EXTERNAL")).isEmpty(), + DEFAULT_MAX_WAIT_MS, 100L, () -> "cache not up yet"); + } + + private void verifyShareGroupStateTopicRecordsProduced() { + try { + Map consumerConfigs = new HashMap<>(); + consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + consumerConfigs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + consumerConfigs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + + try (KafkaConsumer consumer = new KafkaConsumer<>(consumerConfigs)) { + consumer.assign(sgsTopicPartitions); + consumer.seekToBeginning(sgsTopicPartitions); + Set> records = new HashSet<>(); + TestUtils.waitForCondition(() -> { + ConsumerRecords msgs = consumer.poll(Duration.ofMillis(5000L)); + if (msgs.count() > 0) { + msgs.records(Topic.SHARE_GROUP_STATE_TOPIC_NAME).forEach(records::add); + } + return records.size() > 2; // +2 because of extra warmup records + }, + 30000L, + 200L, + () -> "no records produced" + ); + } + } catch (InterruptedException e) { + fail(e); } } @@ -1954,12 +1953,8 @@ private void alterShareAutoOffsetReset(String groupId, String newValue) { alterEntries.put(configResource, List.of(new AlterConfigOp(new ConfigEntry( GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, newValue), AlterConfigOp.OpType.SET))); AlterConfigsOptions alterOptions = new AlterConfigsOptions(); - try { - adminClient.incrementalAlterConfigs(alterEntries, alterOptions) + assertDoesNotThrow(() -> adminClient.incrementalAlterConfigs(alterEntries, alterOptions) .all() - .get(60, TimeUnit.SECONDS); - } catch (Exception e) { - fail("Exception was thrown: ", e); - } + .get(60, TimeUnit.SECONDS), "Failed to alter configs"); } } diff --git a/core/src/test/resources/log4j2.yaml b/core/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..016a542689b4e --- /dev/null +++ b/core/src/test/resources/log4j2.yaml @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: OFF + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: WARN + + - name: org.apache.kafka + level: WARN diff --git a/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala b/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala index 02b8928e2b53d..37348c0657862 100644 --- a/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala @@ -25,7 +25,6 @@ import org.apache.kafka.clients.admin._ import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.ListOffsetsResponse import org.apache.kafka.common.utils.{MockTime, Time, Utils} import org.apache.kafka.server.config.ServerLogConfigs @@ -45,7 +44,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { private val topicNameWithCustomConfigs = "foo2" private var adminClient: Admin = _ private val mockTime: Time = new MockTime(1) - private var version = RecordBatch.MAGIC_VALUE_V2 private val dataFolder = Seq(tempDir().getAbsolutePath, tempDir().getAbsolutePath) @BeforeEach @@ -73,20 +71,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, maxTimestampOffset.timestamp()) } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testListVersion0(quorum: String): Unit = { - // create records for version 0 - createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V0) - produceMessagesInSeparateBatch() - - // update version to version 1 to list offset for max timestamp - createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1) - // the offset of max timestamp is always -1 if the batch version is 0 - verifyListOffsets(expectedMaxTimestampOffset = -1) - } - - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testThreeCompressedRecordsInOneBatch(quorum: String): Unit = { @@ -129,38 +113,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { verifyListOffsets(topic = topicNameWithCustomConfigs, 2) } - // The message conversion test only run in ZK mode because KRaft mode doesn't support "inter.broker.protocol.version" < 3.0 - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testThreeRecordsInOneBatchWithMessageConversion(quorum: String): Unit = { - createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1) - produceMessagesInOneBatch() - verifyListOffsets() - - // test LogAppendTime case - setUpForLogAppendTimeCase() - produceMessagesInOneBatch(topic = topicNameWithCustomConfigs) - // In LogAppendTime's case, the 
maxTimestampOffset should be the first message of the batch. - // So in this one batch test, it'll be the first offset 0 - verifyListOffsets(topic = topicNameWithCustomConfigs, 0) - } - - // The message conversion test only run in ZK mode because KRaft mode doesn't support "inter.broker.protocol.version" < 3.0 - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testThreeRecordsInSeparateBatchWithMessageConversion(quorum: String): Unit = { - createMessageFormatBrokers(RecordBatch.MAGIC_VALUE_V1) - produceMessagesInSeparateBatch() - verifyListOffsets() - - // test LogAppendTime case - setUpForLogAppendTimeCase() - produceMessagesInSeparateBatch(topic = topicNameWithCustomConfigs) - // In LogAppendTime's case, the maxTimestampOffset is the message in the last batch since we advance the time - // for each batch, So it'll be the last offset 2 - verifyListOffsets(topic = topicNameWithCustomConfigs, 2) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testThreeRecordsInOneBatchHavingDifferentCompressionTypeWithServer(quorum: String): Unit = { @@ -201,15 +153,6 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { createTopicWithConfig(topicNameWithCustomConfigs, props) } - private def createMessageFormatBrokers(recordVersion: Byte): Unit = { - version = recordVersion - recreateBrokers(reconfigure = true, startup = true) - Utils.closeQuietly(adminClient, "ListOffsetsAdminClient") - adminClient = Admin.create(Map[String, Object]( - AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers() - ).asJava) - } - private def createTopicWithConfig(topic: String, props: Properties): Unit = { createTopic(topic, 1, 1.toShort, topicConfig = props) } @@ -224,12 +167,9 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { val maxTimestampOffset = runFetchOffsets(adminClient, OffsetSpec.maxTimestamp(), topic) assertEquals(expectedMaxTimestampOffset, maxTimestampOffset.offset()) - if (version >= RecordBatch.MAGIC_VALUE_V2) - // the epoch is related to the returned offset. - // Hence, it should be zero (the earliest leader epoch), regardless of new leader election - assertEquals(Optional.of(0), maxTimestampOffset.leaderEpoch()) - else - assertEquals(Optional.empty(), maxTimestampOffset.leaderEpoch()) + // the epoch is related to the returned offset. + // Hence, it should be zero (the earliest leader epoch), regardless of new leader election + assertEquals(Optional.of(0), maxTimestampOffset.leaderEpoch()) } // case 0: test the offsets from leader's append path @@ -336,15 +276,7 @@ class ListOffsetsIntegrationTest extends KafkaServerTestHarness { } def generateConfigs: Seq[KafkaConfig] = { - TestUtils.createBrokerConfigs(2, zkConnectOrNull).zipWithIndex.map{ case (props, index) => - if (version == RecordBatch.MAGIC_VALUE_V0) { - props.setProperty("log.message.format.version", "0.9.0") - props.setProperty("inter.broker.protocol.version", "0.9.0") - } - if (version == RecordBatch.MAGIC_VALUE_V1) { - props.setProperty("log.message.format.version", "0.10.0") - props.setProperty("inter.broker.protocol.version", "0.10.0") - } + TestUtils.createBrokerConfigs(2).zipWithIndex.map{ case (props, index) => // We use mock timer so the records can get removed if the test env is too busy to complete // tests before kafka-log-retention. 
Hence, we disable the retention to avoid failed tests props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "-1") diff --git a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala index faef809b17025..d89a83c7750f2 100644 --- a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala +++ b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala @@ -444,35 +444,6 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { () => admin.incrementalAlterConfigs(configs).all().get(), "Disabling remote storage feature on the topic level is not supported.") } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testUpdateInvalidRemoteStorageConfigUnderZK(quorum: String): Unit = { - val admin = createAdminClient() - val errorMsg = "It is invalid to set `remote.log.delete.on.disable` or `remote.log.copy.disable` under Zookeeper's mode." - val topicConfig = new Properties - topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, - topicConfig = topicConfig) - - val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - )) - assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get(), errorMsg) - - configs.clear() - configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), - util.Arrays.asList( - new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, "true"), - AlterConfigOp.OpType.SET), - )) - assertThrowsException(classOf[InvalidConfigurationException], - () => admin.incrementalAlterConfigs(configs).all().get(), errorMsg) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testTopicDeletion(quorum: String): Unit = { @@ -501,17 +472,13 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, brokerCount, topicConfig = topicConfig) - val tsDisabledProps = TestUtils.createBrokerConfigs(1, zkConnectOrNull).head + val tsDisabledProps = TestUtils.createBrokerConfigs(1).head instanceConfigs = List(KafkaConfig.fromProps(tsDisabledProps)) - if (isKRaftTest()) { - recreateBrokers(startup = true) - assertTrue(faultHandler.firstException().getCause.isInstanceOf[ConfigException]) - // Normally the exception is thrown as part of the TearDown method of the parent class(es). We would like to not do this. - faultHandler.setIgnore(true) - } else { - assertThrows(classOf[ConfigException], () => recreateBrokers(startup = true)) - } + recreateBrokers(startup = true) + assertTrue(faultHandler.firstException().getCause.isInstanceOf[ConfigException]) + // Normally the exception is thrown as part of the TearDown method of the parent class(es). We would like to not do this. 
+ faultHandler.setIgnore(true) } @ParameterizedTest @@ -523,7 +490,7 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, brokerCount, topicConfig = topicConfig) - val tsDisabledProps = TestUtils.createBrokerConfigs(1, zkConnectOrNull).head + val tsDisabledProps = TestUtils.createBrokerConfigs(1).head instanceConfigs = List(KafkaConfig.fromProps(tsDisabledProps)) recreateBrokers(startup = true) diff --git a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala index d29f05b36b3a8..517614d84a11f 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala @@ -183,9 +183,13 @@ abstract class AbstractConsumerTest extends BaseRequestTest { protected def sendRecords(producer: KafkaProducer[Array[Byte], Array[Byte]], numRecords: Int, tp: TopicPartition, - startingTimestamp: Long = System.currentTimeMillis()): Seq[ProducerRecord[Array[Byte], Array[Byte]]] = { + startingTimestamp: Long = System.currentTimeMillis(), + timestampIncrement: Long = -1L): Seq[ProducerRecord[Array[Byte], Array[Byte]]] = { val records = (0 until numRecords).map { i => - val timestamp = startingTimestamp + i.toLong + val timestamp = if (timestampIncrement > 0) + startingTimestamp + i.toLong * timestampIncrement + else + startingTimestamp + i.toLong val record = new ProducerRecord(tp.topic(), tp.partition(), timestamp, s"key $i".getBytes, s"value $i".getBytes) producer.send(record) record @@ -202,7 +206,8 @@ abstract class AbstractConsumerTest extends BaseRequestTest { startingTimestamp: Long = 0L, timestampType: TimestampType = TimestampType.CREATE_TIME, tp: TopicPartition = tp, - maxPollRecords: Int = Int.MaxValue): Unit = { + maxPollRecords: Int = Int.MaxValue, + timestampIncrement: Long = -1L): Unit = { val records = consumeRecords(consumer, numRecords, maxPollRecords = maxPollRecords) val now = System.currentTimeMillis() for (i <- 0 until numRecords) { @@ -212,8 +217,13 @@ abstract class AbstractConsumerTest extends BaseRequestTest { assertEquals(tp.partition, record.partition) if (timestampType == TimestampType.CREATE_TIME) { assertEquals(timestampType, record.timestampType) - val timestamp = startingTimestamp + i - assertEquals(timestamp, record.timestamp) + if (timestampIncrement > 0) { + val timestamp = startingTimestamp + i * timestampIncrement + assertEquals(timestamp, record.timestamp) + } else { + val timestamp = startingTimestamp + i + assertEquals(timestamp, record.timestamp) + } } else assertTrue(record.timestamp >= startingTimestamp && record.timestamp <= now, s"Got unexpected timestamp ${record.timestamp}. 
Timestamp should be between [$startingTimestamp, $now}]") diff --git a/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala index 70b514f199b19..64cc259408e13 100644 --- a/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala @@ -16,15 +16,18 @@ */ package kafka.api -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource class AdminClientRebootstrapTest extends RebootstrapTest { - @Test - def testRebootstrap(): Unit = { + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") + @ValueSource(booleans = Array(false, true)) + def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { + server1.shutdown() server1.awaitShutdown() - val adminClient = createAdminClient(configOverrides = clientOverrides) + val adminClient = createAdminClient(configOverrides = clientOverrides(useRebootstrapTriggerMs)) // Only the server 0 is available for the admin client during the bootstrap. adminClient.listTopics().names().get() diff --git a/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala index 6f902b2db3b36..b11cb96ef8ce0 100644 --- a/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala @@ -14,13 +14,12 @@ package kafka.api import java.util -import java.util.Properties +import java.util.{Collections, Properties} import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig -import kafka.utils.TestUtils.assertFutureExceptionTypeEquals import kafka.utils.{Logging, TestUtils} import org.apache.kafka.clients.admin.AlterConfigOp.OpType -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsOptions, Config, ConfigEntry} +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsOptions, ConfigEntry} import org.apache.kafka.common.config.{ConfigResource, SslConfigs, TopicConfig} import org.apache.kafka.common.errors.{InvalidConfigurationException, InvalidRequestException, PolicyViolationException} import org.apache.kafka.common.utils.Utils @@ -28,12 +27,12 @@ import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.config.{ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.policy.AlterConfigPolicy import org.apache.kafka.storage.internals.log.LogConfig +import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import scala.annotation.nowarn import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -65,7 +64,7 @@ class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with Map[String, Object](AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers()).asJava override def generateConfigs: collection.Seq[KafkaConfig] = { - val configs = TestUtils.createBrokerConfigs(brokerCount, null) + val configs = 
TestUtils.createBrokerConfigs(brokerCount) configs.foreach(overrideNodeConfigs) configs.map(KafkaConfig.fromProps) } @@ -88,15 +87,17 @@ class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with val topic1 = "describe-alter-configs-topic-1" val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) val topicConfig1 = new Properties - topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "500000") - topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, "60000000") + val maxMessageBytes = "500000" + val retentionMs = "60000000" + topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageBytes) + topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, retentionMs) createTopic(topic1, 1, 1, topicConfig1) val topic2 = "describe-alter-configs-topic-2" val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) createTopic(topic2) - PlaintextAdminIntegrationTest.checkValidAlterConfigs(client, this, topicResource1, topicResource2) + PlaintextAdminIntegrationTest.checkValidAlterConfigs(client, this, topicResource1, topicResource2, maxMessageBytes, retentionMs) } @ParameterizedTest @@ -106,7 +107,6 @@ class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with PlaintextAdminIntegrationTest.checkInvalidAlterConfigs(this, client) } - @nowarn("cat=deprecation") @ParameterizedTest @ValueSource(strings = Array("kraft")) def testInvalidAlterConfigsDueToPolicy(quorum: String): Unit = { @@ -127,36 +127,38 @@ class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with // Set a mutable broker config val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, brokers.head.config.brokerId.toString) - val brokerConfigs = Seq(new ConfigEntry(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "50000")).asJava - val alterResult1 = client.alterConfigs(Map(brokerResource -> new Config(brokerConfigs)).asJava) - alterResult1.all.get + var alterResult = client.incrementalAlterConfigs(Collections.singletonMap(brokerResource, + util.Arrays.asList(new AlterConfigOp(new ConfigEntry(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "50000"), OpType.SET)))) + alterResult.all.get assertEquals(Set(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG), validationsForResource(brokerResource).head.configs().keySet().asScala) validations.clear() - val topicConfigEntries1 = Seq( - new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), - new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") // policy doesn't allow this - ).asJava + val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + alterConfigs.put(topicResource1, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2"), OpType.SET) + )) - var topicConfigEntries2 = Seq(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.8")).asJava + alterConfigs.put(topicResource2, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.8"), OpType.SET), + )) - val topicConfigEntries3 = Seq(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "-1")).asJava + alterConfigs.put(topicResource3, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "-1"), OpType.SET), + )) - val brokerConfigEntries = Seq(new ConfigEntry(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "12313")).asJava + 
alterConfigs.put(brokerResource, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "12313"), OpType.SET), + )) // Alter configs: second is valid, the others are invalid - var alterResult = client.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2), - topicResource3 -> new Config(topicConfigEntries3), - brokerResource -> new Config(brokerConfigEntries) - ).asJava) + alterResult = client.incrementalAlterConfigs(alterConfigs) assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) alterResult.values.get(topicResource2).get - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) - assertFutureExceptionTypeEquals(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertFutureThrows(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) assertTrue(validationsForResource(brokerResource).isEmpty, "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") validations.clear() @@ -175,20 +177,17 @@ class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) // Alter configs with validateOnly = true: only second is valid - topicConfigEntries2 = Seq(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.7")).asJava + alterConfigs.put(topicResource2, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.7"), OpType.SET), + )) - alterResult = client.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2), - brokerResource -> new Config(brokerConfigEntries), - topicResource3 -> new Config(topicConfigEntries3) - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) alterResult.values.get(topicResource2).get - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) - assertFutureExceptionTypeEquals(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertFutureThrows(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) assertTrue(validationsForResource(brokerResource).isEmpty, "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") validations.clear() diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala 
b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala index 3fedfd99fb2a8..3ee420f2c4992 100644 --- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala @@ -264,7 +264,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } private def createProduceRequest = - requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -389,7 +389,6 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setPartitionIndex(part) .setCommittedOffset(0) .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setCommitTimestamp(OffsetCommitRequest.DEFAULT_TIMESTAMP) .setCommittedMetadata("metadata") ))) ) @@ -920,7 +919,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeUsingAssignWithNoAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -935,7 +934,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testSimpleConsumeWithOffsetLookupAndNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -955,7 +954,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testSimpleConsumeWithExplicitSeekAndNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -975,7 +974,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeWithoutTopicDescribeAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -993,7 +992,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1012,7 +1011,7 @@ class AuthorizerIntegrationTest 
extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeWithTopicWrite(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1031,7 +1030,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1049,7 +1048,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val assignSemaphore = new Semaphore(0) createTopicWithBrokerPrincipal(topic) @@ -1076,7 +1075,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionWithTopicDescribeOnlyAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1094,7 +1093,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { val assignSemaphore = new Semaphore(0) createTopicWithBrokerPrincipal(topic) @@ -1135,7 +1134,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionMatchingInternalTopic(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1165,7 +1164,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionMatchingInternalTopicWithDescribeOnlyPermission(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1191,7 +1190,7 @@ class 
AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPatternSubscriptionNotMatchingInternalTopic(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1210,7 +1209,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCreatePermissionOnTopicToReadFromNonExistentTopic(quorum: String, groupProtocol: String): Unit = { testCreatePermissionNeededToReadFromNonExistentTopic("newTopic", Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), @@ -1218,7 +1217,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCreatePermissionOnClusterToReadFromNonExistentTopic(quorum: String, groupProtocol: String): Unit = { testCreatePermissionNeededToReadFromNonExistentTopic("newTopic", Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), @@ -1273,14 +1272,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testCommitWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCommitWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() @@ -1288,7 +1287,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCommitWithTopicWrite(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1299,7 +1298,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - 
@MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCommitWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1310,7 +1309,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testCommitWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() @@ -1318,7 +1317,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCommitWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) @@ -1328,7 +1327,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() consumer.assign(List(tp).asJava) @@ -1336,7 +1335,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testOffsetFetchWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) @@ -1346,7 +1345,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() @@ -1355,7 +1354,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + 
@MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchAllTopicPartitionsAuthorization(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1387,7 +1386,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchMultipleGroupsAuthorization(quorum: String, groupProtocol: String): Unit = { val groups: Seq[String] = (1 to 5).map(i => s"group$i") val groupResources = groups.map(group => new ResourcePattern(GROUP, group, LITERAL)) @@ -1543,7 +1542,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) @@ -1554,7 +1553,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) @@ -1565,14 +1564,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMetadataWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() assertThrows(classOf[TopicAuthorizationException], () => consumer.partitionsFor(topic)) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMetadataWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) @@ -1581,14 +1580,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testListOffsetsWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() 
assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(Set(tp).asJava)) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testListOffsetsWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) @@ -1601,7 +1600,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { def testDescribeGroupApiWithNoGroupAcl(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) - TestUtils.assertFutureExceptionTypeEquals(result.describedGroups().get(group), classOf[GroupAuthorizationException]) + JTestUtils.assertFutureThrows(result.describedGroups().get(group), classOf[GroupAuthorizationException]) } @ParameterizedTest @@ -1610,11 +1609,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) - createAdminClient().describeConsumerGroups(Seq(group).asJava).describedGroups().get(group).get() + val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) + JTestUtils.assertFutureThrows(result.describedGroups().get(group), classOf[GroupIdNotFoundException]) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testListGroupApiWithAndWithoutListGroupAcls(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1663,7 +1663,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteGroupApiWithDeleteGroupAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1677,7 +1677,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteGroupApiWithNoDeleteGroupAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1687,18 +1687,18 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumer.assign(List(tp).asJava) consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) - 
TestUtils.assertFutureExceptionTypeEquals(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) + JTestUtils.assertFutureThrows(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) } @ParameterizedTest @ValueSource(strings = Array("kraft")) def testDeleteGroupApiWithNoDeleteGroupAcl2(quorum: String): Unit = { val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) - TestUtils.assertFutureExceptionTypeEquals(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) + JTestUtils.assertFutureThrows(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteGroupOffsetsWithAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1714,7 +1714,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteGroupOffsetsWithoutDeleteAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) @@ -1725,11 +1725,11 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) consumer.close() val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) - TestUtils.assertFutureExceptionTypeEquals(result.all(), classOf[GroupAuthorizationException]) + JTestUtils.assertFutureThrows(result.all(), classOf[GroupAuthorizationException]) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteGroupOffsetsWithDeleteAclWithoutTopicAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) // Create the consumer group @@ -1745,15 +1745,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) - TestUtils.assertFutureExceptionTypeEquals(result.all(), classOf[TopicAuthorizationException]) - TestUtils.assertFutureExceptionTypeEquals(result.partitionResult(tp), classOf[TopicAuthorizationException]) + JTestUtils.assertFutureThrows(result.all(), classOf[TopicAuthorizationException]) + JTestUtils.assertFutureThrows(result.partitionResult(tp), classOf[TopicAuthorizationException]) } @ParameterizedTest @ValueSource(strings = Array("kraft")) def testDeleteGroupOffsetsWithNoAcl(quorum: String): Unit = { val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) - TestUtils.assertFutureExceptionTypeEquals(result.all(), classOf[GroupAuthorizationException]) + JTestUtils.assertFutureThrows(result.all(), 
classOf[GroupAuthorizationException]) } @ParameterizedTest @@ -2160,7 +2160,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.initTransactions() producer.beginTransaction() removeAllClientAcls() - assertThrows(classOf[TransactionalIdAuthorizationException], () => { + // In transaction V2, the server receives the offset commit request first, so the error is GroupAuthorizationException + // instead of TransactionalIdAuthorizationException. + assertThrows(classOf[GroupAuthorizationException], () => { val offsets = Map(tp -> new OffsetAndMetadata(1L)).asJava producer.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(group)) producer.commitTransaction() @@ -2302,8 +2304,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { def testMetadataClusterAuthorizedOperationsWithoutDescribeCluster(quorum: String): Unit = { removeAllClientAcls() - // MetadataRequest versions older than 1 are not supported. - for (version <- 1 to ApiKeys.METADATA.latestVersion) { + for (version <- ApiKeys.METADATA.oldestVersion to ApiKeys.METADATA.latestVersion) { testMetadataClusterClusterAuthorizedOperations(version.toShort, 0) } } @@ -2323,8 +2324,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val expectedClusterAuthorizedOperations = Utils.to32BitField( acls.map(_.operation.code.asInstanceOf[JByte]).asJava) - // MetadataRequest versions older than 1 are not supported. - for (version <- 1 to ApiKeys.METADATA.latestVersion) { + for (version <- ApiKeys.METADATA.oldestVersion to ApiKeys.METADATA.latestVersion) { testMetadataClusterClusterAuthorizedOperations(version.toShort, expectedClusterAuthorizedOperations) } } @@ -2458,7 +2458,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCreateAndCloseConsumerWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() val closeConsumer: Executable = () => consumer.close() diff --git a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala index 6a877bd6361ae..3fc63c5952633 100644 --- a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala @@ -30,6 +30,7 @@ import org.apache.kafka.common.resource.ResourceType import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.security.authorizer.AclEntry +import org.apache.kafka.test.TestUtils.assertFutureThrows import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} @@ -108,14 +109,14 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg val failedCreateResult = client.createTopics(newTopics.asJava) val results = failedCreateResult.values() assertTrue(results.containsKey("mytopic")) - assertFutureExceptionTypeEquals(results.get("mytopic"), classOf[TopicExistsException]) + assertFutureThrows(results.get("mytopic"), classOf[TopicExistsException]) 
assertTrue(results.containsKey("mytopic2")) - assertFutureExceptionTypeEquals(results.get("mytopic2"), classOf[TopicExistsException]) + assertFutureThrows(results.get("mytopic2"), classOf[TopicExistsException]) assertTrue(results.containsKey("mytopic3")) - assertFutureExceptionTypeEquals(results.get("mytopic3"), classOf[TopicExistsException]) - assertFutureExceptionTypeEquals(failedCreateResult.numPartitions("mytopic3"), classOf[TopicExistsException]) - assertFutureExceptionTypeEquals(failedCreateResult.replicationFactor("mytopic3"), classOf[TopicExistsException]) - assertFutureExceptionTypeEquals(failedCreateResult.config("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(results.get("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.numPartitions("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.replicationFactor("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.config("mytopic3"), classOf[TopicExistsException]) val topicToDescription = client.describeTopics(topics.asJava).allTopicNames.get() assertEquals(topics.toSet, topicToDescription.keySet.asScala) diff --git a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala index 1d98552d1cda0..82d795b737fc9 100644 --- a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala @@ -17,7 +17,7 @@ package kafka.api import kafka.utils.TestInfoUtils -import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} +import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, GroupProtocol} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} import org.apache.kafka.common.header.Headers import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, PartitionInfo} @@ -83,8 +83,10 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testCoordinatorFailover(quorum: String, groupProtocol: String): Unit = { val listener = new TestConsumerReassignmentListener() - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5001") - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5001") + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") + } // Use higher poll timeout to avoid consumer leaving the group due to timeout this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "15000") val consumer = createConsumer() diff --git a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala index be853d9d990bf..74111e319b0ab 100644 --- a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala @@ -34,6 +34,7 @@ import org.apache.kafka.common.network.{ConnectionMode, ListenerName} import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import 
org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} @@ -50,10 +51,10 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { def generateConfigs: scala.collection.Seq[KafkaConfig] = { val overridingProps = new Properties() val numServers = 2 + overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toShort) overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 4.toString) TestUtils.createBrokerConfigs( numServers, - zkConnectOrNull, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties @@ -367,7 +368,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_16176")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testSendToPartitionWithFollowerShutdownShouldNotTimeout(quorum: String, groupProtocol: String): Unit = { // This test produces to a leader that has follower that is shutting down. It shows that // the produce request succeed, do not timeout and do not need to be retried. diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index 55dbe268fa58b..a8dbe0ecdaa5d 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -26,10 +26,10 @@ import org.apache.kafka.common.message.FindCoordinatorRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{FindCoordinatorRequest, FindCoordinatorResponse} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig -import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.util.ShutdownableThread import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, Disabled, Test} +import org.junit.jupiter.api.{AfterEach, Disabled, Test, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource @@ -54,17 +54,30 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { generateKafkaConfigs() } + val testConfigs = Map[String, String]( + GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG -> "3", // don't want to lose offset + GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG -> "1", + GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG -> "10", // set small enough session timeout + GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG -> "0", + GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG -> maxGroupSize.toString, + ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG -> "false", + ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG -> "true", + ReplicationConfigs.UNCLEAN_LEADER_ELECTION_INTERVAL_MS_CONFIG -> "50", + KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG -> "50", + KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG -> "300", + ) + + override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { + super.kraftControllerConfigs(testInfo).map(props => { + testConfigs.foreachEntry((k, v) => 
props.setProperty(k, v)) + props + }) + } + private def generateKafkaConfigs(maxGroupSize: String = maxGroupSize.toString): Seq[KafkaConfig] = { val properties = new Properties - properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "3") // don't want to lose offset - properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1") - properties.put(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, "10") // set small enough session timeout - properties.put(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0") - properties.put(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, maxGroupSize) - properties.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") - properties.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false") - - FixedPortTestUtils.createBrokerConfigs(brokerCount, zkConnect, enableControlledShutdown = false) + testConfigs.foreachEntry((k, v) => properties.setProperty(k, v)) + FixedPortTestUtils.createBrokerConfigs(brokerCount, enableControlledShutdown = false) .map(KafkaConfig.fromProps(_, properties)) } @@ -81,7 +94,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testConsumptionWithBrokerFailures(quorum: String, groupProtocol: String): Unit = consumeWithBrokerFailures(10) /* @@ -126,7 +139,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testSeekAndCommitWithBrokerFailures(quorum: String, groupProtocol: String): Unit = seekAndCommitWithBrokerFailures(5) def seekAndCommitWithBrokerFailures(numIters: Int): Unit = { @@ -139,7 +152,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { consumer.seek(tp, 0) // wait until all the followers have synced the last HW with leader - TestUtils.waitUntilTrue(() => servers.forall(server => + TestUtils.waitUntilTrue(() => brokerServers.forall(server => server.replicaManager.localLog(tp).get.highWatermark == numRecords ), "Failed to update high watermark for followers after timeout") @@ -170,7 +183,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testSubscribeWhenTopicUnavailable(quorum: String, groupProtocol: String): Unit = { val numRecords = 1000 val newtopic = "newtopic" @@ -210,7 +223,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { receiveExactRecords(poller, numRecords, 10000) poller.shutdown() - servers.foreach(server => killBroker(server.config.brokerId)) + brokerServers.foreach(server => killBroker(server.config.brokerId)) Thread.sleep(500) restartDeadBrokers() @@ -222,7 +235,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } @ParameterizedTest(name = 
TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testClose(quorum: String, groupProtocol: String): Unit = { val numRecords = 10 val producer = createProducer() @@ -293,7 +306,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { this.consumerConfig.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout.toString) val consumer2 = createConsumerAndReceive(group2, manualAssign = true, numRecords) - servers.foreach(server => killBroker(server.config.brokerId)) + brokerServers.foreach(server => killBroker(server.config.brokerId)) val closeTimeout = 2000 val future1 = submitCloseAndValidate(consumer1, closeTimeout, None, Some(closeTimeout)) val future2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(requestTimeout)) @@ -325,10 +338,10 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { // roll all brokers with a lesser max group size to make sure coordinator has the new config val newConfigs = generateKafkaConfigs(maxGroupSize.toString) - for (serverIdx <- servers.indices) { + for (serverIdx <- brokerServers.indices) { killBroker(serverIdx) val config = newConfigs(serverIdx) - servers(serverIdx) = TestUtils.createServer(config, time = brokerTime(config.brokerId)) + servers(serverIdx) = createBroker(config, time = brokerTime(config.brokerId)) restartDeadBrokers() } @@ -348,7 +361,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { * When we have the consumer group max size configured to X, the X+1th consumer trying to join should receive a fatal exception */ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(quorum: String, groupProtocol: String): Unit = { val group = "fatal-exception-test" val topic = "fatal-exception-test" @@ -387,7 +400,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { * close should terminate immediately without sending leave group. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testCloseDuringRebalance(quorum: String, groupProtocol: String): Unit = { val topic = "closetest" createTopic(topic, 10, brokerCount) @@ -439,7 +452,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { // Trigger another rebalance and shutdown all brokers // This consumer poll() doesn't complete and `tearDown` shuts down the executor and closes the consumer createConsumerToRebalance() - servers.foreach(server => killBroker(server.config.brokerId)) + brokerServers.foreach(server => killBroker(server.config.brokerId)) // consumer2 should close immediately without LeaveGroup request since there are no brokers available val closeFuture2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(0)) diff --git a/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala index 51a5afe391b87..a20e6954f8e3f 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala @@ -16,17 +16,25 @@ */ package kafka.api +import kafka.api.ConsumerRebootstrapTest._ +import kafka.server.QuorumTestHarness.getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows} +import org.junit.jupiter.api.Disabled import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{Arguments, MethodSource} -import java.util.Collections +import java.time.Duration +import java.util.{Collections, stream} +import java.util.concurrent.TimeUnit +import java.util.concurrent.TimeoutException class ConsumerRebootstrapTest extends RebootstrapTest { - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) - def testRebootstrap(quorum: String, groupProtocol: String): Unit = { + @ParameterizedTest(name = RebootstrapTestName) + @MethodSource(Array("rebootstrapTestParams")) + def testRebootstrap(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { sendRecords(10, 0) TestUtils.waitUntilTrue( @@ -37,7 +45,7 @@ class ConsumerRebootstrapTest extends RebootstrapTest { server1.shutdown() server1.awaitShutdown() - val consumer = createConsumer(configOverrides = clientOverrides) + val consumer = createConsumer(configOverrides = clientOverrides(useRebootstrapTriggerMs)) // Only the server 0 is available for the consumer during the bootstrap. 
consumer.assign(Collections.singleton(tp)) @@ -77,6 +85,41 @@ class ConsumerRebootstrapTest extends RebootstrapTest { consumeAndVerifyRecords(consumer, 10, 20, startingKeyAndValueIndex = 20, startingTimestamp = 20) } + @Disabled + @ParameterizedTest(name = RebootstrapTestName) + @MethodSource(Array("rebootstrapTestParams")) + def testRebootstrapDisabled(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { + server1.shutdown() + server1.awaitShutdown() + + val configOverrides = clientOverrides(useRebootstrapTriggerMs) + configOverrides.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "none") + if (useRebootstrapTriggerMs) + configOverrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "1000") + + val producer = createProducer(configOverrides = configOverrides) + val consumer = createConsumer(configOverrides = configOverrides) + val adminClient = createAdminClient(configOverrides = configOverrides) + + // Only the server 0 is available during the bootstrap. + val recordMetadata0 = producer.send(new ProducerRecord(topic, part, 0L, "key 0".getBytes, "value 0".getBytes)).get(15, TimeUnit.SECONDS) + assertEquals(0, recordMetadata0.offset()) + adminClient.listTopics().names().get(15, TimeUnit.SECONDS) + consumer.assign(Collections.singleton(tp)) + consumeAndVerifyRecords(consumer, 1, 0) + + server0.shutdown() + server0.awaitShutdown() + server1.startup() + + assertThrows(classOf[TimeoutException], () => producer.send(new ProducerRecord(topic, part, "key 2".getBytes, "value 2".getBytes)).get(5, TimeUnit.SECONDS)) + assertThrows(classOf[TimeoutException], () => adminClient.listTopics().names().get(5, TimeUnit.SECONDS)) + + val producer2 = createProducer(configOverrides = configOverrides) + producer2.send(new ProducerRecord(topic, part, 1L, "key 1".getBytes, "value 1".getBytes)).get(15, TimeUnit.SECONDS) + assertEquals(0, consumer.poll(Duration.ofSeconds(5)).count) + } + private def sendRecords(numRecords: Int, from: Int): Unit = { val producer: KafkaProducer[Array[Byte], Array[Byte]] = createProducer() (from until (numRecords + from)).foreach { i => @@ -87,3 +130,17 @@ class ConsumerRebootstrapTest extends RebootstrapTest { producer.close() } } + +object ConsumerRebootstrapTest { + + final val RebootstrapTestName = s"${TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames}.useRebootstrapTriggerMs={2}" + def rebootstrapTestParams: stream.Stream[Arguments] = { + assertEquals(1, getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly.count()) + val args = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly + .findFirst().get.get + stream.Stream.of( + Arguments.of((args :+ true):_*), + Arguments.of((args :+ false):_*) + ) + } +} diff --git a/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala index 072952b2c39d4..0c6a58d98d12b 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala @@ -39,7 +39,9 @@ class ConsumerTopicCreationTest { @MethodSource(Array("parameters")) def testAutoTopicCreation(groupProtocol: String, brokerAutoTopicCreationEnable: JBoolean, consumerAllowAutoCreateTopics: JBoolean): Unit = { val testCase = new ConsumerTopicCreationTest.TestCase(groupProtocol, brokerAutoTopicCreationEnable, consumerAllowAutoCreateTopics) - testCase.setUp(new EmptyTestInfo()) + testCase.setUp(new 
EmptyTestInfo() { + override def getDisplayName = "quorum=kraft" + }) try testCase.test() finally testCase.tearDown() } @@ -88,7 +90,7 @@ object ConsumerTopicCreationTest { }, "Timed out waiting to consume") // MetadataRequest is guaranteed to create the topic znode if creation was required - val topicCreated = zkClient.getAllTopicsInCluster().contains(topic_2) + val topicCreated = getTopicIds().keySet.contains(topic_2) if (brokerAutoTopicCreationEnable && consumerAllowAutoCreateTopics) assertTrue(topicCreated) else diff --git a/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala index 5d9455d1b40da..e3c8b2b85a19b 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala @@ -17,29 +17,17 @@ package kafka.api import kafka.utils.TestInfoUtils -import org.apache.kafka.clients.producer.ProducerConfig import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.server.config.ReplicationConfigs import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import java.util -import java.util.{Collections, Optional, Properties} -import scala.annotation.nowarn +import java.util.{Collections, Optional} import scala.jdk.CollectionConverters._ class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTest { - override protected def brokerPropertyOverrides(properties: Properties): Unit = { - // legacy message formats are only supported with IBP < 3.0 - // KRaft mode is not supported for inter.broker.protocol.version = 2.8, The minimum version required is 3.0-IV1" - if (!isKRaftTest()) - properties.put(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "2.8") - } - - @nowarn("cat=deprecation") @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetsForTimes(quorum: String, groupProtocol: String): Unit = { @@ -47,11 +35,8 @@ class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTes val topic1 = "part-test-topic-1" val topic2 = "part-test-topic-2" val topic3 = "part-test-topic-3" - val props = new Properties() - props.setProperty(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.9.0") createTopic(topic1, numParts) - // Topic2 is in old message format. 
- createTopic(topic2, numParts, 1, props) + createTopic(topic2, numParts) createTopic(topic3, numParts) val consumer = createConsumer() @@ -91,22 +76,16 @@ class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTes assertEquals(20, timestampTopic1P1.timestamp) assertEquals(Optional.of(0), timestampTopic1P1.leaderEpoch) - if (!isKRaftTest()) { - assertNull(timestampOffsets.get(new TopicPartition(topic2, 0)), "null should be returned when message format is 0.9.0") - assertNull(timestampOffsets.get(new TopicPartition(topic2, 1)), "null should be returned when message format is 0.9.0") - } - else { - // legacy message formats are supported for IBP version < 3.0 and KRaft runs on minimum version 3.0-IV1 - val timestampTopic2P0 = timestampOffsets.get(new TopicPartition(topic2, 0)) - assertEquals(40, timestampTopic2P0.offset) - assertEquals(40, timestampTopic2P0.timestamp) - assertEquals(Optional.of(0), timestampTopic2P0.leaderEpoch) - - val timestampTopic2P1 = timestampOffsets.get(new TopicPartition(topic2, 1)) - assertEquals(60, timestampTopic2P1.offset) - assertEquals(60, timestampTopic2P1.timestamp) - assertEquals(Optional.of(0), timestampTopic2P1.leaderEpoch) - } + // legacy message formats are supported for IBP version < 3.0 and KRaft runs on minimum version 3.0-IV1 + val timestampTopic2P0 = timestampOffsets.get(new TopicPartition(topic2, 0)) + assertEquals(40, timestampTopic2P0.offset) + assertEquals(40, timestampTopic2P0.timestamp) + assertEquals(Optional.of(0), timestampTopic2P0.leaderEpoch) + + val timestampTopic2P1 = timestampOffsets.get(new TopicPartition(topic2, 1)) + assertEquals(60, timestampTopic2P1.offset) + assertEquals(60, timestampTopic2P1.timestamp) + assertEquals(Optional.of(0), timestampTopic2P1.leaderEpoch) val timestampTopic3P0 = timestampOffsets.get(new TopicPartition(topic3, 0)) assertEquals(80, timestampTopic3P0.offset) @@ -116,20 +95,14 @@ class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTes assertNull(timestampOffsets.get(new TopicPartition(topic3, 1))) } - @nowarn("cat=deprecation") @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testEarliestOrLatestOffsets(quorum: String, groupProtocol: String): Unit = { - val topic0 = "topicWithNewMessageFormat" - val topic1 = "topicWithOldMessageFormat" - val prop = new Properties() - // idempotence producer doesn't support old version of messages - prop.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false") - val producer = createProducer(configOverrides = prop) + val topic0 = "topic0" + val topic1 = "topic1" + val producer = createProducer() createTopicAndSendRecords(producer, topicName = topic0, numPartitions = 2, recordsPerPartition = 100) - val props = new Properties() - props.setProperty(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.9.0") - createTopic(topic1, numPartitions = 1, replicationFactor = 1, props) + createTopic(topic1) sendRecords(producer, numRecords = 100, new TopicPartition(topic1, 0)) val t0p0 = new TopicPartition(topic0, 0) diff --git a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala index bff719681a2b8..4d443a7f9b7af 100644 --- a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala +++ b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala @@ -19,7 +19,6 @@ import kafka.api.GroupedUserQuotaCallback._ 
import kafka.security.{JaasModule, JaasTestUtils} import kafka.server._ import kafka.utils.{Logging, TestInfoUtils, TestUtils} -import kafka.zk.ConfigEntityChangeNotificationZNode import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} @@ -31,7 +30,7 @@ import org.apache.kafka.common.{Cluster, Reconfigurable} import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} import org.apache.kafka.server.quota._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, Disabled, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource @@ -39,9 +38,11 @@ import java.util.Properties import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} import java.{lang, util} +import scala.collection.Seq import scala.collection.mutable.ArrayBuffer import scala.jdk.CollectionConverters._ +@Disabled("KAFKA-18213") class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { override protected def securityProtocol = SecurityProtocol.SASL_SSL @@ -64,7 +65,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some("SCRAM-SHA-256"), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some("SCRAM-SHA-256"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) this.serverConfig.setProperty(QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, classOf[GroupedUserQuotaCallback].getName) this.serverConfig.setProperty(s"${listenerName.configPrefix}${BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG}", classOf[GroupedUserPrincipalBuilder].getName) @@ -86,8 +87,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { override def configureSecurityBeforeServersStart(testInfo: TestInfo): Unit = { super.configureSecurityBeforeServersStart(testInfo) - zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path) - createScramCredentials(zkConnect, JaasTestUtils.KAFKA_SCRAM_ADMIN, JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD) + createScramCredentials(createAdminClient(), JaasTestUtils.KAFKA_SCRAM_ADMIN, JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @@ -148,7 +148,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true) // Remove the second topic with large number of partitions, verify no longer throttled - adminZkClient.deleteTopic(largeTopic) + deleteTopic(largeTopic) user = addUser("group1_user3", brokerId) user.waitForQuotaUpdate(8000 * 100, 2500 * 100, defaultRequestQuota) user.removeThrottleMetrics() // since group was throttled before @@ -179,8 +179,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { } private def createTopic(topic: String, numPartitions: Int, leader: Int): Unit = { - val assignment = (0 until numPartitions).map { i => i -> Seq(leader) }.toMap - TestUtils.createTopic(zkClient, topic, assignment, servers) + // TODO createTopic } private def createAdminClient(): Admin = { @@ 
-235,7 +234,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { createProducer(), createConsumer(), adminClient) } - case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaServer, + case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaBroker, producerClientId: String, consumerClientId: String, override val producer: KafkaProducer[Array[Byte], Array[Byte]], override val consumer: Consumer[Array[Byte], Array[Byte]], diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala index d0798a71412d4..ab5b587a0e681 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala @@ -117,7 +117,7 @@ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), KafkaSasl)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism))) super.setUp(testInfo) privilegedAdminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) } diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala index d886d52ee9a2a..833b06654d3ee 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala @@ -66,7 +66,7 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE override def configureSecurityAfterServersStart(): Unit = { // Create the Acls before calling super which will create the additiona tokens - Using(createPrivilegedAdminClient()) { superuserAdminClient => + Using.resource(createPrivilegedAdminClient()) { superuserAdminClient => superuserAdminClient.createAcls(List(AclTokenOtherDescribe, AclTokenCreate, AclTokenDescribe).asJava).values brokers.foreach { s => @@ -106,8 +106,8 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE @ParameterizedTest @ValueSource(strings = Array("kraft")) def testDescribeTokenForOtherUserFails(quorum: String): Unit = { - Using(createScramAdminClient(kafkaClientSaslMechanism, describeTokenFailPrincipal.getName, describeTokenFailPassword)) { describeTokenFailAdminClient => - Using(createScramAdminClient(kafkaClientSaslMechanism, otherClientPrincipal.getName, otherClientPassword)) { otherClientAdminClient => + Using.resource(createScramAdminClient(kafkaClientSaslMechanism, describeTokenFailPrincipal.getName, describeTokenFailPassword)) { describeTokenFailAdminClient => + Using.resource(createScramAdminClient(kafkaClientSaslMechanism, otherClientPrincipal.getName, otherClientPassword)) { otherClientAdminClient => otherClientAdminClient.createDelegationToken().delegationToken().get() val tokens = describeTokenFailAdminClient.describeDelegationToken( new DescribeDelegationTokenOptions().owners(Collections.singletonList(otherClientPrincipal)) diff --git a/core/src/test/scala/integration/kafka/api/DescribeAuthorizedOperationsTest.scala 
b/core/src/test/scala/integration/kafka/api/DescribeAuthorizedOperationsTest.scala index c61494b122eae..0f23b93e31cf6 100644 --- a/core/src/test/scala/integration/kafka/api/DescribeAuthorizedOperationsTest.scala +++ b/core/src/test/scala/integration/kafka/api/DescribeAuthorizedOperationsTest.scala @@ -111,7 +111,7 @@ class DescribeAuthorizedOperationsTest extends IntegrationTestHarness with SaslS @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), Both, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) TestUtils.waitUntilBrokerMetadataIsPropagated(servers) client = Admin.create(createConfig()) diff --git a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala index e1ed205cd29c2..fe9336a04f4f6 100644 --- a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala @@ -58,7 +58,7 @@ import scala.jdk.CollectionConverters._ * brokers. * * To start brokers we need to set a cluster ACL, which happens optionally in KafkaServerTestHarness. - * The remaining ACLs to enable access to producers and consumers are set here. To set ACLs, we use AclCommand directly. + * The remaining ACLs to enable access to producers and consumers are set here. * * Finally, we rely on SaslSetup to bootstrap and setup Kerberos. We don't use * SaslTestHarness here directly because it extends QuorumTestHarness, and we @@ -171,7 +171,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * Tests the ability of producing and consuming with the appropriate ACLs set. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeViaAssign(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() @@ -200,7 +200,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeViaSubscribe(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() @@ -210,7 +210,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeWithWildcardAcls(quorum: String, groupProtocol: String): Unit = { setWildcardResourceAcls() val producer = createProducer() @@ -222,7 +222,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeWithPrefixedAcls(quorum: String, groupProtocol: String): Unit = { setPrefixedResourceAcls() val producer = createProducer() @@ -234,7 +234,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeTopicAutoCreateTopicCreateAcl(quorum: String, groupProtocol: String): Unit = { // topic2 is not created on setup() val tp2 = new TopicPartition("topic2", 0) @@ -404,7 +404,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * ACL set. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithoutDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() @@ -415,7 +415,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithoutDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() @@ -456,7 +456,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() @@ -468,7 +468,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() @@ -497,7 +497,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * ACL set. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoGroupAcl(quorum: String, groupProtocol: String): Unit = { val superuserAdminClient = createSuperuserAdminClient() superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values diff --git a/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala index 6c8d119daee38..c423c95ae9d89 100644 --- a/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala +++ b/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala @@ -102,7 +102,7 @@ class EndToEndClusterIdTest extends KafkaServerTestHarness { this.serverConfig.setProperty(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockBrokerMetricsReporter].getName) override def generateConfigs = { - val cfgs = TestUtils.createBrokerConfigs(serverCount, zkConnectOrNull, interBrokerSecurityProtocol = Some(securityProtocol), + val cfgs = TestUtils.createBrokerConfigs(serverCount, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties) cfgs.foreach(_ ++= serverConfig) cfgs.map(KafkaConfig.fromProps) diff --git a/core/src/test/scala/integration/kafka/api/FixedPortTestUtils.scala b/core/src/test/scala/integration/kafka/api/FixedPortTestUtils.scala index bf5f8c1e6a3ec..cb0d5d0ee0f55 100644 --- a/core/src/test/scala/integration/kafka/api/FixedPortTestUtils.scala +++ b/core/src/test/scala/integration/kafka/api/FixedPortTestUtils.scala @@ -39,12 +39,11 @@ object FixedPortTestUtils { } def createBrokerConfigs(numConfigs: Int, - zkConnect: String, enableControlledShutdown: Boolean = true, enableDeleteTopic: Boolean = false): Seq[Properties] = { val ports = FixedPortTestUtils.choosePorts(numConfigs) (0 until numConfigs).map { node => - TestUtils.createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, ports(node)) + TestUtils.createBrokerConfig(node, enableControlledShutdown, enableDeleteTopic, ports(node)) } } diff --git a/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala index 46b34f4efdaff..e9a0644a26c63 100644 --- a/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala @@ -33,6 +33,7 @@ import org.apache.kafka.metadata.authorizer.StandardAuthorizer import org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.function.Executable import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource @@ -132,6 +133,84 @@ class GroupAuthorizerIntegrationTest extends BaseRequestTest { assertEquals(Set(topic), consumeException.unauthorizedTopics.asScala) } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeUnsubscribeWithoutGroupPermission(quorum: String, groupProtocol: String): Unit = { + val 
topic = "topic" + + createTopic(topic, listenerName = interBrokerListenerName) + + // allow topic read/write permission to poll/send record + addAndVerifyAcls( + Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val producer = createProducer() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() + producer.close() + + // allow group read permission to join group + val group = "group" + addAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + val props = new Properties() + props.put(ConsumerConfig.GROUP_ID_CONFIG, group) + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + val consumer = createConsumer(configOverrides = props) + consumer.subscribe(List(topic).asJava) + TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) + + removeAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.unsubscribe() + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeCloseWithoutGroupPermission(quorum: String, groupProtocol: String): Unit = { + val topic = "topic" + createTopic(topic, listenerName = interBrokerListenerName) + + // allow topic read/write permission to poll/send record + addAndVerifyAcls( + Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val producer = createProducer() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() + + // allow group read permission to join group + val group = "group" + addAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + val props = new Properties() + props.put(ConsumerConfig.GROUP_ID_CONFIG, group) + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + val consumer = createConsumer(configOverrides = props) + consumer.subscribe(List(topic).asJava) + TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) + + removeAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.close() + }) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testAuthorizedProduceAndConsume(quorum: String, groupProtocol: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala index b506d2bea2c49..84214a79ed91f 100644 --- a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala @@ -19,6 +19,7 @@ import org.apache.kafka.common.test.api.ClusterTestExtensions 
import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, ConsumerGroupDescription} import org.apache.kafka.clients.consumer.{Consumer, GroupProtocol, OffsetAndMetadata} +import org.apache.kafka.common.errors.GroupIdNotFoundException import org.apache.kafka.common.{ConsumerGroupState, GroupType, KafkaFuture, TopicPartition} import org.junit.jupiter.api.Assertions._ @@ -221,7 +222,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { .asScala .toMap - assertDescribedGroup(groups, "grp3", GroupType.CLASSIC, ConsumerGroupState.DEAD) + assertDescribedDeadGroup(groups, "grp3") } } @@ -289,7 +290,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } private def withAdmin(f: Admin => Unit): Unit = { - val admin: Admin = cluster.createAdminClient() + val admin: Admin = cluster.admin() try { f(admin) } finally { @@ -328,4 +329,18 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { assertEquals(state, group.state) assertEquals(Collections.emptyList, group.members) } + + private def assertDescribedDeadGroup( + groups: Map[String, KafkaFuture[ConsumerGroupDescription]], + groupId: String + ): Unit = { + try { + groups(groupId).get(10, TimeUnit.SECONDS) + fail(s"Group $groupId should not be found") + } catch { + case e: java.util.concurrent.ExecutionException => + assertTrue(e.getCause.isInstanceOf[GroupIdNotFoundException]) + assertEquals(s"Group $groupId not found.", e.getCause.getMessage) + } + } } diff --git a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala index 2c8de8e83ec4d..ed5611eb74b84 100644 --- a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala +++ b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala @@ -66,22 +66,15 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { } override def generateConfigs: Seq[KafkaConfig] = { - val cfgs = TestUtils.createBrokerConfigs(brokerCount, zkConnectOrNull, interBrokerSecurityProtocol = Some(securityProtocol), + val cfgs = TestUtils.createBrokerConfigs(brokerCount, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties, logDirCount = logDirCount) configureListeners(cfgs) modifyConfigs(cfgs) - if (isZkMigrationTest()) { - cfgs.foreach(_.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true")) - } if (isShareGroupTest()) { cfgs.foreach(_.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,share")) cfgs.foreach(_.setProperty(ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, "true")) } - - if(isKRaftTest()) { - cfgs.foreach(_.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, TestUtils.tempDir().getAbsolutePath)) - } - + cfgs.foreach(_.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, TestUtils.tempDir().getAbsolutePath)) insertControllerListenersIfNeeded(cfgs) cfgs.map(KafkaConfig.fromProps) } @@ -106,16 +99,14 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { } private def insertControllerListenersIfNeeded(props: Seq[Properties]): Unit = { - if (isKRaftTest()) { - props.foreach { config => - // Add a security protocol for the controller endpoints, if one is not already set. 
- val securityPairs = config.getProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "").split(",") - val toAdd = config.getProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "").split(",").filter( - e => !securityPairs.exists(_.startsWith(s"$e:"))) - if (toAdd.nonEmpty) { - config.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, (securityPairs ++ - toAdd.map(e => s"$e:${controllerListenerSecurityProtocol.toString}")).mkString(",")) - } + props.foreach { config => + // Add a security protocol for the controller endpoints, if one is not already set. + val securityPairs = config.getProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "").split(",") + val toAdd = config.getProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "").split(",").filter( + e => !securityPairs.exists(_.startsWith(s"$e:"))) + if (toAdd.nonEmpty) { + config.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, (securityPairs ++ + toAdd.map(e => s"$e:${controllerListenerSecurityProtocol.toString}")).mkString(",")) } } } diff --git a/core/src/test/scala/integration/kafka/api/MetricsTest.scala b/core/src/test/scala/integration/kafka/api/MetricsTest.scala index 71d2764aee856..e08801343fc5b 100644 --- a/core/src/test/scala/integration/kafka/api/MetricsTest.scala +++ b/core/src/test/scala/integration/kafka/api/MetricsTest.scala @@ -65,7 +65,7 @@ class MetricsTest extends IntegrationTestHarness with SaslSetup { } this.consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "classic") verifyNoRequestMetrics("Request metrics not removed in a previous test") - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), KafkaSasl, kafkaServerJaasEntryName)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), kafkaServerJaasEntryName)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index 797a7fb8977dd..11960b7f375ee 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -18,6 +18,8 @@ package kafka.api import java.io.File import java.net.InetAddress +import java.nio.ByteBuffer +import java.nio.file.{Files, Paths, StandardOpenOption} import java.lang.{Long => JLong} import java.time.{Duration => JDuration} import java.util.Arrays.asList @@ -26,41 +28,42 @@ import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit} import java.util.{Collections, Optional, Properties} import java.{time, util} import kafka.integration.KafkaServerTestHarness -import kafka.server.metadata.KRaftMetadataCache import kafka.server.KafkaConfig import kafka.utils.TestUtils._ import kafka.utils.{Log4jController, TestInfoUtils, TestUtils} import org.apache.kafka.clients.HostResolver +import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.ConfigEntry.ConfigSource import org.apache.kafka.clients.admin._ -import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, GroupProtocol, KafkaConsumer, OffsetAndMetadata, ShareConsumer} +import org.apache.kafka.clients.consumer.{CommitFailedException, Consumer, ConsumerConfig, GroupProtocol, KafkaConsumer, OffsetAndMetadata, ShareConsumer} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import 
org.apache.kafka.common.acl.{AccessControlEntry, AclBinding, AclBindingFilter, AclOperation, AclPermissionType} import org.apache.kafka.common.config.{ConfigResource, LogLevelConfig, SslConfigs, TopicConfig} import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.KafkaException import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter} +import org.apache.kafka.common.record.FileRecords import org.apache.kafka.common.requests.DeleteRecordsRequest import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer} import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{ConsumerGroupState, ElectionType, GroupType, IsolationLevel, ShareGroupState, TopicCollection, TopicPartition, TopicPartitionInfo, TopicPartitionReplica, Uuid} +import org.apache.kafka.common.{ConsumerGroupState, ElectionType, GroupState, GroupType, IsolationLevel, TopicCollection, TopicPartition, TopicPartitionInfo, TopicPartitionReplica, Uuid} import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} -import org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS -import org.apache.log4j.PropertyConfigurator +import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs, ServerLogConfigs, ZkConfigs} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogFileUtils} +import org.apache.kafka.test.TestUtils.{DEFAULT_MAX_WAIT_MS, assertFutureThrows} +import org.apache.logging.log4j.core.config.Configurator import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} +import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import org.slf4j.LoggerFactory import java.util.AbstractMap.SimpleImmutableEntry -import scala.annotation.nowarn import scala.collection.Seq import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} @@ -85,18 +88,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) + Configurator.reconfigure(); brokerLoggerConfigResource = new ConfigResource( ConfigResource.Type.BROKER_LOGGER, brokers.head.config.brokerId.toString) } - @AfterEach - override def tearDown(): Unit = { - // Due to the fact that log4j is not re-initialized across tests, changing a logger's log level persists - // across test classes. We need to clean up the changes done after testing. 
- resetLogging() - super.tearDown() - } - @ParameterizedTest @Timeout(30) @ValueSource(strings = Array("kraft")) @@ -118,29 +114,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally brokenClient.close(time.Duration.ZERO) } - @ParameterizedTest - @Timeout(30) - @ValueSource(strings = Array("kraft")) - def testAlterConfigsWithOptionTimeoutMs(quorum: String): Unit = { - client = createAdminClient - val config = createConfig - config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") - val brokenClient = Admin.create(config) - - try { - val alterLogLevelsEntries = Seq( - new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL) - ).asJavaCollection - - val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.alterConfigs( - Map(brokerLoggerConfigResource -> new Config(alterLogLevelsEntries)).asJava, - new AlterConfigsOptions().timeoutMs(0)).all().get() - }) - assertInstanceOf(classOf[TimeoutException], exception.getCause) - } finally brokenClient.close(time.Duration.ZERO) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testCreatePartitionWithOptionRetryOnQuotaViolation(quorum: String): Unit = { @@ -614,6 +587,30 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(brokerStrs.mkString(","), nodeStrs.mkString(",")) } + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesWithFencedBroker(quorum: String): Unit = { + client = createAdminClient + val fencedBrokerId = brokers.last.config.brokerId + killBroker(fencedBrokerId, JDuration.ofMillis(0)) + // It takes a few seconds for a broker to get fenced after being killed + // So we retry until only 2 of 3 brokers returned in the result or the max wait is reached + TestUtils.retry(20000) { + assertTrue(client.describeCluster().nodes().get().asScala.size.equals(brokers.size - 1)) + } + + // List nodes again but this time include the fenced broker + val nodes = client.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)).nodes().get().asScala + assertTrue(nodes.size.equals(brokers.size)) + nodes.foreach(node => { + if (node.id().equals(fencedBrokerId)) { + assertTrue(node.isFenced) + } else { + assertFalse(node.isFenced) + } + }) + } + @ParameterizedTest @ValueSource(strings = Array("kraft")) def testAdminClientHandlingBadIPWithoutTimeout(quorum: String): Unit = { @@ -745,7 +742,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val nonExistingTopic = "non-existing" val results = client.describeTopics(Seq(nonExistingTopic, existingTopic).asJava).topicNameValues() assertEquals(existingTopic, results.get(existingTopic).get.name) - assertFutureExceptionTypeEquals(results.get(nonExistingTopic), classOf[UnknownTopicOrPartitionException]) + assertFutureThrows(results.get(nonExistingTopic), classOf[UnknownTopicOrPartitionException]) } @ParameterizedTest @@ -764,7 +761,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val results = client.describeTopics(TopicCollection.ofTopicIds(Seq(existingTopicId, nonExistingTopicId).asJava)).topicIdValues() assertEquals(existingTopicId, results.get(existingTopicId).get.topicId()) - assertFutureExceptionTypeEquals(results.get(nonExistingTopicId), classOf[UnknownTopicIdException]) + assertFutureThrows(results.get(nonExistingTopicId), classOf[UnknownTopicIdException]) } @ParameterizedTest @@ -985,8 +982,10 @@ class PlaintextAdminIntegrationTest extends 
BaseAdminIntegrationTest { val topic1 = "describe-alter-configs-topic-1" val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) val topicConfig1 = new Properties - topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "500000") - topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, "60000000") + val maxMessageBytes = "500000" + val retentionMs = "60000000" + topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageBytes) + topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, retentionMs) createTopic(topic1, numPartitions = 1, replicationFactor = 1, topicConfig1) val topic2 = "describe-alter-configs-topic-2" @@ -1056,7 +1055,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(brokers(2).config.logCleanerThreads.toString, configs.get(brokerResource2).get(CleanerConfig.LOG_CLEANER_THREADS_PROP).value) - checkValidAlterConfigs(client, this, topicResource1, topicResource2) + checkValidAlterConfigs(client, this, topicResource1, topicResource2, maxMessageBytes, retentionMs) } @ParameterizedTest @@ -1117,8 +1116,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { groupResource -> groupAlterConfigs ).asJava, new AlterConfigsOptions().validateOnly(true)) - assertFutureExceptionTypeEquals(alterResult.values.get(groupResource), classOf[InvalidConfigurationException], - Some("consumer.session.timeout.ms must be greater than or equal to group.consumer.min.session.timeout.ms")) + assertFutureThrows(alterResult.values.get(groupResource), classOf[InvalidConfigurationException], + "consumer.session.timeout.ms must be greater than or equal to group.consumer.min.session.timeout.ms") } @ParameterizedTest @@ -1539,6 +1538,69 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertNull(returnedOffsets.get(topicPartition)) } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testDeleteRecordsAfterCorruptRecords(quorum: String, groupProtocol: String): Unit = { + val config = new Properties() + config.put(TopicConfig.SEGMENT_BYTES_CONFIG, "200") + createTopic(topic, numPartitions = 1, replicationFactor = 1, config) + + client = createAdminClient + + val consumer = createConsumer() + subscribeAndWaitForAssignment(topic, consumer) + + val producer = createProducer() + def sendRecords(begin: Int, end: Int) = { + val futures = (begin until end).map( i => { + val record = new ProducerRecord(topic, partition, s"$i".getBytes, s"$i".getBytes) + producer.send(record) + }) + futures.foreach(_.get) + } + sendRecords(0, 10) + sendRecords(10, 20) + + val topicDesc = client.describeTopics(Collections.singletonList(topic)).allTopicNames().get().get(topic) + assertEquals(1, topicDesc.partitions().size()) + val partitionLeaderId = topicDesc.partitions().get(0).leader().id() + val logDirMap = client.describeLogDirs(Collections.singletonList(partitionLeaderId)) + .allDescriptions().get().get(partitionLeaderId) + val logDir = logDirMap.entrySet.stream + .filter(entry => entry.getValue.replicaInfos.containsKey(topicPartition)).findAny().get().getKey + // retrieve the path of the first segment + val logFilePath = LogFileUtils.logFile(Paths.get(logDir).resolve(topicPartition.toString).toFile, 0).toPath + val firstSegmentRecordsSize = FileRecords.open(logFilePath.toFile).records().asScala.iterator.size + assertTrue(firstSegmentRecordsSize > 0) + + // manually load 
the inactive segment file to corrupt the data + val originalContent = Files.readAllBytes(logFilePath) + val newContent = ByteBuffer.allocate(JLong.BYTES + Integer.BYTES + originalContent.length) + newContent.putLong(0) // offset + newContent.putInt(0) // size -> this will make FileLogInputStream throw "Found record size 0 smaller than minimum record..." + newContent.put(Files.readAllBytes(logFilePath)) + newContent.flip() + Files.write(logFilePath, newContent.array(), StandardOpenOption.TRUNCATE_EXISTING) + + consumer.seekToBeginning(Collections.singletonList(topicPartition)) + assertEquals("Encountered corrupt message when fetching offset 0 for topic-partition topic-0", + assertThrows(classOf[KafkaException], () => consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS))).getMessage) + + val partitionFollowerId = brokers.map(b => b.config.nodeId).filter(id => id != partitionLeaderId).head + val newAssignment = Map(topicPartition -> Optional.of(new NewPartitionReassignment( + List(Integer.valueOf(partitionLeaderId), Integer.valueOf(partitionFollowerId)).asJava))).asJava + + // add follower to topic partition + client.alterPartitionReassignments(newAssignment).all().get() + // delete records in corrupt segment (the first segment) + client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(firstSegmentRecordsSize)).asJava).all.get + // verify reassignment is finished after delete records + TestUtils.waitForBrokersInIsr(client, topicPartition, Set(partitionLeaderId, partitionFollowerId)) + // seek to beginning and make sure we can consume all records + consumer.seekToBeginning(Collections.singletonList(topicPartition)) + assertEquals(19, TestUtils.consumeRecords(consumer, 20 - firstSegmentRecordsSize).last.offset()) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumeAfterDeleteRecords(quorum: String, groupProtocol: String): Unit = { @@ -1657,10 +1719,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) client = createAdminClient - assertFutureExceptionTypeEquals(client.describeAcls(AclBindingFilter.ANY).values(), classOf[SecurityDisabledException]) - assertFutureExceptionTypeEquals(client.createAcls(Collections.singleton(acl)).all(), + assertFutureThrows(client.describeAcls(AclBindingFilter.ANY).values(), classOf[SecurityDisabledException]) + assertFutureThrows(client.createAcls(Collections.singleton(acl)).all(), classOf[SecurityDisabledException]) - assertFutureExceptionTypeEquals(client.deleteAcls(Collections.singleton(acl.toFilter())).all(), + assertFutureThrows(client.deleteAcls(Collections.singleton(acl.toFilter())).all(), classOf[SecurityDisabledException]) } @@ -1677,7 +1739,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val future = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all() client.close(time.Duration.ofHours(2)) val future2 = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all() - assertFutureExceptionTypeEquals(future2, classOf[IllegalStateException]) + assertFutureThrows(future2, classOf[IllegalStateException]) future.get client.close(time.Duration.ofMinutes(30)) // multiple close-with-timeout should have no effect } @@ 
-1697,7 +1759,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().timeoutMs(900000)).all() client.close(time.Duration.ZERO) - assertFutureExceptionTypeEquals(future, classOf[TimeoutException]) + assertFutureThrows(future, classOf[TimeoutException]) } /** @@ -1714,7 +1776,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val startTimeMs = Time.SYSTEM.milliseconds() val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().timeoutMs(2)).all() - assertFutureExceptionTypeEquals(future, classOf[TimeoutException]) + assertFutureThrows(future, classOf[TimeoutException]) val endTimeMs = Time.SYSTEM.milliseconds() assertTrue(endTimeMs > startTimeMs, "Expected the timeout to take at least one millisecond.") } @@ -1732,7 +1794,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = KafkaAdminClientTest.createInternal(new AdminClientConfig(config), factory) val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().validateOnly(true)).all() - assertFutureExceptionTypeEquals(future, classOf[TimeoutException]) + assertFutureThrows(future, classOf[TimeoutException]) val future2 = client.createTopics(Seq("mytopic3", "mytopic4").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().validateOnly(true)).all() future2.get @@ -1743,7 +1805,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * Test the consumer group APIs. */ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17960")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerGroups(quorum: String, groupProtocol: String): Unit = { val config = createConfig client = Admin.create(config) @@ -1808,7 +1870,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { consumer.poll(JDuration.ofSeconds(5)) if (!consumer.assignment.isEmpty && latch.getCount > 0L) latch.countDown() - consumer.commitSync() + try { + consumer.commitSync() + } catch { + case _: CommitFailedException => // Ignore and retry on next iteration. 
+ } } } catch { case _: InterruptException => // Suppress the output to stderr @@ -1828,7 +1894,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.waitUntilTrue(() => { val matching = client.listConsumerGroups.all.get.asScala.filter(group => group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE) + group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId") @@ -1836,29 +1902,29 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE) + group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId in group type $groupType") TestUtils.waitUntilTrue(() => { val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) - .inStates(Set(ConsumerGroupState.STABLE).asJava) + .inGroupStates(Set(GroupState.STABLE).asJava) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE) + group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.STABLE).asJava) + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE) + group.groupState.get == GroupState.STABLE) matching.size == 1 }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.EMPTY).asJava) + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) val matching = client.listConsumerGroups(options).all.get.asScala.filter( _.groupId == testGroupId) matching.isEmpty @@ -1871,12 +1937,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Test that we can get information about the test consumer group. 
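+      // Groups on the classic protocol do not expose a group epoch, so the new groupEpoch and
+      // targetAssignmentEpoch fields stay empty for them; consumer-protocol groups report the
+      // current epochs.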
assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) var testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() + if (groupType == GroupType.CLASSIC) { + assertTrue(testGroupDescription.groupEpoch.isEmpty) + assertTrue(testGroupDescription.targetAssignmentEpoch.isEmpty) + } else { + assertEquals(Optional.of(3), testGroupDescription.groupEpoch) + assertEquals(Optional.of(3), testGroupDescription.targetAssignmentEpoch) + } assertEquals(testGroupId, testGroupDescription.groupId()) assertFalse(testGroupDescription.isSimpleConsumerGroup) assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) val members = testGroupDescription.members() - members.asScala.foreach(member => assertEquals(testClientId, member.clientId())) + members.asScala.foreach { member => + assertEquals(testClientId, member.clientId) + assertEquals(if (groupType == GroupType.CLASSIC) Optional.empty else Optional.of(true), member.upgraded) + } val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) topicSet.foreach { topic => val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) @@ -1886,18 +1962,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) - // Test that the fake group is listed as dead. + // Test that the fake group throws GroupIdNotFoundException assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) - val fakeGroupDescription = describeWithFakeGroupResult.describedGroups().get(fakeGroupId).get() + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), classOf[GroupIdNotFoundException], + s"Group $fakeGroupId not found.") - assertEquals(fakeGroupId, fakeGroupDescription.groupId()) - assertEquals(0, fakeGroupDescription.members().size()) - assertEquals("", fakeGroupDescription.partitionAssignor()) - assertEquals(ConsumerGroupState.DEAD, fakeGroupDescription.state()) - assertEquals(expectedOperations, fakeGroupDescription.authorizedOperations()) - - // Test that all() returns 2 results - assertEquals(2, describeWithFakeGroupResult.all().get().size()) + // Test that all() also throws GroupIdNotFoundException + assertFutureThrows(describeWithFakeGroupResult.all(), classOf[GroupIdNotFoundException], + s"Group $fakeGroupId not found.") val testTopicPart0 = new TopicPartition(testTopicName, 0) @@ -1932,9 +2004,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { Collections.singleton(new MemberToRemove(invalidInstanceId)) )) - TestUtils.assertFutureExceptionTypeEquals(removeMembersResult.all, classOf[UnknownMemberIdException]) + assertFutureThrows(removeMembersResult.all, classOf[UnknownMemberIdException]) val firstMemberFuture = removeMembersResult.memberResult(new MemberToRemove(invalidInstanceId)) - TestUtils.assertFutureExceptionTypeEquals(firstMemberFuture, classOf[UnknownMemberIdException]) + assertFutureThrows(firstMemberFuture, classOf[UnknownMemberIdException]) // Test consumer group deletion var deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava) @@ -1942,12 +2014,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Deleting the fake group ID should get GroupIdNotFoundException. 
assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId)) - assertFutureExceptionTypeEquals(deleteResult.deletedGroups().get(fakeGroupId), + assertFutureThrows(deleteResult.deletedGroups().get(fakeGroupId), classOf[GroupIdNotFoundException]) // Deleting the real group ID should get GroupNotEmptyException assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) - assertFutureExceptionTypeEquals(deleteResult.deletedGroups().get(testGroupId), + assertFutureThrows(deleteResult.deletedGroups().get(testGroupId), classOf[GroupNotEmptyException]) // Test delete one correct static member @@ -2012,6 +2084,392 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } + /** + * Test the consumer group APIs. + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupWithMemberMigration(quorum: String): Unit = { + val config = createConfig + client = Admin.create(config) + var classicConsumer: Consumer[Array[Byte], Array[Byte]] = null + var consumerConsumer: Consumer[Array[Byte], Array[Byte]] = null + try { + // Verify that initially there are no consumer groups to list. + val list1 = client.listConsumerGroups + assertEquals(0, list1.all.get.size) + assertEquals(0, list1.errors.get.size) + assertEquals(0, list1.valid.get.size) + val testTopicName = "test_topic" + val testNumPartitions = 2 + + client.createTopics(util.Arrays.asList( + new NewTopic(testTopicName, testNumPartitions, 1.toShort), + )).all.get + waitForTopics(client, List(testTopicName), List()) + + val producer = createProducer() + try { + producer.send(new ProducerRecord(testTopicName, 0, null, null)) + producer.send(new ProducerRecord(testTopicName, 1, null, null)) + producer.flush() + } finally { + Utils.closeQuietly(producer, "producer") + } + + val testGroupId = "test_group_id" + val testClassicClientId = "test_classic_client_id" + val testConsumerClientId = "test_consumer_client_id" + + val newConsumerConfig = new Properties(consumerConfig) + newConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newConsumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, testClassicClientId) + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) + + classicConsumer = createConsumer(configOverrides = newConsumerConfig) + classicConsumer.subscribe(List(testTopicName).asJava) + classicConsumer.poll(JDuration.ofMillis(1000)) + + newConsumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, testConsumerClientId) + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name) + consumerConsumer = createConsumer(configOverrides = newConsumerConfig) + consumerConsumer.subscribe(List(testTopicName).asJava) + consumerConsumer.poll(JDuration.ofMillis(1000)) + + TestUtils.waitUntilTrue(() => { + classicConsumer.poll(JDuration.ofMillis(100)) + consumerConsumer.poll(JDuration.ofMillis(100)) + val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get + describeConsumerGroupResult.containsKey(testGroupId) && + describeConsumerGroupResult.get(testGroupId).groupState == GroupState.STABLE && + describeConsumerGroupResult.get(testGroupId).members.size == 2 + }, s"Expected to find 2 members in a stable group $testGroupId") + + val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get + val group = describeConsumerGroupResult.get(testGroupId) + assertNotNull(group) + assertEquals(Optional.of(2), group.groupEpoch) + assertEquals(Optional.of(2), group.targetAssignmentEpoch) 
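+      // Both epochs are expected to reach 2 once the two members have joined. The per-member
+      // upgraded flag checked below distinguishes the classic-protocol member (upgraded = false)
+      // from the member running the new consumer protocol (upgraded = true).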
+ + val classicMember = group.members.asScala.find(_.clientId == testClassicClientId) + assertTrue(classicMember.isDefined) + assertEquals(Optional.of(2), classicMember.get.memberEpoch) + assertEquals(Optional.of(false), classicMember.get.upgraded) + + val consumerMember = group.members.asScala.find(_.clientId == testConsumerClientId) + assertTrue(consumerMember.isDefined) + assertEquals(Optional.of(2), consumerMember.get.memberEpoch) + assertEquals(Optional.of(true), consumerMember.get.upgraded) + } finally { + Utils.closeQuietly(classicConsumer, "classicConsumer") + Utils.closeQuietly(consumerConsumer, "consumerConsumer") + Utils.closeQuietly(client, "adminClient") + } + } + + /** + * Test the consumer group APIs. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerGroupsDeprecatedConsumerGroupState(quorum: String, groupProtocol: String): Unit = { + val config = createConfig + client = Admin.create(config) + try { + // Verify that initially there are no consumer groups to list. + val list1 = client.listConsumerGroups() + assertEquals(0, list1.all().get().size()) + assertEquals(0, list1.errors().get().size()) + assertEquals(0, list1.valid().get().size()) + val testTopicName = "test_topic" + val testTopicName1 = testTopicName + "1" + val testTopicName2 = testTopicName + "2" + val testNumPartitions = 2 + + client.createTopics(util.Arrays.asList( + new NewTopic(testTopicName, testNumPartitions, 1.toShort), + new NewTopic(testTopicName1, testNumPartitions, 1.toShort), + new NewTopic(testTopicName2, testNumPartitions, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) + + val producer = createProducer() + try { + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + } finally { + Utils.closeQuietly(producer, "producer") + } + + val EMPTY_GROUP_INSTANCE_ID = "" + val testGroupId = "test_group_id" + val testClientId = "test_client_id" + val testInstanceId1 = "test_instance_id_1" + val testInstanceId2 = "test_instance_id_2" + val fakeGroupId = "fake_group_id" + + def createProperties(groupInstanceId: String): Properties = { + val newConsumerConfig = new Properties(consumerConfig) + // We need to disable the auto commit because after the members got removed from group, the offset commit + // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) + newConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + if (groupInstanceId != EMPTY_GROUP_INSTANCE_ID) { + newConsumerConfig.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) + } + newConsumerConfig + } + + // contains two static members and one dynamic member + val groupInstanceSet = Set(testInstanceId1, testInstanceId2, EMPTY_GROUP_INSTANCE_ID) + val consumerSet = groupInstanceSet.map { groupInstanceId => createConsumer(configOverrides = createProperties(groupInstanceId))} + val topicSet = Set(testTopicName, testTopicName1, testTopicName2) + + val latch = new CountDownLatch(consumerSet.size) + try { + def createConsumerThread[K,V](consumer: Consumer[K,V], topic: String): Thread = { + new Thread { + override def run : Unit = { + consumer.subscribe(Collections.singleton(topic)) + try { + 
while (true) { + consumer.poll(JDuration.ofSeconds(5)) + if (!consumer.assignment.isEmpty && latch.getCount > 0L) + latch.countDown() + try { + consumer.commitSync() + } catch { + case _: CommitFailedException => // Ignore and retry on next iteration. + } + } + } catch { + case _: InterruptException => // Suppress the output to stderr + } + } + } + } + + // Start consumers in a thread that will subscribe to a new group. + val consumerThreads = consumerSet.zip(topicSet).map(zipped => createConsumerThread(zipped._1, zipped._2)) + val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC + + try { + consumerThreads.foreach(_.start()) + assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) + // Test that we can list the new group. + TestUtils.waitUntilTrue(() => { + val matching = client.listConsumerGroups.all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + .inStates(Set(ConsumerGroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + .inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.EMPTY).asJava) + val matching = 
client.listConsumerGroups(options).all.get.asScala.filter( + _.groupId == testGroupId) + matching.isEmpty + }, s"Expected to find zero groups") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter( + _.groupId == testGroupId) + matching.isEmpty + }, s"Expected to find zero groups") + + val describeWithFakeGroupResult = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) + + // Test that we can get information about the test consumer group. + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) + var testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() + + assertEquals(testGroupId, testGroupDescription.groupId()) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) + val members = testGroupDescription.members() + members.asScala.foreach(member => assertEquals(testClientId, member.clientId())) + val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) + topicSet.foreach { topic => + val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) + assertEquals(testNumPartitions, topicPartitions.size) + } + + val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) + assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) + + // Test that the fake group throws GroupIdNotFoundException + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") + + // Test that all() also throws GroupIdNotFoundException + assertFutureThrows(describeWithFakeGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") + + val testTopicPart0 = new TopicPartition(testTopicName, 0) + + // Test listConsumerGroupOffsets + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 1) + }, s"Expected the offset for partition 0 to eventually become 1.") + + // Test listConsumerGroupOffsets with requireStable true + val options = new ListConsumerGroupOffsetsOptions().requireStable(true) + var parts = client.listConsumerGroupOffsets(testGroupId, options) + .partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec + val groupSpecs = Collections.singletonMap(testGroupId, + new ListConsumerGroupOffsetsSpec().topicPartitions(Collections.singleton(new TopicPartition(testTopicName, 0)))) + parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option + parts = client.listConsumerGroupOffsets(groupSpecs, 
options).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test delete non-exist consumer instance + val invalidInstanceId = "invalid-instance-id" + var removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions( + Collections.singleton(new MemberToRemove(invalidInstanceId)) + )) + + assertFutureThrows(removeMembersResult.all, classOf[UnknownMemberIdException]) + val firstMemberFuture = removeMembersResult.memberResult(new MemberToRemove(invalidInstanceId)) + assertFutureThrows(firstMemberFuture, classOf[UnknownMemberIdException]) + + // Test consumer group deletion + var deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava) + assertEquals(2, deleteResult.deletedGroups().size()) + + // Deleting the fake group ID should get GroupIdNotFoundException. + assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException]) + + // Deleting the real group ID should get GroupNotEmptyException + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(testGroupId), + classOf[GroupNotEmptyException]) + + // Test delete one correct static member + val removeOptions = new RemoveMembersFromConsumerGroupOptions(Collections.singleton(new MemberToRemove(testInstanceId1))) + removeOptions.reason("test remove") + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, removeOptions) + + assertNull(removeMembersResult.all().get()) + val validMemberFuture = removeMembersResult.memberResult(new MemberToRemove(testInstanceId1)) + assertNull(validMemberFuture.get()) + + val describeTestGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(1, describeTestGroupResult.describedGroups().size()) + + testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() + + assertEquals(testGroupId, testGroupDescription.groupId) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(consumerSet.size - 1, testGroupDescription.members().size()) + + // Delete all active members remaining (a static member + a dynamic member) + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions()) + assertNull(removeMembersResult.all().get()) + + // The group should contain no members now. 
+ testGroupDescription = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + .describedGroups().get(testGroupId).get() + assertTrue(testGroupDescription.members().isEmpty) + + // Consumer group deletion on empty group should succeed + deleteResult = client.deleteConsumerGroups(Seq(testGroupId).asJava) + assertEquals(1, deleteResult.deletedGroups().size()) + + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertNull(deleteResult.deletedGroups().get(testGroupId).get()) + + // Test alterConsumerGroupOffsets + val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(testGroupId, + Collections.singletonMap(testTopicPart0, new OffsetAndMetadata(0L))) + assertNull(alterConsumerGroupOffsetsResult.all().get()) + assertNull(alterConsumerGroupOffsetsResult.partitionResult(testTopicPart0).get()) + + // Verify alterConsumerGroupOffsets success + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 0) + }, s"Expected the offset for partition 0 to eventually become 0.") + } finally { + consumerThreads.foreach { + case consumerThread => + consumerThread.interrupt() + consumerThread.join() + } + } + } finally { + consumerSet.zip(groupInstanceSet).foreach(zipped => Utils.closeQuietly(zipped._1, zipped._2)) + } + } finally { + Utils.closeQuietly(client, "adminClient") + } + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testDeleteConsumerGroupOffsets(quorum: String, groupProtocol: String): Unit = { @@ -2042,9 +2500,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) // Increase timeouts to avoid having a rebalance during the test newConsumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, Integer.MAX_VALUE.toString) - newConsumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT.toString) + if (GroupProtocol.CLASSIC.name.equalsIgnoreCase(groupProtocol)) { + newConsumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT.toString) + } - Using(createConsumer(configOverrides = newConsumerConfig)) { consumer => + Using.resource(createConsumer(configOverrides = newConsumerConfig)) { consumer => consumer.subscribe(Collections.singletonList(testTopicName)) val records = consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) assertNotEquals(0, records.count) @@ -2054,35 +2514,167 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) // Top level error will equal to the first partition level error - assertFutureExceptionTypeEquals(offsetDeleteResult.all(), classOf[GroupSubscribedToTopicException]) - assertFutureExceptionTypeEquals(offsetDeleteResult.partitionResult(tp1), + assertFutureThrows(offsetDeleteResult.all(), classOf[GroupSubscribedToTopicException]) + assertFutureThrows(offsetDeleteResult.partitionResult(tp1), classOf[GroupSubscribedToTopicException]) - assertFutureExceptionTypeEquals(offsetDeleteResult.partitionResult(tp2), + 
assertFutureThrows(offsetDeleteResult.partitionResult(tp2), classOf[UnknownTopicOrPartitionException]) // Test the fake group ID val fakeDeleteResult = client.deleteConsumerGroupOffsets(fakeGroupId, Set(tp1, tp2).asJava) - assertFutureExceptionTypeEquals(fakeDeleteResult.all(), classOf[GroupIdNotFoundException]) - assertFutureExceptionTypeEquals(fakeDeleteResult.partitionResult(tp1), + assertFutureThrows(fakeDeleteResult.all(), classOf[GroupIdNotFoundException]) + assertFutureThrows(fakeDeleteResult.partitionResult(tp1), classOf[GroupIdNotFoundException]) - assertFutureExceptionTypeEquals(fakeDeleteResult.partitionResult(tp2), + assertFutureThrows(fakeDeleteResult.partitionResult(tp2), classOf[GroupIdNotFoundException]) } // Test offset deletion when group is empty val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) - assertFutureExceptionTypeEquals(offsetDeleteResult.all(), + assertFutureThrows(offsetDeleteResult.all(), classOf[UnknownTopicOrPartitionException]) assertNull(offsetDeleteResult.partitionResult(tp1).get()) - assertFutureExceptionTypeEquals(offsetDeleteResult.partitionResult(tp2), + assertFutureThrows(offsetDeleteResult.partitionResult(tp2), classOf[UnknownTopicOrPartitionException]) } finally { Utils.closeQuietly(client, "adminClient") } } + @ParameterizedTest + @ValueSource(strings = Array("kraft+kip932")) + def testListGroups(quorum: String): Unit = { + val classicGroupId = "classic_group_id" + val consumerGroupId = "consumer_group_id" + val shareGroupId = "share_group_id" + val simpleGroupId = "simple_group_id" + val testTopicName = "test_topic" + + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) + val classicGroupConfig = new Properties(consumerConfig) + classicGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, classicGroupId) + val classicGroup = createConsumer(configOverrides = classicGroupConfig) + + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name) + val consumerGroupConfig = new Properties(consumerConfig) + consumerGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId) + val consumerGroup = createConsumer(configOverrides = consumerGroupConfig) + + val shareGroupConfig = new Properties(consumerConfig) + shareGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroupId) + val shareGroup = createShareConsumer(configOverrides = shareGroupConfig) + + val config = createConfig + client = Admin.create(config) + try { + client.createTopics(Collections.singleton( + new NewTopic(testTopicName, 1, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName), List()) + val topicPartition = new TopicPartition(testTopicName, 0) + + classicGroup.subscribe(Collections.singleton(testTopicName)) + classicGroup.poll(JDuration.ofMillis(1000)) + consumerGroup.subscribe(Collections.singleton(testTopicName)) + consumerGroup.poll(JDuration.ofMillis(1000)) + shareGroup.subscribe(Collections.singleton(testTopicName)) + shareGroup.poll(JDuration.ofMillis(1000)) + + val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, + Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) + assertNull(alterConsumerGroupOffsetsResult.all().get()) + assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) + + TestUtils.waitUntilTrue(() => { + val groups = client.listGroups().all().get() + groups.size() == 4 + }, "Expected to find all groups") + + val classicGroupListing = new GroupListing(classicGroupId, 
Optional.of(GroupType.CLASSIC), "consumer", Optional.of(GroupState.STABLE)) + val consumerGroupListing = new GroupListing(consumerGroupId, Optional.of(GroupType.CONSUMER), "consumer", Optional.of(GroupState.STABLE)) + val shareGroupListing = new GroupListing(shareGroupId, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)) + val simpleGroupListing = new GroupListing(simpleGroupId, Optional.of(GroupType.CLASSIC), "", Optional.of(GroupState.EMPTY)) + + var listGroupsResult = client.listGroups() + assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.valid().get().asScala.toSet) + + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CLASSIC))) + assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.valid().get().asScala.toSet) + + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CONSUMER))) + assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(consumerGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(consumerGroupListing), listGroupsResult.valid().get().asScala.toSet) + + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.SHARE))) + assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(shareGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(shareGroupListing), listGroupsResult.valid().get().asScala.toSet) + } finally { + Utils.closeQuietly(classicGroup, "classicGroup") + Utils.closeQuietly(consumerGroup, "consumerGroup") + Utils.closeQuietly(shareGroup, "shareGroup") + Utils.closeQuietly(client, "adminClient") + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testDescribeClassicGroups(quorum: String, groupProtocol: String): Unit = { + val classicGroupId = "classic_group_id" + val simpleGroupId = "simple_group_id" + val testTopicName = "test_topic" + + val classicGroupConfig = new Properties(consumerConfig) + classicGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, classicGroupId) + val classicGroup = createConsumer(configOverrides = classicGroupConfig) + + val config = createConfig + client = Admin.create(config) + try { + client.createTopics(Collections.singleton( + new NewTopic(testTopicName, 1, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName), List()) + val topicPartition = new TopicPartition(testTopicName, 0) + + classicGroup.subscribe(Collections.singleton(testTopicName)) + classicGroup.poll(JDuration.ofMillis(1000)) + + val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, + Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) + assertNull(alterConsumerGroupOffsetsResult.all().get()) + assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) + + val groupIds = Seq(simpleGroupId, classicGroupId) + TestUtils.waitUntilTrue(() => { + val groups = 
client.describeClassicGroups(groupIds.asJavaCollection).all().get() + groups.size() == 2 + }, "Expected to find all groups") + + val classicConsumers = client.describeClassicGroups(groupIds.asJavaCollection).all().get() + val classicConsumer = classicConsumers.get(classicGroupId) + assertNotNull(classicConsumer) + assertEquals(classicGroupId, classicConsumer.groupId) + assertEquals("consumer", classicConsumer.protocol) + assertFalse(classicConsumer.members.isEmpty) + classicConsumer.members.forEach(member => assertTrue(member.upgraded.isEmpty)) + + assertNotNull(classicConsumers.get(simpleGroupId)) + assertEquals(simpleGroupId, classicConsumers.get(simpleGroupId).groupId()) + assertTrue(classicConsumers.get(simpleGroupId).protocol().isEmpty) + } finally { + Utils.closeQuietly(classicGroup, "classicGroup") + Utils.closeQuietly(client, "adminClient") + } + } + @ParameterizedTest @ValueSource(strings = Array("kraft+kip932")) def testShareGroups(quorum: String): Unit = { @@ -2127,10 +2719,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() try { // Verify that initially there are no share groups to list. - val list1 = client.listShareGroups() - assertEquals(0, list1.all().get().size()) - assertEquals(0, list1.errors().get().size()) - assertEquals(0, list1.valid().get().size()) + val list = client.listGroups() + assertEquals(0, list.all().get().size()) + assertEquals(0, list.errors().get().size()) + assertEquals(0, list.valid().get().size()) client.createTopics(Collections.singleton( new NewTopic(testTopicName, testNumPartitions, 1.toShort) @@ -2146,23 +2738,24 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { consumerThreads.foreach(_.start()) assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) + // listGroups is used to list share groups // Test that we can list the new group. 
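+      // Share groups are now listed through the generic listGroups API rather than
+      // listShareGroups: they are selected with
+      // ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)) and filtered on
+      // GroupState instead of the share-group-specific ShareGroupState.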
TestUtils.waitUntilTrue(() => { - client.listShareGroups.all.get.stream().filter(group => + client.listGroups.all.get.stream().filter(group => group.groupId == testGroupId && - group.state.get == ShareGroupState.STABLE).count() == 1 + group.groupState.get == GroupState.STABLE).count() == 1 }, s"Expected to be able to list $testGroupId") TestUtils.waitUntilTrue(() => { - val options = new ListShareGroupsOptions().inStates(Collections.singleton(ShareGroupState.STABLE)) - client.listShareGroups(options).all.get.stream().filter(group => + val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.STABLE)) + client.listGroups(options).all.get.stream().filter(group => group.groupId == testGroupId && - group.state.get == ShareGroupState.STABLE).count() == 1 + group.groupState.get == GroupState.STABLE).count() == 1 }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListShareGroupsOptions().inStates(Collections.singleton(ShareGroupState.EMPTY)) - client.listShareGroups(options).all.get.stream().filter(_.groupId == testGroupId).count() == 0 + val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.EMPTY)) + client.listGroups(options).all.get.stream().filter(_.groupId == testGroupId).count() == 0 }, s"Expected to find zero groups") val describeWithFakeGroupResult = client.describeShareGroups(util.Arrays.asList(testGroupId, fakeGroupId), @@ -2187,17 +2780,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) - // Test that the fake group is listed as dead. 
+ // Test that the fake group throws GroupIdNotFoundException assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) - val fakeGroupDescription = describeWithFakeGroupResult.describedGroups().get(fakeGroupId).get() - - assertEquals(fakeGroupId, fakeGroupDescription.groupId()) - assertEquals(0, fakeGroupDescription.members().size()) - assertEquals(ShareGroupState.DEAD, fakeGroupDescription.state()) - assertNull(fakeGroupDescription.authorizedOperations()) + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") - // Test that all() returns 2 results - assertEquals(2, describeWithFakeGroupResult.all().get().size()) + // Test that all() also throws GroupIdNotFoundException + assertFutureThrows(describeWithFakeGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") val describeTestGroupResult = client.describeShareGroups(Collections.singleton(testGroupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) @@ -2209,18 +2799,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(testGroupId, testGroupDescription.groupId) assertEquals(consumerSet.size, testGroupDescription.members().size()) - // Describing a share group using describeConsumerGroups reports it as a DEAD consumer group - // in the same way as a non-existent group + // Describing a share group using describeConsumerGroups reports it as a non-existent group + // but the error message is different val describeConsumerGroupResult = client.describeConsumerGroups(Collections.singleton(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - assertEquals(1, describeConsumerGroupResult.all().get().size()) - - val deadConsumerGroupDescription = describeConsumerGroupResult.describedGroups().get(testGroupId).get() - assertEquals(testGroupId, deadConsumerGroupDescription.groupId()) - assertEquals(0, deadConsumerGroupDescription.members().size()) - assertEquals("", deadConsumerGroupDescription.partitionAssignor()) - assertEquals(ConsumerGroupState.DEAD, deadConsumerGroupDescription.state()) - assertEquals(expectedOperations, deadConsumerGroupDescription.authorizedOperations()) + assertFutureThrows(describeConsumerGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $testGroupId is not a consumer group.") } finally { consumerThreads.foreach { case consumerThread => @@ -2758,8 +3342,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { topic1Resource -> topic1AlterConfigs ).asJava, new AlterConfigsOptions().validateOnly(true)) - assertFutureExceptionTypeEquals(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], - Some("Invalid value zip for configuration compression.type")) + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], + "Invalid value zip for configuration compression.type: String must be one of: uncompressed, zstd, lz4, snappy, gzip, producer") } @ParameterizedTest @@ -2907,8 +3491,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) // InvalidRequestException error for topic1 - assertFutureExceptionTypeEquals(alterResult.values().get(topic1Resource), classOf[InvalidRequestException], - Some("Error due to duplicate config keys")) + 
assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidRequestException], + "Error due to duplicate config keys") // Operation should succeed for topic2 alterResult.values().get(topic2Resource).get() @@ -2938,11 +3522,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ).asJava) assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], - Some("Can't APPEND to key compression.type because its type is not LIST.")) + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], + "Can't APPEND to key compression.type because its type is not LIST.") - assertFutureExceptionTypeEquals(alterResult.values().get(topic2Resource), classOf[InvalidConfigurationException], - Some("Can't SUBTRACT to key compression.type because its type is not LIST.")) + assertFutureThrows(alterResult.values().get(topic2Resource), classOf[InvalidConfigurationException], + "Can't SUBTRACT to key compression.type because its type is not LIST.") // Try to add invalid config topic1AlterConfigs = Seq( @@ -2954,8 +3538,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ).asJava) assertEquals(Set(topic1Resource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], - Some("Invalid value 1.1 for configuration min.cleanable.dirty.ratio: Value must be no more than 1")) + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], + "Invalid value 1.1 for configuration min.cleanable.dirty.ratio: Value must be no more than 1") } @ParameterizedTest @@ -2982,8 +3566,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { nonExistentTp1 -> validAssignment, nonExistentTp2 -> validAssignment ).asJava).values() - assertFutureExceptionTypeEquals(nonExistentPartitionsResult.get(nonExistentTp1), classOf[UnknownTopicOrPartitionException]) - assertFutureExceptionTypeEquals(nonExistentPartitionsResult.get(nonExistentTp2), classOf[UnknownTopicOrPartitionException]) + assertFutureThrows(nonExistentPartitionsResult.get(nonExistentTp1), classOf[UnknownTopicOrPartitionException]) + assertFutureThrows(nonExistentPartitionsResult.get(nonExistentTp2), classOf[UnknownTopicOrPartitionException]) val extraNonExistentReplica = Optional.of(new NewPartitionReassignment((0 until brokerCount + 1).map(_.asInstanceOf[Integer]).asJava)) val negativeIdReplica = Optional.of(new NewPartitionReassignment(Seq(-3, -2, -1).map(_.asInstanceOf[Integer]).asJava)) @@ -2993,25 +3577,25 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { tp2 -> negativeIdReplica, tp3 -> duplicateReplica ).asJava).values() - assertFutureExceptionTypeEquals(invalidReplicaResult.get(tp1), classOf[InvalidReplicaAssignmentException]) - assertFutureExceptionTypeEquals(invalidReplicaResult.get(tp2), classOf[InvalidReplicaAssignmentException]) - assertFutureExceptionTypeEquals(invalidReplicaResult.get(tp3), classOf[InvalidReplicaAssignmentException]) + assertFutureThrows(invalidReplicaResult.get(tp1), classOf[InvalidReplicaAssignmentException]) + assertFutureThrows(invalidReplicaResult.get(tp2), classOf[InvalidReplicaAssignmentException]) + assertFutureThrows(invalidReplicaResult.get(tp3), classOf[InvalidReplicaAssignmentException]) } @ParameterizedTest @ValueSource(strings = 
Array("kraft")) def testLongTopicNames(quorum: String): Unit = { val client = createAdminClient - val longTopicName = String.join("", Collections.nCopies(249, "x")); - val invalidTopicName = String.join("", Collections.nCopies(250, "x")); + val longTopicName = String.join("", Collections.nCopies(249, "x")) + val invalidTopicName = String.join("", Collections.nCopies(250, "x")) val newTopics2 = Seq(new NewTopic(invalidTopicName, 3, 3.toShort), new NewTopic(longTopicName, 3, 3.toShort)) val results = client.createTopics(newTopics2.asJava).values() assertTrue(results.containsKey(longTopicName)) results.get(longTopicName).get() assertTrue(results.containsKey(invalidTopicName)) - assertFutureExceptionTypeEquals(results.get(invalidTopicName), classOf[InvalidTopicException]) - assertFutureExceptionTypeEquals(client.alterReplicaLogDirs( + assertFutureThrows(results.get(invalidTopicName), classOf[InvalidTopicException]) + assertFutureThrows(client.alterReplicaLogDirs( Map(new TopicPartitionReplica(longTopicName, 0, 0) -> brokers(0).config.logDirs(0)).asJava).all(), classOf[InvalidTopicException]) client.close() @@ -3024,7 +3608,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def validateLogConfig(compressionType: String): Unit = { ensureConsistentKRaftMetadata() - val topicProps = brokers.head.metadataCache.asInstanceOf[KRaftMetadataCache].topicConfig(topic) + val topicProps = brokers.head.metadataCache.topicConfig(topic) val logConfig = LogConfig.fromProps(Collections.emptyMap[String, AnyRef], topicProps) assertEquals(compressionType, logConfig.originals.get(TopicConfig.COMPRESSION_TYPE_CONFIG)) @@ -3038,10 +3622,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TopicConfig.COMPRESSION_TYPE_CONFIG -> "producer" ).asJava val newTopic = new NewTopic(topic, 2, brokerCount.toShort) - assertFutureExceptionTypeEquals( + assertFutureThrows( client.createTopics(Collections.singletonList(newTopic.configs(invalidConfigs))).all, classOf[InvalidConfigurationException], - Some("Null value not supported for topic configs: retention.bytes") + "Null value not supported for topic configs: retention.bytes" ) val validConfigs = Map[String, String](TopicConfig.COMPRESSION_TYPE_CONFIG -> "producer").asJava @@ -3054,10 +3638,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, null), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), AlterConfigOp.OpType.SET) ) - assertFutureExceptionTypeEquals( + assertFutureThrows( client.incrementalAlterConfigs(Map(topicResource -> alterOps.asJavaCollection).asJava).all, classOf[InvalidRequestException], - Some("Null value not supported for : retention.bytes") + "Null value not supported for : retention.bytes" ) validateLogConfig(compressionType = "producer") } @@ -3084,7 +3668,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def testIncrementalAlterConfigsForLog4jLogLevels(quorum: String): Unit = { client = createAdminClient - val ancestorLogger = "kafka"; + val ancestorLogger = "kafka" val initialLoggerConfig = describeBrokerLoggers() val initialAncestorLogLevel = initialLoggerConfig.get("kafka").value() val initialControllerServerLogLevel = initialLoggerConfig.get("kafka.server.ControllerServer").value() @@ -3183,6 +3767,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(newAncestorLogLevel, 
newAncestorLoggerConfig.get("kafka.server.ControllerServer").value()) } + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevelsCanSetToRootLogger(quorum: String): Unit = { + client = createAdminClient + val initialLoggerConfig = describeBrokerLoggers() + val initialRootLogLevel = initialLoggerConfig.get(Log4jController.ROOT_LOGGER).value() + val newRootLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL + + val alterRootLoggerEntry = Seq( + new AlterConfigOp(new ConfigEntry(Log4jController.ROOT_LOGGER, newRootLogLevel), AlterConfigOp.OpType.SET) + ).asJavaCollection + + alterBrokerLoggers(alterRootLoggerEntry, validateOnly = true) + val validatedRootLoggerConfig = describeBrokerLoggers() + assertEquals(initialRootLogLevel, validatedRootLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) + + alterBrokerLoggers(alterRootLoggerEntry) + val changedRootLoggerConfig = describeBrokerLoggers() + assertEquals(newRootLogLevel, changedRootLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) + } + @ParameterizedTest @ValueSource(strings = Array("kraft")) def testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger(quorum: String): Unit = { @@ -3233,22 +3838,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertLogLevelDidNotChange() } - /** - * The AlterConfigs API is deprecated and should not support altering log levels - */ - @nowarn("cat=deprecation") - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterConfigsForLog4jLogLevelsDoesNotWork(quorum: String): Unit = { - client = createAdminClient - - val alterLogLevelsEntries = Seq( - new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL) - ).asJavaCollection - val alterResult = client.alterConfigs(Map(brokerLoggerConfigResource -> new Config(alterLogLevelsEntries)).asJava) - assertTrue(assertThrows(classOf[ExecutionException], () => alterResult.values.get(brokerLoggerConfigResource).get).getCause.isInstanceOf[InvalidRequestException]) - } - def alterBrokerLoggers(entries: util.Collection[AlterConfigOp], validateOnly: Boolean = false): Unit = { client.incrementalAlterConfigs(Map(brokerLoggerConfigResource -> entries).asJava, new AlterConfigsOptions().validateOnly(validateOnly)) .values.get(brokerLoggerConfigResource).get() @@ -3414,27 +4003,21 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { object PlaintextAdminIntegrationTest { - @nowarn("cat=deprecation") def checkValidAlterConfigs( admin: Admin, test: KafkaServerTestHarness, topicResource1: ConfigResource, - topicResource2: ConfigResource - ): Unit = { + topicResource2: ConfigResource, + maxMessageBytes: String, + retentionMs: String): Unit = { // Alter topics - var topicConfigEntries1 = Seq( - new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000") - ).asJava - - var topicConfigEntries2 = Seq( - new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), - new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") - ).asJava - - var alterResult = admin.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2) - ).asJava) + val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList( + new AlterConfigOp(new 
ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) + )) + var alterResult = admin.incrementalAlterConfigs(alterConfigs) assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) alterResult.all.get @@ -3448,26 +4031,16 @@ object PlaintextAdminIntegrationTest { assertEquals(2, configs.size) assertEquals("1000", configs.get(topicResource1).get(TopicConfig.FLUSH_MS_CONFIG).value) - assertEquals(LogConfig.DEFAULT_MAX_MESSAGE_BYTES.toString, - configs.get(topicResource1).get(TopicConfig.MAX_MESSAGE_BYTES_CONFIG).value) - assertEquals(LogConfig.DEFAULT_RETENTION_MS.toString, configs.get(topicResource1).get(TopicConfig.RETENTION_MS_CONFIG).value) + assertEquals(maxMessageBytes, configs.get(topicResource1).get(TopicConfig.MAX_MESSAGE_BYTES_CONFIG).value) + assertEquals(retentionMs, configs.get(topicResource1).get(TopicConfig.RETENTION_MS_CONFIG).value) assertEquals("0.9", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) assertEquals("lz4", configs.get(topicResource2).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) // Alter topics with validateOnly=true - topicConfigEntries1 = Seq( - new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10") - ).asJava - - topicConfigEntries2 = Seq( - new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3") - ).asJava - - alterResult = admin.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2) - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3"), OpType.SET))) + alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) alterResult.all.get @@ -3479,12 +4052,10 @@ object PlaintextAdminIntegrationTest { assertEquals(2, configs.size) - assertEquals(LogConfig.DEFAULT_MAX_MESSAGE_BYTES.toString, - configs.get(topicResource1).get(TopicConfig.MAX_MESSAGE_BYTES_CONFIG).value) + assertEquals(maxMessageBytes, configs.get(topicResource1).get(TopicConfig.MAX_MESSAGE_BYTES_CONFIG).value) assertEquals("0.9", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) } - @nowarn("cat=deprecation") def checkInvalidAlterConfigs( test: KafkaServerTestHarness, admin: Admin @@ -3498,27 +4069,22 @@ object PlaintextAdminIntegrationTest { val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) createTopicWithAdmin(admin, topic2, test.brokers, test.controllerServers, numPartitions = 1, replicationFactor = 1) - val topicConfigEntries1 = Seq( - new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), // this value is invalid as it's above 1.0 - new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") - ).asJava - - var topicConfigEntries2 = Seq(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")).asJava - val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, test.brokers.head.config.brokerId.toString) - val brokerConfigEntries = Seq(new ConfigEntry(ServerConfigs.BROKER_ID_CONFIG, "10")).asJava // Alter configs: first and third are 
invalid, second is valid - var alterResult = admin.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2), - brokerResource -> new Config(brokerConfigEntries) - ).asJava) + val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + alterConfigs.put(topicResource1, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) + )) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), OpType.SET))) + alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181"), OpType.SET))) + var alterResult = admin.incrementalAlterConfigs(alterConfigs) assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) alterResult.values.get(topicResource2).get - assertFutureExceptionTypeEquals(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) // Verify that first and third resources were not updated and second was updated test.ensureConsistentKRaftMetadata() @@ -3536,18 +4102,18 @@ object PlaintextAdminIntegrationTest { assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) // Alter configs with validateOnly = true: first and third are invalid, second is valid - topicConfigEntries2 = Seq(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip")).asJava - - alterResult = admin.alterConfigs(Map( - topicResource1 -> new Config(topicConfigEntries1), - topicResource2 -> new Config(topicConfigEntries2), - brokerResource -> new Config(brokerConfigEntries) - ).asJava, new AlterConfigsOptions().validateOnly(true)) + alterConfigs.put(topicResource1, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) + )) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), OpType.SET))) + alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181"), OpType.SET))) + alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) - assertFutureExceptionTypeEquals(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) alterResult.values.get(topicResource2).get - assertFutureExceptionTypeEquals(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) // Verify that no resources are updated since validate_only = true 
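+    // With validateOnly = true the broker only validates the alterations and does not apply
+    // them, so the describe calls below should still return the original configuration values.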
test.ensureConsistentKRaftMetadata() @@ -3564,17 +4130,4 @@ object PlaintextAdminIntegrationTest { assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) } - - /** - * Resets the logging configuration after the test. - */ - def resetLogging(): Unit = { - org.apache.log4j.LogManager.resetConfiguration() - val stream = this.getClass.getResourceAsStream("/log4j.properties") - try { - PropertyConfigurator.configure(stream) - } finally { - stream.close() - } - } } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala index 11589fac48c0c..e159042df570d 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala @@ -35,67 +35,67 @@ class PlaintextConsumerCallbackTest extends AbstractConsumerTest { @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerAssignOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsAssigned { (consumer, _) => + triggerOnPartitionsAssigned(tp, { (consumer, _) => val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.assign(Collections.singletonList(tp))) assertEquals(e.getMessage, "Subscription to topics, partitions and pattern are mutually exclusive") - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerAssignmentOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsAssigned { (consumer, _) => + triggerOnPartitionsAssigned(tp, { (consumer, _) => assertTrue(consumer.assignment().contains(tp)); - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerBeginningOffsetsOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsAssigned { (consumer, _) => + triggerOnPartitionsAssigned(tp, { (consumer, _) => val map = consumer.beginningOffsets(Collections.singletonList(tp)) assertTrue(map.containsKey(tp)) assertEquals(0, map.get(tp)) - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerAssignOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsRevoked { (consumer, _) => + triggerOnPartitionsRevoked(tp, { (consumer, _) => val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.assign(Collections.singletonList(tp))) assertEquals(e.getMessage, "Subscription to topics, partitions and pattern are mutually exclusive") - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerAssignmentOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsRevoked { (consumer, 
_) => + triggerOnPartitionsRevoked(tp, { (consumer, _) => assertTrue(consumer.assignment().contains(tp)) - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testConsumerRebalanceListenerBeginningOffsetsOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsRevoked { (consumer, _) => + triggerOnPartitionsRevoked(tp, { (consumer, _) => val map = consumer.beginningOffsets(Collections.singletonList(tp)) assertTrue(map.containsKey(tp)) assertEquals(0, map.get(tp)) - } + }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testGetPositionOfNewlyAssignedPartitionOnPartitionsAssignedCallback(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic, 0) - triggerOnPartitionsAssigned { (consumer, _) => assertDoesNotThrow(() => consumer.position(tp)) } + triggerOnPartitionsAssigned(tp, { (consumer, _) => assertDoesNotThrow(() => consumer.position(tp)) }) } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @@ -110,7 +110,7 @@ class PlaintextConsumerCallbackTest extends AbstractConsumerTest { val startingTimestamp = 0 sendRecords(producer, totalRecords.toInt, tp, startingTimestamp) - triggerOnPartitionsAssigned(consumer, { (consumer, _) => + triggerOnPartitionsAssigned(tp, consumer, { (consumer, _) => consumer.seek(tp, startingOffset) consumer.pause(asList(tp)) }) @@ -122,16 +122,23 @@ class PlaintextConsumerCallbackTest extends AbstractConsumerTest { startingTimestamp = startingOffset) } - private def triggerOnPartitionsAssigned(execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + private def triggerOnPartitionsAssigned(tp: TopicPartition, + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { val consumer = createConsumer() - triggerOnPartitionsAssigned(consumer, execute) + triggerOnPartitionsAssigned(tp, consumer, execute) } - private def triggerOnPartitionsAssigned(consumer: Consumer[Array[Byte], Array[Byte]], execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + + private def triggerOnPartitionsAssigned(tp: TopicPartition, + consumer: Consumer[Array[Byte], Array[Byte]], + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { val partitionsAssigned = new AtomicBoolean(false) consumer.subscribe(asList(topic), new ConsumerRebalanceListener { override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - execute(consumer, partitions) - partitionsAssigned.set(true) + // Make sure the partition used in the test is actually assigned before continuing. 
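The helper changes around this hunk make the rebalance callbacks wait for the specific TopicPartition under test instead of reacting to any assignment. A self-contained sketch of that gating pattern, assuming a consumer instance is already available and leaving out the test harness's timeout handling:

    import java.time.Duration
    import java.util.Collections
    import java.util.concurrent.atomic.AtomicBoolean
    import org.apache.kafka.clients.consumer.{Consumer, ConsumerRebalanceListener}
    import org.apache.kafka.common.TopicPartition

    // Runs `body` only once `tp` is actually part of the assignment, then stops polling.
    def onceAssigned(consumer: Consumer[Array[Byte], Array[Byte]], tp: TopicPartition)(body: => Unit): Unit = {
      val done = new AtomicBoolean(false)
      consumer.subscribe(Collections.singletonList(tp.topic), new ConsumerRebalanceListener {
        override def onPartitionsAssigned(partitions: java.util.Collection[TopicPartition]): Unit =
          if (partitions.contains(tp)) { // ignore rebalances that do not hand us the tested partition
            body
            done.set(true)
          }
        override def onPartitionsRevoked(partitions: java.util.Collection[TopicPartition]): Unit = ()
      })
      while (!done.get()) consumer.poll(Duration.ofMillis(100)) // drive the rebalance; a real test would time out
    }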
+ if (partitions.contains(tp)) { + execute(consumer, partitions) + partitionsAssigned.set(true) + } } override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { @@ -141,18 +148,25 @@ class PlaintextConsumerCallbackTest extends AbstractConsumerTest { TestUtils.pollUntilTrue(consumer, () => partitionsAssigned.get(), "Timed out before expected rebalance completed") } - private def triggerOnPartitionsRevoked(execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + private def triggerOnPartitionsRevoked(tp: TopicPartition, + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { val consumer = createConsumer() val partitionsAssigned = new AtomicBoolean(false) val partitionsRevoked = new AtomicBoolean(false) consumer.subscribe(asList(topic), new ConsumerRebalanceListener { override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { - partitionsAssigned.set(true) + // Make sure the partition used in the test is actually assigned before continuing. + if (partitions.contains(tp)) { + partitionsAssigned.set(true) + } } override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { - execute(consumer, partitions) - partitionsRevoked.set(true) + // Make sure the partition used in the test is actually revoked before continuing. + if (partitions.contains(tp)) { + execute(consumer, partitions) + partitionsRevoked.set(true) + } } }) TestUtils.pollUntilTrue(consumer, () => partitionsAssigned.get(), "Timed out before expected rebalance completed") diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala index 22cc6a7205e52..4b50bddd9fc01 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala @@ -21,7 +21,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import org.apache.kafka.common.TopicPartition -import java.time.Duration +import java.time.{Duration, Instant} import scala.jdk.CollectionConverters._ /** @@ -105,6 +105,52 @@ class PlaintextConsumerFetchTest extends AbstractConsumerTest { assertEquals(totalRecords, nextRecord.offset()) } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchOutOfRangeOffsetResetConfigByDuration(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "by_duration:PT1H") + // ensure no in-flight fetch request so that the offset can be reset immediately + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") + + // Test the scenario where the requested duration much earlier than the starting offset + val consumer1 = createConsumer(configOverrides = this.consumerConfig) + val producer1 = createProducer() + val totalRecords = 10L + var startingTimestamp = System.currentTimeMillis() + sendRecords(producer1, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumer1.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer1, numRecords = totalRecords.toInt, startingOffset = 0, startingTimestamp = startingTimestamp) + + // seek to out of range position + var outOfRangePos = totalRecords + 1 + 
consumer1.seek(tp, outOfRangePos) + // assert that poll resets to the beginning position + consumeAndVerifyRecords(consumer = consumer1, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) + + // Test the scenario where starting offset is earlier than the requested duration + val consumer2 = createConsumer(configOverrides = this.consumerConfig) + val producer2 = createProducer() + val totalRecords2 = 25L + startingTimestamp = Instant.now().minus(Duration.ofHours(24)).toEpochMilli + //generate records with 1 hour interval for 1 day + sendRecords(producer2, totalRecords2.toInt, tp2, startingTimestamp = startingTimestamp, Duration.ofHours(1).toMillis) + consumer2.assign(List(tp2).asJava) + //consumer should read one record from last one hour + consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, + startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, + tp = tp2, + timestampIncrement = Duration.ofHours(1).toMillis) + + // seek to out of range position + outOfRangePos = totalRecords2 + 1 + consumer2.seek(tp2, outOfRangePos) + // assert that poll resets to the duration offset. consumer should read one record from last one hour + consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, + startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, + tp = tp2, + timestampIncrement = Duration.ofHours(1).toMillis) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testFetchRecordLargerThanFetchMaxBytes(quorum: String, groupProtocol: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala index 9e6460df05027..c52228acbca32 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala @@ -16,6 +16,7 @@ import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer._ import org.apache.kafka.common.{MetricName, TopicPartition} import org.apache.kafka.common.utils.Utils +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest @@ -23,6 +24,7 @@ import org.junit.jupiter.params.provider.MethodSource import java.time.Duration import java.util +import java.util.Properties import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -32,6 +34,12 @@ import scala.jdk.CollectionConverters._ @Timeout(600) class PlaintextConsumerPollTest extends AbstractConsumerTest { + override protected def brokerPropertyOverrides(properties: Properties): Unit = { + super.brokerPropertyOverrides(properties) + properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMaxPollRecords(quorum: String, groupProtocol: String): Unit = { @@ -53,8 +61,10 @@ class PlaintextConsumerPollTest extends AbstractConsumerTest { 
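The test above exercises the by_duration offset reset policy; outside the harness it is plain consumer configuration. A sketch follows, with the bootstrap address and group id as placeholders; the fallback behaviour described in the comment reflects what the first scenario above asserts.

    import java.util.Properties
    import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer}
    import org.apache.kafka.common.serialization.ByteArrayDeserializer

    // Builds a consumer that, when no valid offset exists or the position is out of range,
    // resets to offsets no older than one hour.
    def byDurationConsumer(): Consumer[Array[Byte], Array[Byte]] = {
      val props = new Properties()
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder
      props.put(ConsumerConfig.GROUP_ID_CONFIG, "by-duration-demo")        // placeholder
      // ISO-8601 duration: seek to the earliest offset whose timestamp is within the last hour,
      // resetting to the earliest offset when every record is newer than that point.
      props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "by_duration:PT1H")
      new KafkaConsumer(props, new ByteArrayDeserializer, new ByteArrayDeserializer)
    }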
@MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMaxPollIntervalMs(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 2000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 2000.toString) + } val consumer = createConsumer() @@ -79,8 +89,10 @@ class PlaintextConsumerPollTest extends AbstractConsumerTest { @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMaxPollIntervalMsDelayInRevocation(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + } this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) val consumer = createConsumer() @@ -121,8 +133,10 @@ class PlaintextConsumerPollTest extends AbstractConsumerTest { @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMaxPollIntervalMsDelayInAssignment(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + } this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) val consumer = createConsumer() @@ -146,7 +160,9 @@ class PlaintextConsumerPollTest extends AbstractConsumerTest { @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testMaxPollIntervalMsShorterThanPollTimeout(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + } val consumer = createConsumer() val listener = new TestConsumerReassignmentListener diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala index dd2b0d4cffdcc..5eea54b23d1d3 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala @@ -15,7 +15,7 @@ 
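A recurring pattern in the consumer tests in this patch is to set heartbeat.interval.ms and session.timeout.ms only when the classic group protocol is in use; with the new consumer protocol those intervals are configured on the broker side instead (see the GroupCoordinatorConfig overrides added above). A hypothetical helper capturing that guard, with the interval values chosen purely for illustration:

    import java.util.Properties
    import org.apache.kafka.clients.consumer.{ConsumerConfig, GroupProtocol}

    def tuneSessionTimeouts(consumerConfig: Properties, groupProtocol: String): Unit = {
      if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) {
        // Classic protocol: the client owns these values.
        consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "500")
        consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "2000")
      }
      // Consumer (KIP-848) protocol: leave them unset; the group coordinator supplies the intervals.
    }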
package kafka.api import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer._ import org.apache.kafka.common.TopicPartition -import org.apache.kafka.common.errors.InvalidTopicException +import org.apache.kafka.common.errors.{InvalidRegularExpression, InvalidTopicException} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.api.function.Executable @@ -178,6 +178,129 @@ class PlaintextConsumerSubscriptionTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "tblablac" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + + val topic2 = "tblablak" // does not match subscribed pattern + createTopic(topic2, 2, brokerCount) + + val topic3 = "tblab1" // does not match subscribed pattern + createTopic(topic3, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + var pattern = new SubscriptionPattern("t.*c") + consumer.subscribe(pattern) + + var assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to a different pattern to match topic2 (that did not match before) + pattern = new SubscriptionPattern(topic2 + ".*") + consumer.subscribe(pattern) + + assignment = Set( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1)) + awaitAssignment(consumer, assignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternExpandSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "topic1" // matches first pattern + createTopic(topic1, 2, brokerCount) + + val topic2 = "topic2" // does not match first pattern + createTopic(topic2, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + var pattern = new SubscriptionPattern("topic1.*") + consumer.subscribe(pattern) + val assignment = Set( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to a different pattern that should match + // the same topics the member already had plus new ones + pattern = new SubscriptionPattern("topic1|topic2") + consumer.subscribe(pattern) + + val expandedAssignment = assignment ++ Set(new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)) + awaitAssignment(consumer, expandedAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscriptionAndTopicSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "topic1" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + + val topic11 = "topic11" // matches subscribed pattern + createTopic(topic11, 2, brokerCount) + + val topic2 = "topic2" // 
does not match subscribed pattern + createTopic(topic2, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + // Subscribe to pattern + val pattern = new SubscriptionPattern("topic1.*") + consumer.subscribe(pattern) + val patternAssignment = Set( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1), + new TopicPartition(topic11, 0), + new TopicPartition(topic11, 1)) + awaitAssignment(consumer, patternAssignment) + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to explicit topic names + consumer.subscribe(List(topic2).asJava) + val assignment = Set( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1)) + awaitAssignment(consumer, assignment) + consumer.unsubscribe() + + // Subscribe to pattern again + consumer.subscribe(pattern) + awaitAssignment(consumer, patternAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscriptionInvalidRegex(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + val pattern = new SubscriptionPattern("(t.*c") + consumer.subscribe(pattern) + + TestUtils.tryUntilNoAssertionError() { + assertThrows(classOf[InvalidRegularExpression], () => consumer.poll(Duration.ZERO)) + } + consumer.unsubscribe() + } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testExpandingTopicSubscriptions(quorum: String, groupProtocol: String): Unit = { @@ -211,8 +334,10 @@ class PlaintextConsumerSubscriptionTest extends AbstractConsumerTest { @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testUnsubscribeTopic(quorum: String, groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + } val consumer = createConsumer() val listener = new TestConsumerReassignmentListener() diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala index 6ce0f6c00de70..2469a482ab557 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala @@ -24,9 +24,10 @@ import org.apache.kafka.clients.admin.{NewPartitions, NewTopic} import org.apache.kafka.clients.consumer._ import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.errors.{InvalidGroupIdException, InvalidTopicException, TimeoutException, WakeupException} +import org.apache.kafka.common.errors.{InterruptException, InvalidGroupIdException, InvalidTopicException, TimeoutException, WakeupException} import org.apache.kafka.common.record.{CompressionType, 
TimestampType} import org.apache.kafka.common.serialization._ +import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.common.{MetricName, TopicPartition} import org.apache.kafka.server.quota.QuotaType import org.apache.kafka.test.{MockConsumerInterceptor, MockProducerInterceptor} @@ -35,7 +36,7 @@ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import java.util.concurrent.{CompletableFuture, TimeUnit} +import java.util.concurrent.{CompletableFuture, ExecutionException, TimeUnit} import scala.jdk.CollectionConverters._ @Timeout(600) @@ -387,8 +388,10 @@ class PlaintextConsumerTest extends BaseConsumerTest { @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testPauseStateNotPreservedByRebalance(quorum: String, groupProtocol: String): Unit = { - this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test - this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + } val consumer = createConsumer() val producer = createProducer() @@ -824,4 +827,50 @@ class PlaintextConsumerTest extends BaseConsumerTest { assertThrows(classOf[WakeupException], () => consumer.position(topicPartition, Duration.ofSeconds(100))) } + + @Flaky("KAFKA-18031") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCloseLeavesGroupOnInterrupt(quorum: String, groupProtocol: String): Unit = { + val adminClient = createAdminClient() + val consumer = createConsumer() + val listener = new TestConsumerReassignmentListener() + consumer.subscribe(List(topic).asJava, listener) + awaitRebalance(consumer, listener) + + assertEquals(1, listener.callsToAssigned) + assertEquals(0, listener.callsToRevoked) + + try { + Thread.currentThread().interrupt() + assertThrows(classOf[InterruptException], () => consumer.close()) + } finally { + // Clear the interrupted flag so we don't create problems for subsequent tests. + Thread.interrupted() + } + + assertEquals(1, listener.callsToAssigned) + assertEquals(1, listener.callsToRevoked) + + val config = new ConsumerConfig(consumerConfig) + + // Set the wait timeout to be only *half* the configured session timeout. This way we can make sure that the + // consumer explicitly left the group as opposed to being kicked out by the broker. 
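The new testCloseLeavesGroupOnInterrupt below relies on KafkaConsumer.close() turning a pending thread interrupt into an InterruptException while still sending the leave-group request. A compact sketch of the same close-under-interrupt pattern for application code, assuming the consumer instance already exists:

    import org.apache.kafka.clients.consumer.Consumer
    import org.apache.kafka.common.errors.InterruptException

    // Close a consumer from an interrupted thread and restore the interrupt status afterwards.
    def closeWhileInterrupted(consumer: Consumer[Array[Byte], Array[Byte]]): Unit = {
      Thread.currentThread().interrupt()   // simulate an interrupt arriving before close()
      try {
        consumer.close()                   // expected to throw InterruptException, but still leave the group
      } catch {
        case _: InterruptException => ()   // expected path
      } finally {
        Thread.interrupted()               // clear the flag so later code is not affected
      }
    }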
+ val leaveGroupTimeoutMs = config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG) / 2 + + TestUtils.waitUntilTrue( + () => { + try { + val groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG) + val groupDescription = adminClient.describeConsumerGroups (Collections.singletonList (groupId) ).describedGroups.get (groupId).get + groupDescription.members.isEmpty + } catch { + case _: ExecutionException | _: InterruptedException => + false + } + }, + msg=s"Consumer did not leave the consumer group within $leaveGroupTimeoutMs ms of close", + waitTimeMs=leaveGroupTimeoutMs + ) + } } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala index 0c8bf16b4dbfa..1acd22dc3fa4b 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala @@ -75,7 +75,7 @@ class PlaintextEndToEndAuthorizationTest extends EndToEndAuthorizationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(List.empty, None, KafkaSasl)) + startSasl(jaasSections(List.empty, None)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala index c1ba3a1b83c4d..65eedf96e3a59 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala @@ -33,7 +33,6 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} import java.nio.charset.StandardCharsets -import scala.annotation.nowarn class PlaintextProducerSendTest extends BaseProducerSendTest { @@ -276,14 +275,11 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { object PlaintextProducerSendTest { - // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details - @nowarn("cat=deprecation") def quorumAndTimestampConfigProvider: java.util.stream.Stream[Arguments] = { val now: Long = System.currentTimeMillis() val fiveMinutesInMs: Long = 5 * 60 * 60 * 1000L val data = new java.util.ArrayList[Arguments]() for (groupProtocol <- GroupProtocol.values().map(gp => gp.name.toLowerCase(Locale.ROOT))) { - data.add(Arguments.of("kraft", groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, Long.box(now - fiveMinutesInMs))) data.add(Arguments.of("kraft", groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, Long.box(now - fiveMinutesInMs))) data.add(Arguments.of("kraft", groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.box(now + fiveMinutesInMs))) } diff --git a/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala b/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala index 521b9cc0a0fd3..2782a46f18abf 100755 --- a/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala @@ -48,7 +48,7 @@ class ProducerCompressionTest extends QuorumTestHarness { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - val props = TestUtils.createBrokerConfig(brokerId, zkConnectOrNull) + val props = TestUtils.createBrokerConfig(brokerId) broker = createBroker(new KafkaConfig(props)) } diff --git 
a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala index a30d440f3251b..1826df1c6dc7b 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala @@ -52,7 +52,7 @@ class ProducerFailureHandlingTest extends KafkaServerTestHarness { overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, 1.toString) def generateConfigs = - TestUtils.createBrokerConfigs(numServers, zkConnectOrNull, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) + TestUtils.createBrokerConfigs(numServers, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) private var producer1: KafkaProducer[Array[Byte], Array[Byte]] = _ private var producer2: KafkaProducer[Array[Byte], Array[Byte]] = _ diff --git a/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala b/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala index 89e9e8dd6f621..6f50b60aa15d9 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala @@ -26,13 +26,12 @@ import kafka.utils.TestUtils.{consumeRecords, createAdminClient} import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, ProducerState} import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} -import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest @@ -53,7 +52,7 @@ class ProducerIdExpirationTest extends KafkaServerTestHarness { var admin: Admin = _ override def generateConfigs: Seq[KafkaConfig] = { - TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps())) + TestUtils.createBrokerConfigs(3).map(KafkaConfig.fromProps(_, serverProps())) } @BeforeEach @@ -126,13 +125,20 @@ class ProducerIdExpirationTest extends KafkaServerTestHarness { // Producer IDs should be retained. assertEquals(1, producerState.size) - // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired transactional ID. + // Start a new transaction and attempt to send, triggering an AddPartitionsToTxnRequest that will fail + // due to the expired transactional ID, resulting in a fatal error. 
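The hunk below reflects that an expired transactional id is a fatal producer error: abortTransaction() itself throws, and recovery requires a brand-new producer instance. A sketch of that recovery sequence in application terms; newProducer() is a hypothetical factory, and catching KafkaException this broadly is a deliberate simplification.

    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
    import org.apache.kafka.common.KafkaException

    def sendOnceRecoveringFromFatal(newProducer: () => KafkaProducer[String, String],
                                    record: ProducerRecord[String, String]): Unit = {
      var producer = newProducer()
      producer.initTransactions()
      try {
        producer.beginTransaction()
        producer.send(record)
        producer.commitTransaction()
      } catch {
        case _: KafkaException =>
          // A fatal error (for example an expired transactional id) poisons this producer instance:
          // even abortTransaction() throws. Close it and start over with a fresh producer.
          producer.close()
          producer = newProducer()
          producer.initTransactions()
          producer.beginTransaction()
          producer.send(record)
          producer.commitTransaction()
      } finally {
        producer.close()
      }
    }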
producer.beginTransaction() val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "1", "1", willBeCommitted = false)) TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.") + org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException]) - JTestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException]) - producer.abortTransaction() + // Assert that aborting the transaction throws a KafkaException due to the fatal error. + assertThrows(classOf[KafkaException], () => producer.abortTransaction()) + + // Close the producer and reinitialize to recover from the fatal error. + producer.close() + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() producer.beginTransaction() producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "4", "4", willBeCommitted = true)) diff --git a/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala index 3cb40b6a0cf46..aa8f46d7997ac 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala @@ -18,18 +18,22 @@ package kafka.api import org.apache.kafka.clients.producer.ProducerRecord import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource class ProducerRebootstrapTest extends RebootstrapTest { - @Test - def testRebootstrap(): Unit = { + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") + @ValueSource(booleans = Array(false, true)) + def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { + // It's okay to shut the leader down, because leader re-election completes well within the producer timeout. server1.shutdown() server1.awaitShutdown() - val producer = createProducer(configOverrides = clientOverrides) + val producer = createProducer(configOverrides = clientOverrides(useRebootstrapTriggerMs)) // Only server 0 is available to the producer during the bootstrap.
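For quick reference, the client-side settings behind the new useRebootstrapTriggerMs switch (defined later in this patch, in RebootstrapTest.clientOverrides) amount to the following sketch; every key and value is taken from the patch itself.

    import java.util.Properties
    import org.apache.kafka.clients.CommonClientConfigs

    def rebootstrapOverrides(useRebootstrapTriggerMs: Boolean): Properties = {
      val overrides = new Properties()
      overrides.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "rebootstrap")
      if (useRebootstrapTriggerMs) {
        // Rebootstrap purely because metadata could not be refreshed for 5 seconds.
        overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "5000")
      } else {
        // Effectively disable the time-based trigger and rely on connection failures instead.
        overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "3600000")
        overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, "5000")
        overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, "5000")
        overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG, "1000")
        overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, "1000")
      }
      overrides
    }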
- producer.send(new ProducerRecord(topic, part, "key 0".getBytes, "value 0".getBytes)).get() + val recordMetadata0 = producer.send(new ProducerRecord(topic, part, "key 0".getBytes, "value 0".getBytes)).get() + assertEquals(0, recordMetadata0.offset()) server0.shutdown() server0.awaitShutdown() diff --git a/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala b/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala index 397e4660da7ee..0ee52530e57ff 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala @@ -34,6 +34,7 @@ import scala.jdk.CollectionConverters._ class ProducerSendWhileDeletionTest extends IntegrationTestHarness { val producerCount: Int = 1 val brokerCount: Int = 2 + val defaultLingerMs: Int = 5; serverConfig.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 2.toString) serverConfig.put(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, 2.toString) @@ -41,7 +42,7 @@ class ProducerSendWhileDeletionTest extends IntegrationTestHarness { producerConfig.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5000L.toString) producerConfig.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10000.toString) - producerConfig.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 10000.toString) + producerConfig.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, (10000 + defaultLingerMs).toString) /** * Tests that Producer gets self-recovered when a topic is deleted mid-way of produce. @@ -78,7 +79,7 @@ class ProducerSendWhileDeletionTest extends IntegrationTestHarness { deleteTopic(topic, listenerName) // Verify that the topic is deleted when no metadata request comes in - TestUtils.verifyTopicDeletion(zkClientOrNull, topic, 2, brokers) + TestUtils.verifyTopicDeletion(topic, 2, brokers) // Producer should be able to send messages even after topic gets deleted and auto-created assertEquals(topic, producer.send(new ProducerRecord(topic, null, "value".getBytes(StandardCharsets.UTF_8))).get.topic()) diff --git a/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala b/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala index a8d2431f80bb3..03a312d5f077a 100644 --- a/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala +++ b/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala @@ -17,34 +17,54 @@ package kafka.api import java.util.Properties - -import kafka.admin.{RackAwareMode, RackAwareTest} +import kafka.admin.RackAwareTest import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig -import kafka.utils.TestUtils +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.admin.Admin import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + import scala.collection.Map +import scala.jdk.CollectionConverters.ListHasAsScala class RackAwareAutoTopicCreationTest extends KafkaServerTestHarness with RackAwareTest { val numServers = 4 val numPartitions = 8 val replicationFactor = 2 val 
overridingProps = new Properties() + var admin: Admin = _ overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, numPartitions.toString) overridingProps.put(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, replicationFactor.toString) def generateConfigs = (0 until numServers) map { node => - TestUtils.createBrokerConfig(node, zkConnect, enableControlledShutdown = false, rack = Some((node / 2).toString)) + TestUtils.createBrokerConfig(node, enableControlledShutdown = false, rack = Some((node / 2).toString)) } map (KafkaConfig.fromProps(_, overridingProps)) private val topic = "topic" - @Test - def testAutoCreateTopic(): Unit = { + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + admin = TestUtils.createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) + } + + @AfterEach + override def tearDown(): Unit = { + if (admin != null) admin.close() + super.tearDown() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCreateTopic(quorum: String, groupProtocol: String): Unit = { val producer = TestUtils.createProducer(bootstrapServers()) try { // Send a message to auto-create the topic @@ -52,15 +72,20 @@ class RackAwareAutoTopicCreationTest extends KafkaServerTestHarness with RackAwa assertEquals(0L, producer.send(record).get.offset, "Should have offset 0") // double check that the topic is created with leader elected - TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0) - val assignment = zkClient.getReplicaAssignmentForTopics(Set(topic)).map { case (topicPartition, replicas) => - topicPartition.partition -> replicas - } - val brokerMetadatas = adminZkClient.getBrokerMetadatas(RackAwareMode.Enforced) + TestUtils.waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic, 0) + val assignment = getReplicaAssignment(topic) + val brokerMetadatas = brokers.head.metadataCache.getAliveBrokers() val expectedMap = Map(0 -> "0", 1 -> "0", 2 -> "1", 3 -> "1") assertEquals(expectedMap, brokerMetadatas.map(b => b.id -> b.rack.get).toMap) - checkReplicaDistribution(assignment, expectedMap, numServers, numPartitions, replicationFactor) + checkReplicaDistribution(assignment, expectedMap, numServers, numPartitions, replicationFactor, + verifyLeaderDistribution = false) } finally producer.close() } + + private def getReplicaAssignment(topic: String): Map[Int, Seq[Int]] = { + TestUtils.describeTopic(admin, topic).partitions.asScala.map { partition => + partition.partition -> partition.replicas.asScala.map(_.id).toSeq + }.toMap + } } diff --git a/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala index b3b044ebcdbb7..2d84284cd6bf1 100644 --- a/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala @@ -16,36 +16,53 @@ */ package kafka.api -import kafka.server.{KafkaConfig, KafkaServer} +import kafka.server.{KafkaBroker, KafkaConfig} import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.junit.jupiter.api.{BeforeEach, TestInfo} import java.util.Properties abstract class RebootstrapTest extends AbstractConsumerTest { override def brokerCount: Int = 2 - def server0: KafkaServer = serverForId(0).get - def server1: KafkaServer = 
serverForId(1).get + def server0: KafkaBroker = serverForId(0).get + def server1: KafkaBroker = serverForId(1).get + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.doSetup(testInfo, createOffsetsTopic = true) + + // Enable unclean leader election for the test topic + val topicProps = new Properties + topicProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") + + // create the test topic with all the brokers as replicas + createTopic(topic, 2, brokerCount, adminClientConfig = this.adminClientConfig, topicConfig = topicProps) + } override def generateConfigs: Seq[KafkaConfig] = { val overridingProps = new Properties() overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, brokerCount.toString) - overridingProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") // In this test, fixed ports are necessary, because brokers must have the // same port after the restart. - FixedPortTestUtils.createBrokerConfigs(brokerCount, zkConnect, enableControlledShutdown = false) + FixedPortTestUtils.createBrokerConfigs(brokerCount, enableControlledShutdown = false) .map(KafkaConfig.fromProps(_, overridingProps)) } - def clientOverrides: Properties = { + def clientOverrides(useRebootstrapTriggerMs: Boolean): Properties = { val overrides = new Properties() - overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, "5000") - overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, "5000") - overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG, "1000") - overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, "1000") + if (useRebootstrapTriggerMs) { + overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "5000") + } else { + overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "3600000") + overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, "5000") + overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, "5000") + overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG, "1000") + overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, "1000") + } overrides.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "rebootstrap") overrides } diff --git a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala index 1a4451c2b0a44..0735829a0b1a5 100644 --- a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala @@ -23,17 +23,19 @@ import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.errors.SaslAuthenticationException -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ import kafka.utils.{TestInfoUtils, TestUtils} -import kafka.zk.ConfigEntityChangeNotificationZNode +import org.apache.kafka.common.config.SaslConfigs import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import 
org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.metadata.storage.Formatter import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import scala.jdk.javaapi.OptionConverters +import scala.util.Using class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { @@ -57,9 +59,11 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { override def configureSecurityBeforeServersStart(testInfo: TestInfo): Unit = { super.configureSecurityBeforeServersStart(testInfo) - zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path) - // Create broker credentials before starting brokers - createScramCredentials(zkConnect, JaasTestUtils.KAFKA_SCRAM_ADMIN, JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD) + } + + override def addFormatterSettings(formatter: Formatter): Unit = { + formatter.setScramArguments( + List(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) } override def createPrivilegedAdminClient() = { @@ -69,10 +73,16 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), Both, + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + val superuserLoginContext = jaasAdminLoginModule(kafkaClientSaslMechanism) + superuserClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, superuserLoginContext) super.setUp(testInfo) - createTopic(topic, numPartitions, brokerCount) + Using.resource(createPrivilegedAdminClient()) { superuserAdminClient => + TestUtils.createTopicWithAdmin( + superuserAdminClient, topic, brokers, controllerServers, numPartitions + ) + } } @AfterEach @@ -81,7 +91,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { closeSasl() } - @ParameterizedTest + @ParameterizedTest(name="{displayName}.quorum=kraft.isIdempotenceEnabled={0}") @ValueSource(booleans = Array(true, false)) def testProducerWithAuthenticationFailure(isIdempotenceEnabled: Boolean): Unit = { val prop = new Properties() @@ -101,8 +111,9 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(sendOneRecord(producer2)) } - @Test - def testTransactionalProducerWithAuthenticationFailure(): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testTransactionalProducerWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val txProducer = createTransactionalProducer() verifyAuthenticationException(txProducer.initTransactions()) @@ -111,7 +122,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testConsumerWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() consumer.subscribe(List(topic).asJava) @@ -119,7 +130,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { } @ParameterizedTest(name = 
TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testManualAssignmentConsumerWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() consumer.assign(List(tp).asJava) @@ -127,7 +138,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testManualAssignmentConsumerWithAutoCommitDisabledWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) val consumer = createConsumer() @@ -146,8 +157,9 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(assertEquals(1, consumer.poll(Duration.ofMillis(1000)).count)) } - @Test - def testKafkaAdminClientWithAuthenticationFailure(): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testKafkaAdminClientWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val props = JaasTestUtils.adminClientSecurityConfigs(securityProtocol, OptionConverters.toJava(trustStoreFile), OptionConverters.toJava(clientSaslProperties)) props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) val adminClient = Admin.create(props) diff --git a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala index 74d870837dd4f..ec81a98d725b7 100644 --- a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala @@ -39,7 +39,7 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { // create static config including client login context with credentials for JaasTestUtils 'client2' - startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), Both)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism))) // set dynamic properties with credentials for JaasTestUtils 'client1' so that dynamic JAAS configuration is also // tested by this set of tests val clientLoginContext = jaasClientLoginModule(kafkaClientSaslMechanism) @@ -59,7 +59,7 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { */ @Timeout(15) @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testTwoConsumersWithDifferentSaslCredentials(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) consumerConfig.putIfAbsent(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) diff --git 
a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala index 0d562e917eefe..30a33c2ab647c 100644 --- a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala @@ -34,7 +34,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), Both, + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala index a6d5ae9327105..09f1f5119b134 100644 --- a/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala @@ -14,17 +14,11 @@ package kafka.api import kafka.security.JaasTestUtils import kafka.utils.TestUtils -import kafka.zk.{KafkaZkClient, ZkData} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.server.config.ZkConfigs -import org.apache.zookeeper.ZooDefs.Perms -import org.apache.zookeeper.data.ACL -import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api._ import java.util.Locale -import scala.collection.Seq @Timeout(600) class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { @@ -33,9 +27,6 @@ class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { private val kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) private val kafkaServerJaasEntryName = s"${listenerName.value.toLowerCase(Locale.ROOT)}.${JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME}" - this.serverConfig.setProperty(ZkConfigs.ZK_ENABLE_SECURE_ACLS_CONFIG, "false") - // disable secure acls of zkClient in QuorumTestHarness - override protected def zkAclsEnabled = Some(false) override protected def securityProtocol = SecurityProtocol.SASL_PLAINTEXT override protected lazy val trustStoreFile = Some(TestUtils.tempFile("truststore", ".jks")) override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) @@ -43,7 +34,7 @@ class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), Both, kafkaServerJaasEntryName)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), kafkaServerJaasEntryName)) super.setUp(testInfo) } @@ -52,42 +43,4 @@ class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { super.tearDown() closeSasl() } - - /** - * Checks that everyone can access ZkData.SecureZkRootPaths and ZkData.SensitiveZkRootPaths - * when zookeeper.set.acl=false, even if ZooKeeper is SASL-enabled. 
- */ - @Test - def testZkAclsDisabled(): Unit = { - secureZkPaths(zkClient).foreach(path => { - if (zkClient.pathExists(path)) { - val acls = zkClient.getAcl(path) - assertEquals(1, acls.size, s"Invalid ACLs for $path $acls") - acls.foreach(isAclUnsecure) - } - }) - } - - def secureZkPaths(zkClient: KafkaZkClient): Seq[String] = { - def subPaths(path: String): Seq[String] = { - if (zkClient.pathExists(path)) - path +: zkClient.getChildren(path).map(c => path + "/" + c).flatMap(subPaths) - else - Seq.empty - } - val topLevelPaths = ZkData.SecureRootPaths ++ ZkData.SensitiveRootPaths - topLevelPaths.flatMap(subPaths) - } - - /** - * Verifies that the ACL corresponds to the unsecure one that - * provides ALL access to everyone (world). - */ - def isAclUnsecure(acl: ACL): Boolean = { - debug(s"ACL $acl") - acl.getPerms match { - case Perms.ALL => acl.getId.getScheme == "world" - case _ => false - } - } } diff --git a/core/src/test/scala/integration/kafka/api/SaslPlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslPlaintextConsumerTest.scala index 76bec482f8242..36189a57c9f14 100644 --- a/core/src/test/scala/integration/kafka/api/SaslPlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslPlaintextConsumerTest.scala @@ -22,7 +22,7 @@ class SaslPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/SaslSetup.scala b/core/src/test/scala/integration/kafka/api/SaslSetup.scala index a9eb15a24b227..b7d2d920fd931 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSetup.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSetup.scala @@ -20,19 +20,13 @@ package kafka.api import kafka.security.JaasTestUtils import kafka.security.JaasTestUtils.JaasSection import kafka.security.minikdc.MiniKdc -import kafka.server.KafkaConfig import kafka.utils.TestUtils -import kafka.zk.{AdminZkClient, KafkaZkClient} import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ScramCredentialInfo, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} import org.apache.kafka.common.config.SaslConfigs import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.security.JaasUtils import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.security.authenticator.LoginManager -import org.apache.kafka.common.security.scram.internals.{ScramCredentialUtils, ScramFormatter, ScramMechanism} -import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.config.ConfigType -import org.apache.zookeeper.client.ZKClientConfig import java.io.File import java.util @@ -42,16 +36,6 @@ import scala.collection.Seq import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters._ import scala.jdk.javaapi.OptionConverters -import scala.util.Using - -/* - * Implements an enumeration for the modes enabled here: - * zk only, kafka only, both, custom KafkaServer. - */ -sealed trait SaslSetupMode -case object ZkSasl extends SaslSetupMode -case object KafkaSasl extends SaslSetupMode -case object Both extends SaslSetupMode /* * Trait used in SaslTestHarness and EndToEndAuthorizationTest to setup keytab and jaas files. 
@@ -74,11 +58,6 @@ trait SaslSetup { } writeJaasConfigurationToFile(jaasSections) - - val hasZk = jaasSections.exists(_.getModules.asScala.exists(_.name() == "org.apache.zookeeper.server.auth.DigestLoginModule")) - - if (hasZk) - System.setProperty("zookeeper.authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider") } protected def initializeKerberos(): Unit = { @@ -100,21 +79,13 @@ trait SaslSetup { } def jaasSections(kafkaServerSaslMechanisms: Seq[String], - kafkaClientSaslMechanism: Option[String], - mode: SaslSetupMode = Both, - kafkaServerEntryName: String = JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME): Seq[JaasSection] = { - val hasKerberos = mode != ZkSasl && - (kafkaServerSaslMechanisms.contains("GSSAPI") || kafkaClientSaslMechanism.contains("GSSAPI")) + kafkaClientSaslMechanism: Option[String], + kafkaServerEntryName: String = JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME): Seq[JaasSection] = { + val hasKerberos = kafkaServerSaslMechanisms.contains("GSSAPI") || kafkaClientSaslMechanism.contains("GSSAPI") if (hasKerberos) maybeCreateEmptyKeytabFiles() - mode match { - case ZkSasl => JaasTestUtils.zkSections.asScala - case KafkaSasl => - Seq(JaasTestUtils.kafkaServerSection(kafkaServerEntryName, kafkaServerSaslMechanisms.asJava, serverKeytabFile.toJava), - JaasTestUtils.kafkaClientSection(kafkaClientSaslMechanism.toJava, clientKeytabFile.toJava)) - case Both => Seq(JaasTestUtils.kafkaServerSection(kafkaServerEntryName, kafkaServerSaslMechanisms.asJava, serverKeytabFile.toJava), - JaasTestUtils.kafkaClientSection(kafkaClientSaslMechanism.toJava, clientKeytabFile.toJava)) ++ JaasTestUtils.zkSections.asScala - } + Seq(JaasTestUtils.kafkaServerSection(kafkaServerEntryName, kafkaServerSaslMechanisms.asJava, serverKeytabFile.toJava), + JaasTestUtils.kafkaClientSection(kafkaClientSaslMechanism.toJava, clientKeytabFile.toJava)) } private def writeJaasConfigurationToFile(jaasSections: Seq[JaasSection]): Unit = { @@ -130,7 +101,6 @@ trait SaslSetup { // Important if tests leak consumers, producers or brokers LoginManager.closeAll() System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) - System.clearProperty("zookeeper.authProvider.1") Configuration.setConfiguration(null) } @@ -201,26 +171,4 @@ trait SaslSetup { results.all.get }) } - - def createScramCredentials(zkConnect: String, userName: String, password: String): Unit = { - val zkClientConfig = new ZKClientConfig() - Using(KafkaZkClient( - zkConnect, JaasUtils.isZkSaslEnabled || KafkaConfig.zkTlsClientAuthEnabled(zkClientConfig), 30000, 30000, - Int.MaxValue, Time.SYSTEM, name = "SaslSetup", zkClientConfig = zkClientConfig, enableEntityConfigControllerCheck = false)) { zkClient => - val adminZkClient = new AdminZkClient(zkClient) - - val entityType = ConfigType.USER - val entityName = userName - val configs = adminZkClient.fetchEntityConfig(entityType, entityName) - - ScramMechanism.values().foreach(mechanism => { - val credential = new ScramFormatter(mechanism).generateCredential(password, 4096) - val credentialString = ScramCredentialUtils.credentialToString(credential) - configs.setProperty(mechanism.mechanismName, credentialString) - }) - - adminZkClient.changeConfigs(entityType, entityName, configs) - } - } - } diff --git a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala index 6a71032240b45..06592b9c3777a 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala +++ 
b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala @@ -34,6 +34,7 @@ import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, ServerConf import org.apache.kafka.metadata.authorizer.StandardAuthorizer import org.apache.kafka.server.authorizer.{Authorizer => JAuthorizer} import org.apache.kafka.storage.internals.log.LogConfig +import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest @@ -76,7 +77,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } def setUpSasl(): Unit = { - startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), Both, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) val loginContext = jaasAdminLoginModule("GSSAPI") superuserClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, loginContext) @@ -173,7 +174,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val renewer = List(SecurityUtils.parseKafkaPrincipal("User:renewer")) def generateTokenResult(maxLifeTimeMs: Int, expiryTimePeriodMs: Int, expectedTokenNum: Int): (CreateDelegationTokenResult, ExpireDelegationTokenResult) = { - val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer.asJava).maxlifeTimeMs(maxLifeTimeMs)) + val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer.asJava).maxLifetimeMs(maxLifeTimeMs)) val tokenCreated = createResult.delegationToken.get TestUtils.waitUntilTrue(() => brokers.forall(server => server.tokenCache.tokens().size() == expectedTokenNum), "Timed out waiting for token to propagate to all servers") @@ -216,7 +217,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.UNKNOWN, AclPermissionType.ALLOW)) val results2 = client.createAcls(List(aclUnknown).asJava) assertEquals(Set(aclUnknown), results2.values.keySet().asScala) - assertFutureExceptionTypeEquals(results2.all, classOf[InvalidRequestException]) + assertFutureThrows(results2.all, classOf[InvalidRequestException]) val results3 = client.deleteAcls(List(acl.toFilter, acl2.toFilter, acl3.toFilter).asJava).values assertEquals(Set(acl.toFilter, acl2.toFilter, acl3.toFilter), results3.keySet.asScala) assertEquals(0, results3.get(acl.toFilter).get.values.size()) @@ -403,8 +404,8 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.READ, AclPermissionType.ALLOW)) val results = client.createAcls(List(clusterAcl, emptyResourceNameAcl).asJava, new CreateAclsOptions()) assertEquals(Set(clusterAcl, emptyResourceNameAcl), results.values.keySet().asScala) - assertFutureExceptionTypeEquals(results.values.get(clusterAcl), classOf[InvalidRequestException]) - assertFutureExceptionTypeEquals(results.values.get(emptyResourceNameAcl), classOf[InvalidRequestException]) + assertFutureThrows(results.values.get(clusterAcl), classOf[InvalidRequestException]) + assertFutureThrows(results.values.get(emptyResourceNameAcl), classOf[InvalidRequestException]) } @ParameterizedTest @@ -413,7 +414,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu client = createAdminClient val timeout = 5000 - val options = new 
CreateDelegationTokenOptions().maxlifeTimeMs(timeout) + val options = new CreateDelegationTokenOptions().maxLifetimeMs(timeout) val tokenInfo = client.createDelegationToken(options).delegationToken.get.tokenInfo assertEquals(timeout, tokenInfo.maxTimestamp - tokenInfo.issueTimestamp) @@ -426,7 +427,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu client = createAdminClient val timeout = 5000 - val createOptions = new CreateDelegationTokenOptions().maxlifeTimeMs(timeout) + val createOptions = new CreateDelegationTokenOptions().maxLifetimeMs(timeout) val token = client.createDelegationToken(createOptions).delegationToken.get val tokenInfo = token.tokenInfo @@ -601,9 +602,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, compressionConfig.value) assertEquals(ConfigEntry.ConfigSource.DEFAULT_CONFIG, compressionConfig.source) - assertFutureExceptionTypeEquals(result.numPartitions(topic2), classOf[TopicAuthorizationException]) - assertFutureExceptionTypeEquals(result.replicationFactor(topic2), classOf[TopicAuthorizationException]) - assertFutureExceptionTypeEquals(result.config(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(result.numPartitions(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(result.replicationFactor(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(result.config(topic2), classOf[TopicAuthorizationException]) } validateMetadataAndConfigs(validateResult) @@ -614,7 +615,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val topicIds = getTopicIds() assertNotEquals(Uuid.ZERO_UUID, createResult.topicId(topic1).get()) assertEquals(topicIds(topic1), createResult.topicId(topic1).get()) - assertFutureExceptionTypeEquals(createResult.topicId(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(createResult.topicId(topic2), classOf[TopicAuthorizationException]) val createResponseConfig = createResult.config(topic1).get().entries.asScala @@ -634,10 +635,10 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu @ValueSource(strings = Array("kraft")) def testExpireDelegationToken(quorum: String): Unit = { client = createAdminClient - val createDelegationTokenOptions = new CreateDelegationTokenOptions().maxlifeTimeMs(5000) + val createDelegationTokenOptions = new CreateDelegationTokenOptions().maxLifetimeMs(5000) // Test expiration for non-exists token - TestUtils.assertFutureExceptionTypeEquals( + assertFutureThrows( client.expireDelegationToken("".getBytes()).expiryTimestamp(), classOf[DelegationTokenNotFoundException] ) @@ -647,10 +648,10 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu TestUtils.retry(maxWaitMs = 1000) { assertTrue(expireTokenOrFailWithAssert(token1, -1) < System.currentTimeMillis()) } // Test expiring the expired token - val token2 = client.createDelegationToken(createDelegationTokenOptions.maxlifeTimeMs(1000)).delegationToken().get() + val token2 = client.createDelegationToken(createDelegationTokenOptions.maxLifetimeMs(1000)).delegationToken().get() // Ensure current time > maxLifeTimeMs of token Thread.sleep(1000) - TestUtils.assertFutureExceptionTypeEquals( + assertFutureThrows( client.expireDelegationToken(token2.hmac(), new ExpireDelegationTokenOptions().expiryTimePeriodMs(1)).expiryTimestamp(), classOf[DelegationTokenExpiredException] ) @@ -667,7 +668,7 @@ class 
SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu @ValueSource(strings = Array("kraft")) def testCreateTokenWithOverflowTimestamp(quorum: String): Unit = { client = createAdminClient - val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxlifeTimeMs(Long.MaxValue)).delegationToken().get() + val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxLifetimeMs(Long.MaxValue)).delegationToken().get() assertEquals(Long.MaxValue, token.tokenInfo().expiryTimestamp()) } @@ -675,7 +676,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu @ValueSource(strings = Array("kraft")) def testExpireTokenWithOverflowTimestamp(quorum: String): Unit = { client = createAdminClient - val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxlifeTimeMs(Long.MaxValue)).delegationToken().get() + val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxLifetimeMs(Long.MaxValue)).delegationToken().get() TestUtils.retry(1000) { assertTrue(expireTokenOrFailWithAssert(token, Long.MaxValue) == Long.MaxValue) } } diff --git a/core/src/test/scala/integration/kafka/api/SaslSslConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslSslConsumerTest.scala index 0bfbb81cc6383..460ebe2cb4e75 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSslConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSslConsumerTest.scala @@ -26,7 +26,7 @@ class SaslSslConsumerTest extends BaseConsumerTest with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), Both, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(Seq("GSSAPI"), Some("GSSAPI"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala index 5063e79ad08da..9e5930d978af4 100644 --- a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala @@ -18,7 +18,7 @@ import java.util.Properties import com.yammer.metrics.core.Gauge import kafka.security.JaasTestUtils import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.{AdminClientConfig, CreateAclsResult} +import org.apache.kafka.clients.admin.{AdminClientConfig, CreateAclsResult, DescribeClusterOptions} import org.apache.kafka.common.acl._ import org.apache.kafka.common.config.SslConfigs import org.apache.kafka.common.config.internals.BrokerSecurityConfigs @@ -32,7 +32,7 @@ import org.apache.kafka.common.network.ConnectionMode import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.authorizer.{ClusterMetadataAuthorizer, StandardAuthorizer} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotNull, assertTrue} +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotNull, assertThrows, assertTrue} import org.junit.jupiter.api.{AfterEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -127,7 +127,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { SslAdminIntegrationTest.executor = None SslAdminIntegrationTest.lastUpdateRequestContext = None - startSasl(jaasSections(List.empty, None, 
KafkaSasl)) + startSasl(jaasSections(List.empty, None)) } override def createConfig: util.Map[String, Object] = { @@ -158,6 +158,25 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { super.tearDown() } + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllersIncludingFencedBrokers(quorum: String): Unit = { + useBoostrapControllers() + client = createAdminClient + val result = client.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)) + val exception = assertThrows(classOf[Exception], () => { result.nodes().get()}) + assertTrue(exception.getCause.getCause.getMessage.contains("Cannot request fenced brokers from controller endpoint")) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllers(quorum: String): Unit = { + useBoostrapControllers() + client = createAdminClient + val result = client.describeCluster(new DescribeClusterOptions()) + assertTrue(result.nodes().get().size().equals(controllerServers.size)) + } + @ParameterizedTest @ValueSource(strings = Array("kraft")) def testAclUpdatesUsingSynchronousAuthorizer(quorum: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala index d53d44a691841..3e0ba00d3f924 100644 --- a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala @@ -75,7 +75,7 @@ class SslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { override val kafkaPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "server") @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(List.empty, None, KafkaSasl)) + startSasl(jaasSections(List.empty, None)) super.setUp(testInfo) } diff --git a/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala index 3c58eb2e596db..a6cd0d905decd 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala @@ -32,7 +32,6 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ import scala.collection.mutable @@ -70,20 +69,12 @@ class TransactionsBounceTest extends IntegrationTestHarness { // Since such quick rotation of servers is incredibly unrealistic, we allow this one test to preallocate ports, leaving // a small risk of hitting errors due to port conflicts. Hopefully this is infrequent enough to not cause problems. 
override def generateConfigs = { - FixedPortTestUtils.createBrokerConfigs(brokerCount, zkConnectOrNull) + FixedPortTestUtils.createBrokerConfigs(brokerCount) .map(KafkaConfig.fromProps(_, overridingProps)) } override protected def brokerCount: Int = 4 - @nowarn("cat=deprecation") - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17961")) - def testWithGroupId(quorum: String, groupProtocol: String): Unit = { - testBrokerFailure((producer, groupId, consumer) => - producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, groupId)) - } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testWithGroupMetadata(quorum: String, groupProtocol: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala index c718f71a5ba95..2449bcc986bc2 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala @@ -20,20 +20,20 @@ package kafka.api import java.util.{Collections, Properties} import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig -import kafka.utils.{TestInfoUtils, TestUtils} +import kafka.utils.TestUtils import kafka.utils.TestUtils.{consumeRecords, createAdminClient} import org.apache.kafka.clients.admin.{Admin, ProducerState} import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException} import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} -import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{CsvSource, MethodSource} +import org.junit.jupiter.params.provider.CsvSource import scala.jdk.CollectionConverters._ import scala.collection.Seq @@ -51,7 +51,7 @@ class TransactionsExpirationTest extends KafkaServerTestHarness { var admin: Admin = _ override def generateConfigs: Seq[KafkaConfig] = { - TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps())) + TestUtils.createBrokerConfigs(3).map(KafkaConfig.fromProps(_, serverProps())) } @BeforeEach @@ -81,9 +81,14 @@ class TransactionsExpirationTest extends KafkaServerTestHarness { super.tearDown() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) - def testBumpTransactionalEpochAfterInvalidProducerIdMapping(quorum: String, groupProtocol: String): Unit = { + @ParameterizedTest + @CsvSource(Array( + "kraft,classic,false", + "kraft,consumer,false", + "kraft,classic,true", + "kraft,consumer,true", + )) + def 
testFatalErrorAfterInvalidProducerIdMapping(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { producer.initTransactions() // Start and then abort a transaction to allow the transactional ID to expire. @@ -96,14 +101,22 @@ class TransactionsExpirationTest extends KafkaServerTestHarness { waitUntilTransactionalStateExists() waitUntilTransactionalStateExpires() - // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired transactional ID. + // Start a new transaction and attempt to send, triggering an AddPartitionsToTxnRequest that will fail + // due to the expired transactional ID, resulting in a fatal error. producer.beginTransaction() val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "1", "1", willBeCommitted = false)) TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.") - org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException]) - producer.abortTransaction() + // Assert that aborting the transaction throws a KafkaException due to the fatal error. + assertThrows(classOf[KafkaException], () => producer.abortTransaction()) + + // Close the producer and reinitialize to recover from the fatal error. + producer.close() + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() + + // Proceed with a new transaction after reinitializing. producer.beginTransaction() producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "2", willBeCommitted = true)) producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 2, "4", "4", willBeCommitted = true)) @@ -170,7 +183,9 @@ class TransactionsExpirationTest extends KafkaServerTestHarness { // soon after the first will re-use the same producerId, while bumping the epoch to indicate that they are distinct. assertEquals(oldProducerId, newProducerId) if (isTV2Enabled) { - assertEquals(oldProducerEpoch + 3, newProducerEpoch) + // TV2 bumps epoch on EndTxn, and the final commit may or may not have bumped the epoch in the producer state. + // The epoch should be at least oldProducerEpoch + 2 for the first commit and the restarted producer. 
+ assertTrue(oldProducerEpoch + 2 <= newProducerEpoch) } else { assertEquals(oldProducerEpoch + 1, newProducerEpoch) } diff --git a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala index fba101e1f76c4..66ff64f2cdc03 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala @@ -17,20 +17,20 @@ package kafka.api -import kafka.utils.{TestInfoUtils, TestUtils} import kafka.utils.TestUtils.{consumeRecords, waitUntilTrue} -import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerGroupMetadata, ConsumerRecord, OffsetAndMetadata} +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer._ import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.{InvalidProducerEpochException, ProducerFencedException, TimeoutException} +import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource -import org.junit.jupiter.params.provider.CsvSource +import org.junit.jupiter.params.provider.{CsvSource, MethodSource} import java.lang.{Long => JLong} import java.nio.charset.StandardCharsets @@ -38,9 +38,8 @@ import java.time.Duration import java.util import java.util.concurrent.TimeUnit import java.util.{Optional, Properties} -import scala.annotation.nowarn -import scala.collection.{Seq, mutable} import scala.collection.mutable.{ArrayBuffer, ListBuffer} +import scala.collection.{Seq, mutable} import scala.concurrent.ExecutionException import scala.jdk.CollectionConverters._ @@ -299,14 +298,6 @@ class TransactionsTest extends IntegrationTestHarness { assertEquals(3L, second.offset) } - @nowarn("cat=deprecation") - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17961")) - def testSendOffsetsWithGroupId(quorum: String, groupProtocol: String): Unit = { - sendOffset((producer, groupId, consumer) => - producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, groupId)) - } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testSendOffsetsWithGroupMetadata(quorum: String, groupProtocol: String): Unit = { @@ -401,6 +392,7 @@ class TransactionsTest extends IntegrationTestHarness { producer1.beginTransaction() producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "1", "1", willBeCommitted = false)) producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "3", "3", willBeCommitted = false)) + producer1.flush() producer2.initTransactions() // ok, will abort the open transaction. 
producer2.beginTransaction() @@ -438,7 +430,7 @@ class TransactionsTest extends IntegrationTestHarness { producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "4", willBeCommitted = true)) producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "4", willBeCommitted = true)) - assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(Map(new TopicPartition("foobartopic", 0) + assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(Map(new TopicPartition(topic1, 0) -> new OffsetAndMetadata(110L)).asJava, new ConsumerGroupMetadata("foobarGroup"))) producer2.commitTransaction() // ok @@ -568,6 +560,8 @@ class TransactionsTest extends IntegrationTestHarness { val consumer = transactionalConsumers(0) consumer.subscribe(List(topic1, topic2).asJava) + TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic1, 0)) + TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic2, 0)) producer1.initTransactions() producer1.beginTransaction() @@ -586,15 +580,21 @@ class TransactionsTest extends IntegrationTestHarness { producer1.beginTransaction() val result = producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "1", "5", willBeCommitted = false)) val recordMetadata = result.get() - error(s"Missed a producer fenced exception when writing to ${recordMetadata.topic}-${recordMetadata.partition}. Grab the logs!!") + error(s"Missed an exception when writing to ${recordMetadata.topic}-${recordMetadata.partition}. Grab the logs!!") brokers.foreach { broker => error(s"log dirs: ${broker.logManager.liveLogDirs.map(_.getAbsolutePath).head}") } fail("Should not be able to send messages from a fenced producer.") } catch { - case _: ProducerFencedException => - case e: ExecutionException => - assertTrue(e.getCause.isInstanceOf[ProducerFencedException]) + case _: InvalidProducerEpochException => + case e: ExecutionException => { + if (quorum == "zk") { + assertTrue(e.getCause.isInstanceOf[ProducerFencedException]) + } else { + // In kraft mode, transactionV2 is used. + assertTrue(e.getCause.isInstanceOf[InvalidProducerEpochException]) + } + } case e: Exception => throw new AssertionError("Got an unexpected exception from a fenced producer.", e) } @@ -622,14 +622,27 @@ class TransactionsTest extends IntegrationTestHarness { // Wait for the expiration cycle to kick in. Thread.sleep(600) - try { - // Now that the transaction has expired, the second send should fail with a ProducerFencedException. - producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)).get() - fail("should have raised a ProducerFencedException since the transaction has expired") - } catch { - case _: ProducerFencedException => - case e: ExecutionException => - assertTrue(e.getCause.isInstanceOf[ProducerFencedException]) + if (quorum == "zk") { + // In zk mode, transaction v1 is used. + try { + // Now that the transaction has expired, the second send should fail with a ProducerFencedException. 
+ producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)).get() + fail("should have raised a ProducerFencedException since the transaction has expired") + } catch { + case _: ProducerFencedException => + case e: ExecutionException => + assertTrue(e.getCause.isInstanceOf[ProducerFencedException]) + } + } else { + try { + // Now that the transaction has expired, the second send should fail with an InvalidProducerEpochException. + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)).get() + fail("should have raised an InvalidProducerEpochException since the transaction has expired") + } catch { + case _: InvalidProducerEpochException => + case e: ExecutionException => + assertTrue(e.getCause.isInstanceOf[InvalidProducerEpochException]) + } } // Verify that the first message was aborted and the second one was never written at all. @@ -697,14 +710,16 @@ class TransactionsTest extends IntegrationTestHarness { assertThrows(classOf[IllegalStateException], () => producer.initTransactions()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @Flaky("KAFKA-18035") + @ParameterizedTest @CsvSource(Array( "kraft,classic,false", "kraft,consumer,false", )) def testBumpTransactionalEpochWithTV2Disabled(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + val defaultLinger = 5; val producer = createTransactionalProducer("transactionalProducer", - deliveryTimeoutMs = 5000, requestTimeoutMs = 5000) + deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000) val consumer = transactionalConsumers.head try { // Create a topic with RF=1 so that a single broker failure will render it unavailable @@ -726,7 +741,8 @@ class TransactionsTest extends IntegrationTestHarness { val initialProducerEpoch = producerStateEntry.producerEpoch producer.beginTransaction() - producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)) + val successfulFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)) + successfulFuture.get(20, TimeUnit.SECONDS) killBroker(partitionLeader) // kill the partition leader to prevent the batch from being submitted val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = false)) @@ -762,11 +778,15 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @CsvSource(Array("kraft, classic, true", "kraft, consumer, true")) + @ParameterizedTest + @CsvSource(Array( + "kraft, classic, true", + "kraft, consumer, true" + )) def testBumpTransactionalEpochWithTV2Enabled(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + val defaultLinger = 5; val producer = createTransactionalProducer("transactionalProducer", - deliveryTimeoutMs = 5000, requestTimeoutMs = 5000) + deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000) val consumer = transactionalConsumers.head try { @@ -782,18 +802,19 @@ class TransactionsTest extends IntegrationTestHarness { producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "4", "4", willBeCommitted = true)) producer.commitTransaction() - // Get producerId and epoch after first commit + // Second transaction: abort +
producer.beginTransaction() + val successfulFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)) + successfulFuture.get(20, TimeUnit.SECONDS) + + // Get producerId and epoch after first commit. Check after the first successful send of the next transaction to confirm the commit is complete. val log = brokers(partitionLeader).logManager.getLog(new TopicPartition(testTopic, 0)).get val producerStateManager = log.producerStateManager val activeProducersIter = producerStateManager.activeProducers.entrySet().iterator() assertTrue(activeProducersIter.hasNext) var producerStateEntry = activeProducersIter.next().getValue val producerId = producerStateEntry.producerId - var previousProducerEpoch = producerStateEntry.producerEpoch - - // Second transaction: abort - producer.beginTransaction() - producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "2", willBeCommitted = false)) + val previousProducerEpoch = producerStateEntry.producerEpoch killBroker(partitionLeader) // kill the partition leader to prevent the batch from being submitted val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = false)) @@ -803,40 +824,22 @@ class TransactionsTest extends IntegrationTestHarness { org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[TimeoutException]) producer.abortTransaction() - // Get producer epoch after abortTransaction and verify it has increased. - producerStateEntry = - brokers(partitionLeader).logManager.getLog(new TopicPartition(testTopic, 0)).get.producerStateManager.activeProducers.get(producerId) - // Assert that producerStateEntry is not null - assertNotNull(producerStateEntry, "Producer state entry should not be null after abortTransaction") - - val currentProducerEpoch = producerStateEntry.producerEpoch - assertTrue(currentProducerEpoch > previousProducerEpoch, - s"Producer epoch after abortTransaction ($currentProducerEpoch) should be greater than after first commit ($previousProducerEpoch)" - ) - // Update previousProducerEpoch - previousProducerEpoch = currentProducerEpoch - // Third transaction: commit producer.beginTransaction() - producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "2", willBeCommitted = true)) + val nextSuccessfulFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "2", willBeCommitted = true)) + nextSuccessfulFuture.get(20, TimeUnit.SECONDS) + + // Confirm the epoch bumped after the previous abort. 
+ producerStateEntry = + brokers(partitionLeader).logManager.getLog(new TopicPartition(topic2, 0)).get.producerStateManager.activeProducers.get(producerId) + assertNotNull(producerStateEntry) + assertTrue(producerStateEntry.producerEpoch > previousProducerEpoch) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "4", "4", willBeCommitted = true)) producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "1", "1", willBeCommitted = true)) producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = true)) producer.commitTransaction() - // Wait until the producer epoch has been updated on the broker - TestUtils.waitUntilTrue(() => { - val logOption = brokers(partitionLeader).logManager.getLog(new TopicPartition(testTopic, 0)) - logOption.exists { log => - val producerStateEntry = log.producerStateManager.activeProducers.get(producerId) - producerStateEntry != null && producerStateEntry.producerEpoch > previousProducerEpoch - } - }, "Timed out waiting for producer epoch to be incremented after second commit", 10000) - - // Now that we've verified that the producer epoch has increased, - // update the previous producer epoch. - previousProducerEpoch = currentProducerEpoch - consumer.subscribe(List(topic1, topic2, testTopic).asJava) val records = consumeRecords(consumer, 5) @@ -901,7 +904,8 @@ class TransactionsTest extends IntegrationTestHarness { producer1.close() } - val producer3 = createTransactionalProducer("transactional-producer", maxBlockMs = 5000) + // Make sure to leave this producer enough time before request timeout. The broker restart can take some time. + val producer3 = createTransactionalProducer("transactional-producer") producer3.initTransactions() producer3.beginTransaction() @@ -910,7 +914,7 @@ class TransactionsTest extends IntegrationTestHarness { // Check that the epoch only increased by 1 when TV2 is disabled. // With TV2 and the latest EndTxnRequest version, the epoch will be bumped at the end of every transaction aka - // three times (once after each commit and once after the timeout exception) + // three times (once after each commit and once after the timeout exception). The last bump is less consistent, so ensure the first two happen. producerStateEntry = brokers(partitionLeader).logManager.getLog(new TopicPartition(topic1, 0)).get.producerStateManager.activeProducers.get(producerId) assertNotNull(producerStateEntry) @@ -918,13 +922,26 @@ class TransactionsTest extends IntegrationTestHarness { if (!isTV2Enabled) { assertEquals((initialProducerEpoch + 1).toShort, producerStateEntry.producerEpoch) } else { - // Wait until the producer epoch has been updated on the broker. 
- TestUtils.waitUntilTrue(() => { - producerStateEntry != null && producerStateEntry.producerEpoch == initialProducerEpoch + 3 - }, "Timed out waiting for producer epoch to be incremented after second commit", 10000) + assertTrue((initialProducerEpoch + 1).toShort <= producerStateEntry.producerEpoch) } } + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.isTV2Enabled={2}") + @CsvSource(Array( + "kraft, consumer, true", + )) + def testEmptyAbortAfterCommit(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + val producer = transactionalProducers.head + + producer.initTransactions() + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 1, "4", "4", willBeCommitted = false)) + producer.commitTransaction() + + producer.beginTransaction() + producer.abortTransaction() + } + private def sendTransactionalMessagesWithValueRange(producer: KafkaProducer[Array[Byte], Array[Byte]], topic: String, start: Int, end: Int, willBeCommitted: Boolean): Unit = { for (i <- start until end) { diff --git a/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala index 53899a4374335..c59997bd37c8a 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala @@ -50,7 +50,7 @@ class TransactionsWithMaxInFlightOneTest extends KafkaServerTestHarness { val transactionalConsumers = mutable.Buffer[Consumer[Array[Byte], Array[Byte]]]() override def generateConfigs: Seq[KafkaConfig] = { - TestUtils.createBrokerConfigs(numBrokers, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps())) + TestUtils.createBrokerConfigs(numBrokers).map(KafkaConfig.fromProps(_, serverProps())) } @BeforeEach diff --git a/core/src/test/scala/integration/kafka/api/UserQuotaTest.scala b/core/src/test/scala/integration/kafka/api/UserQuotaTest.scala index 5a5fb83aab5a5..3dc04c5f2c165 100644 --- a/core/src/test/scala/integration/kafka/api/UserQuotaTest.scala +++ b/core/src/test/scala/integration/kafka/api/UserQuotaTest.scala @@ -31,7 +31,7 @@ class UserQuotaTest extends BaseQuotaTest with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some("GSSAPI"), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some("GSSAPI"), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) quotaTestClients.alterClientQuotas( quotaTestClients.clientQuotaAlteration( @@ -42,6 +42,8 @@ class UserQuotaTest extends BaseQuotaTest with SaslSetup { quotaTestClients.waitForQuotaUpdate(defaultProducerQuota, defaultConsumerQuota, defaultRequestQuota) } + // @Flaky("KAFKA-8073") -> testThrottledProducerConsumer (in super class) + @AfterEach override def tearDown(): Unit = { super.tearDown() diff --git a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIdsIntegrationTest.scala b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIdsIntegrationTest.scala deleted file mode 100644 index d2b1358b369ed..0000000000000 --- a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIdsIntegrationTest.scala +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.coordinator.transaction - -import kafka.network.SocketServer -import kafka.server.IntegrationTestUtils -import org.apache.kafka.common.test.api.ClusterInstance -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, ClusterTests, Type} -import org.apache.kafka.common.test.api.ClusterTestExtensions -import org.apache.kafka.common.message.InitProducerIdRequestData -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.common.record.RecordBatch -import org.apache.kafka.common.requests.{InitProducerIdRequest, InitProducerIdResponse} -import org.apache.kafka.server.common.MetadataVersion -import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.extension.ExtendWith - -import java.util.stream.{Collectors, IntStream} -import scala.concurrent.duration.DurationInt -import scala.jdk.CollectionConverters._ - -@ClusterTestDefaults(serverProperties = Array( - new ClusterConfigProperty(key = "transaction.state.log.num.partitions", value = "1") -)) -@ExtendWith(value = Array(classOf[ClusterTestExtensions])) -class ProducerIdsIntegrationTest { - - @ClusterTests(Array( - new ClusterTest(types = Array(Type.KRAFT), brokers = 3, metadataVersion = MetadataVersion.IBP_3_3_IV0) - )) - def testUniqueProducerIds(clusterInstance: ClusterInstance): Unit = { - verifyUniqueIds(clusterInstance) - } - - private def verifyUniqueIds(clusterInstance: ClusterInstance): Unit = { - // Request enough PIDs from each broker to ensure each broker generates two blocks - val ids = clusterInstance.brokerSocketServers().stream().flatMap( broker => { - IntStream.range(0, 1001).parallel().mapToObj( _ => - nextProducerId(broker, clusterInstance.clientListener()) - )}).collect(Collectors.toList[Long]).asScala.toSeq - - val brokerCount = clusterInstance.brokerIds.size - val expectedTotalCount = 1001 * brokerCount - assertEquals(expectedTotalCount, ids.size, s"Expected exactly $expectedTotalCount IDs") - assertEquals(expectedTotalCount, ids.distinct.size, "Found duplicate producer IDs") - } - - private def nextProducerId(broker: SocketServer, listener: ListenerName): Long = { - // Generating producer ids may fail while waiting for the initial block and also - // when the current block is full and waiting for the prefetched block. 
- val deadline = 5.seconds.fromNow - var shouldRetry = true - var response: InitProducerIdResponse = null - while (shouldRetry && deadline.hasTimeLeft()) { - val data = new InitProducerIdRequestData() - .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - .setProducerId(RecordBatch.NO_PRODUCER_ID) - .setTransactionalId(null) - .setTransactionTimeoutMs(10) - val request = new InitProducerIdRequest.Builder(data).build() - - response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request, - destination = broker, - listenerName = listener) - - shouldRetry = response.data.errorCode == Errors.COORDINATOR_LOAD_IN_PROGRESS.code - } - assertTrue(deadline.hasTimeLeft()) - assertEquals(Errors.NONE.code, response.data.errorCode) - response.data().producerId() - } -} diff --git a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala new file mode 100644 index 0000000000000..5fe06748631cf --- /dev/null +++ b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala @@ -0,0 +1,223 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka.coordinator.transaction + +import kafka.network.SocketServer +import kafka.server.IntegrationTestUtils +import org.apache.kafka.clients.admin.{Admin, NewTopic, TransactionState} +import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerRecords, OffsetAndMetadata} +import org.apache.kafka.clients.producer.{Producer, ProducerConfig, ProducerRecord} +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.errors.RecordTooLargeException +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterInstance, ClusterTest, ClusterTestDefaults, ClusterTestExtensions, ClusterTests, Type} +import org.apache.kafka.common.message.InitProducerIdRequestData +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.requests.{InitProducerIdRequest, InitProducerIdResponse} +import org.apache.kafka.common.test.TestUtils +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.server.common.{Feature, MetadataVersion} +import org.junit.jupiter.api.Assertions.{assertEquals, assertInstanceOf, assertThrows, assertTrue} +import org.junit.jupiter.api.extension.ExtendWith + +import java.time.Duration +import java.util +import java.util.Collections +import java.util.concurrent.ExecutionException +import java.util.stream.{Collectors, IntStream, StreamSupport} +import scala.concurrent.duration.DurationInt +import scala.jdk.CollectionConverters._ + +@ClusterTestDefaults(types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), +)) +@ExtendWith(value = Array(classOf[ClusterTestExtensions])) +class ProducerIntegrationTest { + + @ClusterTests(Array( + new ClusterTest(metadataVersion = MetadataVersion.IBP_3_3_IV0) + )) + def testUniqueProducerIds(clusterInstance: ClusterInstance): Unit = { + verifyUniqueIds(clusterInstance) + } + + @ClusterTests(Array( + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 0))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 1))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 2))), + )) + def testTransactionWithAndWithoutSend(cluster: ClusterInstance): Unit = { + val properties = new util.HashMap[String, Object] + properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "foobar") + properties.put(ProducerConfig.CLIENT_ID_CONFIG, "test") + properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") + val producer: Producer[Array[Byte], Array[Byte]] = cluster.producer(properties) + try { + producer.initTransactions() + producer.beginTransaction() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]]("test", "key".getBytes, 
"value".getBytes)) + producer.commitTransaction() + + producer.beginTransaction() + producer.commitTransaction() + } finally if (producer != null) producer.close() + } + + @ClusterTests(Array( + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 0))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 1))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 2))), + )) + def testTransactionWithInvalidSendAndEndTxnRequestSent(cluster: ClusterInstance): Unit = { + val topic = new NewTopic("foobar", 1, 1.toShort).configs(Collections.singletonMap(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "100")) + val txnId = "test-txn" + val properties = new util.HashMap[String, Object] + properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txnId) + properties.put(ProducerConfig.CLIENT_ID_CONFIG, "test") + properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") + + val admin = cluster.admin() + val producer: Producer[Array[Byte], Array[Byte]] = cluster.producer(properties) + try { + admin.createTopics(List(topic).asJava) + + producer.initTransactions() + producer.beginTransaction() + assertInstanceOf(classOf[RecordTooLargeException], + assertThrows(classOf[ExecutionException], + () => producer.send(new ProducerRecord[Array[Byte], Array[Byte]]( + topic.name(), Array.fill(100)(0: Byte), Array.fill(100)(0: Byte))).get()).getCause) + + producer.abortTransaction() + } finally { + if (admin != null) admin.close() + if (producer != null) producer.close() + } + } + + @ClusterTests(Array( + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 0))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 1))), + new ClusterTest(features = Array( + new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 2))), + )) + def testTransactionWithSendOffset(cluster: ClusterInstance): Unit = { + val inputTopic: String = "my-input-topic" + var producer: Producer[Array[Byte], Array[Byte]] = cluster.producer + try { + for (i <- 0 until 5) { + val key: Array[Byte] = ("key-" + i).getBytes + val value: Array[Byte] = ("value-" + i).getBytes + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](inputTopic, key, value)).get + } + } finally if (producer != null) producer.close() + + val txnId: String = "foobar" + val producerProperties: util.Map[String, Object] = new util.HashMap[String, Object] + producerProperties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txnId) + producerProperties.put(ProducerConfig.CLIENT_ID_CONFIG, "test") + producerProperties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") + + val consumerProperties: util.Map[String, Object] = new util.HashMap[String, Object] + consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-group") + consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + + producer = cluster.producer(producerProperties) + val consumer: Consumer[Array[Byte], Array[Byte]] = cluster.consumer(consumerProperties) + try { + producer.initTransactions() + producer.beginTransaction() + consumer.subscribe(util.List.of(inputTopic)) + var records: ConsumerRecords[Array[Byte], Array[Byte]] = null + TestUtils.waitForCondition(() => { + records = consumer.poll(Duration.ZERO) + records.count == 5 + }, "poll records size not match") + val lastRecord = StreamSupport.stream(records.spliterator, 
false).reduce((_, second) => second).orElse(null) + val offsets = Collections.singletonMap( + new TopicPartition(lastRecord.topic, lastRecord.partition), new OffsetAndMetadata(lastRecord.offset + 1)) + producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata) + producer.commitTransaction() + } finally { + if (producer != null) producer.close() + if (consumer != null) consumer.close() + } + + val admin: Admin = cluster.admin + try { + TestUtils.waitForCondition(() => { + admin.listTransactions.all.get.stream + .filter(txn => txn.transactionalId == txnId) + .anyMatch(txn => txn.state eq TransactionState.COMPLETE_COMMIT) + }, "transaction is not in COMPLETE_COMMIT state") + } finally if (admin != null) admin.close() + } + + private def verifyUniqueIds(clusterInstance: ClusterInstance): Unit = { + // Request enough PIDs from each broker to ensure each broker generates two blocks + val ids = clusterInstance.brokerSocketServers().stream().flatMap( broker => { + IntStream.range(0, 1001).parallel().mapToObj( _ => + nextProducerId(broker, clusterInstance.clientListener()) + )}).collect(Collectors.toList[Long]).asScala.toSeq + + val brokerCount = clusterInstance.brokerIds.size + val expectedTotalCount = 1001 * brokerCount + assertEquals(expectedTotalCount, ids.size, s"Expected exactly $expectedTotalCount IDs") + assertEquals(expectedTotalCount, ids.distinct.size, "Found duplicate producer IDs") + } + + private def nextProducerId(broker: SocketServer, listener: ListenerName): Long = { + // Generating producer ids may fail while waiting for the initial block and also + // when the current block is full and waiting for the prefetched block. + val deadline = 5.seconds.fromNow + var shouldRetry = true + var response: InitProducerIdResponse = null + while (shouldRetry && deadline.hasTimeLeft()) { + val data = new InitProducerIdRequestData() + .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) + .setProducerId(RecordBatch.NO_PRODUCER_ID) + .setTransactionalId(null) + .setTransactionTimeoutMs(10) + val request = new InitProducerIdRequest.Builder(data).build() + + response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request, + destination = broker, + listenerName = listener) + + shouldRetry = response.data.errorCode == Errors.COORDINATOR_LOAD_IN_PROGRESS.code + } + assertTrue(deadline.hasTimeLeft()) + assertEquals(Errors.NONE.code, response.data.errorCode) + response.data().producerId() + } +} diff --git a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala index 0f2dc0ea978de..4f5cd7f4a2803 100644 --- a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala +++ b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala @@ -29,6 +29,7 @@ import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.{ProduceRequest, ProduceResponse} import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.common.{KafkaException, requests} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.config.QuotaConfig @@ -81,6 +82,7 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } } + @Flaky("KAFKA-17999") @ParameterizedTest @ValueSource(strings = Array("kraft")) def testDynamicConnectionQuota(quorum: String): 
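Aside on the ProducerIntegrationTest above: testTransactionWithSendOffset exercises the standard consume-transform-produce flow (initTransactions, sendOffsetsToTransaction with the consumer's group metadata, then commitTransaction). A minimal client-side sketch of that pattern follows; it is not part of the patch, and the bootstrap address, topic names and group id are illustrative only.

import java.time.Duration
import java.util.Properties
import scala.jdk.CollectionConverters._
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer}

object TransactionalCopySketch {
  def main(args: Array[String]): Unit = {
    val bootstrap = "localhost:9092" // illustrative

    val producerProps = new Properties()
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap)
    producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "copy-txn") // transactional.id implies idempotence
    val producer = new KafkaProducer[Array[Byte], Array[Byte]](producerProps, new ByteArraySerializer, new ByteArraySerializer)

    val consumerProps = new Properties()
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap)
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "copy-group")
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") // offsets are committed via the producer
    consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](consumerProps, new ByteArrayDeserializer, new ByteArrayDeserializer)

    producer.initTransactions()
    consumer.subscribe(java.util.List.of("my-input-topic"))

    val records = consumer.poll(Duration.ofSeconds(1))
    if (!records.isEmpty) {
      producer.beginTransaction()
      try {
        records.asScala.foreach { r =>
          producer.send(new ProducerRecord("my-output-topic", r.key, r.value))
        }
        // Commit the consumed offsets inside the same transaction so input and output move atomically.
        val offsets = records.partitions.asScala.map { tp =>
          val last = records.records(tp).asScala.last
          tp -> new OffsetAndMetadata(last.offset + 1)
        }.toMap.asJava
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata())
        producer.commitTransaction()
      } catch {
        case e: Exception =>
          // A real application would treat fenced/fatal producer errors by closing the producer instead.
          producer.abortTransaction()
          throw e
      }
    }
    consumer.close()
    producer.close()
  }
}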
Unit = { @@ -303,7 +305,7 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } private def produceRequest: ProduceRequest = - requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(topic) diff --git a/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala b/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala index 4061e6aaaf8ff..96664d41a809c 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala @@ -16,7 +16,6 @@ */ package kafka.server -import kafka.log.AsyncOffsetReadFutureHolder import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.NotLeaderOrFollowerException import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse @@ -25,6 +24,7 @@ import org.apache.kafka.common.record.FileRecords.TimestampAndOffset import org.apache.kafka.common.requests.ListOffsetsResponse import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.util.timer.MockTimer +import org.apache.kafka.storage.internals.log.{AsyncOffsetReadFutureHolder, OffsetResultHolder} import org.junit.jupiter.api.{AfterEach, Test} import org.junit.jupiter.api.Assertions.assertEquals import org.mockito.ArgumentMatchers.anyBoolean @@ -41,7 +41,7 @@ class DelayedRemoteListOffsetsTest { val delayMs = 10 val timer = new MockTimer() val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - type T = Either[Exception, Option[TimestampAndOffset]] + type T = OffsetResultHolder.FileRecordsOrError val purgatory = new DelayedOperationPurgatory[DelayedRemoteListOffsets]("test-purgatory", timer, 0, 10, true, true) @@ -76,9 +76,9 @@ class DelayedRemoteListOffsetsTest { }) val statusByPartition = mutable.Map( - new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Some(holder)) + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) ) val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) @@ -115,7 +115,7 @@ class DelayedRemoteListOffsetsTest { val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) val taskFuture = new CompletableFuture[T]() - taskFuture.complete(Right(Some(timestampAndOffset))) + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) var cancelledCount = 0 val jobFuture = mock(classOf[CompletableFuture[Void]]) @@ -128,9 +128,9 @@ class DelayedRemoteListOffsetsTest { }) val statusByPartition = mutable.Map( - new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Some(holder)) + new 
TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) ) val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) @@ -165,7 +165,7 @@ class DelayedRemoteListOffsetsTest { val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) val taskFuture = new CompletableFuture[T]() - taskFuture.complete(Right(Some(timestampAndOffset))) + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) var cancelledCount = 0 val jobFuture = mock(classOf[CompletableFuture[Void]]) @@ -179,14 +179,14 @@ class DelayedRemoteListOffsetsTest { val errorFutureHolder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) val errorTaskFuture = new CompletableFuture[T]() - errorTaskFuture.complete(Left(new TimeoutException("Timed out!"))) + errorTaskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.of(new TimeoutException("Timed out!")), Optional.empty())) when(errorFutureHolder.taskFuture).thenAnswer(_ => errorTaskFuture) when(errorFutureHolder.jobFuture).thenReturn(jobFuture) val statusByPartition = mutable.Map( - new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Some(errorFutureHolder)) + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(errorFutureHolder)) ) val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) @@ -221,7 +221,7 @@ class DelayedRemoteListOffsetsTest { val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) val taskFuture = new CompletableFuture[T]() - taskFuture.complete(Right(Some(timestampAndOffset))) + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) var cancelledCount = 0 val jobFuture = mock(classOf[CompletableFuture[Void]]) @@ -241,10 +241,10 @@ class DelayedRemoteListOffsetsTest { when(errorFutureHolder.jobFuture).thenReturn(jobFuture) val statusByPartition = mutable.Map( - new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Some(holder)), - new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Some(errorFutureHolder)), - new TopicPartition("test1", 1) -> ListOffsetsPartitionStatus(None, Some(holder)) + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(errorFutureHolder)), + new TopicPartition("test1", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) ) val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) diff --git 
a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala index f19af34500d5b..49a0ebc21f4bf 100644 --- a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala +++ b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala @@ -29,15 +29,12 @@ import java.util.concurrent._ import javax.management.ObjectName import com.yammer.metrics.core.MetricName import kafka.admin.ConfigCommand -import kafka.api.{KafkaSasl, SaslSetup} -import kafka.controller.{ControllerBrokerStateInfo, ControllerChannelManager} +import kafka.api.SaslSetup import kafka.log.UnifiedLog import kafka.network.{DataPlaneAcceptor, Processor, RequestChannel} import kafka.security.JaasTestUtils import kafka.utils._ import kafka.utils.Implicits._ -import kafka.utils.TestUtils.TestControllerRequestCompletionHandler -import kafka.zk.ConfigEntityChangeNotificationZNode import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.ConfigEntry.{ConfigSource, ConfigSynonym} @@ -45,38 +42,33 @@ import org.apache.kafka.clients.admin._ import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerRecord, ConsumerRecords, KafkaConsumer} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, Reconfigurable, TopicPartition, TopicPartitionInfo} -import org.apache.kafka.common.config.{ConfigException, ConfigResource, SaslConfigs} +import org.apache.kafka.common.config.{ConfigException, ConfigResource} import org.apache.kafka.common.config.SslConfigs._ import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.config.types.Password import org.apache.kafka.common.config.provider.FileConfigProvider import org.apache.kafka.common.errors.{AuthenticationException, InvalidRequestException} import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.MetadataRequestData import org.apache.kafka.common.metrics.{JmxReporter, KafkaMetric, MetricsContext, MetricsReporter, Quota} import org.apache.kafka.common.network.{ConnectionMode, ListenerName} import org.apache.kafka.common.network.CertStores.{KEYSTORE_PROPS, TRUSTSTORE_PROPS} import org.apache.kafka.common.record.TimestampType -import org.apache.kafka.common.requests.MetadataRequest import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.security.scram.ScramCredential import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.security.{PasswordEncoder, PasswordEncoderConfigs} -import org.apache.kafka.server.config.{ConfigType, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms, ZkConfigs} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.metrics.{KafkaYammerMetrics, MetricConfigs} import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.server.util.ShutdownableThread import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} -import org.apache.kafka.test.{TestSslUtils, TestUtils => 
JTestUtils} +import org.apache.kafka.test.TestSslUtils import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Disabled, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{CsvSource, MethodSource} +import org.junit.jupiter.params.provider.MethodSource import java.util.concurrent.atomic.AtomicInteger -import scala.annotation.nowarn import scala.collection._ import scala.collection.mutable.ArrayBuffer import scala.jdk.CollectionConverters._ @@ -111,6 +103,12 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup private val sslProperties2 = JaasTestUtils.sslConfigs(ConnectionMode.SERVER, false, Optional.of(trustStoreFile2), "kafka") private val invalidSslProperties = invalidSslConfigs + override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { + val propsSeq = super.kraftControllerConfigs(testInfo) + propsSeq.foreach(props => props.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_INTERVAL_MS_CONFIG, "100")) + propsSeq + } + @BeforeEach override def setUp(testInfo: TestInfo): Unit = { startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism))) @@ -120,15 +118,8 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup (0 until numServers).foreach { brokerId => - val props = if (isKRaftTest()) { - val properties = TestUtils.createBrokerConfig(brokerId, null) - properties.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") - properties - } else { - val properties = TestUtils.createBrokerConfig(brokerId, zkConnect) - properties.put(ZkConfigs.ZK_ENABLE_SECURE_ACLS_CONFIG, "true") - properties - } + val props = TestUtils.createBrokerConfig(brokerId) + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") props ++= securityProps(sslProperties1, TRUSTSTORE_PROPS) // Ensure that we can support multiple listeners per security protocol and multiple security protocols props.put(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") @@ -137,9 +128,8 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup props.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "requested") props.put(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, "PLAIN") props.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, kafkaServerSaslMechanisms.mkString(",")) - props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "2000") // low value to test log rolling on config update + props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "1048576") // low value to test log rolling on config update props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2") // greater than one to test reducing threads - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "dynamic-config-secret") props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, 1680000000.toString) props.put(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, 168.toString) @@ -153,9 +143,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(SecureExternal)) val kafkaConfig = KafkaConfig.fromProps(props) - if (!isKRaftTest()) { - configureDynamicKeystoreInZooKeeper(kafkaConfig, sslProperties1) 
- } servers += createBroker(kafkaConfig) } @@ -333,14 +320,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertFalse(reporter.kafkaMetrics.isEmpty, "No metrics found") } - if (!isKRaftTest()) { - // fetch from ZK, values should be unresolved - val props = fetchBrokerConfigsFromZooKeeper(servers.head) - assertTrue(props.getProperty(TestMetricsReporter.PollingIntervalProp) == PollingIntervalVal, "polling interval is not updated in ZK") - assertTrue(props.getProperty(configPrefix + SSL_TRUSTSTORE_TYPE_CONFIG) == SslTruststoreTypeVal, "store type is not updated in ZK") - assertTrue(props.getProperty(configPrefix + SSL_KEYSTORE_PASSWORD_CONFIG) == SslKeystorePasswordVal, "keystore password is not updated in ZK") - } - // verify the update // 1. verify update not occurring if the value of property is same. alterConfigsUsingConfigCommand(updatedProps) @@ -459,23 +438,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup verifyProduceConsume(producer, consumer, 10, topic) } - def verifyBrokerToControllerCall(controller: KafkaServer): Unit = { - val nonControllerBroker = servers.find(_.config.brokerId != controller.config.brokerId).get - val brokerToControllerManager = nonControllerBroker.clientToControllerChannelManager - val completionHandler = new TestControllerRequestCompletionHandler() - brokerToControllerManager.sendRequest(new MetadataRequest.Builder(new MetadataRequestData()), completionHandler) - TestUtils.waitUntilTrue(() => { - completionHandler.completed.get() || completionHandler.timedOut.get() - }, "Timed out while waiting for broker to controller API call") - // we do not expect a timeout from broker to controller request - assertFalse(completionHandler.timedOut.get(), "broker to controller request is timeout") - assertTrue(completionHandler.actualResponse.isDefined, "No response recorded even though request is completed") - val response = completionHandler.actualResponse.get - assertNull(response.authenticationException(), s"Request failed due to authentication error ${response.authenticationException}") - assertNull(response.versionMismatch(), s"Request failed due to unsupported version error ${response.versionMismatch}") - assertFalse(response.wasDisconnected(), "Request failed because broker is not available") - } - val group_id = new AtomicInteger(1) def next_group_name(): String = s"alter-truststore-${group_id.getAndIncrement()}" @@ -518,18 +480,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup TestUtils.incrementalAlterConfigs(servers, adminClients.head, props2, perBrokerConfig = true).all.get(15, TimeUnit.SECONDS) verifySslProduceConsume(sslProperties2, next_group_name()) waitForAuthenticationFailure(producerBuilder.keyStoreProps(sslProperties1)) - - if (!isKRaftTest()) { - val controller = servers.find(_.config.brokerId == TestUtils.waitUntilControllerElected(zkClient)).get.asInstanceOf[KafkaServer] - val controllerChannelManager = controller.kafkaController.controllerChannelManager - val brokerStateInfo: mutable.HashMap[Int, ControllerBrokerStateInfo] = - JTestUtils.fieldValue(controllerChannelManager, classOf[ControllerChannelManager], "brokerStateInfo") - brokerStateInfo(0).networkClient.disconnect("0") - TestUtils.createTopic(zkClient, "testtopic2", numPartitions, replicationFactor = numServers, servers) - - // validate that the brokerToController request works fine - verifyBrokerToControllerCall(controller) - } } @ParameterizedTest(name = 
TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @@ -631,19 +581,20 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) - @nowarn("cat=deprecation") // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details def testDefaultTopicConfig(quorum: String, groupProtocol: String): Unit = { val (producerThread, consumerThread) = startProduceConsume(retries = 0, groupProtocol) val props = new Properties - props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "4000") + val logIndexSizeMaxBytes = "100000" + val logRetentionMs = TimeUnit.DAYS.toMillis(1) + props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "1048576") props.put(ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG, TimeUnit.HOURS.toMillis(2).toString) props.put(ServerLogConfigs.LOG_ROLL_TIME_JITTER_MILLIS_CONFIG, TimeUnit.HOURS.toMillis(1).toString) - props.put(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG, "100000") + props.put(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG, logIndexSizeMaxBytes) props.put(ServerLogConfigs.LOG_FLUSH_INTERVAL_MESSAGES_CONFIG, "1000") props.put(ServerLogConfigs.LOG_FLUSH_INTERVAL_MS_CONFIG, "60000") props.put(ServerLogConfigs.LOG_RETENTION_BYTES_CONFIG, "10000000") - props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, TimeUnit.DAYS.toMillis(1).toString) + props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, logRetentionMs.toString) props.put(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "100000") props.put(ServerLogConfigs.LOG_INDEX_INTERVAL_BYTES_CONFIG, "10000") props.put(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP, TimeUnit.DAYS.toMillis(1).toString) @@ -656,15 +607,14 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup props.put(ServerConfigs.COMPRESSION_TYPE_CONFIG, "gzip") props.put(ServerLogConfigs.LOG_PRE_ALLOCATE_CONFIG, true.toString) props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString) - props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, "1000") props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, "1000") props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, "1000") - props.put(ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, "false") - reconfigureServers(props, perBrokerConfig = false, (ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "4000")) + reconfigureServers(props, perBrokerConfig = false, (ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "1048576")) // Verify that all broker defaults have been updated servers.foreach { server => props.forEach { (k, v) => + TestUtils.waitUntilTrue(() => server.config.originals.get(k) != null, "Configs not present") assertEquals(server.config.originals.get(k).toString, v, s"Not reconfigured $k") } } @@ -675,7 +625,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup "Config not updated in LogManager") val log = servers.head.logManager.getLog(new TopicPartition(topic, 0)).getOrElse(throw new IllegalStateException("Log not found")) - TestUtils.waitUntilTrue(() => log.config.segmentSize == 4000, "Existing topic config using defaults not updated") + TestUtils.waitUntilTrue(() => log.config.segmentSize == 1048576, "Existing topic config using defaults not updated") val KafkaConfigToLogConfigName: Map[String, String] = 
ServerTopicConfigSynonyms.TOPIC_CONFIG_SYNONYMS.asScala.map { case (k, v) => (v, k) } props.asScala.foreach { case (k, v) => @@ -698,14 +648,12 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup // Verify that we can alter subset of log configs props.clear() props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG, TimestampType.CREATE_TIME.toString) - props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, "1000") props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, "1000") props.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, "1000") reconfigureServers(props, perBrokerConfig = false, (ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG, TimestampType.CREATE_TIME.toString)) consumerThread.waitForMatchingRecords(record => record.timestampType == TimestampType.CREATE_TIME) // Verify that invalid configs are not applied val invalidProps = Map( - ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG -> "abc", // Invalid type ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG -> "abc", // Invalid type ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG -> "abc", // Invalid type ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG -> "invalid", // Invalid value @@ -727,8 +675,8 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertEquals(500000, servers.head.config.values.get(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG)) assertEquals(TimeUnit.DAYS.toMillis(2), servers.head.config.values.get(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) servers.tail.foreach { server => - assertEquals(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_DEFAULT, server.config.values.get(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG)) - assertEquals(1680000000L, server.config.values.get(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) + assertEquals(logIndexSizeMaxBytes.toInt, server.config.values.get(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG)) + assertEquals(logRetentionMs, server.config.values.get(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) } // Verify that produce/consume worked throughout this test without any retries in producer @@ -752,16 +700,12 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic, consumer")) - def testUncleanLeaderElectionEnable(groupProtocol: String): Unit = { - val controller = servers.find(_.config.brokerId == TestUtils.waitUntilControllerElected(zkClient)).get - val controllerId = controller.config.brokerId - + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionEnable(quorum: String, groupProtocol: String): Unit = { // Create a topic with two replicas on brokers other than the controller val topic = "testtopic2" - val assignment = Map(0 -> Seq((controllerId + 1) % servers.size, (controllerId + 2) % servers.size)) - TestUtils.createTopic(zkClient, topic, assignment, servers) + TestUtils.createTopicWithAdmin(adminClients.head, topic, servers, controllerServers, replicaAssignment = Map(0 -> Seq(0, 1))) val producer = ProducerBuilder().acks(1).build() val consumer = ConsumerBuilder("unclean-leader-test", groupProtocol).enableAutoCommit(false).topic(topic).build() @@ -796,7 +740,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup val 
newProps = new Properties newProps.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") TestUtils.incrementalAlterConfigs(servers, adminClients.head, newProps, perBrokerConfig = false).all.get - waitForConfigOnServer(controller, ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") + TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) // Verify that the old follower with missing records is elected as the new leader val (newLeader, elected) = TestUtils.computeUntilTrue(partitionInfo.leader)(leader => leader != null) @@ -814,9 +758,20 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup consumer.commitSync() } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic, consumer")) - def testThreadPoolResize(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testThreadPoolResize(quorum: String, groupProtocol: String): Unit = { + + // In kraft mode, the StripedReplicaPlacer#initialize includes some randomization, + // so the replica assignment is not deterministic. + // If a fetcher thread is not assigned any topic partition, it will not be created. + // Change the replica assignment to ensure that all fetcher threads are created. + TestUtils.deleteTopicWithAdmin(adminClients.head, topic, servers, controllerServers) + val replicaAssignment = Map( + 0 -> Seq(0, 1, 2), 1 -> Seq(1, 2, 0), 2 -> Seq(2, 1, 0), 3 -> Seq(0, 1, 2), 4 -> Seq(1, 2, 0), + 5 -> Seq(2, 1, 0), 6 -> Seq(0, 1, 2), 7 -> Seq(1, 2, 0), 8 -> Seq(2, 1, 0), 9 -> Seq(0, 1, 2)) + TestUtils.createTopicWithAdmin(adminClients.head, topic, servers, controllerServers, replicaAssignment = replicaAssignment) + val requestHandlerPrefix = "data-plane-kafka-request-handler-" val networkThreadPrefix = "data-plane-kafka-network-thread-" val fetcherThreadPrefix = "ReplicaFetcherThread-" @@ -893,7 +848,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup "", mayReceiveDuplicates = false) verifyThreadPoolResize(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG, config.numNetworkThreads, networkThreadPrefix, mayReceiveDuplicates = true) - verifyThreads("data-plane-kafka-socket-acceptor-", config.listeners.size) + verifyThreads("data-plane-kafka-socket-acceptor-", config.listeners.size, 1) verifyProcessorMetrics() verifyMarkPartitionsForTruncation() @@ -934,9 +889,13 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup // to obtain partition assignment private def verifyMarkPartitionsForTruncation(): Unit = { val leaderId = 0 - val partitions = (0 until numPartitions).map(i => new TopicPartition(topic, i)).filter { tp => - zkClient.getLeaderForPartition(tp).contains(leaderId) - } + val topicDescription = adminClients.head. + describeTopics(java.util.Arrays.asList(topic)). + allTopicNames(). + get(3, TimeUnit.MINUTES).get(topic) + val partitions = topicDescription.partitions().asScala. + filter(p => p.leader().id() == leaderId). 
+ map(p => new TopicPartition(topic, p.partition())) assertTrue(partitions.nonEmpty, s"Partitions not found with leader $leaderId") partitions.foreach { tp => (1 to 2).foreach { i => @@ -953,21 +912,24 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic, consumer")) - def testMetricsReporterUpdate(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMetricsReporterUpdate(quorum: String, groupProtocol: String): Unit = { // Add a new metrics reporter val newProps = new Properties newProps.put(TestMetricsReporter.PollingIntervalProp, "100") configureMetricsReporters(Seq(classOf[JmxReporter], classOf[TestMetricsReporter]), newProps) - val reporters = TestMetricsReporter.waitForReporters(servers.size) + val reporters = TestMetricsReporter.waitForReporters(servers.size + controllerServers.size) reporters.foreach { reporter => reporter.verifyState(reconfigureCount = 0, deleteCount = 0, pollingInterval = 100) assertFalse(reporter.kafkaMetrics.isEmpty, "No metrics found") - reporter.verifyMetricValue("request-total", "socket-server-metrics") + TestUtils.retry(30_000) { + reporter.verifyMetricValue("request-total", "socket-server-metrics") + } } - assertEquals(servers.map(_.config.brokerId).toSet, TestMetricsReporter.configuredBrokers.toSet) + assertEquals(Set(controllerServer.config.nodeId) ++ servers.map(_.config.brokerId), + TestMetricsReporter.configuredBrokers.toSet) // non-default value to trigger a new metric val clientId = "test-client-1" @@ -1001,7 +963,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup // Verify recreation of metrics reporter newProps.put(TestMetricsReporter.PollingIntervalProp, "2000") configureMetricsReporters(Seq(classOf[TestMetricsReporter]), newProps) - val newReporters = TestMetricsReporter.waitForReporters(servers.size) + val newReporters = TestMetricsReporter.waitForReporters(servers.size + controllerServers.size) newReporters.foreach(_.verifyState(reconfigureCount = 0, deleteCount = 0, pollingInterval = 2000)) // Verify that validation failure of metrics reporter fails reconfiguration and leaves config unchanged @@ -1041,160 +1003,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup stopAndVerifyProduceConsume(producerThread, consumerThread) } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic")) - // Modifying advertised listeners is not supported in KRaft - def testAdvertisedListenerUpdate(groupProtocol: String): Unit = { - val adminClient = adminClients.head - val externalAdminClient = createAdminClient(SecurityProtocol.SASL_SSL, SecureExternal) - - // Ensure connections are made to brokers before external listener is made inaccessible - describeConfig(externalAdminClient) - - // Update broker external listener to use invalid listener address - // any address other than localhost is sufficient to fail (either connection or host name verification failure) - val invalidHost = "192.168.0.1" - alterAdvertisedListener(adminClient, externalAdminClient, "localhost", invalidHost) - - def validateEndpointsInZooKeeper(server: KafkaServer, endpointMatcher: String => Boolean): Unit = { - val brokerInfo = zkClient.getBroker(server.config.brokerId) - assertTrue(brokerInfo.nonEmpty, "Broker not 
registered") - val endpoints = brokerInfo.get.endPoints.toString - assertTrue(endpointMatcher(endpoints), s"Endpoint update not saved $endpoints") - } - - // Verify that endpoints have been updated in ZK for all brokers - servers.foreach { server => - validateEndpointsInZooKeeper(server.asInstanceOf[KafkaServer], endpoints => endpoints.contains(invalidHost)) - } - - // Trigger session expiry and ensure that controller registers new advertised listener after expiry - val controllerEpoch = zkClient.getControllerEpoch - val controllerServer = servers(zkClient.getControllerId.getOrElse(throw new IllegalStateException("No controller"))).asInstanceOf[KafkaServer] - val controllerZkClient = controllerServer.zkClient - val sessionExpiringClient = createZooKeeperClientToTriggerSessionExpiry(controllerZkClient.currentZooKeeper) - sessionExpiringClient.close() - TestUtils.waitUntilTrue(() => zkClient.getControllerEpoch != controllerEpoch, - "Controller not re-elected after ZK session expiry") - TestUtils.retry(10000)(validateEndpointsInZooKeeper(controllerServer, endpoints => endpoints.contains(invalidHost))) - - // Verify that producer connections fail since advertised listener is invalid - val bootstrap = TestUtils.bootstrapServers(servers, new ListenerName(SecureExternal)) - .replaceAll(invalidHost, "localhost") // allow bootstrap connection to succeed - val producer1 = ProducerBuilder() - .trustStoreProps(sslProperties1) - .maxRetries(0) - .requestTimeoutMs(1000) - .deliveryTimeoutMs(1000) - .bootstrapServers(bootstrap) - .build() - - val future = producer1.send(new ProducerRecord(topic, "key", "value")) - assertTrue(assertThrows(classOf[ExecutionException], () => future.get(2, TimeUnit.SECONDS)) - .getCause.isInstanceOf[org.apache.kafka.common.errors.TimeoutException]) - - alterAdvertisedListener(adminClient, externalAdminClient, invalidHost, "localhost") - servers.foreach { server => - validateEndpointsInZooKeeper(server.asInstanceOf[KafkaServer], endpoints => !endpoints.contains(invalidHost)) - } - - // Verify that produce/consume work now - val topic2 = "testtopic2" - TestUtils.createTopic(zkClient, topic2, numPartitions, replicationFactor = numServers, servers) - val producer = ProducerBuilder().trustStoreProps(sslProperties1).maxRetries(0).build() - val consumer = ConsumerBuilder("group2", groupProtocol).trustStoreProps(sslProperties1).topic(topic2).build() - verifyProduceConsume(producer, consumer, 10, topic2) - - // Verify updating inter-broker listener - val props = new Properties - props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, SecureExternal) - val e = assertThrows(classOf[ExecutionException], () => reconfigureServers(props, perBrokerConfig = true, (ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, SecureExternal))) - assertTrue(e.getCause.isInstanceOf[InvalidRequestException], s"Unexpected exception ${e.getCause}") - servers.foreach(server => assertEquals(SecureInternal, server.config.interBrokerListenerName.value)) - } - - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic, consumer")) - @Disabled // Re-enable once we make it less flaky (KAFKA-6824) - def testAddRemoveSslListener(groupProtocol: String): Unit = { - verifyAddListener("SSL", SecurityProtocol.SSL, Seq.empty, groupProtocol) - - // Restart servers and check secret rotation - servers.foreach(_.shutdown()) - servers.foreach(_.awaitShutdown()) - adminClients.foreach(_.close()) - adminClients.clear() - - // All passwords are currently encoded with 
password.encoder.secret. Encode with password.encoder.old.secret - // and update ZK. When each server is started, it should decode using password.encoder.old.secret and update - // ZK with newly encoded values using password.encoder.secret. - servers.foreach { server => - val props = adminZkClient.fetchEntityConfig(ConfigType.BROKER, server.config.brokerId.toString) - val propsEncodedWithOldSecret = props.clone().asInstanceOf[Properties] - val config = server.config - val oldSecret = "old-dynamic-config-secret" - config.dynamicConfig.staticBrokerConfigs.put(PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_CONFIG, oldSecret) - val passwordConfigs = props.asScala.filter { case (k, _) => DynamicBrokerConfig.isPasswordConfig(k) } - assertTrue(passwordConfigs.nonEmpty, "Password configs not found") - val passwordDecoder = createPasswordEncoder(config, config.passwordEncoderSecret) - val passwordEncoder = createPasswordEncoder(config, Some(new Password(oldSecret))) - passwordConfigs.foreach { case (name, value) => - val decoded = passwordDecoder.decode(value).value - propsEncodedWithOldSecret.put(name, passwordEncoder.encode(new Password(decoded))) - } - val brokerId = server.config.brokerId - adminZkClient.changeBrokerConfig(Seq(brokerId), propsEncodedWithOldSecret) - val updatedProps = adminZkClient.fetchEntityConfig(ConfigType.BROKER, brokerId.toString) - passwordConfigs.foreach { case (name, value) => assertNotEquals(props.get(value), updatedProps.get(name)) } - - server.startup() - TestUtils.retry(10000) { - val newProps = adminZkClient.fetchEntityConfig(ConfigType.BROKER, brokerId.toString) - passwordConfigs.foreach { case (name, value) => - assertEquals(passwordDecoder.decode(value), passwordDecoder.decode(newProps.getProperty(name))) } - } - } - - verifyListener(SecurityProtocol.SSL, None, "add-ssl-listener-group2", groupProtocol) - createAdminClient(SecurityProtocol.SSL, SecureInternal) - verifyRemoveListener("SSL", SecurityProtocol.SSL, Seq.empty, groupProtocol) - } - - @ParameterizedTest(name = "{displayName}.groupProtocol={0}") - @CsvSource(Array("classic, consumer")) - def testAddRemoveSaslListeners(groupProtocol: String): Unit = { - createScramCredentials(adminClients.head, JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_PASSWORD) - createScramCredentials(adminClients.head, JaasTestUtils.KAFKA_SCRAM_ADMIN, JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD) - initializeKerberos() - // make sure each server's credential cache has all the created credentials - // (check after initializing Kerberos to minimize delays) - List(JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_ADMIN).foreach { scramUser => - servers.foreach { server => - ScramMechanism.values().filter(_ != ScramMechanism.UNKNOWN).foreach(mechanism => - TestUtils.waitUntilTrue(() => server.credentialProvider.credentialCache.cache( - mechanism.mechanismName(), classOf[ScramCredential]).get(scramUser) != null, - s"$mechanism credentials not created for $scramUser")) - }} - - //verifyAddListener("SASL_SSL", SecurityProtocol.SASL_SSL, Seq("SCRAM-SHA-512", "SCRAM-SHA-256", "PLAIN")) - verifyAddListener("SASL_PLAINTEXT", SecurityProtocol.SASL_PLAINTEXT, Seq("GSSAPI"), groupProtocol) - //verifyRemoveListener("SASL_SSL", SecurityProtocol.SASL_SSL, Seq("SCRAM-SHA-512", "SCRAM-SHA-256", "PLAIN")) - verifyRemoveListener("SASL_PLAINTEXT", SecurityProtocol.SASL_PLAINTEXT, Seq("GSSAPI"), groupProtocol) - - // Verify that a listener added to a subset of servers doesn't cause any issues - // when metadata is processed by the 
client. - addListener(servers.tail, "SCRAM_LISTENER", SecurityProtocol.SASL_PLAINTEXT, Seq("SCRAM-SHA-256")) - val bootstrap = TestUtils.bootstrapServers(servers.tail, new ListenerName("SCRAM_LISTENER")) - val producer = ProducerBuilder().bootstrapServers(bootstrap) - .securityProtocol(SecurityProtocol.SASL_PLAINTEXT) - .saslMechanism("SCRAM-SHA-256") - .maxRetries(1000) - .build() - val partitions = producer.partitionsFor(topic).asScala - assertEquals(0, partitions.count(p => p.leader != null && p.leader.id == servers.head.config.brokerId)) - assertTrue(partitions.exists(_.leader == null), "Did not find partitions with no leader") - } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testReconfigureRemovedListener(quorum: String, groupProtocol: String): Unit = { @@ -1224,57 +1032,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup s"failed to remove DataPlaneAcceptor. current: ${acceptors.map(_.endPoint.toString).mkString(",")}") } - private def addListener(servers: Seq[KafkaBroker], listenerName: String, securityProtocol: SecurityProtocol, - saslMechanisms: Seq[String]): Unit = { - val config = servers.head.config - val existingListenerCount = config.listeners.size - val listeners = config.listeners - .map(e => s"${e.listenerName.value}://${e.host}:${e.port}") - .mkString(",") + s",$listenerName://localhost:0" - val listenerMap = config.effectiveListenerSecurityProtocolMap - .map { case (name, protocol) => s"${name.value}:${protocol.name}" } - .mkString(",") + s",$listenerName:${securityProtocol.name}" - - val props = fetchBrokerConfigsFromZooKeeper(servers.head) - props.put(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, listenerMap) - securityProtocol match { - case SecurityProtocol.SSL => - addListenerPropsSsl(listenerName, props) - case SecurityProtocol.SASL_PLAINTEXT => - addListenerPropsSasl(listenerName, saslMechanisms, props) - case SecurityProtocol.SASL_SSL => - addListenerPropsSasl(listenerName, saslMechanisms, props) - addListenerPropsSsl(listenerName, props) - case SecurityProtocol.PLAINTEXT => // no additional props - } - - // Add a config to verify that configs whose types are not known are not returned by describeConfigs() - val unknownConfig = "some.config" - props.put(unknownConfig, "some.config.value") - - TestUtils.incrementalAlterConfigs(servers, adminClients.head, props, perBrokerConfig = true).all.get - - TestUtils.waitUntilTrue(() => servers.forall(server => server.config.listeners.size == existingListenerCount + 1), - "Listener config not updated") - TestUtils.waitUntilTrue(() => servers.forall(server => { - try { - server.socketServer.boundPort(new ListenerName(listenerName)) > 0 - } catch { - case _: Exception => false - } - }), "Listener not created") - - val brokerConfigs = describeConfig(adminClients.head, servers).entries.asScala - props.asScala.foreach { case (name, value) => - val entry = brokerConfigs.find(_.name == name).getOrElse(throw new IllegalArgumentException(s"Config not found $name")) - if (DynamicBrokerConfig.isPasswordConfig(name) || name == unknownConfig) - assertNull(entry.value, s"Password or unknown config returned $entry") - else - assertEquals(value, entry.value) - } - } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) 
@MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testTransactionVerificationEnable(quorum: String, groupProtocol: String): Unit = { @@ -1307,111 +1064,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup verifyConfiguration(true) } - private def verifyAddListener(listenerName: String, securityProtocol: SecurityProtocol, - saslMechanisms: Seq[String], - groupProtocol: String): Unit = { - addListener(servers, listenerName, securityProtocol, saslMechanisms) - TestUtils.waitUntilTrue(() => servers.forall(hasListenerMetric(_, listenerName)), - "Processors not started for new listener") - if (saslMechanisms.nonEmpty) - saslMechanisms.foreach { mechanism => - verifyListener(securityProtocol, Some(mechanism), s"add-listener-group-$securityProtocol-$mechanism", groupProtocol) - } - else - verifyListener(securityProtocol, None, s"add-listener-group-$securityProtocol", groupProtocol) - } - - private def verifyRemoveListener(listenerName: String, securityProtocol: SecurityProtocol, - saslMechanisms: Seq[String], - groupProtocol: String): Unit = { - val saslMechanism = if (saslMechanisms.isEmpty) "" else saslMechanisms.head - val producer1 = ProducerBuilder().listenerName(listenerName) - .securityProtocol(securityProtocol) - .saslMechanism(saslMechanism) - .maxRetries(1000) - .build() - val consumer1 = ConsumerBuilder(s"remove-listener-group-$securityProtocol", groupProtocol) - .listenerName(listenerName) - .securityProtocol(securityProtocol) - .saslMechanism(saslMechanism) - .autoOffsetReset("latest") - .build() - verifyProduceConsume(producer1, consumer1, numRecords = 10, topic) - - val config = servers.head.config - val existingListenerCount = config.listeners.size - val listeners = config.listeners - .filter(e => e.listenerName.value != securityProtocol.name) - .map(e => s"${e.listenerName.value}://${e.host}:${e.port}") - .mkString(",") - val listenerMap = config.effectiveListenerSecurityProtocolMap - .filter { case (listenerName, _) => listenerName.value != securityProtocol.name } - .map { case (listenerName, protocol) => s"${listenerName.value}:${protocol.name}" } - .mkString(",") - - val props = fetchBrokerConfigsFromZooKeeper(servers.head) - val deleteListenerProps = new Properties() - deleteListenerProps ++= props.asScala.filter(entry => entry._1.startsWith(listenerPrefix(listenerName))) - TestUtils.incrementalAlterConfigs(servers, adminClients.head, deleteListenerProps, perBrokerConfig = true, opType = OpType.DELETE).all.get - - props.clear() - props.put(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, listenerMap) - TestUtils.incrementalAlterConfigs(servers, adminClients.head, props, perBrokerConfig = true).all.get - - TestUtils.waitUntilTrue(() => servers.forall(server => server.config.listeners.size == existingListenerCount - 1), - "Listeners not updated") - // Wait until metrics of the listener have been removed to ensure that processors have been shutdown before - // verifying that connections to the removed listener fail. 
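For reference alongside the listener-removal helper being deleted above: it clears the dynamically set, listener-prefixed settings through the same incremental-alter path the test otherwise uses, only with OpType.DELETE instead of SET. A minimal sketch of that call shape follows; the broker id and config key are illustrative, not taken from this patch.

import java.util
import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry}
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.common.config.ConfigResource

object DynamicConfigCleanupSketch {
  // Revert one dynamically-set broker config back to its static/default value.
  def deleteDynamicBrokerConfig(admin: Admin, brokerId: String, configKey: String): Unit = {
    val resource = new ConfigResource(ConfigResource.Type.BROKER, brokerId)
    // For DELETE the entry value is ignored; only the key matters.
    val ops: util.Collection[AlterConfigOp] =
      util.List.of(new AlterConfigOp(new ConfigEntry(configKey, ""), OpType.DELETE))
    admin.incrementalAlterConfigs(util.Map.of(resource, ops)).all().get()
  }

  // Example (illustrative key): drop a per-listener keystore location that was set dynamically.
  // deleteDynamicBrokerConfig(admin, "0", "listener.name.external.ssl.keystore.location")
}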
- TestUtils.waitUntilTrue(() => !servers.exists(hasListenerMetric(_, listenerName)), - "Processors not shutdown for removed listener") - - // Test that connections using deleted listener don't work - val producerFuture = verifyConnectionFailure(producer1) - val consumerFuture = verifyConnectionFailure(consumer1) - - // Test that other listeners still work - val topic2 = "testtopic2" - TestUtils.createTopic(zkClient, topic2, numPartitions, replicationFactor = numServers, servers) - val producer2 = ProducerBuilder().trustStoreProps(sslProperties1).maxRetries(0).build() - val consumer2 = ConsumerBuilder(s"remove-listener-group2-$securityProtocol", groupProtocol) - .trustStoreProps(sslProperties1) - .topic(topic2) - .autoOffsetReset("latest") - .build() - verifyProduceConsume(producer2, consumer2, numRecords = 10, topic2) - - // Verify that producer/consumer using old listener don't work - verifyTimeout(producerFuture) - verifyTimeout(consumerFuture) - } - - private def verifyListener(securityProtocol: SecurityProtocol, saslMechanism: Option[String], groupId: String, groupProtocol: String): Unit = { - val mechanism = saslMechanism.getOrElse("") - val retries = 1000 // since it may take time for metadata to be updated on all brokers - val producer = ProducerBuilder().listenerName(securityProtocol.name) - .securityProtocol(securityProtocol) - .saslMechanism(mechanism) - .maxRetries(retries) - .build() - val consumer = ConsumerBuilder(groupId, groupProtocol) - .listenerName(securityProtocol.name) - .securityProtocol(securityProtocol) - .saslMechanism(mechanism) - .autoOffsetReset("latest") - .build() - verifyProduceConsume(producer, consumer, numRecords = 10, topic) - } - - private def hasListenerMetric(server: KafkaBroker, listenerName: String): Boolean = { - server.socketServer.metrics.metrics.keySet.asScala.exists(_.tags.get("listener") == listenerName) - } - - private def fetchBrokerConfigsFromZooKeeper(server: KafkaBroker): Properties = { - val props = adminZkClient.fetchEntityConfig(ConfigType.BROKER, server.config.brokerId.toString) - server.config.dynamicConfig.fromPersistentProps(props, perBrokerConfig = true) - } - private def awaitInitialPositions(consumer: Consumer[_, _]): Unit = { TestUtils.pollUntilTrue(consumer, () => !consumer.assignment.isEmpty, "Timed out while waiting for assignment") consumer.assignment.forEach(tp => consumer.position(tp)) @@ -1523,63 +1175,27 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup waitForConfig(s"$configPrefix$SSL_KEYSTORE_LOCATION_CONFIG", props.getProperty(SSL_KEYSTORE_LOCATION_CONFIG)) } - private def serverEndpoints(adminClient: Admin): String = { - val nodes = adminClient.describeCluster().nodes().get - nodes.asScala.map { node => - s"${node.host}:${node.port}" - }.mkString(",") - } - - @nowarn("cat=deprecation") - private def alterAdvertisedListener(adminClient: Admin, externalAdminClient: Admin, oldHost: String, newHost: String): Unit = { - val configs = servers.map { server => - val resource = new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString) - val newListeners = server.config.effectiveAdvertisedBrokerListeners.map { e => - if (e.listenerName.value == SecureExternal) - s"${e.listenerName.value}://$newHost:${server.boundPort(e.listenerName)}" - else - s"${e.listenerName.value}://${e.host}:${server.boundPort(e.listenerName)}" - }.mkString(",") - val configEntry = new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, newListeners) - (resource, new 
Config(Collections.singleton(configEntry))) - }.toMap.asJava - adminClient.alterConfigs(configs).all.get - servers.foreach { server => - TestUtils.retry(10000) { - val externalListener = server.config.effectiveAdvertisedBrokerListeners.find(_.listenerName.value == SecureExternal) - .getOrElse(throw new IllegalStateException("External listener not found")) - assertEquals(newHost, externalListener.host, "Config not updated") - } - } - val (endpoints, altered) = TestUtils.computeUntilTrue(serverEndpoints(externalAdminClient)) { endpoints => - !endpoints.contains(oldHost) - } - assertTrue(altered, s"Advertised listener update not propagated by controller: $endpoints") - } - - @nowarn("cat=deprecation") private def alterConfigsOnServer(server: KafkaBroker, props: Properties): Unit = { - val configEntries = props.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava - val newConfig = new Config(configEntries) - val configs = Map(new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString) -> newConfig).asJava - adminClients.head.alterConfigs(configs).all.get +val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new ConfigEntry(k, v), OpType.SET) }.toList.asJava + val alterConfigs = new java.util.HashMap[ConfigResource, java.util.Collection[AlterConfigOp]]() + alterConfigs.put(new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString), configEntries) + adminClients.head.incrementalAlterConfigs(alterConfigs) props.asScala.foreach { case (k, v) => waitForConfigOnServer(server, k, v) } } - @nowarn("cat=deprecation") private def alterConfigs(servers: Seq[KafkaBroker], adminClient: Admin, props: Properties, perBrokerConfig: Boolean): AlterConfigsResult = { - val configEntries = props.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava - val newConfig = new Config(configEntries) + val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new ConfigEntry(k, v), OpType.SET) }.toList.asJava val configs = if (perBrokerConfig) { - servers.map { server => - val resource = new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString) - (resource, newConfig) - }.toMap.asJava + val alterConfigs = new java.util.HashMap[ConfigResource, java.util.Collection[AlterConfigOp]]() + servers.foreach(server => alterConfigs.put(new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString), configEntries)) + alterConfigs } else { - Map(new ConfigResource(ConfigResource.Type.BROKER, "") -> newConfig).asJava + val alterConfigs = new java.util.HashMap[ConfigResource, java.util.Collection[AlterConfigOp]]() + alterConfigs.put(new ConfigResource(ConfigResource.Type.BROKER, ""), configEntries) + alterConfigs } - adminClient.alterConfigs(configs) + adminClient.incrementalAlterConfigs(configs) } private def reconfigureServers(newProps: Properties, perBrokerConfig: Boolean, aPropToVerify: (String, String), expectFailure: Boolean = false): Unit = { @@ -1588,8 +1204,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup val oldProps = servers.head.config.values.asScala.filter { case (k, _) => newProps.containsKey(k) } val brokerResources = if (perBrokerConfig) servers.map(server => new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString)) - else + else { Seq(new ConfigResource(ConfigResource.Type.BROKER, "")) + } brokerResources.foreach { brokerResource => val exception = assertThrows(classOf[ExecutionException], () => 
alterResult.values.get(brokerResource).get) assertEquals(classOf[InvalidRequestException], exception.getCause.getClass) @@ -1610,49 +1227,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup private def listenerPrefix(name: String): String = new ListenerName(name).configPrefix - private def configureDynamicKeystoreInZooKeeper(kafkaConfig: KafkaConfig, sslProperties: Properties): Unit = { - val externalListenerPrefix = listenerPrefix(SecureExternal) - val sslStoreProps = new Properties - sslStoreProps ++= securityProps(sslProperties, KEYSTORE_PROPS, externalListenerPrefix) - sslStoreProps.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, kafkaConfig.passwordEncoderSecret.map(_.value).orNull) - zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path) - - val entityType = ConfigType.BROKER - val entityName = kafkaConfig.brokerId.toString - - val passwordConfigs = sslStoreProps.asScala.keySet.filter(DynamicBrokerConfig.isPasswordConfig) - val passwordEncoder = createPasswordEncoder(kafkaConfig, kafkaConfig.passwordEncoderSecret) - - if (passwordConfigs.nonEmpty) { - passwordConfigs.foreach { configName => - val encodedValue = passwordEncoder.encode(new Password(sslStoreProps.getProperty(configName))) - sslStoreProps.setProperty(configName, encodedValue) - } - } - sslStoreProps.remove(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG) - adminZkClient.changeConfigs(entityType, entityName, sslStoreProps) - - val brokerProps = adminZkClient.fetchEntityConfig("brokers", kafkaConfig.brokerId.toString) - assertEquals(4, brokerProps.size) - assertEquals(sslProperties.get(SSL_KEYSTORE_TYPE_CONFIG), - brokerProps.getProperty(s"$externalListenerPrefix$SSL_KEYSTORE_TYPE_CONFIG")) - assertEquals(sslProperties.get(SSL_KEYSTORE_LOCATION_CONFIG), - brokerProps.getProperty(s"$externalListenerPrefix$SSL_KEYSTORE_LOCATION_CONFIG")) - assertEquals(sslProperties.get(SSL_KEYSTORE_PASSWORD_CONFIG), - passwordEncoder.decode(brokerProps.getProperty(s"$externalListenerPrefix$SSL_KEYSTORE_PASSWORD_CONFIG"))) - assertEquals(sslProperties.get(SSL_KEY_PASSWORD_CONFIG), - passwordEncoder.decode(brokerProps.getProperty(s"$externalListenerPrefix$SSL_KEY_PASSWORD_CONFIG"))) - } - - private def createPasswordEncoder(config: KafkaConfig, secret: Option[Password]): PasswordEncoder = { - val encoderSecret = secret.getOrElse(throw new IllegalStateException("Password encoder secret not configured")) - PasswordEncoder.encrypting(encoderSecret, - config.passwordEncoderKeyFactoryAlgorithm, - config.passwordEncoderCipherAlgorithm, - config.passwordEncoderKeyLength, - config.passwordEncoderIterations) - } - private def waitForConfig(propName: String, propValue: String, maxWaitMs: Long = 10000): Unit = { servers.foreach { server => waitForConfigOnServer(server, propName, propValue, maxWaitMs) } } @@ -1668,6 +1242,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup val reporterStr = reporters.map(_.getName).mkString(",") props.put(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, reporterStr) reconfigureServers(props, perBrokerConfig, (MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, reporterStr)) + TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) } private def invalidSslConfigs: Properties = { @@ -1719,34 +1294,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertFalse(consumerThread.outOfOrder, "Some messages received out of order") } - private def verifyConnectionFailure(producer: 
KafkaProducer[String, String]): Future[_] = { - val executor = Executors.newSingleThreadExecutor - executors += executor - val future = executor.submit(new Runnable() { - def run(): Unit = { - producer.send(new ProducerRecord(topic, "key", "value")).get - } - }) - verifyTimeout(future) - future - } - - private def verifyConnectionFailure(consumer: Consumer[String, String]): Future[_] = { - val executor = Executors.newSingleThreadExecutor - executors += executor - val future = executor.submit(new Runnable() { - def run(): Unit = { - consumer.commitSync() - } - }) - verifyTimeout(future) - future - } - - private def verifyTimeout(future: Future[_]): Unit = { - assertThrows(classOf[TimeoutException], () => future.get(100, TimeUnit.MILLISECONDS)) - } - private def configValueAsString(value: Any): String = { value match { case password: Password => password.value @@ -1755,23 +1302,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } } - private def addListenerPropsSsl(listenerName: String, props: Properties): Unit = { - props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(listenerName)) - props ++= securityProps(sslProperties1, TRUSTSTORE_PROPS, listenerPrefix(listenerName)) - } - - private def addListenerPropsSasl(listener: String, mechanisms: Seq[String], props: Properties): Unit = { - val listenerName = new ListenerName(listener) - val prefix = listenerName.configPrefix - props.put(prefix + BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, mechanisms.mkString(",")) - props.put(prefix + SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka") - mechanisms.foreach { mechanism => - val jaasSection = jaasSections(Seq(mechanism), None, KafkaSasl, "").head - val jaasConfig = jaasSection.getModules.get(0).toString - props.put(listenerName.saslMechanismConfigPrefix(mechanism) + SaslConfigs.SASL_JAAS_CONFIG, jaasConfig) - } - } - private def alterConfigsUsingConfigCommand(props: Properties): Unit = { val propsFile = tempPropertiesFile(clientProps(SecurityProtocol.SSL)) @@ -1820,7 +1350,8 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup private var _retries = Int.MaxValue private var _acks = -1 private var _requestTimeoutMs = 30000 - private var _deliveryTimeoutMs = 30000 + private val defaultLingerMs = 5; + private var _deliveryTimeoutMs = 30000 + defaultLingerMs def maxRetries(retries: Int): ProducerBuilder = { _retries = retries; this } def acks(acks: Int): ProducerBuilder = { _acks = acks; this } @@ -2035,7 +1566,7 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close val matchingMetrics = kafkaMetrics.filter(metric => metric.metricName.name == name && metric.metricName.group == group) assertTrue(matchingMetrics.nonEmpty, "Metric not found") val total = matchingMetrics.foldLeft(0.0)((total, metric) => total + metric.metricValue.asInstanceOf[Double]) - assertTrue(total > 0.0, "Invalid metric value") + assertTrue(total > 0.0, "Invalid metric value " + total + " for name " + name + " , group " + group) } } diff --git a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala index 8a8013921dbef..a9961c7c48225 100644 --- a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala @@ -52,7 +52,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { 
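A note on the ProducerBuilder change above (_deliveryTimeoutMs = 30000 + defaultLingerMs): KafkaProducer rejects an explicitly configured delivery.timeout.ms that is smaller than linger.ms + request.timeout.ms, which is presumably why the delivery timeout grows by the assumed 5 ms linger. The fragment below only illustrates that constraint; the values mirror the builder defaults and are not new configuration introduced by the patch.

import java.util.Properties
import org.apache.kafka.clients.producer.ProducerConfig

val producerProps = new Properties()
producerProps.put(ProducerConfig.LINGER_MS_CONFIG, "5")
producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000")
// delivery.timeout.ms must be >= linger.ms + request.timeout.ms (30005 here),
// otherwise constructing the producer fails with a ConfigException.
producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, "30005")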
} override def generateConfigs: collection.Seq[KafkaConfig] = { - TestUtils.createBrokerConfigs(numNodes, zkConnectOrNull, enableControlledShutdown = false, enableFetchFromFollower = true) + TestUtils.createBrokerConfigs(numNodes, enableControlledShutdown = false, enableFetchFromFollower = true) .map(KafkaConfig.fromProps(_, overridingProps)) } diff --git a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala index c19dffd6b865a..667b552333670 100644 --- a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala +++ b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala @@ -23,7 +23,7 @@ import java.time.Duration import java.util.{Collections, Properties} import java.util.concurrent.{CountDownLatch, Executors, TimeUnit} import javax.security.auth.login.LoginContext -import kafka.api.{Both, IntegrationTestHarness, SaslSetup} +import kafka.api.{IntegrationTestHarness, SaslSetup} import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.common.TopicPartition @@ -38,9 +38,9 @@ import org.apache.kafka.common.security.kerberos.KerberosLogin import org.apache.kafka.common.utils.{LogContext, MockTime} import org.apache.kafka.network.SocketServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import scala.jdk.CollectionConverters._ @@ -64,12 +64,12 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { TestableKerberosLogin.reset() - startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), Both)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism))) serverConfig.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "required") serverConfig.put(SocketServerConfigs.FAILED_AUTHENTICATION_DELAY_MS_CONFIG, failedAuthenticationDelayMs.toString) super.setUp(testInfo) serverAddr = new InetSocketAddress("localhost", - servers.head.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT))) + brokers.head.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT))) clientConfig.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name) clientConfig.put(SaslConfigs.SASL_MECHANISM, kafkaClientSaslMechanism) @@ -77,7 +77,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { clientConfig.put(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG, "5000") // create the test topic with all the brokers as replicas - createTopic(topic, 2, brokerCount) + createTopic(topic, 2, brokerCount, listenerName = listenerName, adminClientConfig = adminClientConfig) } @AfterEach @@ -92,15 +92,16 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * Tests that Kerberos replay error `Request is a replay (34)` is not handled as an authentication exception * since replay detection used to detect DoS attacks may occasionally reject valid concurrent requests. 
*/ - @Test - def testRequestIsAReplay(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testRequestIsAReplay(quorum: String): Unit = { val successfulAuthsPerThread = 10 val futures = (0 until numThreads).map(_ => executor.submit(new Runnable { override def run(): Unit = verifyRetriableFailuresDuringAuthentication(successfulAuthsPerThread) })) futures.foreach(_.get(60, TimeUnit.SECONDS)) - assertEquals(0, TestUtils.totalMetricValue(servers.head, "failed-authentication-total")) - val successfulAuths = TestUtils.totalMetricValue(servers.head, "successful-authentication-total") + assertEquals(0, TestUtils.totalMetricValue(brokers.head, "failed-authentication-total")) + val successfulAuths = TestUtils.totalMetricValue(brokers.head, "successful-authentication-total") assertTrue(successfulAuths > successfulAuthsPerThread * numThreads, "Too few authentications: " + successfulAuths) } @@ -109,8 +110,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * are able to connect after the second re-login. Verifies that logout is performed only once * since duplicate logouts without successful login results in NPE from Java 9 onwards. */ - @Test - def testLoginFailure(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLoginFailure(quorum: String): Unit = { val selector = createSelectorWithRelogin() try { val login = TestableKerberosLogin.instance @@ -132,8 +134,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * is performed when credentials are unavailable between logout and login, we handle it as a * transient error and not an authentication failure so that clients may retry. */ - @Test - def testReLogin(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReLogin(quorum: String): Unit = { val selector = createSelectorWithRelogin() try { val login = TestableKerberosLogin.instance @@ -163,8 +166,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * Tests that Kerberos error `Server not found in Kerberos database (7)` is handled * as a fatal authentication failure. */ - @Test - def testServerNotFoundInKerberosDatabase(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testServerNotFoundInKerberosDatabase(quorum: String): Unit = { val jaasConfig = clientConfig.getProperty(SaslConfigs.SASL_JAAS_CONFIG) val invalidServiceConfig = jaasConfig.replace("serviceName=\"kafka\"", "serviceName=\"invalid-service\"") clientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, invalidServiceConfig) @@ -177,7 +181,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * is thrown immediately, and is not affected by connection.failed.authentication.delay.ms. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testServerAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { // Setup client with a non-existent service principal, so that server authentication fails on the client val clientLoginContext = jaasClientLoginModule(kafkaClientSaslMechanism, Some("another-kafka-service")) @@ -256,8 +260,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { private def createSelector(): Selector = { val channelBuilder = ChannelBuilders.clientChannelBuilder(securityProtocol, - JaasContext.Type.CLIENT, new TestSecurityConfig(clientConfig), null, kafkaClientSaslMechanism, - time, true, new LogContext()) + JaasContext.Type.CLIENT, new TestSecurityConfig(clientConfig), null, kafkaClientSaslMechanism, time, new LogContext()) NetworkTestUtils.createSelector(channelBuilder, time) } @@ -266,7 +269,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { val config = new TestSecurityConfig(clientConfig) val jaasContexts = Collections.singletonMap("GSSAPI", JaasContext.loadClientContext(config.values())) val channelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, - null, false, kafkaClientSaslMechanism, true, null, null, null, time, new LogContext(), + null, false, kafkaClientSaslMechanism, null, null, null, time, new LogContext(), _ => org.apache.kafka.test.TestUtils.defaultApiVersionsResponse(ListenerType.ZK_BROKER)) { override protected def defaultLoginClass(): Class[_ <: Login] = classOf[TestableKerberosLogin] } diff --git a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala index 21ae6c379bd16..f9b7950f64f20 100644 --- a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala +++ b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala @@ -22,7 +22,6 @@ import kafka.network.SocketServer import kafka.server.IntegrationTestUtils.connectAndReceive import org.apache.kafka.common.test.{KafkaClusterTestKit, TestKitNodes} import kafka.utils.TestUtils -import org.apache.commons.io.FileUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin._ import org.apache.kafka.common.acl.{AclBinding, AclBindingFilter} @@ -31,7 +30,6 @@ import org.apache.kafka.common.config.ConfigResource.Type import org.apache.kafka.common.errors.{InvalidPartitionsException, PolicyViolationException, UnsupportedVersionException} import org.apache.kafka.common.message.DescribeClusterRequestData import org.apache.kafka.common.metadata.{ConfigRecord, FeatureLevelRecord} -import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.quota.ClientQuotaAlteration.Op @@ -58,12 +56,11 @@ import org.slf4j.LoggerFactory import java.io.File import java.nio.charset.StandardCharsets -import java.nio.file.{FileSystems, Files, Path} +import java.nio.file.{FileSystems, Files, Path, Paths} import java.{lang, util} import java.util.concurrent.{CompletableFuture, CompletionStage, ExecutionException, TimeUnit} import java.util.concurrent.atomic.AtomicInteger import java.util.{Collections, Optional, 
OptionalLong, Properties} -import scala.annotation.nowarn import scala.collection.{Seq, mutable} import scala.concurrent.duration.{FiniteDuration, MILLISECONDS, SECONDS} import scala.jdk.CollectionConverters._ @@ -123,8 +120,6 @@ class KRaftClusterTest { val config = controller.sharedServer.controllerConfig.props config.asInstanceOf[util.HashMap[String,String]].put(SocketServerConfigs.LISTENERS_CONFIG, s"CONTROLLER://localhost:$port") controller.sharedServer.controllerConfig.updateCurrentConfig(new KafkaConfig(config)) - // metrics will be set to null when closing a controller, so we should recreate it for testing - controller.sharedServer.metrics = new Metrics() // restart controller controller.startup() @@ -448,7 +443,7 @@ class KRaftClusterTest { "metadata from testkit", assertThrows(classOf[RuntimeException], () => { new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). - setBootstrapMetadataVersion(MetadataVersion.IBP_2_7_IV0). + setBootstrapMetadataVersion(MetadataVersion.IBP_3_0_IV1). setNumBrokerNodes(1). setNumControllerNodes(1).build()).build() }).getMessage) @@ -792,83 +787,6 @@ class KRaftClusterTest { } } - @nowarn("cat=deprecation") // Suppress warnings about using legacy alterConfigs - def legacyAlter( - admin: Admin, - resources: Map[ConfigResource, Seq[ConfigEntry]] - ): Seq[ApiError] = { - val configs = new util.HashMap[ConfigResource, Config]() - resources.foreach { - case (resource, entries) => configs.put(resource, new Config(entries.asJava)) - } - val values = admin.alterConfigs(configs).values() - resources.map { - case (resource, _) => try { - values.get(resource).get() - ApiError.NONE - } catch { - case e: ExecutionException => ApiError.fromThrowable(e.getCause) - case t: Throwable => ApiError.fromThrowable(t) - } - }.toSeq - } - - @Test - def testLegacyAlterConfigs(): Unit = { - val cluster = new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(4). - setNumControllerNodes(3).build()).build() - try { - cluster.format() - cluster.startup() - cluster.waitForReadyBrokers() - val admin = Admin.create(cluster.clientProperties()) - try { - val defaultBroker = new ConfigResource(Type.BROKER, "") - - assertEquals(Seq(ApiError.NONE), legacyAlter(admin, Map(defaultBroker -> Seq( - new ConfigEntry("log.roll.ms", "1234567"), - new ConfigEntry("max.connections.per.ip", "6"))))) - - validateConfigs(admin, Map(defaultBroker -> Seq( - ("log.roll.ms", "1234567"), - ("max.connections.per.ip", "6"))), exhaustive = true) - - assertEquals(Seq(ApiError.NONE), legacyAlter(admin, Map(defaultBroker -> Seq( - new ConfigEntry("log.roll.ms", "1234567"))))) - - // Since max.connections.per.ip was left out of the previous legacyAlter, it is removed. 
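For context on the removed testLegacyAlterConfigs above: the deprecated alterConfigs path replaced the entire dynamic config of a resource, so a key omitted from a later request was implicitly cleared, which is exactly what the removed max.connections.per.ip assertion exercised. With incrementalAlterConfigs a key is only cleared by an explicit DELETE op, sketched below; the helper name and the empty broker id (the cluster-wide default resource) are illustrative only.

import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry}
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.common.config.ConfigResource

def clearClusterDefaultConfig(admin: Admin, key: String): Unit = {
  // The entry value is ignored for DELETE; only the key matters.
  val ops = new java.util.ArrayList[AlterConfigOp]()
  ops.add(new AlterConfigOp(new ConfigEntry(key, ""), OpType.DELETE))
  val configs = new java.util.HashMap[ConfigResource, java.util.Collection[AlterConfigOp]]()
  configs.put(new ConfigResource(ConfigResource.Type.BROKER, ""), ops)
  admin.incrementalAlterConfigs(configs).all().get()
}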
- validateConfigs(admin, Map(defaultBroker -> Seq( - ("log.roll.ms", "1234567"))), exhaustive = true) - - admin.createTopics(util.Arrays.asList( - new NewTopic("foo", 2, 3.toShort), - new NewTopic("bar", 2, 3.toShort))).all().get() - TestUtils.waitForAllPartitionsMetadata(cluster.brokers().values().asScala.toSeq, "foo", 2) - TestUtils.waitForAllPartitionsMetadata(cluster.brokers().values().asScala.toSeq, "bar", 2) - assertEquals(Seq(ApiError.NONE, - new ApiError(INVALID_CONFIG, "Unknown topic config name: not.a.real.topic.config"), - new ApiError(UNKNOWN_TOPIC_OR_PARTITION, "The topic 'baz' does not exist.")), - legacyAlter(admin, Map( - new ConfigResource(Type.TOPIC, "foo") -> Seq( - new ConfigEntry("segment.jitter.ms", "345")), - new ConfigResource(Type.TOPIC, "bar") -> Seq( - new ConfigEntry("not.a.real.topic.config", "789")), - new ConfigResource(Type.TOPIC, "baz") -> Seq( - new ConfigEntry("segment.jitter.ms", "678"))))) - - validateConfigs(admin, Map(new ConfigResource(Type.TOPIC, "foo") -> Seq( - ("segment.jitter.ms", "345")))) - - } finally { - admin.close() - } - } finally { - cluster.close() - } - } - @ParameterizedTest @ValueSource(strings = Array("3.7-IV0", "3.7-IV2")) def testCreatePartitions(metadataVersionString: String): Unit = { @@ -1516,6 +1434,15 @@ class KRaftClusterTest { } } + def copyDirectory(src: String, dest: String): Unit = { + Files.walk(Paths.get(src)).forEach(p => { + val out = Paths.get(dest, p.toString().substring(src.length())) + if (!p.toString().equals(src)) { + Files.copy(p, out); + } + }); + } + @Test def testAbandonedFutureReplicaRecovered_mainReplicaInOnlineLogDir(): Unit = { val cluster = new KafkaClusterTestKit.Builder( @@ -1557,7 +1484,8 @@ class KRaftClusterTest { val parentDir = log.parentDir val targetParentDir = broker0.config.logDirs.filter(_ != parentDir).head val targetDirFile = new File(targetParentDir, log.dir.getName) - FileUtils.copyDirectory(log.dir, targetDirFile) + targetDirFile.mkdir() + copyDirectory(log.dir.toString(), targetDirFile.toString()) assertTrue(targetDirFile.exists()) // Rename original log to a future diff --git a/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala index 08f11f7e80a40..522e70732f4fa 100644 --- a/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala @@ -40,42 +40,54 @@ class MetadataVersionIntegrationTest { new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_4_IV0) )) def testBasicMetadataVersionUpgrade(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.createAdminClient() - val describeResult = admin.describeFeatures() - val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - assertEquals(ff.minVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) - assertEquals(ff.maxVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) + val admin = clusterInstance.admin() + try { + val describeResult = admin.describeFeatures() + val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + assertEquals(ff.minVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) + assertEquals(ff.maxVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) - // Update to new version - val 
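A note on the copyDirectory helper added to KRaftClusterTest above: Files.walk returns a Stream that holds directory handles until it is closed, so wrapping the traversal in a resource block avoids leaking them. The variant below is a sketch under that assumption, not the code in the patch; like the test, it expects the destination directory to exist already (the test creates it with mkdir() just before the call).

import java.nio.file.{Files, Paths}
import scala.util.Using

def copyDirectoryClosingStream(src: String, dest: String): Unit = {
  val srcPath = Paths.get(src)
  Using.resource(Files.walk(srcPath)) { stream =>
    stream.forEach { p =>
      if (!p.equals(srcPath)) {
        // Pre-order traversal, so parent directories are copied before their children.
        Files.copy(p, Paths.get(dest, srcPath.relativize(p).toString))
      }
    }
  }
}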
updateVersion = MetadataVersion.IBP_3_5_IV1.featureLevel.shortValue - val updateResult = admin.updateFeatures( - Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) - updateResult.all().get() + // Update to new version + val updateVersion = MetadataVersion.IBP_3_5_IV1.featureLevel.shortValue + val updateResult = admin.updateFeatures( + Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) + updateResult.all().get() - // Verify that new version is visible on broker - TestUtils.waitUntilTrue(() => { - val describeResult2 = admin.describeFeatures() - val ff2 = describeResult2.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - ff2.minVersionLevel() == updateVersion && ff2.maxVersionLevel() == updateVersion - }, "Never saw metadata.version increase on broker") + // Verify that new version is visible on broker + TestUtils.waitUntilTrue(() => { + val describeResult2 = admin.describeFeatures() + val ff2 = describeResult2.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + ff2.minVersionLevel() == updateVersion && ff2.maxVersionLevel() == updateVersion + }, "Never saw metadata.version increase on broker") + } finally { + admin.close() + } } @ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_3_IV0) def testUpgradeSameVersion(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.createAdminClient() - val updateVersion = MetadataVersion.IBP_3_3_IV0.featureLevel.shortValue - val updateResult = admin.updateFeatures( - Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) - updateResult.all().get() + val admin = clusterInstance.admin() + try { + val updateVersion = MetadataVersion.IBP_3_3_IV0.featureLevel.shortValue + val updateResult = admin.updateFeatures( + Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) + updateResult.all().get() + } finally { + admin.close() + } } @ClusterTest(types = Array(Type.KRAFT)) def testDefaultIsLatestVersion(clusterInstance: ClusterInstance): Unit = { - val admin = clusterInstance.createAdminClient() - val describeResult = admin.describeFeatures() - val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) - assertEquals(ff.minVersionLevel(), MetadataVersion.latestTesting().featureLevel(), - "If this test fails, check the default MetadataVersion in the @ClusterTest annotation") - assertEquals(ff.maxVersionLevel(), MetadataVersion.latestTesting().featureLevel()) + val admin = clusterInstance.admin() + try { + val describeResult = admin.describeFeatures() + val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + assertEquals(ff.minVersionLevel(), MetadataVersion.latestTesting().featureLevel(), + "If this test fails, check the default MetadataVersion in the @ClusterTest annotation") + assertEquals(ff.maxVersionLevel(), MetadataVersion.latestTesting().featureLevel()) + } finally { + admin.close() + } } } diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala index de07e044fc08d..8bf7860d151a0 100644 --- 
a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala @@ -32,8 +32,7 @@ class MultipleListenersWithAdditionalJaasContextTest extends MultipleListenersWi override def staticJaasSections: Seq[JaasSection] = { val (serverKeytabFile, _) = maybeCreateEmptyKeytabFiles() - JaasTestUtils.zkSections.asScala :+ - JaasTestUtils.kafkaServerSection("secure_external.KafkaServer", kafkaServerSaslMechanisms(SecureExternal).asJava, Some(serverKeytabFile).toJava) + Seq(JaasTestUtils.kafkaServerSection("secure_external.KafkaServer", kafkaServerSaslMechanisms(SecureExternal).asJava, Some(serverKeytabFile).toJava)) } override protected def dynamicJaasSections: Properties = { diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithDefaultJaasContextTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithDefaultJaasContextTest.scala index e78bb73fe6b3a..6334b6421a673 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithDefaultJaasContextTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithDefaultJaasContextTest.scala @@ -19,13 +19,12 @@ package kafka.server import java.util.Properties import scala.collection.Seq -import kafka.api.Both import kafka.security.JaasTestUtils.JaasSection class MultipleListenersWithDefaultJaasContextTest extends MultipleListenersWithSameSecurityProtocolBaseTest { override def staticJaasSections: Seq[JaasSection] = - jaasSections(kafkaServerSaslMechanisms.values.flatten.toSeq, Some(kafkaClientSaslMechanism), Both) + jaasSections(kafkaServerSaslMechanisms.values.flatten.toSeq, Some(kafkaClientSaslMechanism)) override protected def dynamicJaasSections: Properties = new Properties diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala index 16d1454604ecc..db2d570b761b7 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala @@ -25,13 +25,14 @@ import kafka.security.JaasTestUtils import kafka.security.JaasTestUtils.JaasSection import kafka.utils.{TestInfoUtils, TestUtils} import kafka.utils.Implicits._ +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic} import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.config.{SaslConfigs, SslConfigs} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.network.{ConnectionMode, ListenerName} -import org.apache.kafka.server.config.{ReplicationConfigs, ZkConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.network.SocketServerConfigs import org.junit.jupiter.api.Assertions.assertEquals @@ -57,7 +58,8 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT import MultipleListenersWithSameSecurityProtocolBaseTest._ private val trustStoreFile = TestUtils.tempFile("truststore", ".jks") - private val servers = new 
ArrayBuffer[KafkaServer] + private val servers = new ArrayBuffer[KafkaBroker] + private var admin: Admin = null private val producers = mutable.Map[ClientMetadata, KafkaProducer[Array[Byte], Array[Byte]]]() private val consumers = mutable.Map[ClientMetadata, Consumer[Array[Byte], Array[Byte]]]() @@ -78,14 +80,15 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT (0 until numServers).foreach { brokerId => - val props = TestUtils.createBrokerConfig(brokerId, zkConnect, trustStoreFile = Some(trustStoreFile)) + val props = TestUtils.createBrokerConfig(brokerId, trustStoreFile = Some(trustStoreFile)) // Ensure that we can support multiple listeners per security protocol and multiple security protocols props.put(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $Internal://localhost:0, " + s"$SecureExternal://localhost:0, $External://localhost:0") + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, props.get(SocketServerConfigs.LISTENERS_CONFIG)) props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"$Internal:PLAINTEXT, $SecureInternal:SASL_SSL," + - s"$External:PLAINTEXT, $SecureExternal:SASL_SSL") + s"$External:PLAINTEXT, $SecureExternal:SASL_SSL, CONTROLLER:PLAINTEXT") + props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, Internal) - props.put(ZkConfigs.ZK_ENABLE_SECURE_ACLS_CONFIG, "true") props.put(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, kafkaClientSaslMechanism) props.put(s"${new ListenerName(SecureInternal).configPrefix}${BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG}", kafkaServerSaslMechanisms(SecureInternal).mkString(",")) @@ -103,7 +106,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT } props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "invalid/file/path") - servers += TestUtils.createServer(KafkaConfig.fromProps(props)) + servers += createBroker(KafkaConfig.fromProps(props)) } servers.map(_.config).foreach { config => @@ -113,10 +116,20 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT s"Unexpected ${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} for broker ${config.brokerId}") } - TestUtils.createTopic(zkClient, Topic.GROUP_METADATA_TOPIC_NAME, GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_DEFAULT, - replicationFactor = 2, servers, servers.head.groupCoordinator.groupMetadataTopicConfigs) - - createScramCredentials(zkConnect, JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_PASSWORD) + val adminClientConfig = new java.util.HashMap[String, Object]() + adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, + TestUtils.bootstrapServers(servers, new ListenerName(Internal))) + admin = Admin.create(adminClientConfig) + val newTopic = new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, + GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_DEFAULT, 2.toShort) + val newTopicConfigs = new java.util.HashMap[String, String]() + servers.head.groupCoordinator.groupMetadataTopicConfigs.entrySet(). 
+ forEach(e => newTopicConfigs.put(e.getKey.toString, e.getValue.toString)) + newTopic.configs(newTopicConfigs) + admin.createTopics(java.util.Arrays.asList(newTopic)).all().get(5, TimeUnit.MINUTES) + + createScramCredentials(admin, JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_PASSWORD) + TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) servers.head.config.listeners.foreach { endPoint => val listenerName = endPoint.listenerName @@ -130,7 +143,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT def addProducerConsumer(listenerName: ListenerName, mechanism: String, saslProps: Option[Properties]): Unit = { val topic = s"${listenerName.value}${producers.size}" - TestUtils.createTopic(zkClient, topic, 2, 2, servers) + admin.createTopics(java.util.Arrays.asList(new NewTopic(topic, 2, 2.toShort))).all().get(5, TimeUnit.MINUTES) val clientMetadata = ClientMetadata(listenerName, mechanism, topic) producers(clientMetadata) = TestUtils.createProducer(bootstrapServers, acks = -1, @@ -153,6 +166,8 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT @AfterEach override def tearDown(): Unit = { + Option(admin).foreach(_.close()) + admin = null producers.values.foreach(_.close()) consumers.values.foreach(_.close()) TestUtils.shutdownServers(servers) @@ -165,7 +180,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT * with acks=-1 to ensure that replication is also working. */ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) def testProduceConsume(quorum: String, groupProtocol: String): Unit = { producers.foreach { case (clientMetadata, producer) => val producerRecords = (1 to 10).map(i => new ProducerRecord(clientMetadata.topic, s"key$i".getBytes, diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index 68004876c7791..72b76cd152d25 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -17,8 +17,6 @@ package kafka.server -import kafka.controller.ControllerEventManager - import java.io.File import java.net.InetSocketAddress import java.util @@ -26,7 +24,6 @@ import java.util.{Collections, Locale, Optional, OptionalInt, Properties, stream import java.util.concurrent.{CompletableFuture, TimeUnit} import javax.security.auth.login.Configuration import kafka.utils.{CoreUtils, Logging, TestInfoUtils, TestUtils} -import kafka.zk.{AdminZkClient, EmbeddedZookeeper, KafkaZkClient} import org.apache.kafka.clients.admin.AdminClientUnitTestEnv import org.apache.kafka.clients.consumer.GroupProtocol import org.apache.kafka.clients.consumer.internals.AbstractCoordinator @@ -34,7 +31,7 @@ import org.apache.kafka.clients.producer.KafkaProducer import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.security.JaasUtils import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.utils.{Exit, Time, Utils} +import org.apache.kafka.common.utils.{Exit, Time} import org.apache.kafka.common.{DirectoryId, Uuid} import 
org.apache.kafka.metadata.properties.MetaPropertiesEnsemble.VerificationFlag.{REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion} @@ -43,12 +40,10 @@ import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.queue.KafkaEventQueue import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.{ClientMetricsManager, ServerSocketFactory} -import org.apache.kafka.server.common.{MetadataVersion, TransactionVersion} +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, MetadataVersion, TransactionVersion} import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.{FaultHandler, MockFaultHandler} import org.apache.kafka.server.util.timer.SystemTimer -import org.apache.zookeeper.client.ZKClientConfig -import org.apache.zookeeper.{WatchedEvent, Watcher, ZooKeeper} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterAll, AfterEach, BeforeAll, BeforeEach, Tag, TestInfo} import org.junit.jupiter.params.provider.Arguments @@ -69,30 +64,6 @@ trait QuorumImplementation { def shutdown(): Unit } -class ZooKeeperQuorumImplementation( - val zookeeper: EmbeddedZookeeper, - val zkConnect: String, - val zkClient: KafkaZkClient, - val adminZkClient: AdminZkClient, - val log: Logging -) extends QuorumImplementation { - override def createBroker( - config: KafkaConfig, - time: Time, - startup: Boolean, - threadNamePrefix: Option[String], - ): KafkaBroker = { - val server = new KafkaServer(config, time, threadNamePrefix, false) - if (startup) server.startup() - server - } - - override def shutdown(): Unit = { - Utils.closeQuietly(zkClient, "zk client") - CoreUtils.swallow(zookeeper.shutdown(), log) - } -} - class KRaftQuorumImplementation( val controllerServer: ControllerServer, val faultHandlerFactory: FaultHandlerFactory, @@ -172,11 +143,6 @@ class QuorumTestHarnessFaultHandlerFactory( @Tag("integration") abstract class QuorumTestHarness extends Logging { - val zkConnectionTimeout = 10000 - val zkSessionTimeout = 15000 // Allows us to avoid ZK session expiration due to GC up to 2/3 * 15000ms = 10 secs - val zkMaxInFlightRequests = Int.MaxValue - - protected def zkAclsEnabled: Option[Boolean] = None /** * When in KRaft mode, the security protocol to use for the controller listener. @@ -193,14 +159,6 @@ abstract class QuorumTestHarness extends Logging { private var testInfo: TestInfo = _ protected var implementation: QuorumImplementation = _ - def isKRaftTest(): Boolean = { - TestInfoUtils.isKRaft(testInfo) - } - - def isZkMigrationTest(): Boolean = { - TestInfoUtils.isZkMigrationTest(testInfo) - } - def isShareGroupTest(): Boolean = { TestInfoUtils.isShareGroupTest(testInfo) } @@ -218,53 +176,11 @@ abstract class QuorumTestHarness extends Logging { gp.get } - def checkIsZKTest(): Unit = { - if (isKRaftTest()) { - throw new RuntimeException("This function can't be accessed when running the test " + - "in KRaft mode. ZooKeeper mode is required.") - } - } - - def checkIsKRaftTest(): Unit = { - if (!isKRaftTest()) { - throw new RuntimeException("This function can't be accessed when running the test " + - "in ZooKeeper mode. 
KRaft mode is required.") - } - } - - private def asZk(): ZooKeeperQuorumImplementation = { - checkIsZKTest() - implementation.asInstanceOf[ZooKeeperQuorumImplementation] - } - - private def asKRaft(): KRaftQuorumImplementation = { - checkIsKRaftTest() - implementation.asInstanceOf[KRaftQuorumImplementation] - } - - def zookeeper: EmbeddedZookeeper = asZk().zookeeper - - def zkClient: KafkaZkClient = asZk().zkClient - - def zkClientOrNull: KafkaZkClient = if (isKRaftTest()) null else asZk().zkClient - - def adminZkClient: AdminZkClient = asZk().adminZkClient - - def zkPort: Int = asZk().zookeeper.port - - def zkConnect: String = s"127.0.0.1:$zkPort" - - def zkConnectOrNull: String = if (isKRaftTest()) null else zkConnect + private def asKRaft(): KRaftQuorumImplementation = implementation.asInstanceOf[KRaftQuorumImplementation] def controllerServer: ControllerServer = asKRaft().controllerServer - def controllerServers: Seq[ControllerServer] = { - if (isKRaftTest()) { - Seq(asKRaft().controllerServer) - } else { - Seq() - } - } + def controllerServers: Seq[ControllerServer] = Seq(asKRaft().controllerServer) val faultHandlerFactory = new QuorumTestHarnessFaultHandlerFactory(new MockFaultHandler("quorumTestHarnessFaultHandler")) @@ -301,13 +217,8 @@ abstract class QuorumTestHarness extends Logging { val name = testInfo.getTestMethod.toScala .map(_.toString) .getOrElse("[unspecified]") - if (TestInfoUtils.isKRaft(testInfo)) { - info(s"Running KRAFT test $name") - implementation = newKRaftQuorum(testInfo) - } else { - info(s"Running ZK test $name") - implementation = newZooKeeperQuorum() - } + info(s"Running KRAFT test $name") + implementation = newKRaftQuorum(testInfo) } def createBroker( @@ -319,8 +230,6 @@ abstract class QuorumTestHarness extends Logging { implementation.createBroker(config, time, startup, threadNamePrefix) } - def shutdownZooKeeper(): Unit = asZk().shutdown() - def shutdownKRaftController(): Unit = { // Note that the RaftManager instance is left running; it will be shut down in tearDown() val kRaftQuorumImplementation = asKRaft() @@ -380,6 +289,12 @@ abstract class QuorumTestHarness extends Logging { } else TransactionVersion.TV_1.featureLevel() formatter.setFeatureLevel(TransactionVersion.FEATURE_NAME, transactionVersion) + val elrVersion = + if (TestInfoUtils.isEligibleLeaderReplicasV1Enabled(testInfo)) { + EligibleLeaderReplicasVersion.ELRV_1.featureLevel() + } else EligibleLeaderReplicasVersion.ELRV_0.featureLevel() + formatter.setFeatureLevel(EligibleLeaderReplicasVersion.FEATURE_NAME, elrVersion) + addFormatterSettings(formatter) formatter.run() val bootstrapMetadata = formatter.bootstrapMetadata() @@ -436,38 +351,6 @@ abstract class QuorumTestHarness extends Logging { ) } - private def newZooKeeperQuorum(): ZooKeeperQuorumImplementation = { - val zookeeper = new EmbeddedZookeeper() - var zkClient: KafkaZkClient = null - var adminZkClient: AdminZkClient = null - val zkConnect = s"127.0.0.1:${zookeeper.port}" - try { - zkClient = KafkaZkClient( - zkConnect, - zkAclsEnabled.getOrElse(JaasUtils.isZkSaslEnabled), - zkSessionTimeout, - zkConnectionTimeout, - zkMaxInFlightRequests, - Time.SYSTEM, - name = "ZooKeeperTestHarness", - new ZKClientConfig, - enableEntityConfigControllerCheck = false) - adminZkClient = new AdminZkClient(zkClient) - } catch { - case t: Throwable => - CoreUtils.swallow(zookeeper.shutdown(), this) - Utils.closeQuietly(zkClient, "zk client") - throw t - } - new ZooKeeperQuorumImplementation( - zookeeper, - zkConnect, - zkClient, - adminZkClient, - this 
- ) - } - @AfterEach def tearDown(): Unit = { if (implementation != null) { @@ -480,22 +363,9 @@ abstract class QuorumTestHarness extends Logging { Configuration.setConfiguration(null) faultHandler.maybeRethrowFirstException() } - - // Trigger session expiry by reusing the session id in another client - def createZooKeeperClientToTriggerSessionExpiry(zooKeeper: ZooKeeper): ZooKeeper = { - val dummyWatcher = new Watcher { - override def process(event: WatchedEvent): Unit = {} - } - val anotherZkClient = new ZooKeeper(zkConnect, 1000, dummyWatcher, - zooKeeper.getSessionId, - zooKeeper.getSessionPasswd) - assertNull(anotherZkClient.exists("/nonexistent", false)) // Make sure new client works - anotherZkClient - } } object QuorumTestHarness { - val ZkClientEventThreadSuffix = "-EventThread" /** * Verify that a previous test that doesn't use QuorumTestHarness hasn't left behind an unexpected thread. @@ -521,11 +391,10 @@ object QuorumTestHarness { // when broker ports are reused (e.g. auto-create topics) as well as threads // which reset static JAAS configuration. val unexpectedThreadNames = Set( - ControllerEventManager.ControllerEventThreadName, + "controller-event-thread", KafkaProducer.NETWORK_THREAD_PREFIX, AdminClientUnitTestEnv.kafkaAdminClientNetworkThreadPrefix(), AbstractCoordinator.HEARTBEAT_THREAD_PREFIX, - QuorumTestHarness.ZkClientEventThreadSuffix, KafkaEventQueue.EVENT_HANDLER_THREAD_SUFFIX, ClientMetricsManager.CLIENT_METRICS_REAPER_THREAD_NAME, SystemTimer.SYSTEM_TIMER_THREAD_PREFIX, @@ -573,9 +442,5 @@ object QuorumTestHarness { // The following parameter groups are to *temporarily* avoid bugs with the CONSUMER group protocol Consumer // implementation that would otherwise cause tests to fail. - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_16176: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17960: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17961: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17964: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly + def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly } diff --git a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala index a8919605f2b36..ad47da549ff41 100644 --- a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala +++ b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala @@ -41,7 +41,7 @@ class RaftClusterSnapshotTest { val numberOfBrokers = 3 val numberOfControllers = 3 - Using( + Using.resource( new KafkaClusterTestKit .Builder( new TestKitNodes.Builder() @@ -74,7 +74,7 @@ class RaftClusterSnapshotTest { // For every controller and broker perform some sanity checks against the latest snapshot for ((_, raftManager) <- cluster.raftManagers().asScala) { - Using( + 
Using.resource( RecordsSnapshotReader.of( raftManager.replicatedLog.latestSnapshot.get(), new MetadataRecordSerde(), diff --git a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala index cca56f7aa96bc..285560d382686 100644 --- a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala +++ b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala @@ -124,10 +124,7 @@ final class KafkaMetadataLogTest { append(log, numberOfRecords, epoch) log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) - - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) assertEquals(0, log.readSnapshot(snapshotId).get().sizeInBytes()) } @@ -145,13 +142,24 @@ final class KafkaMetadataLogTest { // Test finding the first epoch log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, firstEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, firstEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(1, firstEpoch)).get().close() // Test finding the second epoch log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords, secondEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords - 1, secondEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, secondEpoch)).get().close() + } + + @Test + def testCreateSnapshotInMiddleOfBatch(): Unit = { + val numberOfRecords = 10 + val epoch = 1 + val log = buildMetadataLog(tempDir, mockTime) + + append(log, numberOfRecords, epoch) + log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) + + assertThrows( + classOf[IllegalArgumentException], + () => log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, epoch)) + ) } @Test @@ -209,11 +217,9 @@ final class KafkaMetadataLogTest { val snapshotId = new OffsetAndEpoch(numberOfRecords-4, epoch) val log = buildMetadataLog(tempDir, mockTime) - append(log, numberOfRecords, epoch) + (1 to numberOfRecords).foreach(_ => append(log, 1, epoch)) log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) // Simulate log cleanup that advances the LSO log.log.maybeIncrementLogStartOffset(snapshotId.offset - 1, LogStartOffsetIncrementReason.SegmentDeletion) @@ -246,10 +252,7 @@ final class KafkaMetadataLogTest { append(log, numberOfRecords, epoch) log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) - - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) assertThrows( classOf[IllegalArgumentException], @@ -290,15 +293,12 @@ final class KafkaMetadataLogTest { def testCreateExistingSnapshot(): Unit = { val numberOfRecords = 10 val epoch = 1 - val snapshotId = new OffsetAndEpoch(numberOfRecords - 1, epoch) + val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch) val log = buildMetadataLog(tempDir, mockTime) append(log, numberOfRecords, epoch) log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) - - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) assertEquals(Optional.empty(), log.createNewSnapshot(snapshotId), "Creating an existing snapshot should not do anything") @@ -342,10 +342,7 @@ final class KafkaMetadataLogTest { val sameEpochSnapshotId = new OffsetAndEpoch(2 * numberOfRecords, epoch) 
append(log, numberOfRecords, epoch) - - Using(log.createNewSnapshotUnchecked(sameEpochSnapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, sameEpochSnapshotId) assertTrue(log.truncateToLatestSnapshot()) assertEquals(sameEpochSnapshotId.offset, log.startOffset) @@ -356,10 +353,7 @@ final class KafkaMetadataLogTest { val greaterEpochSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch + 1) append(log, numberOfRecords, epoch) - - Using(log.createNewSnapshotUnchecked(greaterEpochSnapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, greaterEpochSnapshotId) assertTrue(log.truncateToLatestSnapshot()) assertEquals(greaterEpochSnapshotId.offset, log.startOffset) @@ -376,27 +370,18 @@ final class KafkaMetadataLogTest { append(log, 1, epoch - 1) val oldSnapshotId1 = new OffsetAndEpoch(1, epoch - 1) - Using(log.createNewSnapshotUnchecked(oldSnapshotId1).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId1) append(log, 1, epoch) val oldSnapshotId2 = new OffsetAndEpoch(2, epoch) - Using(log.createNewSnapshotUnchecked(oldSnapshotId2).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId2) append(log, numberOfRecords - 2, epoch) val oldSnapshotId3 = new OffsetAndEpoch(numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(oldSnapshotId3).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId3) val greaterSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch) - append(log, numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(greaterSnapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, greaterSnapshotId) assertNotEquals(log.earliestSnapshotId(), log.latestSnapshotId()) assertTrue(log.truncateToLatestSnapshot()) @@ -487,7 +472,7 @@ final class KafkaMetadataLogTest { metadataDir: File, snapshotId: OffsetAndEpoch ): Unit = { - Using(FileRawSnapshotWriter.create(metadataDir.toPath, snapshotId))(_.freeze()) + Using.resource(FileRawSnapshotWriter.create(metadataDir.toPath, snapshotId))(_.freeze()) } @Test @@ -499,18 +484,14 @@ final class KafkaMetadataLogTest { append(log, numberOfRecords, epoch) val olderEpochSnapshotId = new OffsetAndEpoch(numberOfRecords, epoch - 1) - Using(log.createNewSnapshotUnchecked(olderEpochSnapshotId).get()) { snapshot => - snapshot.freeze() - } - + createNewSnapshotUnckecked(log, olderEpochSnapshotId) assertFalse(log.truncateToLatestSnapshot()) append(log, numberOfRecords, epoch) + val olderOffsetSnapshotId = new OffsetAndEpoch(numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(olderOffsetSnapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, olderOffsetSnapshotId) assertFalse(log.truncateToLatestSnapshot()) } @@ -523,10 +504,7 @@ final class KafkaMetadataLogTest { val snapshotId = new OffsetAndEpoch(1, epoch) append(log, numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => - snapshot.freeze() - } - + createNewSnapshotUnckecked(log, snapshotId) log.close() // Create a few partial snapshots @@ -560,27 +538,19 @@ final class KafkaMetadataLogTest { append(log, 1, epoch - 1) val oldSnapshotId1 = new OffsetAndEpoch(1, epoch - 1) - Using(log.createNewSnapshotUnchecked(oldSnapshotId1).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId1) append(log, 1, epoch) val oldSnapshotId2 = new OffsetAndEpoch(2, epoch) 
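On the Using to Using.resource migration in this file and in RaftClusterSnapshotTest above: scala.util.Using.apply returns a Try, so an exception thrown while freezing a snapshot inside Using(...) { ... } is captured rather than propagated, and a test can silently pass over it unless the Try is inspected; Using.resource still closes the resource but rethrows. A small stand-alone sketch of the difference, using a throwaway AutoCloseable rather than anything from the patch:

import scala.util.{Try, Using}

final class ThrowingResource extends AutoCloseable {
  def freeze(): Unit = throw new IllegalStateException("freeze failed")
  override def close(): Unit = () // closed in both variants below
}

// Using.apply: the failure is wrapped in a Failure and nothing is thrown here.
val swallowed: Try[Unit] = Using(new ThrowingResource())(_.freeze())

// Using.resource: the resource is still closed, but the exception propagates,
// which is what makes a failing freeze() visible in a test.
try Using.resource(new ThrowingResource())(_.freeze())
catch { case e: IllegalStateException => println(s"propagated: ${e.getMessage}") }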
- Using(log.createNewSnapshotUnchecked(oldSnapshotId2).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId2) append(log, numberOfRecords - 2, epoch) val oldSnapshotId3 = new OffsetAndEpoch(numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(oldSnapshotId3).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, oldSnapshotId3) val greaterSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch) append(log, numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(greaterSnapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, greaterSnapshotId) log.close() @@ -609,9 +579,7 @@ final class KafkaMetadataLogTest { val snapshotId = new OffsetAndEpoch(numberOfRecords + 1, epoch + 1) append(log, numberOfRecords, epoch) - Using(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId) log.close() @@ -707,9 +675,7 @@ final class KafkaMetadataLogTest { log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch) - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) val resultOffsetAndEpoch = log.validateOffsetAndEpoch(numberOfRecords, epoch - 1) assertEquals(ValidOffsetAndEpoch.Kind.SNAPSHOT, resultOffsetAndEpoch.kind) @@ -727,9 +693,8 @@ final class KafkaMetadataLogTest { log.updateHighWatermark(new LogOffsetMetadata(offset)) val snapshotId = new OffsetAndEpoch(offset, epoch) - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) + // Simulate log cleaning advancing the LSO log.log.maybeIncrementLogStartOffset(offset, LogStartOffsetIncrementReason.SegmentDeletion) @@ -749,9 +714,7 @@ final class KafkaMetadataLogTest { log.updateHighWatermark(new LogOffsetMetadata(offset)) val snapshotId = new OffsetAndEpoch(offset, epoch) - Using(log.createNewSnapshot(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshot(log, snapshotId) val resultOffsetAndEpoch = log.validateOffsetAndEpoch(offset, epoch) assertEquals(ValidOffsetAndEpoch.Kind.VALID, resultOffsetAndEpoch.kind) @@ -766,9 +729,7 @@ final class KafkaMetadataLogTest { val log = buildMetadataLog(tempDir, mockTime) log.updateHighWatermark(new LogOffsetMetadata(offset)) val snapshotId = new OffsetAndEpoch(offset, 1) - Using(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId) log.truncateToLatestSnapshot() @@ -790,9 +751,7 @@ final class KafkaMetadataLogTest { val log = buildMetadataLog(tempDir, mockTime) log.updateHighWatermark(new LogOffsetMetadata(offset)) val snapshotId = new OffsetAndEpoch(offset, 1) - Using(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId) log.truncateToLatestSnapshot() append(log, numOfRecords, epoch = 3) @@ -872,16 +831,10 @@ final class KafkaMetadataLogTest { assertFalse(log.maybeClean(), "Should not clean since no snapshots exist") val snapshotId1 = new OffsetAndEpoch(1000, 1) - Using(log.createNewSnapshotUnchecked(snapshotId1).get()) { snapshot => - append(snapshot, 100) - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId1) val snapshotId2 = new OffsetAndEpoch(2000, 1) - Using(log.createNewSnapshotUnchecked(snapshotId2).get()) { snapshot => 
- append(snapshot, 100) - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId2) val lsoBefore = log.startOffset() assertTrue(log.maybeClean(), "Expected to clean since there was at least one snapshot") @@ -910,10 +863,7 @@ final class KafkaMetadataLogTest { for (offset <- Seq(100, 200, 300, 400, 500, 600)) { val snapshotId = new OffsetAndEpoch(offset, 1) - Using(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => - append(snapshot, 10) - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId) } assertEquals(6, log.snapshotCount()) @@ -945,14 +895,14 @@ final class KafkaMetadataLogTest { // Then generate two snapshots val snapshotId1 = new OffsetAndEpoch(1000, 1) - Using(log.createNewSnapshotUnchecked(snapshotId1).get()) { snapshot => + Using.resource(log.createNewSnapshotUnchecked(snapshotId1).get()) { snapshot => append(snapshot, 500) snapshot.freeze() } // Then generate a snapshot val snapshotId2 = new OffsetAndEpoch(2000, 1) - Using(log.createNewSnapshotUnchecked(snapshotId2).get()) { snapshot => + Using.resource(log.createNewSnapshotUnchecked(snapshotId2).get()) { snapshot => append(snapshot, 500) snapshot.freeze() } @@ -992,17 +942,14 @@ final class KafkaMetadataLogTest { log.log.logSegments.asScala.drop(1).head.baseOffset, 1 ) - Using(log.createNewSnapshotUnchecked(snapshotId1).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId1) + // Generate second snapshots that includes the second segment by using the base offset of the third segment val snapshotId2 = new OffsetAndEpoch( log.log.logSegments.asScala.drop(2).head.baseOffset, 1 ) - Using(log.createNewSnapshotUnchecked(snapshotId2).get()) { snapshot => - snapshot.freeze() - } + createNewSnapshotUnckecked(log, snapshotId2) // Sleep long enough to trigger a possible segment delete because of the default retention val defaultLogRetentionMs = LogConfig.DEFAULT_RETENTION_MS * 2 @@ -1074,6 +1021,18 @@ object KafkaMetadataLogTest { log } + def createNewSnapshot(log: KafkaMetadataLog, snapshotId: OffsetAndEpoch): Unit = { + Using.resource(log.createNewSnapshot(snapshotId).get()) { snapshot => + snapshot.freeze() + } + } + + def createNewSnapshotUnckecked(log: KafkaMetadataLog, snapshotId: OffsetAndEpoch): Unit = { + Using.resource(log.createNewSnapshotUnchecked(snapshotId).get()) { snapshot => + snapshot.freeze() + } + } + def append(log: ReplicatedLog, numberOfRecords: Int, epoch: Int): LogAppendInfo = { log.appendAsLeader( MemoryRecords.withRecords( @@ -1103,4 +1062,4 @@ object KafkaMetadataLogTest { } dir } -} +} \ No newline at end of file diff --git a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala index 6832b3a0cf7ce..25e32ca4d79fb 100644 --- a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala @@ -29,7 +29,7 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.LeaderAndIsrRequest import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse -import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.common.{KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.util.{MockScheduler, MockTime} import org.apache.kafka.storage.internals.checkpoint.LazyOffsetCheckpoints @@ -56,7 +56,7 @@ class 
LocalLeaderEndPointTest extends Logging { @BeforeEach def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(sourceBroker.id, TestUtils.MockZkConnect, port = sourceBroker.port) + val props = TestUtils.createBrokerConfig(sourceBroker.id, port = sourceBroker.port) val config = KafkaConfig.fromProps(props) val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val alterPartitionManager = mock(classOf[AlterPartitionManager]) @@ -69,7 +69,7 @@ class LocalLeaderEndPointTest extends Logging { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) val partition = replicaManager.createPartition(topicPartition) diff --git a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala index 7a58f4148266d..cb08a021e2c6d 100644 --- a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala +++ b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala @@ -40,7 +40,7 @@ import org.mockito.Mockito._ class NodeToControllerRequestThreadTest { private def controllerInfo(node: Option[Node]): ControllerInformation = { - ControllerInformation(node, new ListenerName(""), SecurityProtocol.PLAINTEXT, "", isZkController = true) + ControllerInformation(node, new ListenerName(""), SecurityProtocol.PLAINTEXT, "") } private def emptyControllerInfo: ControllerInformation = { @@ -50,7 +50,7 @@ class NodeToControllerRequestThreadTest { @Test def testRetryTimeoutWhileControllerNotAvailable(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val metadata = mock(classOf[Metadata]) val mockClient = new MockClient(time, metadata) val controllerNodeProvider = mock(classOf[ControllerNodeProvider]) @@ -59,7 +59,7 @@ class NodeToControllerRequestThreadTest { val retryTimeoutMs = 30000 val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs) testRequestThread.started = true @@ -84,7 +84,7 @@ class NodeToControllerRequestThreadTest { def testRequestsSent(): Unit = { // just a simple test that tests whether the request from 1 -> 2 is sent and the response callback is called val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val controllerId = 2 val metadata = mock(classOf[Metadata]) @@ -97,7 +97,7 @@ class NodeToControllerRequestThreadTest { val expectedResponse = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true mockClient.prepareResponse(expectedResponse) @@ 
-125,7 +125,7 @@ class NodeToControllerRequestThreadTest { def testControllerChanged(): Unit = { // in this test the current broker is 1, and the controller changes from 2 -> 3 then back: 3 -> 2 val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val oldControllerId = 1 val newControllerId = 2 @@ -141,7 +141,7 @@ class NodeToControllerRequestThreadTest { val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true @@ -173,7 +173,7 @@ class NodeToControllerRequestThreadTest { @Test def testNotController(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val oldControllerId = 1 val newControllerId = 2 @@ -193,7 +193,7 @@ class NodeToControllerRequestThreadTest { Collections.singletonMap("a", 2)) val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true,_ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true @@ -233,7 +233,7 @@ class NodeToControllerRequestThreadTest { @Test def testEnvelopeResponseWithNotControllerError(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val oldControllerId = 1 val newControllerId = 2 @@ -260,7 +260,7 @@ class NodeToControllerRequestThreadTest { val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true @@ -307,7 +307,7 @@ class NodeToControllerRequestThreadTest { @Test def testRetryTimeout(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val controllerId = 1 val metadata = mock(classOf[Metadata]) @@ -323,7 +323,7 @@ class NodeToControllerRequestThreadTest { Collections.singletonMap("a", Errors.NOT_CONTROLLER), Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true,_ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs) testRequestThread.started = true @@ -356,7 +356,7 @@ class NodeToControllerRequestThreadTest { @Test def testUnsupportedVersionHandling(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new 
KafkaConfig(TestUtils.createBrokerConfig(1)) val controllerId = 2 val metadata = mock(classOf[Metadata]) @@ -382,7 +382,7 @@ class NodeToControllerRequestThreadTest { mockClient.prepareUnsupportedVersionResponse(request => request.apiKey == ApiKeys.METADATA) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true @@ -394,7 +394,7 @@ class NodeToControllerRequestThreadTest { @Test def testAuthenticationExceptionHandling(): Unit = { val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val controllerId = 2 val metadata = mock(classOf[Metadata]) @@ -420,7 +420,7 @@ class NodeToControllerRequestThreadTest { mockClient.createPendingAuthenticationError(activeController, 50) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) testRequestThread.started = true @@ -434,7 +434,7 @@ class NodeToControllerRequestThreadTest { def testThreadNotStarted(): Unit = { // Make sure we throw if we enqueue anything while the thread is not running val time = new MockTime() - val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")) + val config = new KafkaConfig(TestUtils.createBrokerConfig(1)) val metadata = mock(classOf[Metadata]) val mockClient = new MockClient(time, metadata) @@ -443,7 +443,7 @@ class NodeToControllerRequestThreadTest { when(controllerNodeProvider.getControllerInfo()).thenReturn(emptyControllerInfo) val testRequestThread = new NodeToControllerRequestThread( - mockClient, isNetworkClientForZkController = true, _ => mockClient, new ManualMetadataUpdater(), + mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) val completionHandler = new TestControllerRequestCompletionHandler(None) diff --git a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala index 514ca0a1b28bf..0b1198a64c321 100644 --- a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala @@ -59,7 +59,7 @@ class RemoteLeaderEndPointTest { val time = new MockTime val logPrefix = "remote-leader-endpoint" val sourceBroker: BrokerEndPoint = new BrokerEndPoint(0, "localhost", 9092) - val props = TestUtils.createBrokerConfig(sourceBroker.id, TestUtils.MockZkConnect, port = sourceBroker.port) + val props = TestUtils.createBrokerConfig(sourceBroker.id, port = sourceBroker.port) val fetchSessionHandler = new FetchSessionHandler(new LogContext(logPrefix), sourceBroker.id) val config = KafkaConfig.fromProps(props) blockingSend = new MockBlockingSender(offsets = new util.HashMap[TopicPartition, EpochEndOffset](), diff --git a/core/src/test/scala/kafka/utils/TestInfoUtils.scala b/core/src/test/scala/kafka/utils/TestInfoUtils.scala index 61483760cf936..a74d1ca1612ff 100644 --- a/core/src/test/scala/kafka/utils/TestInfoUtils.scala +++ b/core/src/test/scala/kafka/utils/TestInfoUtils.scala @@ -31,28 +31,7 @@ class EmptyTestInfo extends 
TestInfo { } object TestInfoUtils { - def isKRaft(testInfo: TestInfo): Boolean = { - if (testInfo.getDisplayName.contains("quorum=")) { - if (testInfo.getDisplayName.contains("quorum=kraft")) { - true - } else if (testInfo.getDisplayName.contains("quorum=zk")) { - false - } else { - throw new RuntimeException(s"Unknown quorum value") - } - } else { - false - } - } - - def isZkMigrationTest(testInfo: TestInfo): Boolean = { - if (!isKRaft(testInfo)) { - false - } else { - testInfo.getDisplayName.contains("quorum=zkMigration") - } - } - + final val TestWithParameterizedQuorumAndGroupProtocolNames = "{displayName}.quorum={0}.groupProtocol={1}" def isShareGroupTest(testInfo: TestInfo): Boolean = { @@ -75,4 +54,12 @@ object TestInfoUtils { def isTransactionV2Enabled(testInfo: TestInfo): Boolean = { !testInfo.getDisplayName.contains("isTV2Enabled=false") } + + /** + * Returns whether eligible leader replicas version 1 is enabled. + * When no parameter is provided, the default returned is false. + */ + def isEligibleLeaderReplicasV1Enabled(testInfo: TestInfo): Boolean = { + testInfo.getDisplayName.contains("isELRV1Enabled=true") + } } diff --git a/core/src/test/scala/other/kafka.log4j.properties b/core/src/test/scala/other/kafka.log4j.properties deleted file mode 100644 index 1a53fd5d28618..0000000000000 --- a/core/src/test/scala/other/kafka.log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, KAFKA - -log4j.appender.KAFKA=kafka.log4j.KafkaAppender - -log4j.appender.KAFKA.Port=9092 -log4j.appender.KAFKA.Host=localhost -log4j.appender.KAFKA.Topic=test-logger -log4j.appender.KAFKA.Serializer=kafka.AppenderStringSerializer diff --git a/core/src/test/scala/other/kafka/TestLinearWriteSpeed.scala b/core/src/test/scala/other/kafka/TestLinearWriteSpeed.scala deleted file mode 100755 index adef2c6380986..0000000000000 --- a/core/src/test/scala/other/kafka/TestLinearWriteSpeed.scala +++ /dev/null @@ -1,254 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
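The TestInfoUtils changes above keep the convention that parameterized-test flags travel through the JUnit display name: the `@ParameterizedTest` name template embeds each argument, and helpers such as `isEligibleLeaderReplicasV1Enabled` only inspect `testInfo.getDisplayName`. A hypothetical test (not part of this patch; the template and flag name are illustrative) showing how that convention is consumed:

    import org.junit.jupiter.api.TestInfo
    import org.junit.jupiter.params.ParameterizedTest
    import org.junit.jupiter.params.provider.CsvSource

    class DisplayNameFlagExample {
      // The name template writes each parameter into the display name...
      @ParameterizedTest(name = "{displayName}.quorum={0}.isELRV1Enabled={1}")
      @CsvSource(Array("kraft,true", "kraft,false"))
      def testSomething(quorum: String, elrEnabled: Boolean, testInfo: TestInfo): Unit = {
        // ...so a helper only needs a substring check to recover the flag.
        val elrV1 = testInfo.getDisplayName.contains("isELRV1Enabled=true")
        assert(elrV1 == elrEnabled)
      }
    }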
- */ - -package kafka - -import java.io._ -import java.nio._ -import java.nio.channels._ -import java.nio.file.StandardOpenOption -import java.util.{Properties, Random} -import joptsimple._ -import kafka.log._ -import org.apache.kafka.common.compress.{Compression, GzipCompression, Lz4Compression, ZstdCompression} -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.record._ -import org.apache.kafka.common.utils.{Exit, Time, Utils} -import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.util.{KafkaScheduler, Scheduler} -import org.apache.kafka.server.util.CommandLineUtils -import org.apache.kafka.storage.internals.log.{LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} -import org.apache.kafka.storage.log.metrics.BrokerTopicStats - -import scala.math.max - -/** - * This test does linear writes using either a kafka log or a file and measures throughput and latency. - */ -object TestLinearWriteSpeed { - - def main(args: Array[String]): Unit = { - val parser = new OptionParser(false) - val dirOpt = parser.accepts("dir", "The directory to write to.") - .withRequiredArg - .describedAs("path") - .ofType(classOf[java.lang.String]) - .defaultsTo(System.getProperty("java.io.tmpdir")) - val bytesOpt = parser.accepts("bytes", "REQUIRED: The total number of bytes to write.") - .withRequiredArg - .describedAs("num_bytes") - .ofType(classOf[java.lang.Long]) - val sizeOpt = parser.accepts("size", "REQUIRED: The size of each write.") - .withRequiredArg - .describedAs("num_bytes") - .ofType(classOf[java.lang.Integer]) - val messageSizeOpt = parser.accepts("message-size", "REQUIRED: The size of each message in the message set.") - .withRequiredArg - .describedAs("num_bytes") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(1024) - val filesOpt = parser.accepts("files", "REQUIRED: The number of logs or files.") - .withRequiredArg - .describedAs("num_files") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(1) - val reportingIntervalOpt = parser.accepts("reporting-interval", "The number of ms between updates.") - .withRequiredArg - .describedAs("ms") - .ofType(classOf[java.lang.Long]) - .defaultsTo(1000L) - val maxThroughputOpt = parser.accepts("max-throughput-mb", "The maximum throughput.") - .withRequiredArg - .describedAs("mb") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(Integer.MAX_VALUE) - val flushIntervalOpt = parser.accepts("flush-interval", "The number of messages between flushes") - .withRequiredArg() - .describedAs("message_count") - .ofType(classOf[java.lang.Long]) - .defaultsTo(Long.MaxValue) - val compressionCodecOpt = parser.accepts("compression", "The compression codec to use") - .withRequiredArg - .describedAs("codec") - .ofType(classOf[java.lang.String]) - .defaultsTo(CompressionType.NONE.name) - val compressionLevelOpt = parser.accepts("level", "The compression level to use") - .withRequiredArg - .describedAs("level") - .ofType(classOf[java.lang.Integer]) - .defaultsTo(0) - val mmapOpt = parser.accepts("mmap", "Do writes to memory-mapped files.") - val channelOpt = parser.accepts("channel", "Do writes to file channels.") - val logOpt = parser.accepts("log", "Do writes to kafka logs.") - - val options = parser.parse(args : _*) - - CommandLineUtils.checkRequiredArgs(parser, options, bytesOpt, sizeOpt, filesOpt) - - var bytesToWrite = options.valueOf(bytesOpt).longValue - val bufferSize = options.valueOf(sizeOpt).intValue - val numFiles = options.valueOf(filesOpt).intValue - val reportingInterval 
= options.valueOf(reportingIntervalOpt).longValue - val dir = options.valueOf(dirOpt) - val maxThroughputBytes = options.valueOf(maxThroughputOpt).intValue * 1024L * 1024L - val buffer = ByteBuffer.allocate(bufferSize) - val messageSize = options.valueOf(messageSizeOpt).intValue - val flushInterval = options.valueOf(flushIntervalOpt).longValue - val compressionType = CompressionType.forName(options.valueOf(compressionCodecOpt)) - val compressionBuilder = Compression.of(compressionType) - val compressionLevel = options.valueOf(compressionLevelOpt) - compressionType match { - case CompressionType.GZIP => compressionBuilder.asInstanceOf[GzipCompression.Builder].level(compressionLevel) - case CompressionType.LZ4 => compressionBuilder.asInstanceOf[Lz4Compression.Builder].level(compressionLevel) - case CompressionType.ZSTD => compressionBuilder.asInstanceOf[ZstdCompression.Builder].level(compressionLevel) - case _ => //Noop - } - val compression = compressionBuilder.build() - val rand = new Random - rand.nextBytes(buffer.array) - val numMessages = bufferSize / (messageSize + Records.LOG_OVERHEAD) - val createTime = System.currentTimeMillis - val messageSet = { - val records = (0 until numMessages).map(_ => new SimpleRecord(createTime, null, new Array[Byte](messageSize))) - MemoryRecords.withRecords(compression, records: _*) - } - - val writables = new Array[Writable](numFiles) - val scheduler = new KafkaScheduler(1) - scheduler.startup() - for (i <- 0 until numFiles) { - if (options.has(mmapOpt)) { - writables(i) = new MmapWritable(new File(dir, "kafka-test-" + i + ".dat"), bytesToWrite / numFiles, buffer) - } else if (options.has(channelOpt)) { - writables(i) = new ChannelWritable(new File(dir, "kafka-test-" + i + ".dat"), buffer) - } else if (options.has(logOpt)) { - val segmentSize = rand.nextInt(512)*1024*1024 + 64*1024*1024 // vary size to avoid herd effect - val logProperties = new Properties() - logProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize: java.lang.Integer) - logProperties.put(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, flushInterval: java.lang.Long) - writables(i) = new LogWritable(new File(dir, "kafka-test-" + i), new LogConfig(logProperties), scheduler, messageSet) - } else { - System.err.println("Must specify what to write to with one of --log, --channel, or --mmap") - Exit.exit(1) - } - } - bytesToWrite = (bytesToWrite / numFiles) * numFiles - - println("%10s\t%10s\t%10s".format("mb_sec", "avg_latency", "max_latency")) - - val beginTest = System.nanoTime - var maxLatency = 0L - var totalLatency = 0L - var count = 0L - var written = 0L - var totalWritten = 0L - var lastReport = beginTest - while (totalWritten + bufferSize < bytesToWrite) { - val start = System.nanoTime - val writeSize = writables((count % numFiles).toInt.abs).write() - val elapsed = System.nanoTime - start - maxLatency = max(elapsed, maxLatency) - totalLatency += elapsed - written += writeSize - count += 1 - totalWritten += writeSize - if ((start - lastReport)/(1000.0*1000.0) > reportingInterval.doubleValue) { - val elapsedSecs = (start - lastReport) / (1000.0*1000.0*1000.0) - val mb = written / (1024.0*1024.0) - println("%10.3f\t%10.3f\t%10.3f".format(mb / elapsedSecs, totalLatency / count.toDouble / (1000.0*1000.0), maxLatency / (1000.0 * 1000.0))) - lastReport = start - written = 0 - maxLatency = 0L - totalLatency = 0L - } else if (written > maxThroughputBytes * (reportingInterval / 1000.0)) { - // if we have written enough, just sit out this reporting interval - val lastReportMs = lastReport / 
(1000*1000) - val now = System.nanoTime / (1000*1000) - val sleepMs = lastReportMs + reportingInterval - now - if (sleepMs > 0) - Thread.sleep(sleepMs) - } - } - val elapsedSecs = (System.nanoTime - beginTest) / (1000.0*1000.0*1000.0) - println((bytesToWrite / (1024.0 * 1024.0 * elapsedSecs)).toString + " MB per sec") - scheduler.shutdown() - } - - trait Writable { - def write(): Int - def close(): Unit - } - - class MmapWritable(val file: File, size: Long, val content: ByteBuffer) extends Writable { - file.deleteOnExit() - val raf = new RandomAccessFile(file, "rw") - raf.setLength(size) - val buffer = raf.getChannel.map(FileChannel.MapMode.READ_WRITE, 0, raf.length()) - def write(): Int = { - buffer.put(content) - content.rewind() - content.limit() - } - def close(): Unit = { - raf.close() - Utils.delete(file) - } - } - - class ChannelWritable(val file: File, val content: ByteBuffer) extends Writable { - file.deleteOnExit() - val channel: FileChannel = FileChannel.open(file.toPath, StandardOpenOption.CREATE, StandardOpenOption.READ, - StandardOpenOption.WRITE) - def write(): Int = { - channel.write(content) - content.rewind() - content.limit() - } - def close(): Unit = { - channel.close() - Utils.delete(file) - } - } - - class LogWritable(val dir: File, config: LogConfig, scheduler: Scheduler, val messages: MemoryRecords) extends Writable { - Utils.delete(dir) - val log: UnifiedLog = UnifiedLog( - dir = dir, - config = config, - logStartOffset = 0L, - recoveryPoint = 0L, - scheduler = scheduler, - brokerTopicStats = new BrokerTopicStats, - time = Time.SYSTEM, - maxTransactionTimeoutMs = 5 * 60 * 1000, - producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), - producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true - ) - def write(): Int = { - log.appendAsLeader(messages, leaderEpoch = 0) - messages.sizeInBytes - } - def close(): Unit = { - log.close() - Utils.delete(log.dir) - } - } - -} diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala index 07b52dd3c65a9..8834f6f36083c 100644 --- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala @@ -29,13 +29,10 @@ import org.apache.kafka.common.internals.FatalExitError import org.apache.kafka.common.utils.Exit import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.server.config.{KRaftConfigs, ZkConfigs} -import org.apache.kafka.server.config.ReplicationConfigs +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.junit.jupiter.api.Assertions._ -import scala.jdk.CollectionConverters._ - class KafkaConfigTest { @BeforeEach @@ -44,26 +41,86 @@ class KafkaConfigTest { @AfterEach def tearDown(): Unit = Exit.resetExitProcedure() + @Test + def testBrokerRequiredProperties(): Unit = { + val properties = new Properties() + assertBadConfigContainingMessage(properties, + "Missing required configuration \"process.roles\" which has no default value.") + + properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + assertBadConfigContainingMessage(properties, + "Missing required configuration \"node.id\" which has no default value.") + + 
properties.put(KRaftConfigs.NODE_ID_CONFIG, -1) + assertBadConfigContainingMessage(properties, + "Invalid value -1 for configuration node.id: Value must be at least 0") + + properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) + assertBadConfigContainingMessage(properties, + "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") + + properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + assertBadConfigContainingMessage(properties, + "requirement failed: controller.listener.names must contain at least one value when running KRaft with just the broker role") + + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + KafkaConfig.fromProps(properties) + } + + @Test + def testControllerRequiredProperties(): Unit = { + val properties = new Properties() + assertBadConfigContainingMessage(properties, + "Missing required configuration \"process.roles\" which has no default value.") + + properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + assertBadConfigContainingMessage(properties, + "Missing required configuration \"node.id\" which has no default value.") + + properties.put(KRaftConfigs.NODE_ID_CONFIG, -1) + assertBadConfigContainingMessage(properties, + "Invalid value -1 for configuration node.id: Value must be at least 0") + + properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) + assertBadConfigContainingMessage(properties, + "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") + + properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + assertBadConfigContainingMessage(properties, + "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") + + properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") + assertBadConfigContainingMessage(properties, + "No security protocol defined for listener CONTROLLER") + + properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT") + assertBadConfigContainingMessage(properties, + "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") + + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + KafkaConfig.fromProps(properties) + } + @Test def testGetKafkaConfigFromArgs(): Unit = { val propertiesFile = prepareDefaultConfig() // We should load configuration file without any arguments val config1 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile))) - assertEquals(1, config1.brokerId) + assertEquals(1, config1.nodeId) // We should be able to override given property on command line - val config2 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "broker.id=2"))) - assertEquals(2, config2.brokerId) + val config2 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "node.id=2"))) + assertEquals(2, config2.nodeId) // We should be also able to set completely new property val config3 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact"))) - assertEquals(1, config3.brokerId) + assertEquals(1, config3.nodeId) 
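The two new required-properties tests above walk the minimal KRaft configuration one missing key at a time: `process.roles`, then `node.id`, then a quorum bootstrap (or voters) setting, then `controller.listener.names` (plus listeners and a security-protocol mapping for the controller role). A sketch of the broker-role end state, using the public config names and an illustrative localhost endpoint, that the test expects `KafkaConfig.fromProps` to accept:

    import java.util.Properties
    import kafka.server.KafkaConfig

    object MinimalKRaftBrokerConfig extends App {
      val props = new Properties()
      props.put("process.roles", "broker")                               // first key the validation reports as missing
      props.put("node.id", "0")                                          // must be >= 0
      props.put("controller.quorum.bootstrap.servers", "localhost:9093") // or controller.quorum.voters
      props.put("controller.listener.names", "CONTROLLER")               // required when running the broker role
      val config = KafkaConfig.fromProps(props)
      println(config.nodeId)
    }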
assertEquals(util.Arrays.asList("compact"), config3.logCleanupPolicy) // We should be also able to set several properties - val config4 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact,delete", "--override", "broker.id=2"))) - assertEquals(2, config4.brokerId) + val config4 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact,delete", "--override", "node.id=2"))) + assertEquals(2, config4.nodeId) assertEquals(util.Arrays.asList("compact","delete"), config4.logCleanupPolicy) } @@ -158,16 +215,6 @@ class KafkaConfigTest { |must contain the set of bootstrap controllers or controller.quorum.voters must contain a |parseable set of controllers.""".stripMargin.replace("\n", " ") ) - - // Ensure that if neither process.roles nor controller.quorum.voters is populated, then an exception is thrown if zookeeper.connect is not defined - propertiesFile.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "") - assertBadConfigContainingMessage(propertiesFile, - "Missing required configuration `zookeeper.connect` which has no default value.") - - // Ensure that no exception is thrown once zookeeper.connect is defined (and we clear controller.listener.names) - propertiesFile.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - propertiesFile.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "") - KafkaConfig.fromProps(propertiesFile) } private def setListenerProps(props: Properties): Unit = { @@ -176,17 +223,13 @@ class KafkaConfigTest { val controllerListener = "SASL_PLAINTEXT://localhost:9092" val brokerListener = "PLAINTEXT://localhost:9093" - if (hasBrokerRole || hasControllerRole) { // KRaft - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SASL_PLAINTEXT") - if (hasBrokerRole && hasControllerRole) { - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"$brokerListener,$controllerListener") - } else if (hasControllerRole) { - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, controllerListener) - } else if (hasBrokerRole) { - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, brokerListener) - } - } else { // ZK-based - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, brokerListener) + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SASL_PLAINTEXT") + if (hasBrokerRole && hasControllerRole) { + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"$brokerListener,$controllerListener") + } else if (hasControllerRole) { + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, controllerListener) + } else if (hasBrokerRole) { + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, brokerListener) } if (!(hasControllerRole & !hasBrokerRole)) { // not controller-only props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "PLAINTEXT") @@ -235,118 +278,6 @@ class KafkaConfigTest { assertEquals(password, config.getPassword(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) } - private val booleanPropValueToSet = true - private val stringPropValueToSet = "foo" - private val passwordPropValueToSet = "ThePa$$word!" 
- private val listPropValueToSet = List("A", "B") - - @Test - def testZkSslClientEnable(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_CLIENT_ENABLE_CONFIG, "zookeeper.ssl.client.enable", - "zookeeper.client.secure", booleanPropValueToSet, config => Some(config.zkSslClientEnable), booleanPropValueToSet, Some(false)) - } - - @Test - def testZkSslKeyStoreLocation(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_KEY_STORE_LOCATION_CONFIG, "zookeeper.ssl.keystore.location", - "zookeeper.ssl.keyStore.location", stringPropValueToSet, config => config.zkSslKeyStoreLocation, stringPropValueToSet) - } - - @Test - def testZkSslTrustStoreLocation(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_TRUST_STORE_LOCATION_CONFIG, "zookeeper.ssl.truststore.location", - "zookeeper.ssl.trustStore.location", stringPropValueToSet, config => config.zkSslTrustStoreLocation, stringPropValueToSet) - } - - @Test - def testZookeeperKeyStorePassword(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_KEY_STORE_PASSWORD_CONFIG, "zookeeper.ssl.keystore.password", - "zookeeper.ssl.keyStore.password", passwordPropValueToSet, config => config.zkSslKeyStorePassword, new Password(passwordPropValueToSet)) - } - - @Test - def testZookeeperTrustStorePassword(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_TRUST_STORE_PASSWORD_CONFIG, "zookeeper.ssl.truststore.password", - "zookeeper.ssl.trustStore.password", passwordPropValueToSet, config => config.zkSslTrustStorePassword, new Password(passwordPropValueToSet)) - } - - @Test - def testZkSslKeyStoreType(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_KEY_STORE_TYPE_CONFIG, "zookeeper.ssl.keystore.type", - "zookeeper.ssl.keyStore.type", stringPropValueToSet, config => config.zkSslKeyStoreType, stringPropValueToSet) - } - - @Test - def testZkSslTrustStoreType(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_TRUST_STORE_TYPE_CONFIG, "zookeeper.ssl.truststore.type", - "zookeeper.ssl.trustStore.type", stringPropValueToSet, config => config.zkSslTrustStoreType, stringPropValueToSet) - } - - @Test - def testZkSslProtocol(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_PROTOCOL_CONFIG, "zookeeper.ssl.protocol", - "zookeeper.ssl.protocol", stringPropValueToSet, config => Some(config.ZkSslProtocol), stringPropValueToSet, Some("TLSv1.2")) - } - - @Test - def testZkSslEnabledProtocols(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_ENABLED_PROTOCOLS_CONFIG, "zookeeper.ssl.enabled.protocols", - "zookeeper.ssl.enabledProtocols", listPropValueToSet.mkString(","), config => config.ZkSslEnabledProtocols, listPropValueToSet.asJava) - } - - @Test - def testZkSslCipherSuites(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_CIPHER_SUITES_CONFIG, "zookeeper.ssl.cipher.suites", - "zookeeper.ssl.ciphersuites", listPropValueToSet.mkString(","), config => config.ZkSslCipherSuites, listPropValueToSet.asJava) - } - - @Test - def testZkSslEndpointIdentificationAlgorithm(): Unit = { - // this property is different than the others - // because the system property values and the Kafka property values don't match - val kafkaPropName = ZkConfigs.ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG - assertEquals("zookeeper.ssl.endpoint.identification.algorithm", kafkaPropName) - val sysProp = "zookeeper.ssl.hostnameVerification" - val expectedDefaultValue = "HTTPS" - val propertiesFile = prepareDefaultConfig() - // first make sure there is the correct default value - val emptyConfig = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile))) - assertNull(emptyConfig.originals.get(kafkaPropName)) // doesn't appear in the originals - 
assertEquals(expectedDefaultValue, emptyConfig.values.get(kafkaPropName)) // but default value appears in the values - assertEquals(expectedDefaultValue, emptyConfig.ZkSslEndpointIdentificationAlgorithm) // and has the correct default value - // next set system property alone - Map("true" -> "HTTPS", "false" -> "").foreach { case (sysPropValue, expected) => - try { - System.setProperty(sysProp, sysPropValue) - val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile))) - assertNull(config.originals.get(kafkaPropName)) // doesn't appear in the originals - assertEquals(expectedDefaultValue, config.values.get(kafkaPropName)) // default value appears in the values - assertEquals(expected, config.ZkSslEndpointIdentificationAlgorithm) // system property impacts the ultimate value of the property - } finally { - System.clearProperty(sysProp) - } - } - // finally set Kafka config alone - List("https", "").foreach(expected => { - val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", s"$kafkaPropName=$expected"))) - assertEquals(expected, config.originals.get(kafkaPropName)) // appears in the originals - assertEquals(expected, config.values.get(kafkaPropName)) // appears in the values - assertEquals(expected, config.ZkSslEndpointIdentificationAlgorithm) // is the ultimate value - }) - } - - @Test - def testZkSslCrlEnable(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_CRL_ENABLE_CONFIG, "zookeeper.ssl.crl.enable", - "zookeeper.ssl.crl", booleanPropValueToSet, config => Some(config.ZkSslCrlEnable), booleanPropValueToSet, Some(false)) - } - - @Test - def testZkSslOcspEnable(): Unit = { - testZkConfig(ZkConfigs.ZK_SSL_OCSP_ENABLE_CONFIG, "zookeeper.ssl.ocsp.enable", - "zookeeper.ssl.ocsp", booleanPropValueToSet, config => Some(config.ZkSslOcspEnable), booleanPropValueToSet, Some(false)) - } - @Test def testConnectionsMaxReauthMsDefault(): Unit = { val propertiesFile = prepareDefaultConfig() @@ -362,51 +293,15 @@ class KafkaConfigTest { assertEquals(expected, config.valuesWithPrefixOverride("sasl_ssl.oauthbearer.").get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG).asInstanceOf[Long]) } - private def testZkConfig[T, U](kafkaPropName: String, - expectedKafkaPropName: String, - sysPropName: String, - propValueToSet: T, - getPropValueFrom: KafkaConfig => Option[T], - expectedPropertyValue: U, - expectedDefaultValue: Option[T] = None): Unit = { - assertEquals(expectedKafkaPropName, kafkaPropName) - val propertiesFile = prepareDefaultConfig() - // first make sure there is the correct default value (if any) - val emptyConfig = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile))) - assertNull(emptyConfig.originals.get(kafkaPropName)) // doesn't appear in the originals - if (expectedDefaultValue.isDefined) { - // confirm default value behavior - assertEquals(expectedDefaultValue.get, emptyConfig.values.get(kafkaPropName)) // default value appears in the values - assertEquals(expectedDefaultValue.get, getPropValueFrom(emptyConfig).get) // default value appears in the property - } else { - // confirm no default value behavior - assertNull(emptyConfig.values.get(kafkaPropName)) // doesn't appear in the values - assertEquals(None, getPropValueFrom(emptyConfig)) // has no default value - } - // next set system property alone - try { - System.setProperty(sysPropName, s"$propValueToSet") - // need to create a new Kafka config for the system property to be recognized - val config = 
KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile))) - assertNull(config.originals.get(kafkaPropName)) // doesn't appear in the originals - // confirm default value (if any) overridden by system property - if (expectedDefaultValue.isDefined) - assertEquals(expectedDefaultValue.get, config.values.get(kafkaPropName)) // default value (different from system property) appears in the values - else - assertNull(config.values.get(kafkaPropName)) // doesn't appear in the values - // confirm system property appears in the property - assertEquals(Some(expectedPropertyValue), getPropValueFrom(config)) - } finally { - System.clearProperty(sysPropName) - } - // finally set Kafka config alone - val config = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", s"$kafkaPropName=$propValueToSet"))) - assertEquals(expectedPropertyValue, config.values.get(kafkaPropName)) // appears in the values - assertEquals(Some(expectedPropertyValue), getPropValueFrom(config)) // appears in the property - } - def prepareDefaultConfig(): String = { - prepareConfig(Array("broker.id=1", "zookeeper.connect=somewhere")) + prepareConfig(Array( + "node.id=1", + "process.roles=controller", + "controller.listener.names=CONTROLLER", + "controller.quorum.voters=1@localhost:9093,2@localhost:9093", + "listeners=CONTROLLER://:9093", + "advertised.listeners=CONTROLLER://127.0.0.1:9093" + )) } def prepareConfig(lines : Array[String]): String = { diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index 90f2b3e1979f4..a9901ba65e760 100755 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -17,16 +17,14 @@ package kafka.admin -import java.util.{Collections, Optional} +import java.util.Collections import kafka.controller.ReplicaAssignment import kafka.server.{BaseRequestTest, BrokerServer} import kafka.utils.TestUtils import kafka.utils.TestUtils._ import org.apache.kafka.clients.admin.{Admin, NewPartitions, NewTopic} import org.apache.kafka.common.errors.InvalidReplicaAssignmentException -import org.apache.kafka.common.requests.MetadataResponse.TopicMetadata import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} -import org.apache.kafka.server.common.AdminOperationException import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest @@ -59,10 +57,7 @@ class AddPartitionsTest extends BaseRequestTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - - if (isKRaftTest()) { - brokers.foreach(broker => broker.asInstanceOf[BrokerServer].lifecycleManager.initialUnfenceFuture.get()) - } + brokers.foreach(broker => broker.asInstanceOf[BrokerServer].lifecycleManager.initialUnfenceFuture.get()) createTopicWithAssignment(topic1, partitionReplicaAssignment = topic1Assignment.map { case (k, v) => k -> v.replicas }) createTopicWithAssignment(topic2, partitionReplicaAssignment = topic2Assignment.map { case (k, v) => k -> v.replicas }) createTopicWithAssignment(topic3, partitionReplicaAssignment = topic3Assignment.map { case (k, v) => k -> v.replicas }) @@ -116,21 +111,8 @@ class AddPartitionsTest extends BaseRequestTest { admin.createPartitions(Collections.singletonMap(topic1, NewPartitions.increaseTo(3, singletonList(asList(0, 1, 2))))).all().get()).getCause 
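The hunk above exercises the Admin-client path that AddPartitionsTest now relies on exclusively: `Admin#createPartitions` with `NewPartitions.increaseTo`, where the optional assignment list must contain exactly one replica list per newly added partition (the mismatch is what the surrounding assertion checks). A sketch of both call shapes (hypothetical topic name and broker ids; assumes an already configured `Admin` instance):

    import java.util.Collections
    import java.util.Arrays.asList
    import org.apache.kafka.clients.admin.{Admin, NewPartitions}

    object CreatePartitionsSketch {
      def growTopic(admin: Admin): Unit = {
        // Grow to 3 partitions and let the controller choose the new replica placement.
        admin.createPartitions(
          Collections.singletonMap("demo-topic", NewPartitions.increaseTo(3))).all().get()

        // Grow to 5 partitions with explicit assignments, one replica list per *new* partition.
        // Supplying fewer lists than new partitions fails with InvalidReplicaAssignmentException.
        val assignments = asList(asList[Integer](0, 1), asList[Integer](1, 2))
        admin.createPartitions(
          Collections.singletonMap("demo-topic", NewPartitions.increaseTo(5, assignments))).all().get()
      }
    }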
assertEquals(classOf[InvalidReplicaAssignmentException], cause.getClass) - if (isKRaftTest()) { - assertTrue(cause.getMessage.contains("Attempted to add 2 additional partition(s), but only 1 assignment(s) " + - "were specified."), "Unexpected error message: " + cause.getMessage) - } else { - assertTrue(cause.getMessage.contains("Increasing the number of partitions by 2 but 1 assignments provided."), - "Unexpected error message: " + cause.getMessage) - } - if (!isKRaftTest()) { - // In ZK mode, test the raw AdminZkClient method as well. - val e = assertThrows(classOf[AdminOperationException], () => adminZkClient.addPartitions( - topic5, topic5Assignment, adminZkClient.getBrokerMetadatas(), 2, - Some(Map(1 -> Seq(0, 1), 2 -> Seq(0, 1, 2))))) - assertTrue(e.getMessage.contains("Unexpected existing replica assignment for topic 'new-topic5', partition " + - "id 0 is missing")) - } + assertTrue(cause.getMessage.contains("Attempted to add 2 additional partition(s), but only 1 assignment(s) " + + "were specified."), "Unexpected error message: " + cause.getMessage) } @ParameterizedTest @@ -192,7 +174,7 @@ class AddPartitionsTest extends BaseRequestTest { } @ParameterizedTest - @ValueSource(strings = Array("zk")) // TODO: add kraft support + @ValueSource(strings = Array("kraft")) def testReplicaPlacementAllServers(quorum: String): Unit = { admin.createPartitions(Collections.singletonMap(topic3, NewPartitions.increaseTo(7))).all().get() @@ -208,17 +190,19 @@ class AddPartitionsTest extends BaseRequestTest { new MetadataRequest.Builder(Seq(topic3).asJava, false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head - validateLeaderAndReplicas(topicMetadata, 0, 2, Set(2, 3, 0, 1)) - validateLeaderAndReplicas(topicMetadata, 1, 3, Set(3, 2, 0, 1)) - validateLeaderAndReplicas(topicMetadata, 2, 0, Set(0, 3, 1, 2)) - validateLeaderAndReplicas(topicMetadata, 3, 1, Set(1, 0, 2, 3)) - validateLeaderAndReplicas(topicMetadata, 4, 2, Set(2, 3, 0, 1)) - validateLeaderAndReplicas(topicMetadata, 5, 3, Set(3, 0, 1, 2)) - validateLeaderAndReplicas(topicMetadata, 6, 0, Set(0, 1, 2, 3)) + + assertEquals(7, topicMetadata.partitionMetadata.size) + for (partition <- topicMetadata.partitionMetadata.asScala) { + val replicas = partition.replicaIds.asScala.toSet + assertEquals(4, replicas.size, s"Partition ${partition.partition} should have 4 replicas") + assertTrue(replicas.subsetOf(Set(0, 1, 2, 3)), s"Replicas should only include brokers 0-3") + assertTrue(partition.leaderId.isPresent, s"Partition ${partition.partition} should have a leader") + assertTrue(replicas.contains(partition.leaderId.get), "Leader should be one of the replicas") + } } @ParameterizedTest - @ValueSource(strings = Array("zk")) // TODO: add kraft support + @ValueSource(strings = Array("kraft")) def testReplicaPlacementPartialServers(quorum: String): Unit = { admin.createPartitions(Collections.singletonMap(topic2, NewPartitions.increaseTo(3))).all().get() @@ -230,19 +214,15 @@ class AddPartitionsTest extends BaseRequestTest { new MetadataRequest.Builder(Seq(topic2).asJava, false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head - validateLeaderAndReplicas(topicMetadata, 0, 1, Set(1, 2)) - validateLeaderAndReplicas(topicMetadata, 1, 2, Set(0, 2)) - validateLeaderAndReplicas(topicMetadata, 2, 3, Set(1, 3)) - } - def validateLeaderAndReplicas(metadata: TopicMetadata, partitionId: Int, expectedLeaderId: Int, - expectedReplicas: Set[Int]): Unit = { 
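The kraft-mode assertions in this hunk replace the exact-placement checks (`validateLeaderAndReplicas`, deleted here) with structural invariants: each partition has the expected replication factor, replicas come from the known broker set, and the leader is one of the replicas. A hypothetical helper (not in the patch) that distills the repeated assertion loop:

    import org.apache.kafka.common.requests.MetadataResponse.TopicMetadata
    import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
    import scala.jdk.CollectionConverters._

    object ReplicaPlacementAssertions {
      def assertValidPlacement(metadata: TopicMetadata, replicationFactor: Int, brokerIds: Set[Int]): Unit = {
        for (partition <- metadata.partitionMetadata.asScala) {
          val replicas = partition.replicaIds.asScala.map(_.intValue).toSet
          assertEquals(replicationFactor, replicas.size)                  // expected replication factor
          assertTrue(replicas.subsetOf(brokerIds))                        // replicas only on known brokers
          assertTrue(partition.leaderId.isPresent)                        // a leader was elected
          assertTrue(replicas.contains(partition.leaderId.get.intValue))  // and it is one of the replicas
        }
      }
    }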
- val partitionOpt = metadata.partitionMetadata.asScala.find(_.partition == partitionId) - assertTrue(partitionOpt.isDefined, s"Partition $partitionId should exist") - val partition = partitionOpt.get - - assertEquals(Optional.of(expectedLeaderId), partition.leaderId, "Partition leader id should match") - assertEquals(expectedReplicas, partition.replicaIds.asScala.toSet, "Replica set should match") + assertEquals(3, topicMetadata.partitionMetadata.size) + for (partition <- topicMetadata.partitionMetadata.asScala) { + val replicas = partition.replicaIds.asScala.toSet + assertEquals(2, replicas.size, s"Partition ${partition.partition} should have 2 replicas") + assertTrue(replicas.subsetOf(Set(0, 1, 2, 3)), s"Replicas should only include brokers 0-3") + assertTrue(partition.leaderId.isPresent, s"Partition ${partition.partition} should have a leader") + assertTrue(replicas.contains(partition.leaderId.get), "Leader should be one of the replicas") + } } } diff --git a/core/src/test/scala/unit/kafka/admin/ReplicationQuotaUtils.scala b/core/src/test/scala/unit/kafka/admin/ReplicationQuotaUtils.scala deleted file mode 100644 index bdea54262df10..0000000000000 --- a/core/src/test/scala/unit/kafka/admin/ReplicationQuotaUtils.scala +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package kafka.admin - -import kafka.server.KafkaServer -import kafka.utils.TestUtils -import kafka.zk.AdminZkClient -import org.apache.kafka.server.config.{ConfigType, QuotaConfig} - -import scala.collection.Seq - -object ReplicationQuotaUtils { - - def checkThrottleConfigRemovedFromZK(adminZkClient: AdminZkClient, topic: String, servers: Seq[KafkaServer]): Unit = { - TestUtils.waitUntilTrue(() => { - val hasRateProp = servers.forall { server => - val brokerConfig = adminZkClient.fetchEntityConfig(ConfigType.BROKER, server.config.brokerId.toString) - brokerConfig.contains(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG) || - brokerConfig.contains(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG) - } - val topicConfig = adminZkClient.fetchEntityConfig(ConfigType.TOPIC, topic) - val hasReplicasProp = topicConfig.contains(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) || - topicConfig.contains(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG) - !hasRateProp && !hasReplicasProp - }, "Throttle limit/replicas was not unset") - } - - def checkThrottleConfigAddedToZK(adminZkClient: AdminZkClient, expectedThrottleRate: Long, servers: Seq[KafkaServer], topic: String, throttledLeaders: Set[String], throttledFollowers: Set[String]): Unit = { - TestUtils.waitUntilTrue(() => { - //Check for limit in ZK - val brokerConfigAvailable = servers.forall { server => - val configInZk = adminZkClient.fetchEntityConfig(ConfigType.BROKER, server.config.brokerId.toString) - val zkLeaderRate = configInZk.getProperty(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG) - val zkFollowerRate = configInZk.getProperty(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG) - zkLeaderRate != null && expectedThrottleRate == zkLeaderRate.toLong && - zkFollowerRate != null && expectedThrottleRate == zkFollowerRate.toLong - } - //Check replicas assigned - val topicConfig = adminZkClient.fetchEntityConfig(ConfigType.TOPIC, topic) - val leader = topicConfig.getProperty(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG).split(",").toSet - val follower = topicConfig.getProperty(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG).split(",").toSet - val topicConfigAvailable = leader == throttledLeaders && follower == throttledFollowers - brokerConfigAvailable && topicConfigAvailable - }, "throttle limit/replicas was not set") - } -} diff --git a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala index b3ad8a844007d..3c8e20701b0a6 100644 --- a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala @@ -84,7 +84,6 @@ class AbstractPartitionTest { alterPartitionListener = createIsrChangeListener() partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = interBrokerProtocolVersion, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, diff --git a/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala b/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala deleted file mode 100644 index ca9179049532a..0000000000000 --- a/core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.cluster - -import java.nio.charset.StandardCharsets -import kafka.zk.BrokerIdZNode -import org.apache.kafka.common.feature.{Features, SupportedVersionRange} -import org.apache.kafka.common.feature.Features._ -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.server.network.BrokerEndPoint -import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals} -import org.junit.jupiter.api.Test - -import scala.jdk.CollectionConverters._ - -class BrokerEndPointTest { - - @Test - def testHashAndEquals(): Unit = { - val broker1 = new BrokerEndPoint(1, "myhost", 9092) - val broker2 = new BrokerEndPoint(1, "myhost", 9092) - val broker3 = new BrokerEndPoint(2, "myhost", 1111) - val broker4 = new BrokerEndPoint(1, "other", 1111) - - assertEquals(broker1, broker2) - assertNotEquals(broker1, broker3) - assertNotEquals(broker1, broker4) - assertEquals(broker1.hashCode, broker2.hashCode) - assertNotEquals(broker1.hashCode, broker3.hashCode) - assertNotEquals(broker1.hashCode, broker4.hashCode) - - assertEquals(Some(1), Map(broker1 -> 1).get(broker1)) - } - - @Test - def testFromJsonFutureVersion(): Unit = { - // Future compatible versions should be supported, we use a hypothetical future version here - val brokerInfoStr = """{ - "foo":"bar", - "version":100, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"1416974968782", - "endpoints":["SSL://localhost:9093"] - }""" - val broker = parseBrokerJson(1, brokerInfoStr) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.SSL)) - assertEquals("localhost", brokerEndPoint.host) - assertEquals(9093, brokerEndPoint.port) - } - - @Test - def testFromJsonV2(): Unit = { - val brokerInfoStr = """{ - "version":2, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"1416974968782", - "endpoints":["PLAINTEXT://localhost:9092"] - }""" - val broker = parseBrokerJson(1, brokerInfoStr) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) - assertEquals("localhost", brokerEndPoint.host) - assertEquals(9092, brokerEndPoint.port) - } - - @Test - def testFromJsonV1(): Unit = { - val brokerInfoStr = """{"jmx_port":-1,"timestamp":"1420485325400","host":"172.16.8.243","version":1,"port":9091}""" - val broker = parseBrokerJson(1, brokerInfoStr) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) - assertEquals("172.16.8.243", brokerEndPoint.host) - assertEquals(9091, brokerEndPoint.port) - } - - @Test - def testFromJsonV3(): Unit = { - val json = """{ - "version":3, - "host":"localhost", - "port":9092, - 
"jmx_port":9999, - "timestamp":"2233345666", - "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"], - "rack":"dc1" - }""" - val broker = parseBrokerJson(1, json) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.SSL)) - assertEquals("host1", brokerEndPoint.host) - assertEquals(9093, brokerEndPoint.port) - assertEquals(Some("dc1"), broker.rack) - } - - @Test - def testFromJsonV4WithNullRack(): Unit = { - val json = """{ - "version":4, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"2233345666", - "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"}, - "rack":null - }""" - val broker = parseBrokerJson(1, json) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(new ListenerName("CLIENT")) - assertEquals("host1", brokerEndPoint.host) - assertEquals(9092, brokerEndPoint.port) - assertEquals(None, broker.rack) - } - - @Test - def testFromJsonV4WithNoRack(): Unit = { - val json = """{ - "version":4, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"2233345666", - "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"} - }""" - val broker = parseBrokerJson(1, json) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(new ListenerName("CLIENT")) - assertEquals("host1", brokerEndPoint.host) - assertEquals(9092, brokerEndPoint.port) - assertEquals(None, broker.rack) - } - - @Test - def testFromJsonV4WithNoFeatures(): Unit = { - val json = """{ - "version":4, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"2233345666", - "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"}, - "rack":"dc1" - }""" - val broker = parseBrokerJson(1, json) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(new ListenerName("CLIENT")) - assertEquals("host1", brokerEndPoint.host) - assertEquals(9092, brokerEndPoint.port) - assertEquals(Some("dc1"), broker.rack) - assertEquals(emptySupportedFeatures, broker.features) - } - - @Test - def testFromJsonV5(): Unit = { - val json = """{ - "version":5, - "host":"localhost", - "port":9092, - "jmx_port":9999, - "timestamp":"2233345666", - "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"], - "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"}, - "rack":"dc1", - "features": {"feature1": {"min_version": 1, "max_version": 2}, "feature2": {"min_version": 2, "max_version": 4}} - }""" - val broker = parseBrokerJson(1, json) - assertEquals(1, broker.id) - val brokerEndPoint = broker.brokerEndPoint(new ListenerName("CLIENT")) - assertEquals("host1", brokerEndPoint.host) - assertEquals(9092, brokerEndPoint.port) - assertEquals(Some("dc1"), broker.rack) - assertEquals(Features.supportedFeatures( - Map[String, SupportedVersionRange]( - "feature1" -> new SupportedVersionRange(1, 2), - "feature2" -> new SupportedVersionRange(2, 4)).asJava), - broker.features) - } - - private def parseBrokerJson(id: Int, jsonString: String): Broker = - BrokerIdZNode.decode(id, jsonString.getBytes(StandardCharsets.UTF_8)).broker -} diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala index 
179e098c347c2..6c444f0e5602e 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala @@ -35,7 +35,7 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.server.common.{MetadataVersion, RequestLocal} +import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} import org.apache.kafka.server.util.MockTime @@ -50,7 +50,6 @@ import org.mockito.Mockito.{mock, when} import scala.concurrent.duration._ import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption /** * Verifies that slow appends to log don't block request threads processing replica fetch requests. @@ -276,7 +275,6 @@ class PartitionLockTest extends Logging { logManager.startup(Set.empty) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => 1L, mockTime, @@ -302,8 +300,8 @@ class PartitionLockTest extends Logging { val log = super.createLog(isNew, isFutureReplica, offsetCheckpoints, None, None) val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + log.dir, log.topicPartition, logDirFailureChannel, None, mockTime.scheduler) val maxTransactionTimeout = 5 * 60 * 1000 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false) val producerStateManager = new ProducerStateManager( @@ -324,7 +322,7 @@ class PartitionLockTest extends Logging { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -343,8 +341,7 @@ class PartitionLockTest extends Logging { )).thenReturn(Optional.empty[JLong]) when(alterIsrManager.submit( ArgumentMatchers.eq(topicIdPartition), - ArgumentMatchers.any[LeaderAndIsr], - ArgumentMatchers.anyInt() + ArgumentMatchers.any[LeaderAndIsr] )).thenReturn(new CompletableFuture[LeaderAndIsr]()) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) @@ -444,7 +441,7 @@ class PartitionLockTest extends Logging { log: UnifiedLog, logStartOffset: Long, localLog: LocalLog, - leaderEpochCache: Option[LeaderEpochFileCache], + leaderEpochCache: LeaderEpochFileCache, producerStateManager: ProducerStateManager, appendSemaphore: Semaphore ) extends UnifiedLog( @@ -454,12 +451,11 @@ class PartitionLockTest extends Logging { log.producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - _topicId = None, - keepPartitionMetadataFile = true) { + _topicId = None) { override def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin, - interBrokerProtocolVersion: MetadataVersion, requestLocal: RequestLocal, verificationGuard: VerificationGuard): LogAppendInfo = { - val appendInfo = super.appendAsLeader(records, leaderEpoch, origin, interBrokerProtocolVersion, 
requestLocal, verificationGuard) + requestLocal: RequestLocal, verificationGuard: VerificationGuard): LogAppendInfo = { + val appendInfo = super.appendAsLeader(records, leaderEpoch, origin, requestLocal, verificationGuard) appendSemaphore.acquire() appendInfo } diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala index b6804d46bfd6b..3b5220d944161 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala @@ -18,11 +18,9 @@ package kafka.cluster import java.net.InetAddress import com.yammer.metrics.core.Metric -import kafka.common.UnexpectedAppendOffsetException import kafka.log._ import kafka.server._ import kafka.utils._ -import kafka.zk.KafkaZkClient import org.apache.kafka.common.errors.{ApiException, FencedLeaderEpochException, InconsistentTopicIdException, InvalidTxnStateException, NotLeaderOrFollowerException, OffsetNotAvailableException, OffsetOutOfRangeException, UnknownLeaderEpochException} import org.apache.kafka.common.message.{AlterPartitionResponseData, FetchResponseData} import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState @@ -37,7 +35,7 @@ import org.apache.kafka.metadata.LeaderRecoveryState import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.ArgumentMatchers -import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt, anyLong, anyString} +import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt, anyLong} import org.mockito.Mockito._ import org.mockito.invocation.InvocationOnMock @@ -45,7 +43,7 @@ import java.lang.{Long => JLong} import java.nio.ByteBuffer import java.util.Optional import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, Semaphore} -import kafka.server.metadata.{KRaftMetadataCache, ZkMetadataCache} +import kafka.server.metadata.KRaftMetadataCache import kafka.server.share.DelayedShareFetch import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.compress.Compression @@ -56,11 +54,10 @@ import org.apache.kafka.common.replica.ClientMetadata.DefaultClientMetadata import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, MetadataVersion, NodeToControllerChannelManager, RequestLocal} -import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey -import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} +import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, UnexpectedAppendOffsetException} import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache @@ -77,6 +74,7 @@ object PartitionTest { private var highWatermark: Long = -1L private var failed: Boolean = false private var deleted: Boolean = false + private var follower: Boolean = false override def onHighWatermarkUpdated(partition: TopicPartition, offset: Long): Unit = { highWatermark = offset @@ -90,10 +88,15 @@ object PartitionTest { deleted = true } + override def 
onBecomingFollower(partition: TopicPartition): Unit = { + follower = true + } + private def clear(): Unit = { highWatermark = -1L failed = false deleted = false + follower = false } /** @@ -104,7 +107,8 @@ object PartitionTest { def verify( expectedHighWatermark: Long = -1L, expectedFailed: Boolean = false, - expectedDeleted: Boolean = false + expectedDeleted: Boolean = false, + expectedFollower: Boolean = false ): Unit = { assertEquals(expectedHighWatermark, highWatermark, "Unexpected high watermark") @@ -112,6 +116,8 @@ object PartitionTest { "Unexpected failed") assertEquals(expectedDeleted, deleted, "Unexpected deleted") + assertEquals(expectedFollower, follower, + "Unexpected follower") clear() } } @@ -425,7 +431,6 @@ class PartitionTest extends AbstractPartitionTest { partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -439,8 +444,8 @@ class PartitionTest extends AbstractPartitionTest { val log = super.createLog(isNew, isFutureReplica, offsetCheckpoints, None, None) val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + log.dir, log.topicPartition, logDirFailureChannel, None, time.scheduler) val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, true) val producerStateManager = new ProducerStateManager( @@ -461,7 +466,7 @@ class PartitionTest extends AbstractPartitionTest { segments, 0L, 0L, - leaderEpochCache.asJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -756,7 +761,7 @@ class PartitionTest extends AbstractPartitionTest { currentLeaderEpoch = Optional.empty(), fetchOnlyFromLeader = true).timestampAndOffsetOpt - assertTrue(timestampAndOffsetOpt.isDefined) + assertTrue(timestampAndOffsetOpt.isPresent) val timestampAndOffset = timestampAndOffsetOpt.get assertEquals(leaderEpoch, timestampAndOffset.leaderEpoch.get) @@ -815,11 +820,11 @@ class PartitionTest extends AbstractPartitionTest { fetchOnlyFromLeader = true ) val timestampAndOffsetOpt = offsetResultHolder.timestampAndOffsetOpt - if (timestampAndOffsetOpt.isEmpty || offsetResultHolder.lastFetchableOffset.isDefined && + if (timestampAndOffsetOpt.isEmpty || offsetResultHolder.lastFetchableOffset.isPresent && timestampAndOffsetOpt.get.offset >= offsetResultHolder.lastFetchableOffset.get) { offsetResultHolder.maybeOffsetsError.map(e => throw e) } - Right(timestampAndOffsetOpt) + Right(if (timestampAndOffsetOpt.isPresent) Some(timestampAndOffsetOpt.get) else None) } catch { case e: ApiException => Left(e) } @@ -1025,7 +1030,7 @@ class PartitionTest extends AbstractPartitionTest { isolationLevel = isolationLevel, currentLeaderEpoch = Optional.empty(), fetchOnlyFromLeader = true).timestampAndOffsetOpt - assertTrue(res.isDefined) + assertTrue(res.isPresent) res.get } @@ -1264,7 +1269,6 @@ class PartitionTest extends AbstractPartitionTest { configRepository.setTopicConfig(topicPartition.topic, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") partition = new Partition(topicPartition, replicaLagTimeMaxMs = 
ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = interBrokerProtocolVersion, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1355,7 +1359,6 @@ class PartitionTest extends AbstractPartitionTest { val mockMetadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) val partition = spy(new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = interBrokerProtocolVersion, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1495,7 +1498,7 @@ class PartitionTest extends AbstractPartitionTest { val isrItem = alterPartitionManager.isrUpdates.head assertEquals(isrItem.leaderAndIsr.isr, List(brokerId, remoteBrokerId).map(Int.box).asJava) isrItem.leaderAndIsr.isrWithBrokerEpoch.asScala.foreach { brokerState => - // In ZK mode, the broker epochs in the leaderAndIsr should be -1. + // the broker epochs in the leaderAndIsr should be -1. assertEquals(-1, brokerState.brokerEpoch()) } assertEquals(Set(brokerId), partition.partitionState.isr) @@ -1587,7 +1590,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1674,8 +1676,6 @@ class PartitionTest extends AbstractPartitionTest { @ParameterizedTest @ValueSource(strings = Array("kraft")) def testIsrNotExpandedIfReplicaIsFencedOrShutdown(quorum: String): Unit = { - val kraft = quorum == "kraft" - val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 10, leaderEpoch = 4) @@ -1685,26 +1685,18 @@ class PartitionTest extends AbstractPartitionTest { val replicas = List(brokerId, remoteBrokerId) val isr = Set(brokerId) - val metadataCache: MetadataCache = if (kraft) mock(classOf[KRaftMetadataCache]) else mock(classOf[ZkMetadataCache]) - if (kraft) { - addBrokerEpochToMockMetadataCache(metadataCache.asInstanceOf[KRaftMetadataCache], replicas) - } + val metadataCache = mock(classOf[KRaftMetadataCache]) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) // Mark the remote broker as eligible or ineligible in the metadata cache of the leader. // When using kraft, we can make the broker ineligible by fencing it. - // In ZK mode, we must mark the broker as alive for it to be eligible. 
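Note on the hunk above: with the ZK branch gone, testIsrNotExpandedIfReplicaIsFencedOrShutdown drives replica eligibility purely through the fencing state stubbed on a mocked KRaftMetadataCache. A minimal sketch of that Mockito pattern, written to compile inside a test method; the broker id value is illustrative, not taken from the test:

import kafka.server.metadata.KRaftMetadataCache
import org.mockito.Mockito.{mock, when}

val remoteBrokerId = 1  // illustrative value
val metadataCache = mock(classOf[KRaftMetadataCache])
// An ineligible replica is simply a fenced broker in the KRaft metadata cache.
when(metadataCache.isBrokerFenced(remoteBrokerId)).thenReturn(true)
// Re-stubbing flips it back to eligible.
when(metadataCache.isBrokerFenced(remoteBrokerId)).thenReturn(false)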
def markRemoteReplicaEligible(eligible: Boolean): Unit = { - if (kraft) { - when(metadataCache.asInstanceOf[KRaftMetadataCache].isBrokerFenced(remoteBrokerId)).thenReturn(!eligible) - } else { - when(metadataCache.hasAliveBroker(remoteBrokerId)).thenReturn(eligible) - } + when(metadataCache.isBrokerFenced(remoteBrokerId)).thenReturn(!eligible) } val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1811,7 +1803,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1837,7 +1828,7 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(isr, partition.partitionState.maximalIsr) // Fetch to let the follower catch up to the log end offset, but using a wrong broker epoch. The expansion should fail. - addBrokerEpochToMockMetadataCache(metadataCache.asInstanceOf[KRaftMetadataCache], List(brokerId, remoteBrokerId2)) + addBrokerEpochToMockMetadataCache(metadataCache, List(brokerId, remoteBrokerId2)) // Create a race case where the replica epoch get bumped right after the previous fetch succeeded. val wrongReplicaEpoch = defaultBrokerEpoch(remoteBrokerId1) - 1 when(metadataCache.getAliveBrokerEpoch(remoteBrokerId1)).thenReturn(Option(wrongReplicaEpoch), Option(defaultBrokerEpoch(remoteBrokerId1))) @@ -1897,13 +1888,12 @@ class PartitionTest extends AbstractPartitionTest { val replicas = List(brokerId, remoteBrokerId1) val isr = Set(brokerId, remoteBrokerId1) - val metadataCache: MetadataCache = mock(classOf[KRaftMetadataCache]) - addBrokerEpochToMockMetadataCache(metadataCache.asInstanceOf[KRaftMetadataCache], replicas) + val metadataCache = mock(classOf[KRaftMetadataCache]) + addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -1969,7 +1959,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2125,7 +2114,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2208,7 +2196,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition( topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.IBP_3_7_IV2, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2581,12 +2568,11 @@ class PartitionTest extends AbstractPartitionTest { time = time, brokerId = brokerId, brokerEpochSupplier = () => 0, - metadataVersionSupplier = () => MetadataVersion.IBP_3_0_IV0 + 
metadataVersionSupplier = () => MetadataVersion.IBP_3_0_IV1 ) partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = interBrokerProtocolVersion, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2690,71 +2676,6 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(alterPartitionManager.isrUpdates.size, 1) } - @Test - def testZkIsrManagerAsyncCallback(): Unit = { - // We need a real scheduler here so that the ISR write lock works properly - val scheduler = new KafkaScheduler(1, true, "zk-isr-test") - scheduler.startup() - val kafkaZkClient = mock(classOf[KafkaZkClient]) - - doAnswer(_ => (true, 2)) - .when(kafkaZkClient) - .conditionalUpdatePath(anyString(), any(), ArgumentMatchers.eq(1), any()) - - val zkIsrManager = AlterPartitionManager(scheduler, time, kafkaZkClient) - zkIsrManager.start() - - val partition = new Partition(topicPartition, - replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = IBP_2_6_IV0, // shouldn't matter, but set this to a ZK isr version - localBrokerId = brokerId, - () => defaultBrokerEpoch(brokerId), - time, - alterPartitionListener, - delayedOperations, - metadataCache, - logManager, - zkIsrManager) - - val log = logManager.getOrCreateLog(topicPartition, topicId = None) - seedLogData(log, numRecords = 10, leaderEpoch = 4) - - val controllerEpoch = 0 - val leaderEpoch = 5 - val follower1 = brokerId + 1 - val follower2 = brokerId + 2 - val follower3 = brokerId + 3 - val replicas = Seq(brokerId, follower1, follower2, follower3) - val isr = Seq(brokerId, follower1, follower2) - - doNothing().when(delayedOperations).checkAndCompleteAll() - - assertTrue(makeLeader( - partition = partition, - topicId = None, - controllerEpoch = controllerEpoch, - leaderEpoch = leaderEpoch, - isr = isr, - replicas = replicas, - partitionEpoch = 1, - isNew = true - )) - assertEquals(0L, partition.localLogOrException.highWatermark) - - // Expand ISR - fetchFollower(partition, replicaId = follower3, fetchOffset = 10L) - - // Try avoiding a race - TestUtils.waitUntilTrue(() => !partition.partitionState.isInflight, "Expected ISR state to be committed", 100) - - partition.partitionState match { - case CommittedPartitionState(isr, _) => assertEquals(Set(brokerId, follower1, follower2, follower3), isr) - case _ => fail("Expected a committed ISR following Zk expansion") - } - - scheduler.shutdown() - } - @Test def testUseCheckpointToInitializeHighWatermark(): Unit = { val log = logManager.getOrCreateLog(topicPartition, topicId = None) @@ -2798,7 +2719,6 @@ class PartitionTest extends AbstractPartitionTest { // Create new Partition object for same topicPartition val partition2 = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2843,7 +2763,6 @@ class PartitionTest extends AbstractPartitionTest { // Create new Partition object for same topicPartition val partition2 = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -2926,9 +2845,9 @@ class PartitionTest extends AbstractPartitionTest { def testUpdateAssignmentAndIsr(): Unit = { val topicPartition = new 
TopicPartition("test", 1) val partition = new Partition( - topicPartition, 1000, MetadataVersion.latestTesting, 0, () => defaultBrokerEpoch(0), + topicPartition, 1000, 0, () => defaultBrokerEpoch(0), Time.SYSTEM, mock(classOf[AlterPartitionListener]), mock(classOf[DelayedOperations]), - mock(classOf[MetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterPartitionManager])) + mock(classOf[KRaftMetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterPartitionManager])) val replicas = Seq(0, 1, 2, 3) val followers = Seq(1, 2, 3) @@ -3001,7 +2920,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3040,7 +2958,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3082,7 +2999,6 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3192,7 +3108,7 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(Some(0L), partition.leaderEpochStartOffsetOpt) val leaderLog = partition.localLogOrException - assertEquals(Optional.of(new EpochEntry(leaderEpoch, 0L)), leaderLog.leaderEpochCache.asJava.flatMap(_.latestEntry)) + assertEquals(Optional.of(new EpochEntry(leaderEpoch, 0L)), leaderLog.leaderEpochCache.latestEntry) // Write to the log to increment the log end offset. 
leaderLog.appendAsLeader(MemoryRecords.withRecords(0L, Compression.NONE, 0, @@ -3216,7 +3132,7 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) assertEquals(Some(0L), partition.leaderEpochStartOffsetOpt) - assertEquals(Optional.of(new EpochEntry(leaderEpoch, 0L)), leaderLog.leaderEpochCache.asJava.flatMap(_.latestEntry)) + assertEquals(Optional.of(new EpochEntry(leaderEpoch, 0L)), leaderLog.leaderEpochCache.latestEntry) } @Test @@ -3696,7 +3612,7 @@ class PartitionTest extends AbstractPartitionTest { log: UnifiedLog, logStartOffset: Long, localLog: LocalLog, - leaderEpochCache: Option[LeaderEpochFileCache], + leaderEpochCache: LeaderEpochFileCache, producerStateManager: ProducerStateManager, appendSemaphore: Semaphore ) extends UnifiedLog( @@ -3706,8 +3622,7 @@ class PartitionTest extends AbstractPartitionTest { log.producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - _topicId = None, - keepPartitionMetadataFile = true) { + _topicId = None) { override def appendAsFollower(records: MemoryRecords): LogAppendInfo = { appendSemaphore.acquire() @@ -3765,8 +3680,8 @@ class PartitionTest extends AbstractPartitionTest { fetchOffset, FetchRequest.INVALID_LOG_START_OFFSET, maxBytes, - leaderEpoch.map(Int.box).asJava, - lastFetchedEpoch.map(Int.box).asJava + leaderEpoch.map(Int.box).toJava, + lastFetchedEpoch.map(Int.box).toJava ) partition.fetchRecords( @@ -3802,8 +3717,8 @@ class PartitionTest extends AbstractPartitionTest { fetchOffset, logStartOffset, maxBytes, - leaderEpoch.map(Int.box).asJava, - lastFetchedEpoch.map(Int.box).asJava + leaderEpoch.map(Int.box).toJava, + lastFetchedEpoch.map(Int.box).toJava ) partition.fetchRecords( @@ -3834,7 +3749,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3880,7 +3794,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3926,7 +3839,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -3972,7 +3884,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -4018,7 +3929,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, 
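The fetch-request helpers in this hunk switch the Option-to-Optional bridging from the asJava spelling to toJava on scala.jdk.OptionConverters. A small self-contained sketch of both directions of that bridge, with illustrative values:

import java.util.Optional
import scala.jdk.OptionConverters.{RichOption, RichOptional}

val leaderEpoch: Option[Integer] = Some(5).map(Int.box)
// Scala Option -> java.util.Optional, mirroring leaderEpoch.map(Int.box).toJava above.
val asOptional: Optional[Integer] = leaderEpoch.toJava
// And the reverse bridge, java.util.Optional -> Scala Option.
val asOption: Option[Integer] = asOptional.toScala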
() => defaultBrokerEpoch(brokerId), time, @@ -4065,7 +3975,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, @@ -4120,7 +4029,6 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, - interBrokerProtocolVersion = MetadataVersion.latestTesting, localBrokerId = brokerId, () => defaultBrokerEpoch(brokerId), time, diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala deleted file mode 100644 index 8185781c135ce..0000000000000 --- a/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.cluster - -import kafka.utils.TestUtils -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.record.{RecordVersion, SimpleRecord} -import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} -import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.api.Test - -import java.util.Optional -import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV1 - -import scala.annotation.nowarn - -class PartitionWithLegacyMessageFormatTest extends AbstractPartitionTest { - - // legacy message formats are only supported with IBP < 3.0 - override protected def interBrokerProtocolVersion: MetadataVersion = IBP_2_8_IV1 - - @nowarn("cat=deprecation") - @Test - def testMakeLeaderDoesNotUpdateEpochCacheForOldFormats(): Unit = { - val leaderEpoch = 8 - configRepository.setTopicConfig(topicPartition.topic(), - TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, MetadataVersion.IBP_0_10_2_IV0.shortVersion) - val log = logManager.getOrCreateLog(topicPartition, topicId = None) - log.appendAsLeader(TestUtils.records(List( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes)), - magicValue = RecordVersion.V1.value - ), leaderEpoch = 0) - log.appendAsLeader(TestUtils.records(List( - new SimpleRecord("k3".getBytes, "v3".getBytes), - new SimpleRecord("k4".getBytes, "v4".getBytes)), - magicValue = RecordVersion.V1.value - ), leaderEpoch = 5) - assertEquals(4, log.logEndOffset) - - val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true) - assertEquals(Some(4), partition.leaderLogIfLocal.map(_.logEndOffset)) - assertEquals(None, log.latestEpoch) - - val epochEndOffset = partition.lastOffsetForLeaderEpoch(currentLeaderEpoch = Optional.of(leaderEpoch), - leaderEpoch = leaderEpoch, fetchOnlyFromLeader = true) - assertEquals(UNDEFINED_EPOCH_OFFSET, epochEndOffset.endOffset) - assertEquals(UNDEFINED_EPOCH, epochEndOffset.leaderEpoch) - } - -} diff --git a/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala b/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala index 428d57ce9a7f2..55a49f31cbf7e 100644 --- a/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala @@ -17,15 +17,13 @@ package kafka.cluster import kafka.log.UnifiedLog -import kafka.server.metadata.{KRaftMetadataCache, ZkMetadataCache} +import kafka.server.metadata.KRaftMetadataCache import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.NotLeaderOrFollowerException import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.log.LogOffsetMetadata import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} import org.junit.jupiter.api.{BeforeEach, Test} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import org.mockito.Mockito.{mock, when} object ReplicaTest { @@ -320,16 +318,10 @@ class ReplicaTest { assertFalse(isCaughtUp(leaderEndOffset = 16L)) } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testFenceStaleUpdates(isKraft: Boolean): Unit = { - val metadataCache = if (isKraft) { - val kRaftMetadataCache = mock(classOf[KRaftMetadataCache]) - when(kRaftMetadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Option(2L)) - kRaftMetadataCache - } else { - mock(classOf[ZkMetadataCache]) 
- } + @Test + def testFenceStaleUpdates(): Unit = { + val metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Option(2L)) val replica = new Replica(BrokerId, Partition, metadataCache) replica.updateFetchStateOrThrow( @@ -339,24 +331,13 @@ class ReplicaTest { leaderEndOffset = 10L, brokerEpoch = 2L ) - if (isKraft) { - assertThrows(classOf[NotLeaderOrFollowerException], () => replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(5L), - followerStartOffset = 2L, - followerFetchTimeMs = 3, - leaderEndOffset = 10L, - brokerEpoch = 1L - )) - } else { - // No exception to expect under ZK mode. - replica.updateFetchStateOrThrow( - followerFetchOffsetMetadata = new LogOffsetMetadata(5L), - followerStartOffset = 2L, - followerFetchTimeMs = 3, - leaderEndOffset = 10L, - brokerEpoch = 1L - ) - } + assertThrows(classOf[NotLeaderOrFollowerException], () => replica.updateFetchStateOrThrow( + followerFetchOffsetMetadata = new LogOffsetMetadata(5L), + followerStartOffset = 2L, + followerFetchTimeMs = 3, + leaderEndOffset = 10L, + brokerEpoch = 1L + )) replica.updateFetchStateOrThrow( followerFetchOffsetMetadata = new LogOffsetMetadata(5L), followerStartOffset = 2L, diff --git a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala index e72f1986f469a..df8b6b6f6d441 100644 --- a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala @@ -22,11 +22,11 @@ import java.util.{Collections, Random} import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.locks.Lock import kafka.coordinator.AbstractCoordinatorConcurrencyTest._ +import kafka.cluster.Partition import kafka.log.{LogManager, UnifiedLog} import kafka.server.QuotaFactory.QuotaManagers import kafka.server.{KafkaConfig, _} import kafka.utils._ -import kafka.zk.KafkaZkClient import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, RecordValidationStats} @@ -46,10 +46,9 @@ import scala.jdk.CollectionConverters._ abstract class AbstractCoordinatorConcurrencyTest[M <: CoordinatorMember] extends Logging { val nThreads = 5 - val serverProps = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + val serverProps = TestUtils.createBrokerConfig(0) val random = new Random var replicaManager: TestReplicaManager = _ - var zkClient: KafkaZkClient = _ var time: MockTime = _ var timer: MockTimer = _ var executor: ExecutorService = _ @@ -66,7 +65,6 @@ abstract class AbstractCoordinatorConcurrencyTest[M <: CoordinatorMember] extend val producePurgatory = new DelayedOperationPurgatory[DelayedProduce]("Produce", timer, 1, 1000, false, true) val watchKeys = Collections.newSetFromMap(new ConcurrentHashMap[TopicPartitionOperationKey, java.lang.Boolean]()).asScala replicaManager = TestReplicaManager(KafkaConfig.fromProps(serverProps), time, scheduler, timer, mockLogMger, mock(classOf[QuotaManagers], withSettings().stubOnly()), producePurgatory, watchKeys) - zkClient = mock(classOf[KafkaZkClient], withSettings().stubOnly()) } @AfterEach @@ -253,8 +251,8 @@ object AbstractCoordinatorConcurrencyTest { producePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys.toList.asJava) } - override def getMagic(topicPartition: 
TopicPartition): Option[Byte] = { - Some(RecordBatch.MAGIC_VALUE_V2) + override def onlinePartition(topicPartition: TopicPartition): Option[Partition] = { + Some(mock(classOf[Partition])) } def getOrCreateLogs(): mutable.Map[TopicPartition, (UnifiedLog, Long)] = { diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala index 70397447f5c51..68a6ba5da1db8 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala @@ -59,7 +59,7 @@ class CoordinatorLoaderImplTest { val serde = mock(classOf[Deserializer[(String, String)]]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -79,7 +79,7 @@ class CoordinatorLoaderImplTest { val serde = mock(classOf[Deserializer[(String, String)]]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -100,7 +100,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -203,7 +203,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -246,7 +246,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -286,7 +286,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -312,7 +312,8 @@ class CoordinatorLoaderImplTest { .thenThrow(new RuntimeException("Error!")) val ex = assertFutureThrows(loader.load(tp, coordinator), classOf[RuntimeException]) - assertEquals("Error!", ex.getMessage) + + assertEquals(s"Deserializing record DefaultRecord(offset=0, timestamp=-1, key=2 bytes, value=2 bytes) from $tp failed due to: Error!", ex.getMessage) } } @@ -327,7 +328,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -359,7 +360,7 @@ 
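The Using(...) to Using.resource(...) changes in CoordinatorLoaderImplTest around this point hinge on a standard-library difference: Using wraps the block's outcome in a Try, so an exception thrown inside (a JUnit assertion failure included) is captured and goes unnoticed unless the Try is inspected, whereas Using.resource returns the block's value and rethrows. Presumably that is the motivation for the switch. A short sketch under that reading, using a hypothetical stand-in resource:

import scala.util.{Try, Using}

// Stand-in resource for illustration; any AutoCloseable works with Using.
final class FakeLoader extends AutoCloseable {
  override def close(): Unit = ()
}

// Using(...) returns a Try: a failure thrown inside the block is captured,
// so an unchecked result quietly hides it.
val hidden: Try[Unit] = Using(new FakeLoader) { _ => sys.error("never surfaces") }

// Using.resource(...) returns the block's value and rethrows, so the same
// failure would propagate to the caller (left commented so the sketch runs cleanly):
// Using.resource(new FakeLoader) { _ => sys.error("propagates") }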
class CoordinatorLoaderImplTest { val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) val time = new MockTime() - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time, replicaManager = replicaManager, deserializer = serde, @@ -414,7 +415,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -489,7 +490,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -515,7 +516,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, @@ -591,7 +592,7 @@ class CoordinatorLoaderImplTest { val log = mock(classOf[UnifiedLog]) val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) - Using(new CoordinatorLoaderImpl[(String, String)]( + Using.resource(new CoordinatorLoaderImpl[(String, String)]( time = Time.SYSTEM, replicaManager = replicaManager, deserializer = serde, diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala index f12d21019a71e..9b192e851e992 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala @@ -20,13 +20,14 @@ import kafka.server.ReplicaManager import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.coordinator.common.runtime.PartitionWriter import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import org.apache.kafka.test.TestUtils.assertFutureThrows -import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows} +import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows, assertTrue} import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.EnumSource @@ -238,4 +239,83 @@ class CoordinatorPartitionWriterTest { batch )) } + + @Test + def testDeleteRecordsResponseContainsError(): Unit = { + val replicaManager = mock(classOf[ReplicaManager]) + val partitionRecordWriter = new CoordinatorPartitionWriter( + replicaManager + ) + + val callbackCapture: ArgumentCaptor[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit] = + 
ArgumentCaptor.forClass(classOf[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit]) + + // Response contains error. + when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map( + new TopicPartition("random-topic", 0) -> new DeleteRecordsPartitionResult() + .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code + ))) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception, exp) + } + + // Empty response + when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map[TopicPartition, DeleteRecordsPartitionResult]()) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertTrue(exp.isInstanceOf[IllegalStateException]) + } + } + + @Test + def testDeleteRecordsSuccess(): Unit = { + val replicaManager = mock(classOf[ReplicaManager]) + val partitionRecordWriter = new CoordinatorPartitionWriter( + replicaManager + ) + + val callbackCapture: ArgumentCaptor[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit]) + + // response contains error + when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map( + new TopicPartition("random-topic", 0) -> new DeleteRecordsPartitionResult() + .setErrorCode(Errors.NONE.code) + )) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertNull(exp) + } + } } diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala index 7a9de453740b4..d950d5f417072 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala @@ -26,7 +26,7 @@ import org.apache.kafka.common.message.OffsetDeleteRequestData.{OffsetDeleteRequ import org.apache.kafka.common.message.OffsetDeleteResponseData.{OffsetDeleteResponsePartition, OffsetDeleteResponsePartitionCollection, OffsetDeleteResponseTopic, OffsetDeleteResponseTopicCollection} import org.apache.kafka.common.network.{ClientInformation, ListenerName} import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{OffsetFetchResponse, RequestContext, RequestHeader} +import org.apache.kafka.common.requests.{OffsetFetchResponse, RequestContext, RequestHeader, TransactionResult} import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.utils.{BufferSupplier, Time} import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource @@ -37,6 +37,7 @@ import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest +import org.mockito.ArgumentMatchers.any import 
org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{mock, verify, when} @@ -414,12 +415,12 @@ class GroupCoordinatorAdapterTest { )) ) - when(groupCoordinator.handleDescribeGroup(groupId1)).thenReturn { - (Errors.NONE, groupSummary1) + when(groupCoordinator.handleDescribeGroup(groupId1, ApiKeys.DESCRIBE_GROUPS.latestVersion)).thenReturn { + (Errors.NONE, None, groupSummary1) } - when(groupCoordinator.handleDescribeGroup(groupId2)).thenReturn { - (Errors.NOT_COORDINATOR, GroupCoordinator.EmptyGroup) + when(groupCoordinator.handleDescribeGroup(groupId2, ApiKeys.DESCRIBE_GROUPS.latestVersion)).thenReturn { + (Errors.NOT_COORDINATOR, None, GroupCoordinator.EmptyGroup) } val ctx = makeContext(ApiKeys.DESCRIBE_GROUPS, ApiKeys.DESCRIBE_GROUPS.latestVersion) @@ -677,7 +678,6 @@ class GroupCoordinatorAdapterTest { new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100) - .setCommitTimestamp(now) .setCommittedLeaderEpoch(1) ).asJava) ).asJava) @@ -930,4 +930,26 @@ class GroupCoordinatorAdapterTest { assertTrue(future.isCompletedExceptionally) assertFutureThrows(future, classOf[UnsupportedVersionException]) } + + @Test + def testOnTransactionCompletedWithUnexpectedException(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + when(groupCoordinator.scheduleHandleTxnCompletion( + any(), + any(), + any() + )).thenThrow(new IllegalStateException("Oh no!")) + + val future = adapter.onTransactionCompleted( + 10, + Seq.empty[TopicPartition].asJava, + TransactionResult.COMMIT + ) + + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[IllegalStateException]) + } } diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala index a0917e1ee4291..3eecdfe65e190 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala @@ -37,7 +37,6 @@ import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.purgatory.DelayedOperationPurgatory import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} -import org.mockito.Mockito.when import scala.collection._ import scala.concurrent.duration.Duration @@ -71,9 +70,6 @@ class GroupCoordinatorConcurrencyTest extends AbstractCoordinatorConcurrencyTest override def setUp(): Unit = { super.setUp() - when(zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME)) - .thenReturn(Some(numPartitions)) - serverProps.setProperty(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, ConsumerMinSessionTimeout.toString) serverProps.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, ConsumerMaxSessionTimeout.toString) serverProps.setProperty(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, GroupInitialRebalanceDelay.toString) @@ -85,8 +81,7 @@ class GroupCoordinatorConcurrencyTest extends AbstractCoordinatorConcurrencyTest metrics = new Metrics groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, metrics) - groupCoordinator.startup(() => 
zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.groupCoordinatorConfig.offsetsTopicPartitions), - enableMetadataExpiration = false) + groupCoordinator.startup(() => numPartitions, enableMetadataExpiration = false) // Transactional appends attempt to schedule to the request handler thread using // a non request handler thread. Set this to avoid error. @@ -156,8 +151,7 @@ class GroupCoordinatorConcurrencyTest extends AbstractCoordinatorConcurrencyTest groupCoordinator.shutdown() groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, new Metrics()) - groupCoordinator.startup(() => zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.groupCoordinatorConfig.offsetsTopicPartitions), - enableMetadataExpiration = false) + groupCoordinator.startup(() => numPartitions, enableMetadataExpiration = false) val members = new Group(s"group", nMembersPerGroup, groupCoordinator) .members diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala index df0e3483f5ad0..2373d09816cb4 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala @@ -29,7 +29,6 @@ import org.apache.kafka.common.requests.{JoinGroupRequest, OffsetCommitRequest, import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock import kafka.cluster.Partition -import kafka.zk.KafkaZkClient import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.internals.Topic @@ -79,7 +78,6 @@ class GroupCoordinatorTest { var groupCoordinator: GroupCoordinator = _ var replicaManager: ReplicaManager = _ var scheduler: KafkaScheduler = _ - var zkClient: KafkaZkClient = _ private val groupId = "groupId" private val protocolType = "consumer" @@ -100,7 +98,7 @@ class GroupCoordinatorTest { @BeforeEach def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + val props = TestUtils.createBrokerConfig(0) props.setProperty(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, GroupMinSessionTimeout.toString) props.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, GroupMaxSessionTimeout.toString) props.setProperty(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, GroupMaxSize.toString) @@ -111,10 +109,6 @@ class GroupCoordinatorTest { replicaManager = mock(classOf[ReplicaManager]) - zkClient = mock(classOf[KafkaZkClient]) - // make two partitions of the group topic to make sure some partitions are not owned by the coordinator - when(zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME)).thenReturn(Some(2)) - timer = new MockTimer val config = KafkaConfig.fromProps(props) @@ -123,8 +117,8 @@ class GroupCoordinatorTest { val rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", timer, 1000, config.brokerId, false, true) groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, new Metrics()) - groupCoordinator.startup(() => zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.groupCoordinatorConfig.offsetsTopicPartitions), - enableMetadataExpiration = false) + // make two partitions of the group topic to make 
sure some partitions are not owned by the coordinator + groupCoordinator.startup(() => 2, enableMetadataExpiration = false) // add the partition into the owned partition list groupPartitionId = groupCoordinator.partitionFor(groupId) @@ -175,7 +169,7 @@ class GroupCoordinatorTest { assertEquals(Some(Errors.NONE), heartbeatError) // DescribeGroups - val (describeGroupError, _) = groupCoordinator.handleDescribeGroup(otherGroupId) + val (describeGroupError, _, _) = groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, describeGroupError) // ListGroups @@ -187,20 +181,21 @@ class GroupCoordinatorTest { assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), deleteGroupsErrors.get(otherGroupId)) // Check that non-loading groups are still accessible - assertEquals(Errors.NONE, groupCoordinator.handleDescribeGroup(groupId)._1) + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._1) // After loading, we should be able to access the group val otherGroupMetadataTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, otherGroupPartitionId) when(replicaManager.getLog(otherGroupMetadataTopicPartition)).thenReturn(None) + // Call removeGroupsAndOffsets so that partition removed from loadingPartitions groupCoordinator.groupManager.removeGroupsAndOffsets(otherGroupMetadataTopicPartition, OptionalInt.of(1), group => {}) groupCoordinator.groupManager.loadGroupsAndOffsets(otherGroupMetadataTopicPartition, 1, group => {}, 0L) - assertEquals(Errors.NONE, groupCoordinator.handleDescribeGroup(otherGroupId)._1) + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._1) } @Test def testOffsetsRetentionMsIntegerOverflow(): Unit = { - val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + val props = TestUtils.createBrokerConfig(0) props.setProperty(GroupCoordinatorConfig.OFFSETS_RETENTION_MINUTES_CONFIG, Integer.MAX_VALUE.toString) val config = KafkaConfig.fromProps(props) val offsetConfig = GroupCoordinator.offsetConfig(config) @@ -415,6 +410,8 @@ class GroupCoordinatorTest { } // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) timer.advanceClock(DefaultRebalanceTimeout + 1) // Awaiting results @@ -635,8 +632,8 @@ class GroupCoordinatorTest { } private def verifySessionExpiration(groupId: String): Unit = { - when(replicaManager.getMagic(any[TopicPartition])) - .thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) timer.advanceClock(DefaultSessionTimeout + 1) @@ -1590,6 +1587,7 @@ class GroupCoordinatorTest { assertEquals(newGeneration, followerJoinGroupResult.generationId) val leaderId = leaderJoinGroupResult.memberId + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) val leaderSyncGroupResult = syncGroupLeader(groupId, leaderJoinGroupResult.generationId, leaderId, Map(leaderId -> Array[Byte]())) assertEquals(Errors.NONE, leaderSyncGroupResult.error) assertTrue(getGroup(groupId).is(Stable)) @@ -1748,7 +1746,6 @@ class GroupCoordinatorTest { when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))) 
.thenReturn(HostedPartition.None) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) timer.advanceClock(DefaultSessionTimeout + 100) @@ -2060,8 +2057,6 @@ class GroupCoordinatorTest { assertEquals(1, group.numPending) assertEquals(Stable, group.currentState) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) - // advance clock to timeout the pending member assertEquals(Set(firstMemberId), group.allMembers) assertEquals(1, group.numPending) @@ -2154,7 +2149,6 @@ class GroupCoordinatorTest { // Advancing Clock by > 100 (session timeout for third and fourth member) // and < 500 (for first and second members). This will force the coordinator to attempt join // completion on heartbeat expiration (since we are in PendingRebalance stage). - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) timer.advanceClock(120) assertGroupState(groupState = CompletingRebalance) @@ -2229,8 +2223,8 @@ class GroupCoordinatorTest { } // Advance part the rebalance timeout to trigger the delayed operation. - when(replicaManager.getMagic(any[TopicPartition])) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) timer.advanceClock(DefaultRebalanceTimeout / 2 + 1) @@ -2609,14 +2603,14 @@ class GroupCoordinatorTest { assertEquals(Errors.NONE, fetchError) assertEquals(Some(0), partitionData.get(tip.topicPartition).map(_.offset)) - val (describeError, summary) = groupCoordinator.handleDescribeGroup(groupId) + var (describeError, describeErrorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.NONE, describeError) + assertTrue(describeErrorMessage.isEmpty) assertEquals(Empty.toString, summary.state) val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) @@ -3405,15 +3399,21 @@ class GroupCoordinatorTest { @Test def testDescribeGroupWrongCoordinator(): Unit = { - val (error, _) = groupCoordinator.handleDescribeGroup(otherGroupId) + val (error, _, _) = groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.NOT_COORDINATOR, error) } @Test def testDescribeGroupInactiveGroup(): Unit = { - val (error, summary) = groupCoordinator.handleDescribeGroup(groupId) + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, 5) assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) assertEquals(GroupCoordinator.DeadGroup, summary) + + val (errorV6, errorMessageV6, summaryV6) = groupCoordinator.handleDescribeGroup(groupId, 6) + assertEquals(Errors.GROUP_ID_NOT_FOUND, errorV6) + assertEquals(s"Group $groupId not found.", errorMessageV6.get) + assertEquals(GroupCoordinator.DeadGroup, summaryV6) } @Test @@ -3427,8 +3427,9 @@ class GroupCoordinatorTest { val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) assertEquals(Errors.NONE, syncGroupResult.error) - val (error, summary) = 
groupCoordinator.handleDescribeGroup(groupId) + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) assertEquals(protocolType, summary.protocolType) assertEquals("range", summary.protocol) assertEquals(List(assignedMemberId), summary.members.map(_.memberId)) @@ -3445,8 +3446,9 @@ class GroupCoordinatorTest { val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) assertEquals(Errors.NONE, syncGroupResult.error) - val (error, summary) = groupCoordinator.handleDescribeGroup(groupId) + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) assertEquals(protocolType, summary.protocolType) assertEquals("range", summary.protocol) assertEquals(List(assignedMemberId), summary.members.map(_.memberId)) @@ -3460,8 +3462,9 @@ class GroupCoordinatorTest { val joinGroupError = joinGroupResult.error assertEquals(Errors.NONE, joinGroupError) - val (error, summary) = groupCoordinator.handleDescribeGroup(groupId) + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) assertEquals(protocolType, summary.protocolType) assertEquals(GroupCoordinator.NoProtocol, summary.protocol) assertEquals(CompletingRebalance.toString, summary.state) @@ -3503,7 +3506,6 @@ class GroupCoordinatorTest { val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) @@ -3528,9 +3530,9 @@ class GroupCoordinatorTest { val commitOffsetResult = commitOffsets(groupId, assignedMemberId, joinGroupResult.generationId, Map(tip -> offset)) assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) - val describeGroupResult = groupCoordinator.handleDescribeGroup(groupId) - assertEquals(Stable.toString, describeGroupResult._2.state) - assertEquals(assignedMemberId, describeGroupResult._2.members.head.memberId) + val describeGroupResult = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Stable.toString, describeGroupResult._3.state) + assertEquals(assignedMemberId, describeGroupResult._3.members.head.memberId) val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId) verifyLeaveGroupResult(leaveGroupResults) @@ -3538,14 +3540,13 @@ class GroupCoordinatorTest { val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) val result = groupCoordinator.handleDeleteGroups(Set(groupId)) assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NONE)) - 
assertEquals(Dead.toString, groupCoordinator.handleDescribeGroup(groupId)._2.state) + assertEquals(Dead.toString, groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._3.state) } @Test @@ -3598,7 +3599,6 @@ class GroupCoordinatorTest { val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) @@ -3679,7 +3679,6 @@ class GroupCoordinatorTest { val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) @@ -3722,7 +3721,6 @@ class GroupCoordinatorTest { val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) val partition: Partition = mock(classOf[Partition]) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) @@ -3917,8 +3915,6 @@ class GroupCoordinatorTest { supportSkippingAssignment: Boolean = true): Future[JoinGroupResult] = { val (responseFuture, responseCallback) = setupJoinGroupCallback - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) - groupCoordinator.handleJoinGroup(groupId, memberId, groupInstanceId, requireKnownMemberId, supportSkippingAssignment, "clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback) responseFuture @@ -3956,7 +3952,6 @@ class GroupCoordinatorTest { ) ) }) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) groupCoordinator.handleJoinGroup(groupId, memberId, Some(groupInstanceId), requireKnownMemberId, supportSkippingAssignment, "clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback) @@ -3993,7 +3988,7 @@ class GroupCoordinatorTest { ) } ) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) groupCoordinator.handleSyncGroup(groupId, generation, leaderId, protocolType, protocolName, groupInstanceId, assignment, responseCallback) @@ -4139,7 +4134,7 @@ class GroupCoordinatorTest { ) ) }) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) groupCoordinator.handleCommitOffsets(groupId, memberId, groupInstanceId, generationId, offsets, responseCallback) Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) @@ -4197,7 +4192,7 @@ class GroupCoordinatorTest { ) ) }) - 
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) groupCoordinator.handleTxnCommitOffsets(groupId, transactionalId, producerId, producerEpoch, memberId, groupInstanceId, generationId, offsets, responseCallback, RequestLocal.noCaching, ApiKeys.TXN_OFFSET_COMMIT.latestVersion()) @@ -4221,7 +4216,7 @@ class GroupCoordinatorTest { when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))) .thenReturn(HostedPartition.None) - when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) groupCoordinator.handleLeaveGroup(groupId, memberIdentities, responseCallback) Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala index f16dba44cbd37..c08064c18d15b 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala @@ -43,9 +43,8 @@ import org.apache.kafka.common.requests.OffsetFetchResponse import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.{GroupCoordinatorConfig, OffsetAndMetadata, OffsetConfig} -import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitValue} -import org.apache.kafka.server.common.{MetadataVersion, RequestLocal} -import org.apache.kafka.server.common.MetadataVersion._ +import org.apache.kafka.coordinator.group.generated.{CoordinatorRecordType, GroupMetadataValue, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData} +import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.{KafkaScheduler, MockTime} @@ -82,7 +81,7 @@ class GroupMetadataManagerTest { val noExpiration = OptionalLong.empty() private val offsetConfig = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) new OffsetConfig(config.groupCoordinatorConfig.offsetMetadataMaxSize, config.groupCoordinatorConfig.offsetsLoadBufferSize, config.groupCoordinatorConfig.offsetsRetentionMs, @@ -100,8 +99,7 @@ class GroupMetadataManagerTest { metrics = new kMetrics() time = new MockTime replicaManager = mock(classOf[ReplicaManager]) - groupMetadataManager = new GroupMetadataManager(0, MetadataVersion.latestTesting, offsetConfig, replicaManager, - time, metrics) + groupMetadataManager = new GroupMetadataManager(0, offsetConfig, replicaManager, time, metrics) groupMetadataManager.startup(() => numOffsetsPartitions, enableMetadataExpiration = false) partition = mock(classOf[Partition]) } @@ -115,7 +113,7 @@ class GroupMetadataManagerTest { def testLogInfoFromCleanupGroupMetadata(): Unit = { var expiredOffsets: Int = 0 var infoCount = 0 - val gmm = new GroupMetadataManager(0, MetadataVersion.latestTesting, offsetConfig, replicaManager, time, metrics) { + val gmm = new GroupMetadataManager(0, 
offsetConfig, replicaManager, time, metrics) { override def cleanupGroupMetadata(groups: Iterable[GroupMetadata], requestLocal: RequestLocal, selector: GroupMetadata => Map[TopicPartition, OffsetAndMetadata]): Int = expiredOffsets @@ -1059,22 +1057,20 @@ class GroupMetadataManagerTest { } @Test - def testCurrentStateTimestampForAllGroupMetadataVersions(): Unit = { + def testCurrentStateTimestampForAllVersions(): Unit = { val generation = 1 val protocol = "range" val memberId = "memberId" - for (metadataVersion <- MetadataVersion.VERSIONS) { - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) - + for (version <- 0 to 3) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, + groupMetadataValueVersion = version.toShort) val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) - // GROUP_METADATA_VALUE_SCHEMA_V2 or higher should correctly set the currentStateTimestamp - if (metadataVersion.isAtLeast(IBP_2_1_IV0)) - assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp, - s"the metadataVersion $metadataVersion doesn't set the currentStateTimestamp correctly.") + + if (version >= 2) + assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp) else - assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty, - s"the metadataVersion $metadataVersion should not set the currentStateTimestamp.") + assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty) } } @@ -1083,10 +1079,10 @@ class GroupMetadataManagerTest { val generation = 1 val protocol = "range" val memberId = "memberId" - val oldMetadataVersions = Array(IBP_0_9_0, IBP_0_10_1_IV0, IBP_2_1_IV0) - for (metadataVersion <- oldMetadataVersions) { - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) + for (version <- 0 to 2) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, + groupMetadataValueVersion = version.toShort) val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) assertEquals(groupId, deserializedGroupMetadata.groupId) @@ -1189,7 +1185,7 @@ class GroupMetadataManagerTest { any(), any(), any()) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test @@ -1227,12 +1223,12 @@ class GroupMetadataManagerTest { any(), any(), any()) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test def testStoreNonEmptyGroupWhenCoordinatorHasMoved(): Unit = { - when(replicaManager.getMagic(any())).thenReturn(None) + when(replicaManager.onlinePartition(any())).thenReturn(None) val memberId = "memberId" val clientId = "clientId" val clientHost = "localhost" @@ -1253,7 +1249,7 @@ class GroupMetadataManagerTest { groupMetadataManager.storeGroup(group, Map(memberId -> Array[Byte]()), callback) assertEquals(Some(Errors.NOT_COORDINATOR), maybeError) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test @@ -1326,7 +1322,7 @@ class GroupMetadataManagerTest { val offsets = immutable.Map(topicIdPartition -> offsetAndMetadata) val capturedResponseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = 
ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { commitErrors = Some(errors) @@ -1348,7 +1344,7 @@ class GroupMetadataManagerTest { any(), any(), ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) @@ -1377,7 +1373,7 @@ class GroupMetadataManagerTest { val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { @@ -1410,7 +1406,7 @@ class GroupMetadataManagerTest { any(), any(), ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test @@ -1429,7 +1425,7 @@ class GroupMetadataManagerTest { val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { @@ -1462,12 +1458,12 @@ class GroupMetadataManagerTest { any(), any(), ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test def testCommitOffsetWhenCoordinatorHasMoved(): Unit = { - when(replicaManager.getMagic(any())).thenReturn(None) + when(replicaManager.onlinePartition(any())).thenReturn(None) val memberId = "" val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") val offset = 37 @@ -1491,7 +1487,7 @@ class GroupMetadataManagerTest { val maybeError = commitErrors.get.get(topicIdPartition) assertEquals(Some(Errors.NOT_COORDINATOR), maybeError) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) } @Test @@ -1520,7 +1516,7 @@ class GroupMetadataManagerTest { val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + 
when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { @@ -1549,7 +1545,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topicIdPartition.topicPartition).map(_.offset) ) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) // Will not update sensor if failed assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) } @@ -1573,7 +1569,7 @@ class GroupMetadataManagerTest { topicIdPartitionFailed -> new OffsetAndMetadata(offset, noLeader, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds(), noExpiration) ) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { @@ -1617,7 +1613,7 @@ class GroupMetadataManagerTest { any(), any(), any()) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count")) } @@ -1740,7 +1736,7 @@ class GroupMetadataManagerTest { val capturedResponseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { @@ -1773,7 +1769,7 @@ class GroupMetadataManagerTest { any(), any(), ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) - verify(replicaManager).getMagic(any()) + verify(replicaManager).onlinePartition(any()) capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) @@ -1854,7 +1850,7 @@ class GroupMetadataManagerTest { any(), any(), any()) - verify(replicaManager, times(2)).getMagic(any()) + verify(replicaManager, times(2)).onlinePartition(any()) } @Test @@ -1871,7 +1867,7 @@ class GroupMetadataManagerTest { // expect the group metadata tombstone val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) mockGetPartition() when(partition.appendRecordsToLeader(recordsCapture.capture(), origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), @@ -1914,7 +1910,7 @@ class GroupMetadataManagerTest { // expect the group metadata tombstone val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) mockGetPartition() when(partition.appendRecordsToLeader(recordsCapture.capture(), origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), @@ -1984,7 +1980,7 @@ class 
GroupMetadataManagerTest { // expect the offset tombstone val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) - + when(replicaManager.onlinePartition(any())).thenReturn(Some(partition)) when(partition.appendRecordsToLeader(recordsCapture.capture(), origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) @@ -2017,7 +2013,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset) ) - verify(replicaManager).onlinePartition(groupTopicPartition) + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) } @Test @@ -2088,7 +2084,7 @@ class GroupMetadataManagerTest { assertEquals(Some(offset), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) - verify(replicaManager).onlinePartition(groupTopicPartition) + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) group.transitionTo(PreparingRebalance) group.transitionTo(Empty) @@ -2114,7 +2110,7 @@ class GroupMetadataManagerTest { assertEquals(Some(offset), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) - verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) time.sleep(2) @@ -2139,7 +2135,7 @@ class GroupMetadataManagerTest { assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) - verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(4)).onlinePartition(groupTopicPartition) // advance time to just before the offset of last partition is to be expired, no offset should expire time.sleep(group.currentStateTimestamp.get + defaultOffsetRetentionMs - time.milliseconds() - 1) @@ -2170,7 +2166,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset) ) - verify(replicaManager, times(4)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(5)).onlinePartition(groupTopicPartition) // advance time enough for that last offset to expire time.sleep(2) @@ -2205,7 +2201,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset) ) - verify(replicaManager, times(5)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(6)).onlinePartition(groupTopicPartition) assert(group.is(Dead)) } @@ -2261,7 +2257,7 @@ class GroupMetadataManagerTest { ) assertEquals(Some(offset), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) - verify(replicaManager).onlinePartition(groupTopicPartition) + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) // advance time to enough for offsets to expire time.sleep(2) @@ -2286,7 +2282,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset) ) - verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) assert(group.is(Dead)) } @@ -2401,7 +2397,7 @@ class GroupMetadataManagerTest { cachedOffsets.get(topic2IdPartition1.topicPartition).map(_.offset) ) 
- verify(replicaManager).onlinePartition(groupTopicPartition) + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) group.transitionTo(PreparingRebalance) @@ -2419,6 +2415,7 @@ class GroupMetadataManagerTest { group.initNextGeneration() group.transitionTo(Stable) + when(replicaManager.onlinePartition(any)).thenReturn(Some(partition)) // expect the offset tombstone when(partition.appendRecordsToLeader(any[MemoryRecords], origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), @@ -2429,7 +2426,7 @@ class GroupMetadataManagerTest { verify(partition).appendRecordsToLeader(any[MemoryRecords], origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), any(), any()) - verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) assert(group.is(Stable)) @@ -2476,10 +2473,11 @@ class GroupMetadataManagerTest { new TopicPartition("bar", 0) -> 8992L ) - val metadataVersion = IBP_1_1_IV0 - val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, metadataVersion = metadataVersion, retentionTimeOpt = Some(100)) + val offsetCommitValueVersion = 1.toShort + val groupMetadataValueVersion = 1.toShort + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, offsetCommitValueVersion = offsetCommitValueVersion, retentionTimeOpt = Some(100)) val memberId = "98098230493" - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, groupMetadataValueVersion = groupMetadataValueVersion) val records = MemoryRecords.withRecords(startOffset, Compression.NONE, (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) @@ -2550,34 +2548,17 @@ class GroupMetadataManagerTest { time.milliseconds(), noExpiration) - def verifySerde(metadataVersion: MetadataVersion, expectedOffsetCommitValueVersion: Int): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) - val buffer = ByteBuffer.wrap(bytes) - - assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt) - - val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) - assertEquals(offsetAndMetadata.committedOffset, deserializedOffsetAndMetadata.committedOffset) - assertEquals(offsetAndMetadata.metadata, deserializedOffsetAndMetadata.metadata) - assertEquals(offsetAndMetadata.commitTimestampMs, deserializedOffsetAndMetadata.commitTimestampMs) - - // Serialization drops the leader epoch silently if an older inter-broker protocol is in use - val expectedLeaderEpoch = if (expectedOffsetCommitValueVersion >= 3) - offsetAndMetadata.leaderEpoch - else - noLeader - - assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch) - } - - for (version <- MetadataVersion.VERSIONS) { - val expectedSchemaVersion = version match { - case v if v.isLessThan(IBP_2_1_IV0) => 1 - case v if v.isLessThan(IBP_2_1_IV1) => 2 - case _ => 3 - } - verifySerde(version, expectedSchemaVersion) - } + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) + val buffer = ByteBuffer.wrap(bytes) + val expectedOffsetCommitValueVersion = 3 + assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt) + val deserializedOffsetAndMetadata = 
GroupMetadataManager.readOffsetMessageValue(buffer) + assertEquals(offsetAndMetadata.committedOffset, deserializedOffsetAndMetadata.committedOffset) + assertEquals(offsetAndMetadata.metadata, deserializedOffsetAndMetadata.metadata) + assertEquals(offsetAndMetadata.commitTimestampMs, deserializedOffsetAndMetadata.commitTimestampMs) + val expectedLeaderEpoch = offsetAndMetadata.leaderEpoch + assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch) + assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) } @Test @@ -2592,45 +2573,12 @@ class GroupMetadataManagerTest { time.milliseconds(), OptionalLong.of(time.milliseconds() + 1000)) - def verifySerde(metadataVersion: MetadataVersion): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) - val buffer = ByteBuffer.wrap(bytes) - assertEquals(1, buffer.getShort(0).toInt) + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) + val buffer = ByteBuffer.wrap(bytes) + assertEquals(1, buffer.getShort(0).toInt) - val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) - assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) - } - - for (version <- MetadataVersion.VERSIONS) - verifySerde(version) - } - - @Test - def testSerdeOffsetCommitValueWithNoneExpireTimestamp(): Unit = { - val offsetAndMetadata = new OffsetAndMetadata( - 537L, - noLeader, - "metadata", - time.milliseconds(), - noExpiration) - - def verifySerde(metadataVersion: MetadataVersion): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) - val buffer = ByteBuffer.wrap(bytes) - val version = buffer.getShort(0).toInt - if (metadataVersion.isLessThan(IBP_2_1_IV0)) - assertEquals(1, version) - else if (metadataVersion.isLessThan(IBP_2_1_IV1)) - assertEquals(2, version) - else - assertEquals(3, version) - - val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) - assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) - } - - for (version <- MetadataVersion.VERSIONS) - verifySerde(version) + val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) + assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) } @Test @@ -2932,7 +2880,7 @@ class GroupMetadataManagerTest { new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L) ) )}) - when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE)) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) capturedRecords } @@ -2941,20 +2889,20 @@ class GroupMetadataManagerTest { protocol: String, memberId: String, assignmentBytes: Array[Byte] = Array.emptyByteArray, - metadataVersion: MetadataVersion = MetadataVersion.latestTesting): SimpleRecord = { + groupMetadataValueVersion: Short = 3): SimpleRecord = { val memberProtocols = List((protocol, Array.emptyByteArray)) val member = new MemberMetadata(memberId, Some(groupInstanceId), "clientId", "clientHost", 30000, 10000, protocolType, memberProtocols) val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, memberId, - if (metadataVersion.isAtLeast(IBP_2_1_IV0)) Some(time.milliseconds()) else None, Seq(member), time) + if (groupMetadataValueVersion >= 2.toShort) Some(time.milliseconds()) else None, Seq(member), time) val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) - val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, 
Map(memberId -> assignmentBytes), metadataVersion) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), groupMetadataValueVersion) new SimpleRecord(groupMetadataKey, groupMetadataValue) } private def buildEmptyGroupRecord(generation: Int, protocolType: String): SimpleRecord = { val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time) val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) - val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty, MetadataVersion.latestTesting) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty) new SimpleRecord(groupMetadataKey, groupMetadataValue) } @@ -2998,7 +2946,7 @@ class GroupMetadataManagerTest { private def createCommittedOffsetRecords(committedOffsets: Map[TopicPartition, Long], groupId: String = groupId, - metadataVersion: MetadataVersion = MetadataVersion.latestTesting, + offsetCommitValueVersion: Short = 3, retentionTimeOpt: Option[Long] = None): Seq[SimpleRecord] = { committedOffsets.map { case (topicPartition, offset) => val commitTimestamp = time.milliseconds() @@ -3010,7 +2958,7 @@ class GroupMetadataManagerTest { new OffsetAndMetadata(offset, noLeader, "", commitTimestamp, noExpiration) } val offsetCommitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) - val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) + val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, offsetCommitValueVersion) new SimpleRecord(offsetCommitKey, offsetCommitValue) }.toSeq } @@ -3115,8 +3063,7 @@ class GroupMetadataManagerTest { // Should ignore unknown record val unknownKey = new org.apache.kafka.coordinator.group.generated.GroupMetadataKey() - val lowestUnsupportedVersion = (org.apache.kafka.coordinator.group.generated.GroupMetadataKey - .HIGHEST_SUPPORTED_VERSION + 1).toShort + val lowestUnsupportedVersion = (CoordinatorRecordType.GROUP_METADATA.id + 1).toShort val unknownMessage1 = MessageUtil.toVersionPrefixedBytes(Short.MaxValue, unknownKey) val unknownMessage2 = MessageUtil.toVersionPrefixedBytes(lowestUnsupportedVersion, unknownKey) @@ -3144,4 +3091,33 @@ class GroupMetadataManagerTest { assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isEmpty) } } + + @Test + def testOffsetCommitKey(): Unit = { + val bytes = GroupMetadataManager.offsetCommitKey( + "foo", + new TopicPartition("__consumer_offsets", 0) + ) + val buffer = ByteBuffer.wrap(bytes) + assertEquals(1.toShort, buffer.getShort) + assertEquals( + new OffsetCommitKey(new ByteBufferAccessor(buffer), 0.toShort), + new OffsetCommitKey() + .setGroup("foo") + .setTopic("__consumer_offsets") + .setPartition(0) + ) + } + + @Test + def testGroupMetadataKey(): Unit = { + val bytes = GroupMetadataManager.groupMetadataKey("foo") + val buffer = ByteBuffer.wrap(bytes) + assertEquals(2.toShort, buffer.getShort()) + assertEquals( + new GroupMetadataKeyData(new ByteBufferAccessor(buffer), 0.toShort), + new GroupMetadataKeyData() + .setGroup("foo") + ) + } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/ProducerIdManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/ProducerIdManagerTest.scala deleted file mode 100644 index bb655dcd18b88..0000000000000 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/ProducerIdManagerTest.scala +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed 
to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package kafka.coordinator.transaction - -import kafka.zk.{KafkaZkClient, ProducerIdBlockZNode} -import org.apache.kafka.common.KafkaException -import org.apache.kafka.server.common.{NodeToControllerChannelManager, ProducerIdsBlock} -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test -import org.mockito.ArgumentCaptor -import org.mockito.ArgumentMatchers.{any, anyString} -import org.mockito.Mockito.{mock, when} - -class ProducerIdManagerTest { - - var brokerToController: NodeToControllerChannelManager = mock(classOf[NodeToControllerChannelManager]) - val zkClient: KafkaZkClient = mock(classOf[KafkaZkClient]) - - @Test - def testGetProducerIdZk(): Unit = { - var zkVersion: Option[Int] = None - var data: Array[Byte] = null - when(zkClient.getDataAndVersion(anyString)).thenAnswer(_ => - zkVersion.map(Some(data) -> _).getOrElse(None, 0)) - - val capturedVersion: ArgumentCaptor[Int] = ArgumentCaptor.forClass(classOf[Int]) - val capturedData: ArgumentCaptor[Array[Byte]] = ArgumentCaptor.forClass(classOf[Array[Byte]]) - when(zkClient.conditionalUpdatePath(anyString(), - capturedData.capture(), - capturedVersion.capture(), - any[Option[(KafkaZkClient, String, Array[Byte]) => (Boolean, Int)]]) - ).thenAnswer(_ => { - val newZkVersion = capturedVersion.getValue + 1 - zkVersion = Some(newZkVersion) - data = capturedData.getValue - (true, newZkVersion) - }) - - val manager1 = new ZkProducerIdManager(0, zkClient) - val manager2 = new ZkProducerIdManager(1, zkClient) - - val pid1 = manager1.generateProducerId() - val pid2 = manager2.generateProducerId() - - assertEquals(0, pid1) - assertEquals(ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE, pid2) - - for (i <- 1L until ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) - assertEquals(pid1 + i, manager1.generateProducerId()) - - for (i <- 1L until ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) - assertEquals(pid2 + i, manager2.generateProducerId()) - - assertEquals(pid2 + ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE, manager1.generateProducerId()) - assertEquals(pid2 + ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE * 2, manager2.generateProducerId()) - } - - @Test - def testExceedProducerIdLimitZk(): Unit = { - when(zkClient.getDataAndVersion(anyString)).thenAnswer(_ => { - val json = ProducerIdBlockZNode.generateProducerIdBlockJson( - new ProducerIdsBlock(0, Long.MaxValue - ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE, ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE)) - (Some(json), 0) - }) - assertThrows(classOf[KafkaException], () => new ZkProducerIdManager(0, zkClient)) - } -} diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala index 
f446eb2bfb2ef..24000894fe9bb 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala @@ -74,9 +74,6 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def setUp(): Unit = { super.setUp() - when(zkClient.getTopicPartitionCount(TRANSACTION_STATE_TOPIC_NAME)) - .thenReturn(Some(numPartitions)) - val brokerNode = new Node(0, "host", 10) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) when(metadataCache.getPartitionLeaderEndpoint( @@ -98,8 +95,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren txnStateManager = new TransactionStateManager(0, scheduler, replicaManager, metadataCache, txnConfig, time, new Metrics()) - txnStateManager.startup(() => zkClient.getTopicPartitionCount(TRANSACTION_STATE_TOPIC_NAME).get, - enableTransactionalIdExpiration = true) + txnStateManager.startup(() => numPartitions, enableTransactionalIdExpiration = true) for (i <- 0 until numPartitions) txnStateManager.addLoadedTransactionsToCache(i, coordinatorEpoch, new Pool[String, TransactionMetadata]()) @@ -507,7 +503,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren private def prepareExhaustedEpochTxnMetadata(txn: Transaction): TransactionMetadata = { new TransactionMetadata(transactionalId = txn.transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = (Short.MaxValue - 1).toShort, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -547,6 +543,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren txnMetadata.producerEpoch, partitions, resultCallback, + TransactionVersion.TV_2, RequestLocal.withThreadConfinedCaching) replicaManager.tryCompleteActions() } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala index bf3eb015cf493..c8e9a47df1a1e 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala @@ -30,9 +30,9 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.ArgumentMatchers.{any, anyInt} -import org.mockito.Mockito.{mock, times, verify, when} +import org.mockito.Mockito._ +import org.mockito.{ArgumentCaptor, ArgumentMatchers} import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -209,19 +209,19 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(None)) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback, TV_0) assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error) } @Test def shouldRespondWithInvalidRequestAddPartitionsToTransactionWhenTransactionalIdIsEmpty(): Unit = { - coordinator.handleAddPartitionsToTransaction("", 0L, 1, 
partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction("", 0L, 1, partitions, errorsCallback, TV_0) assertEquals(Errors.INVALID_REQUEST, error) } @Test def shouldRespondWithInvalidRequestAddPartitionsToTransactionWhenTransactionalIdIsNull(): Unit = { - coordinator.handleAddPartitionsToTransaction(null, 0L, 1, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(null, 0L, 1, partitions, errorsCallback, TV_0) assertEquals(Errors.INVALID_REQUEST, error) } @@ -230,7 +230,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Left(Errors.NOT_COORDINATOR)) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback, TV_0) assertEquals(Errors.NOT_COORDINATOR, error) } @@ -239,7 +239,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Left(Errors.COORDINATOR_LOAD_IN_PROGRESS)) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback, TV_0) assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, error) } @@ -313,7 +313,7 @@ class TransactionCoordinatorTest { new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, mutable.Set.empty, 0, 0, TV_2))))) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) } @@ -325,7 +325,7 @@ class TransactionCoordinatorTest { new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, 10, 9, 0, PrepareCommit, mutable.Set.empty, 0, 0, TV_2))))) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.PRODUCER_FENCED, error) } @@ -359,7 +359,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - coordinator.handleAddPartitionsToTransaction(transactionalId, producerId, producerEpoch, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, producerId, producerEpoch, partitions, errorsCallback, clientTransactionVersion) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) verify(transactionManager).appendTransactionToLog( @@ -379,7 +379,7 @@ class TransactionCoordinatorTest { new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Empty, partitions, 0, 0, TV_0))))) - coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback) + coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_0) assertEquals(Errors.NONE, error) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @@ -464,30 +464,104 @@ class TransactionCoordinatorTest { } @ParameterizedTest - @ValueSource(shorts = 
Array(0, 2)) - def shouldReturnOkOnEndTxnWhenStatusIsCompleteCommitAndResultIsCommit(transactionVersion: Short): Unit = { - val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) + @ValueSource(booleans = Array(false, true)) + def testEndTxnWhenStatusIsCompleteCommitAndResultIsCommitInV1(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) - coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) - assertEquals(Errors.NONE, error) + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.PRODUCER_FENCED, error) + } else { + assertEquals(Errors.NONE, error) + verify(transactionManager, never()).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any() + ) + } verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @ParameterizedTest - @ValueSource(shorts = Array(0, 2)) - def shouldReturnOkOnEndTxnWhenStatusIsCompleteAbortAndResultIsAbort(transactionVersion: Short): Unit = { - val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) + @ValueSource(booleans = Array(false, true)) + def testEndTxnWhenStatusIsCompleteCommitAndResultIsCommitInV2(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, + new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, + (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.NONE, error) + } else { + assertEquals(Errors.INVALID_TXN_STATE, error) + } + verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } + + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV1(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) 
when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + val nextProducerEpoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, nextProducerEpoch.toShort , TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.PRODUCER_FENCED, error) + } else { + assertEquals(Errors.NONE, error) + } + verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } + + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def shouldReturnOkOnEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV2(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) + val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + + val nextProducerEpoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, nextProducerEpoch.toShort , TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.NONE, error) + if (isRetry) { + verify(transactionManager, never()).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any() + ) + } else { + val newMetadata = ArgumentCaptor.forClass(classOf[TxnTransitMetadata]); + verify(transactionManager).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.any(), + newMetadata.capture(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any() + ) + assertEquals(producerEpoch + 1, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].producerEpoch, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + assertEquals(time.milliseconds(), newMetadata.getValue.asInstanceOf[TxnTransitMetadata].txnStartTimestamp, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + } verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @@ -505,20 +579,68 @@ class TransactionCoordinatorTest { verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } - @ParameterizedTest - @ValueSource(shorts = Array(0, 2)) - def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteCommitAndResultIsNotCommit(transactionVersion: Short): Unit = { - val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) + @Test + def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteCommitAndResultIsNotCommit(): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort,1, CompleteCommit, 
collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_TXN_STATE, error) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV1(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) + val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.PRODUCER_FENCED, error) + } else { + assertEquals(Errors.INVALID_TXN_STATE, error) + } + verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } + + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV2(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) + val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) + when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) + .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) + + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.INVALID_TXN_STATE, error) + } else { + assertEquals(Errors.NONE, error) + val newMetadata = ArgumentCaptor.forClass(classOf[TxnTransitMetadata]); + verify(transactionManager).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.any(), + newMetadata.capture(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any() + ) + assertEquals(producerEpoch + 1, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].producerEpoch, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + assertEquals(time.milliseconds(), newMetadata.getValue.asInstanceOf[TxnTransitMetadata].txnStartTimestamp, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + } + 
verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } + @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldReturnConcurrentTransactionsOnEndTxnRequestWhenStatusIsPrepareCommit(transactionVersion: Short): Unit = { @@ -546,51 +668,62 @@ class TransactionCoordinatorTest { } @Test - def shouldReturnWhenTransactionVersionDowngraded(): Unit = { - // State was written when transactions V2 + def TestEndTxnRequestWhenEmptyTransactionStateForAbortInV1(): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) - // Return CONCURRENT_TRANSACTIONS as the transaction is still completing - coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_0, endTxnCallback) - assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) - assertEquals(RecordBatch.NO_PRODUCER_ID, newProducerId) - assertEquals(RecordBatch.NO_PRODUCER_EPOCH, newEpoch) + coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + assertEquals(Errors.INVALID_TXN_STATE, error) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) - - // Recognize the retry and return NONE - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) - coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_0, endTxnCallback) - assertEquals(Errors.NONE, error) - assertEquals(producerId, newProducerId) - assertEquals((producerEpoch + 1).toShort, newEpoch) // epoch is bumped since we started as V2 - verify(transactionManager, times(2)).getTransactionState(ArgumentMatchers.eq(transactionalId)) } - @Test - def shouldReturnCorrectlyWhenTransactionVersionUpgraded(): Unit = { - // State was written when transactions V0 + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def TestEndTxnRequestWhenEmptyTransactionStateForAbortInV2(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, 
time.milliseconds(), clientTransactionVersion))))) - // Transactions V0 throws the concurrent transactions error here. - coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) - assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.PRODUCER_FENCED, error) + } else { + assertEquals(Errors.NONE, error) + val newMetadata = ArgumentCaptor.forClass(classOf[TxnTransitMetadata]); + verify(transactionManager).appendTransactionToLog( + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.any(), + newMetadata.capture(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), + ArgumentMatchers.any() + ) + assertEquals(producerEpoch + 1, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].producerEpoch, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + assertEquals(time.milliseconds(), newMetadata.getValue.asInstanceOf[TxnTransitMetadata].txnStartTimestamp, newMetadata.getValue.asInstanceOf[TxnTransitMetadata].toString) + } verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) + } - // When the transaction is completed, return and do not throw an error. + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def TestEndTxnRequestWhenEmptyTransactionStateForCommitInV2(isRetry: Boolean): Unit = { + val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) - coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) - assertEquals(Errors.NONE, error) - assertEquals(producerId, newProducerId) - assertEquals(producerEpoch, newEpoch) // epoch is not bumped since this started as V1 - verify(transactionManager, times(2)).getTransactionState(ArgumentMatchers.eq(transactionalId)) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) + + val epoch = if (isRetry) producerEpoch - 1 else producerEpoch + coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) + if (isRetry) { + assertEquals(Errors.PRODUCER_FENCED, error) + } else { + assertEquals(Errors.INVALID_TXN_STATE, error) + } + verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @Test @@ -608,9 +741,9 @@ class TransactionCoordinatorTest { .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) - // If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return PRODUCER_FENCED. 
+ // If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return INVALID_TXN_STATE. coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) - assertEquals(Errors.PRODUCER_FENCED, error) + assertEquals(Errors.INVALID_TXN_STATE, error) verify(transactionManager, times(2)).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @@ -799,7 +932,7 @@ class TransactionCoordinatorTest { verify(transactionManager).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds())), + ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)), any(), any(), any()) @@ -820,6 +953,7 @@ class TransactionCoordinatorTest { .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, bumpedTxnMetadata)))) + when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.PRODUCER_FENCED), result) @@ -846,7 +980,7 @@ class TransactionCoordinatorTest { val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds()) + val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), @@ -929,7 +1063,7 @@ class TransactionCoordinatorTest { lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs = txnTimeoutMs, txnState = PrepareAbort, - topicPartitions = partitions.toSet, + topicPartitions = partitions.clone, txnStartTimestamp = time.milliseconds(), txnLastUpdateTimestamp = time.milliseconds(), clientTransactionVersion = TV_0)), @@ -955,7 +1089,7 @@ class TransactionCoordinatorTest { lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs = txnTimeoutMs, txnState = PrepareAbort, - topicPartitions = partitions.toSet, + topicPartitions = partitions.clone, txnStartTimestamp = time.milliseconds(), txnLastUpdateTimestamp = time.milliseconds(), clientTransactionVersion = TV_0)), @@ -1096,7 +1230,7 @@ class TransactionCoordinatorTest { capturedErrorsCallback.getValue.apply(Errors.NONE) txnMetadata.pendingState = None txnMetadata.producerId = capturedTxnTransitMetadata.getValue.producerId - txnMetadata.previousProducerId = capturedTxnTransitMetadata.getValue.prevProducerId + txnMetadata.prevProducerId = capturedTxnTransitMetadata.getValue.prevProducerId txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch }) @@ -1137,7 +1271,7 @@ class TransactionCoordinatorTest { capturedErrorsCallback.getValue.apply(Errors.NONE) txnMetadata.pendingState = None txnMetadata.producerId = 
capturedTxnTransitMetadata.getValue.producerId - txnMetadata.previousProducerId = capturedTxnTransitMetadata.getValue.prevProducerId + txnMetadata.prevProducerId = capturedTxnTransitMetadata.getValue.prevProducerId txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch }) @@ -1173,7 +1307,7 @@ class TransactionCoordinatorTest { // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.toSet, now, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.clone, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1230,7 +1364,7 @@ class TransactionCoordinatorTest { def shouldNotAbortExpiredTransactionsThatHaveAPendingStateTransition(): Unit = { val metadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - metadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds()) + metadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1263,7 +1397,7 @@ class TransactionCoordinatorTest { // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. 
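// The expected transition below is built from `partitions.clone` rather than `partitions.toSet`
// (as are the other expected TxnTransitMetadata values in this patch), which suggests
// TxnTransitMetadata now carries a mutable.Set[TopicPartition]. Passing a clone rather than the
// shared `partitions` fixture itself presumably keeps the expectation independent of later
// mutations. A minimal commented sketch of that assumption (hypothetical local values, not part
// of the original test):
//   val live: collection.mutable.Set[TopicPartition] = collection.mutable.Set(new TopicPartition("t", 0))
//   val snapshot = live.clone            // independent mutable copy of the same element type
//   live += new TopicPartition("t", 1)   // leaves `snapshot` unchanged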
val bumpedEpoch = (producerEpoch + 1).toShort val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, bumpedEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.toSet, now, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.clone, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1297,7 +1431,7 @@ class TransactionCoordinatorTest { def shouldNotBumpEpochWithPendingTransaction(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds()) + txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) @@ -1430,7 +1564,7 @@ class TransactionCoordinatorTest { producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) val transition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions.toSet, now, now, clientTransactionVersion) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions.clone, now, now, clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, originalMetadata)))) diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala index fd5f1e37a6598..c976ffc02105d 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala @@ -17,7 +17,6 @@ package kafka.coordinator.transaction -import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} @@ -30,7 +29,7 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} import org.junit.jupiter.api.Test import java.nio.ByteBuffer -import scala.collection.Seq +import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ class TransactionLogTest { @@ -110,50 +109,16 @@ class TransactionLogTest { assertEquals(pidMappings.size, count) } - @Test - def testTransactionMetadataParsing(): Unit = { - val transactionalId = "id" - val producerId = 1334L - val topicPartition = new TopicPartition("topic", 0) - - val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) - txnMetadata.addPartitions(Set(topicPartition)) - - val keyBytes = TransactionLog.keyToBytes(transactionalId) - val valueBytes = TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2) - val transactionMetadataRecord = TestUtils.records(Seq( 
- new SimpleRecord(keyBytes, valueBytes) - )).records.asScala.head - - val (keyStringOpt, valueStringOpt) = TransactionLog.formatRecordKeyAndValue(transactionMetadataRecord) - assertEquals(Some(s"transaction_metadata::transactionalId=$transactionalId"), keyStringOpt) - assertEquals(Some(s"producerId:$producerId,producerEpoch:$producerEpoch,state=Ongoing," + - s"partitions=[$topicPartition],txnLastUpdateTimestamp=0,txnTimeoutMs=$transactionTimeoutMs"), valueStringOpt) - } - - @Test - def testTransactionMetadataTombstoneParsing(): Unit = { - val transactionalId = "id" - val transactionMetadataRecord = TestUtils.records(Seq( - new SimpleRecord(TransactionLog.keyToBytes(transactionalId), null) - )).records.asScala.head - - val (keyStringOpt, valueStringOpt) = TransactionLog.formatRecordKeyAndValue(transactionMetadataRecord) - assertEquals(Some(s"transaction_metadata::transactionalId=$transactionalId"), keyStringOpt) - assertEquals(Some(""), valueStringOpt) - } - @Test def testSerializeTransactionLogValueToHighestNonFlexibleVersion(): Unit = { - val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, Set.empty, 500, 500, TV_0) + val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, mutable.Set.empty, 500, 500, TV_0) val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_0)) assertEquals(0, txnLogValueBuffer.getShort) } @Test def testSerializeTransactionLogValueToFlexibleVersion(): Unit = { - val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, Set.empty, 500, 500, TV_2) + val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, mutable.Set.empty, 500, 500, TV_2) val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_2)) assertEquals(TransactionLogValue.HIGHEST_SUPPORTED_VERSION, txnLogValueBuffer.getShort) } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala index 852a076de5cf4..65e00d3de1452 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala @@ -74,7 +74,7 @@ class TransactionMarkerChannelManagerTest { private val time = new MockTime private val channelManager = new TransactionMarkerChannelManager( - KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")), + KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)), metadataCache, networkClient, txnStateManager, @@ -98,7 +98,7 @@ class TransactionMarkerChannelManagerTest { val mockMetricsGroupCtor = mockConstruction(classOf[KafkaMetricsGroup]) try { val transactionMarkerChannelManager = new TransactionMarkerChannelManager( - KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")), + KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)), metadataCache, networkClient, txnStateManager, @@ -298,10 +298,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(1, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition1)) assertEquals(0, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition2)) - val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( asList(new 
WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() - val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() val requests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => @@ -368,10 +368,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition1)) assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition2)) - val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() - val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() val firstDrainedRequests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala index c8d98870080fd..12536cddff731 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala @@ -42,7 +42,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -66,7 +66,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -90,7 +90,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -105,13 +105,88 @@ class TransactionMetadataTest { None, time.milliseconds())) } + @Test + def testTransitFromEmptyToPrepareAbortInV2(): Unit = { + val producerEpoch = 735.toShort + + val txnMetadata = new TransactionMetadata( + 
transactionalId = transactionalId, + producerId = producerId, + prevProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = -1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + txnMetadata.completeTransitionTo(transitMetadata) + assertEquals(producerId, txnMetadata.producerId) + assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) + assertEquals(time.milliseconds() + 1, txnMetadata.txnStartTimestamp) + } + + @Test + def testTransitFromCompleteAbortToPrepareAbortInV2(): Unit = { + val producerEpoch = 735.toShort + + val txnMetadata = new TransactionMetadata( + transactionalId = transactionalId, + producerId = producerId, + prevProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteAbort, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds() - 1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + txnMetadata.completeTransitionTo(transitMetadata) + assertEquals(producerId, txnMetadata.producerId) + assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) + assertEquals(time.milliseconds() + 1, txnMetadata.txnStartTimestamp) + } + + @Test + def testTransitFromCompleteCommitToPrepareAbortInV2(): Unit = { + val producerEpoch = 735.toShort + + val txnMetadata = new TransactionMetadata( + transactionalId = transactionalId, + producerId = producerId, + prevProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteCommit, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds() - 1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + txnMetadata.completeTransitionTo(transitMetadata) + assertEquals(producerId, txnMetadata.producerId) + assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) + assertEquals(time.milliseconds() + 1, txnMetadata.txnStartTimestamp) + } + @Test def testTolerateUpdateTimeShiftDuringEpochBump(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -129,7 +204,7 @@ class TransactionMetadataTest { assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) - assertEquals(1L, txnMetadata.txnStartTimestamp) + assertEquals(-1L, txnMetadata.txnStartTimestamp) 
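// The expected txnStartTimestamp here changes from 1L to -1L: bumping the producer epoch of a
// transaction that is not yet ongoing apparently no longer initializes the start timestamp (the
// producer-id-rotation test below gets the same adjustment). Under TV_2, the new
// Empty/CompleteAbort/CompleteCommit -> PrepareAbort tests above instead expect the start
// timestamp to be set to the time passed to prepareAbortOrCommit together with the epoch bump.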
assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) } @@ -139,7 +214,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -156,7 +231,7 @@ class TransactionMetadataTest { assertEquals(producerId + 1, txnMetadata.producerId) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) assertEquals(0, txnMetadata.producerEpoch) - assertEquals(1L, txnMetadata.txnStartTimestamp) + assertEquals(-1L, txnMetadata.txnStartTimestamp) assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) } @@ -166,7 +241,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -178,7 +253,7 @@ class TransactionMetadataTest { clientTransactionVersion = TV_0) // let new time be smaller; when transiting from Empty the start time would be updated to the update-time - var transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0)), time.milliseconds() - 1) + var transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0)), time.milliseconds() - 1, TV_0) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) @@ -188,7 +263,7 @@ class TransactionMetadataTest { assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) // add another partition, check that in Ongoing state the start timestamp would not change to update time - transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds() - 2) + transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds() - 2, TV_0) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic2", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) @@ -204,7 +279,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -216,7 +291,7 @@ class TransactionMetadataTest { clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(PrepareCommit, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) @@ -232,7 +307,7 @@ class 
TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -244,7 +319,7 @@ class TransactionMetadataTest { clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(PrepareAbort, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) @@ -263,7 +338,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = lastProducerEpoch, @@ -296,7 +371,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = lastProducerEpoch, @@ -327,7 +402,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -346,11 +421,53 @@ class TransactionMetadataTest { // We should reset the pending state to make way for the abort transition. 
txnMetadata.pendingState = None - val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds()) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, transitMetadata.producerId) } + @Test + def testInvalidTransitionFromCompleteCommitToFence(): Unit = { + val producerEpoch = (Short.MaxValue - 1).toShort + + val txnMetadata = new TransactionMetadata( + transactionalId = transactionalId, + producerId = producerId, + prevProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteCommit, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + assertTrue(txnMetadata.isProducerEpochExhausted) + + assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) + } + + @Test + def testInvalidTransitionFromCompleteAbortToFence(): Unit = { + val producerEpoch = (Short.MaxValue - 1).toShort + + val txnMetadata = new TransactionMetadata( + transactionalId = transactionalId, + producerId = producerId, + prevProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteAbort, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + assertTrue(txnMetadata.isProducerEpochExhausted) + + assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) + } + @Test def testFenceProducerNotAllowedIfItWouldOverflow(): Unit = { val producerEpoch = Short.MaxValue @@ -358,7 +475,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -378,7 +495,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -392,7 +509,7 @@ class TransactionMetadataTest { val transitMetadata = txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), recordLastEpoch = true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(newProducerId, txnMetadata.producerId) - assertEquals(producerId, txnMetadata.previousProducerId) + assertEquals(producerId, txnMetadata.prevProducerId) assertEquals(0, txnMetadata.producerEpoch) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) } @@ -405,7 +522,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = 
producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -416,7 +533,7 @@ class TransactionMetadataTest { txnLastUpdateTimestamp = time.milliseconds(), clientTransactionVersion = TV_2) - var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1) + var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) @@ -437,7 +554,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -450,7 +567,7 @@ class TransactionMetadataTest { assertTrue(txnMetadata.isProducerEpochExhausted) val newProducerId = 9893L - var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, newProducerId, time.milliseconds() - 1) + var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, newProducerId, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(Short.MaxValue, txnMetadata.producerEpoch) @@ -491,7 +608,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -515,7 +632,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, @@ -540,7 +657,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = RecordBatch.NO_PRODUCER_ID, + prevProducerId = RecordBatch.NO_PRODUCER_ID, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = lastProducerEpoch, @@ -565,7 +682,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = producerId, + prevProducerId = producerId, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = lastProducerEpoch, @@ -629,7 +746,7 @@ class TransactionMetadataTest { val txnMetadata = new TransactionMetadata( transactionalId = transactionalId, producerId = producerId, - previousProducerId = producerId, + prevProducerId = producerId, nextProducerId = RecordBatch.NO_PRODUCER_ID, producerEpoch = producerEpoch, lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala 
b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala index 36dcaaa7e606d..41e6b1a954a5f 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala @@ -24,7 +24,6 @@ import javax.management.ObjectName import kafka.log.UnifiedLog import kafka.server.{MetadataCache, ReplicaManager} import kafka.utils.{Pool, TestUtils} -import kafka.zk.KafkaZkClient import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME @@ -64,13 +63,9 @@ class TransactionStateManagerTest { val time = new MockTime() val scheduler = new MockScheduler(time) - val zkClient: KafkaZkClient = mock(classOf[KafkaZkClient]) val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) - when(zkClient.getTopicPartitionCount(TRANSACTION_STATE_TOPIC_NAME)) - .thenReturn(Some(numPartitions)) - when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), @@ -218,6 +213,65 @@ class TransactionStateManagerTest { assertEquals(Left(Errors.NOT_COORDINATOR), transactionManager.getTransactionState(txnMetadata1.transactionalId)) } + @Test + def testMakeFollowerLoadingPartition(): Unit = { + // Verify the handling of a call to make a partition a follower while it is in the + // process of being loaded. The partition should not be loaded. + + val startOffset = 0L + val endOffset = 1L + + val fileRecordsMock = mock[FileRecords](classOf[FileRecords]) + val logMock = mock[UnifiedLog](classOf[UnifiedLog]) + when(replicaManager.getLog(topicPartition)).thenReturn(Some(logMock)) + when(logMock.logStartOffset).thenReturn(startOffset) + when(logMock.read(ArgumentMatchers.eq(startOffset), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) + ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) + when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) + + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition]( + new TopicPartition("topic1", 0), + new TopicPartition("topic1", 1))) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2))) + + // We create a latch which is awaited while the log is loading. 
This ensures that the follower transition + // is triggered before the loading returns + val latch = new CountDownLatch(1) + + when(fileRecordsMock.sizeInBytes()).thenReturn(records.sizeInBytes) + val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) + when(fileRecordsMock.readInto(bufferCapture.capture(), anyInt())).thenAnswer(_ => { + latch.await() + val buffer = bufferCapture.getValue + buffer.put(records.buffer.duplicate) + buffer.flip() + }) + + val coordinatorEpoch = 0 + val partitionAndLeaderEpoch = TransactionPartitionAndLeaderEpoch(partitionId, coordinatorEpoch) + + val loadingThread = new Thread(() => { + transactionManager.loadTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch, (_, _, _, _) => ()) + }) + loadingThread.start() + TestUtils.waitUntilTrue(() => transactionManager.loadingPartitions.contains(partitionAndLeaderEpoch), + "Timed out waiting for loading partition", pause = 10) + + transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch + 1) + assertFalse(transactionManager.loadingPartitions.contains(partitionAndLeaderEpoch)) + + latch.countDown() + loadingThread.join() + + // Verify that transaction state was not loaded + assertEquals(Left(Errors.NOT_COORDINATOR), transactionManager.getTransactionState(txnMetadata1.transactionalId)) + } + @Test def testLoadAndRemoveTransactionsForPartition(): Unit = { // generate transaction log messages for two pids traces: @@ -330,7 +384,7 @@ class TransactionStateManagerTest { // update the metadata to ongoing with two partitions val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), - new TopicPartition("topic1", 1)), time.milliseconds()) + new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // append the new metadata into log transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch, newMetadata, assertCallback, requestLocal = RequestLocal.withThreadConfinedCaching) @@ -345,7 +399,7 @@ class TransactionStateManagerTest { transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -353,19 +407,19 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = 
txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.REQUEST_TIMED_OUT) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -378,7 +432,7 @@ class TransactionStateManagerTest { transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.NOT_COORDINATOR - var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_LEADER_OR_FOLLOWER) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -386,7 +440,7 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) @@ -409,7 +463,7 @@ class TransactionStateManagerTest { transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_LOAD_IN_PROGRESS - val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) @@ -423,7 +477,7 @@ class TransactionStateManagerTest { transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.UNKNOWN_SERVER_ERROR - var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new 
TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.MESSAGE_TOO_LARGE) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -431,7 +485,7 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.RECORD_LIST_TOO_LARGE) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -444,7 +498,7 @@ class TransactionStateManagerTest { transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds()) + val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, _ => true, RequestLocal.withThreadConfinedCaching) @@ -463,7 +517,7 @@ class TransactionStateManagerTest { expectedError = Errors.NOT_COORDINATOR val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), - new TopicPartition("topic1", 1)), time.milliseconds()) + new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata txnMetadata1.producerEpoch = (txnMetadata1.producerEpoch + 1).toShort @@ -482,7 +536,7 @@ class TransactionStateManagerTest { expectedError = Errors.INVALID_PRODUCER_EPOCH val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), - new TopicPartition("topic1", 1)), time.milliseconds()) + new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata txnMetadata1.pendingState = None @@ -1197,8 +1251,6 @@ class TransactionStateManagerTest { Map(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) -> new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L))) ) - when(replicaManager.getMagic(any())) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) } @Test @@ -1263,7 +1315,7 @@ class TransactionStateManagerTest { val txnMetadata = txnMetadataPool.get(transactionalId1) assertEquals(txnMetadata1.transactionalId, txnMetadata.transactionalId) assertEquals(txnMetadata1.producerId, txnMetadata.producerId) - assertEquals(txnMetadata1.previousProducerId, txnMetadata.previousProducerId) + assertEquals(txnMetadata1.prevProducerId, txnMetadata.prevProducerId) assertEquals(txnMetadata1.producerEpoch, txnMetadata.producerEpoch) assertEquals(txnMetadata1.lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(txnMetadata1.txnTimeoutMs, txnMetadata.txnTimeoutMs) diff --git a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala 
b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala index 6c4b87ddf1a7d..0fbf014374839 100755 --- a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala +++ b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala @@ -20,10 +20,7 @@ package kafka.integration import kafka.server._ import kafka.utils.TestUtils import kafka.utils.TestUtils._ -import kafka.zk.KafkaZkClient import org.apache.kafka.common.acl.{AccessControlEntry, AccessControlEntryFilter, AclBinding, AclBindingFilter} -import org.apache.kafka.common.errors.TopicExistsException -import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} import org.apache.kafka.common.resource.ResourcePattern @@ -51,23 +48,16 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { private val _brokers = new mutable.ArrayBuffer[KafkaBroker] /** - * Get the list of brokers, which could be either BrokerServer objects or KafkaServer objects. + * Get the list of brokers. */ def brokers: mutable.Buffer[KafkaBroker] = _brokers /** - * Get the list of brokers, as instances of KafkaServer. - * This method should only be used when dealing with brokers that use ZooKeeper. + * Get the list of brokers. */ - def servers: mutable.Buffer[KafkaServer] = { - checkIsZKTest() - _brokers.asInstanceOf[mutable.Buffer[KafkaServer]] - } + def servers: mutable.Buffer[KafkaBroker] = brokers - def brokerServers: mutable.Buffer[BrokerServer] = { - checkIsKRaftTest() - _brokers.asInstanceOf[mutable.Buffer[BrokerServer]] - } + def brokerServers: mutable.Buffer[BrokerServer] = _brokers.asInstanceOf[mutable.Buffer[BrokerServer]] var alive: Array[Boolean] = _ @@ -102,9 +92,9 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { instanceConfigs } - def serverForId(id: Int): Option[KafkaServer] = servers.find(s => s.config.brokerId == id) + def serverForId(id: Int): Option[KafkaBroker] = brokers.find(s => s.config.brokerId == id) - def boundPort(server: KafkaServer): Int = server.boundPort(listenerName) + def boundPort(server: KafkaBroker): Int = server.boundPort(listenerName) def bootstrapServers(listenerName: ListenerName = listenerName): String = { TestUtils.bootstrapServers(_brokers, listenerName) @@ -159,12 +149,8 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { listenerName: ListenerName = listenerName, adminClientConfig: Properties = new Properties ): Unit = { - if (isKRaftTest()) { - Using(createAdminClient(brokers, listenerName, adminClientConfig)) { admin => - TestUtils.createOffsetsTopicWithAdmin(admin, brokers, controllerServers) - } - } else { - createOffsetsTopic(zkClient, servers) + Using.resource(createAdminClient(brokers, listenerName, adminClientConfig)) { admin => + TestUtils.createOffsetsTopicWithAdmin(admin, brokers, controllerServers) } } @@ -181,25 +167,14 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { listenerName: ListenerName = listenerName, adminClientConfig: Properties = new Properties ): scala.collection.immutable.Map[Int, Int] = { - if (isKRaftTest()) { - Using.resource(createAdminClient(brokers, listenerName, adminClientConfig)) { admin => - TestUtils.createTopicWithAdmin( - admin = admin, - topic = topic, - brokers = brokers, - controllers = controllerServers, - numPartitions = numPartitions, - replicationFactor = replicationFactor, - topicConfig = topicConfig - ) - } - } else { - TestUtils.createTopic( - 
zkClient = zkClient, + Using.resource(createAdminClient(brokers, listenerName, adminClientConfig)) { admin => + TestUtils.createTopicWithAdmin( + admin = admin, topic = topic, + brokers = brokers, + controllers = controllerServers, numPartitions = numPartitions, replicationFactor = replicationFactor, - servers = servers, topicConfig = topicConfig ) } @@ -214,40 +189,28 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]], listenerName: ListenerName = listenerName - ): scala.collection.immutable.Map[Int, Int] = - if (isKRaftTest()) { - Using.resource(createAdminClient(brokers, listenerName)) { admin => - TestUtils.createTopicWithAdmin( - admin = admin, - topic = topic, - replicaAssignment = partitionReplicaAssignment, - brokers = brokers, - controllers = controllerServers - ) - } - } else { - TestUtils.createTopic( - zkClient, - topic, - partitionReplicaAssignment, - servers + ): scala.collection.immutable.Map[Int, Int] = { + Using.resource(createAdminClient(brokers, listenerName)) { admin => + TestUtils.createTopicWithAdmin( + admin = admin, + topic = topic, + replicaAssignment = partitionReplicaAssignment, + brokers = brokers, + controllers = controllerServers ) } + } def deleteTopic( topic: String, listenerName: ListenerName = listenerName ): Unit = { - if (isKRaftTest()) { - Using(createAdminClient(brokers, listenerName)) { admin => - TestUtils.deleteTopicWithAdmin( - admin = admin, - topic = topic, - brokers = aliveBrokers, - controllers = controllerServers) - } - } else { - adminZkClient.deleteTopic(topic) + Using.resource(createAdminClient(brokers, listenerName)) { admin => + TestUtils.deleteTopicWithAdmin( + admin = admin, + topic = topic, + brokers = aliveBrokers, + controllers = controllerServers) } } @@ -345,47 +308,26 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { } } - def getController(): KafkaServer = { - checkIsZKTest() - val controllerId = TestUtils.waitUntilControllerElected(zkClient) - servers.filter(s => s.config.brokerId == controllerId).head - } - def getTopicIds(names: Seq[String]): Map[String, Uuid] = { val result = new util.HashMap[String, Uuid]() - if (isKRaftTest()) { - val topicIdsMap = controllerServer.controller.findTopicIds(ANONYMOUS_CONTEXT, names.asJava).get() - names.foreach { name => - val response = topicIdsMap.get(name) - result.put(name, response.result()) - } - } else { - val topicIdsMap = getController().kafkaController.controllerContext.topicIds.toMap - names.foreach { name => - if (topicIdsMap.contains(name)) result.put(name, topicIdsMap(name)) - } + val topicIdsMap = controllerServer.controller.findTopicIds(ANONYMOUS_CONTEXT, names.asJava).get() + names.foreach { name => + val response = topicIdsMap.get(name) + result.put(name, response.result()) } result.asScala.toMap } def getTopicIds(): Map[String, Uuid] = { - if (isKRaftTest()) { - controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap - } else { - getController().kafkaController.controllerContext.topicIds.toMap - } + controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap } def getTopicNames(): Map[Uuid, String] = { - if (isKRaftTest()) { - val result = new util.HashMap[Uuid, String]() - controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().forEach { - (key, value) => result.put(value, key) - } - result.asScala.toMap - } else { - getController().kafkaController.controllerContext.topicNames.toMap + val result = new 
util.HashMap[Uuid, String]() + controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().forEach { + (key, value) => result.put(value, key) } + result.asScala.toMap } private def createBrokers(startup: Boolean): Unit = { @@ -405,17 +347,7 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { } private def createBrokerFromConfig(config: KafkaConfig): KafkaBroker = { - if (isKRaftTest()) { - createBroker(config, brokerTime(config.brokerId), startup = false) - } else { - TestUtils.createServer( - config, - time = brokerTime(config.brokerId), - threadNamePrefix = None, - startup = false, - enableZkApiForwarding = isZkMigrationTest() || (config.migrationEnabled && config.interBrokerProtocolVersion.isApiForwardingEnabled) - ) - } + createBroker(config, brokerTime(config.brokerId), startup = false) } def aliveBrokers: Seq[KafkaBroker] = { @@ -423,63 +355,20 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { } def ensureConsistentKRaftMetadata(): Unit = { - if (isKRaftTest()) { - TestUtils.ensureConsistentKRaftMetadata( - aliveBrokers, - controllerServer - ) - } + TestUtils.ensureConsistentKRaftMetadata( + aliveBrokers, + controllerServer + ) } def changeClientIdConfig(sanitizedClientId: String, configs: Properties): Unit = { - if (isKRaftTest()) { - Using(createAdminClient(brokers, listenerName)) { - admin => { - admin.alterClientQuotas(Collections.singleton( - new ClientQuotaAlteration( - new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> (if (sanitizedClientId == "") null else sanitizedClientId)).asJava), - configs.asScala.map { case (key, value) => new ClientQuotaAlteration.Op(key, value.toDouble) }.toList.asJava))).all().get() - } + Using.resource(createAdminClient(brokers, listenerName)) { + admin => { + admin.alterClientQuotas(Collections.singleton( + new ClientQuotaAlteration( + new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> (if (sanitizedClientId == "") null else sanitizedClientId)).asJava), + configs.asScala.map { case (key, value) => new ClientQuotaAlteration.Op(key, value.toDouble) }.toList.asJava))).all().get() } } - else { - adminZkClient.changeClientIdConfig(sanitizedClientId, configs) - } - } - - /** - * Ensures that the consumer offsets/group metadata topic exists. If it does not, the topic is created and the method waits - * until the leader is elected and metadata is propagated to all brokers. If it does, the method verifies that it has - * the expected number of partition and replication factor however it does not guarantee that the topic is empty. - */ - private def createOffsetsTopic(zkClient: KafkaZkClient, servers: Seq[KafkaBroker]): Unit = { - val server = servers.head - val numPartitions = server.config.groupCoordinatorConfig.offsetsTopicPartitions - val replicationFactor = server.config.groupCoordinatorConfig.offsetsTopicReplicationFactor.toInt - - try { - TestUtils.createTopic( - zkClient, - Topic.GROUP_METADATA_TOPIC_NAME, - numPartitions, - replicationFactor, - servers, - server.groupCoordinator.groupMetadataTopicConfigs - ) - } catch { - case ex: TopicExistsException => - val allPartitionsMetadata = waitForAllPartitionsMetadata( - servers, - Topic.GROUP_METADATA_TOPIC_NAME, - numPartitions - ) - - // If the topic already exists, we ensure that it has the required - // number of partitions and replication factor. If it has not, the - // exception is thrown further. 
- if (allPartitionsMetadata.size != numPartitions || allPartitionsMetadata.head._2.replicas.size != replicationFactor) { - throw ex - } - } } } diff --git a/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala b/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala index 6555937d9fd00..4bfb5a7104b00 100644 --- a/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala @@ -24,7 +24,7 @@ import kafka.utils.{Logging, TestUtils} import scala.jdk.CollectionConverters._ import org.junit.jupiter.api.{BeforeEach, TestInfo} import com.yammer.metrics.core.Gauge -import org.apache.kafka.server.config.{ServerConfigs, ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -50,7 +50,7 @@ class MetricsDuringTopicCreationDeletionTest extends KafkaServerTestHarness with @volatile private var running = true - override def generateConfigs = TestUtils.createBrokerConfigs(nodesNum, zkConnectOrNull) + override def generateConfigs = TestUtils.createBrokerConfigs(nodesNum) .map(KafkaConfig.fromProps(_, overridingProps)) @BeforeEach @@ -145,8 +145,8 @@ class MetricsDuringTopicCreationDeletionTest extends KafkaServerTestHarness with // Delete topics for (t <- topics if running) { try { - adminZkClient.deleteTopic(t) - TestUtils.verifyTopicDeletion(zkClient, t, partitionNum, servers) + deleteTopic(t) + TestUtils.verifyTopicDeletion(t, partitionNum, servers) } catch { case e: Exception => e.printStackTrace() } diff --git a/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala b/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala index 0b7beeaab7acd..fb981369e6b66 100644 --- a/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala +++ b/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala @@ -29,7 +29,7 @@ import org.junit.jupiter.params.provider.ValueSource class MinIsrConfigTest extends KafkaServerTestHarness { val overridingProps = new Properties() overridingProps.put(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, "5") - def generateConfigs: Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1, zkConnectOrNull).map(KafkaConfig.fromProps(_, overridingProps)) + def generateConfigs: Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1).map(KafkaConfig.fromProps(_, overridingProps)) @ParameterizedTest @ValueSource(strings = Array("kraft")) diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index b12e60980fc97..b4d3bee35d171 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -34,13 +34,14 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry} import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.log4j.{Level, Logger} +import org.apache.logging.log4j.{Level, LogManager} import org.junit.jupiter.api.{AfterEach, BeforeEach, 
TestInfo} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import com.yammer.metrics.core.Meter import org.apache.kafka.metadata.LeaderConstants +import org.apache.logging.log4j.core.config.Configurator class UncleanLeaderElectionTest extends QuorumTestHarness { val brokerId1 = 0 @@ -63,25 +64,23 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { val partitionId = 0 val topicPartition = new TopicPartition(topic, partitionId) - val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis]) - val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor]) + val kafkaApisLogger = LogManager.getLogger(classOf[kafka.server.KafkaApis]) + val networkProcessorLogger = LogManager.getLogger(classOf[kafka.network.Processor]) @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - configProps1 = createBrokerConfig(brokerId1, zkConnectOrNull) - configProps2 = createBrokerConfig(brokerId2, zkConnectOrNull) + configProps1 = createBrokerConfig(brokerId1) + configProps2 = createBrokerConfig(brokerId2) for (configProps <- List(configProps1, configProps2)) { configProps.put("controlled.shutdown.enable", enableControlledShutdown.toString) - configProps.put("controlled.shutdown.max.retries", "1") - configProps.put("controlled.shutdown.retry.backoff.ms", "1000") } // temporarily set loggers to a higher level so that tests run quietly - kafkaApisLogger.setLevel(Level.FATAL) - networkProcessorLogger.setLevel(Level.FATAL) + Configurator.setLevel(kafkaApisLogger.getName, Level.FATAL) + Configurator.setLevel(networkProcessorLogger.getName, Level.FATAL) } @AfterEach @@ -90,8 +89,8 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { brokers.foreach(broker => CoreUtils.delete(broker.config.logDirs)) // restore log levels - kafkaApisLogger.setLevel(Level.ERROR) - networkProcessorLogger.setLevel(Level.ERROR) + Configurator.setLevel(kafkaApisLogger.getName, Level.ERROR) + Configurator.setLevel(networkProcessorLogger.getName, Level.ERROR) admin.close() diff --git a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala index 874701685274c..38937012065f5 100644 --- a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala @@ -24,7 +24,7 @@ import kafka.utils.Implicits._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} +import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, RecordVersion} import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.util.MockTime @@ -117,8 +117,7 @@ abstract class AbstractLogCleanerIntegrationTest { producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true) + topicId = None) logMap.put(partition, log) this.logs += log } @@ -147,8 +146,8 @@ abstract class AbstractLogCleanerIntegrationTest { 
startKey: Int = 0, magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): Seq[(Int, String, Long)] = { for (_ <- 0 until numDups; key <- startKey until (startKey + numKeys)) yield { val value = counter.toString - val appendInfo = log.appendAsLeader(TestUtils.singletonRecords(value = value.getBytes, codec = codec, - key = key.toString.getBytes, magicValue = magicValue), leaderEpoch = 0) + val appendInfo = log.appendAsLeaderWithRecordVersion(TestUtils.singletonRecords(value = value.getBytes, codec = codec, + key = key.toString.getBytes, magicValue = magicValue), leaderEpoch = 0, recordVersion = RecordVersion.lookup(magicValue)) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) incCounter() diff --git a/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala b/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala index 87f56a9c25074..cf28d1cef681d 100755 --- a/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala +++ b/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala @@ -69,8 +69,7 @@ class BrokerCompressionTest { producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true + topicId = None ) /* append two messages */ diff --git a/core/src/test/scala/unit/kafka/log/LocalLogTest.scala b/core/src/test/scala/unit/kafka/log/LocalLogTest.scala deleted file mode 100644 index 0b840ddde626b..0000000000000 --- a/core/src/test/scala/unit/kafka/log/LocalLogTest.scala +++ /dev/null @@ -1,714 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.log - -import java.io.File -import java.nio.channels.ClosedChannelException -import java.nio.charset.StandardCharsets -import java.util -import java.util.regex.Pattern -import java.util.Collections -import kafka.server.KafkaConfig -import kafka.utils.TestUtils -import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.{KafkaException, TopicPartition} -import org.apache.kafka.common.errors.KafkaStorageException -import org.apache.kafka.common.record.{MemoryRecords, Record, SimpleRecord} -import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.server.util.{MockTime, Scheduler} -import org.apache.kafka.storage.internals.log.{FetchDataInfo, LocalLog, LogConfig, LogDirFailureChannel, LogFileUtils, LogOffsetMetadata, LogSegment, LogSegments, LogTruncation, SegmentDeletionReason} -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.function.Executable -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} -import org.mockito.Mockito.{doReturn, spy} - -import scala.jdk.CollectionConverters._ - -class LocalLogTest { - - var config: KafkaConfig = _ - val tmpDir: File = TestUtils.tempDir() - val logDir: File = TestUtils.randomPartitionLogDir(tmpDir) - val topicPartition = new TopicPartition("test_topic", 1) - val logDirFailureChannel = new LogDirFailureChannel(10) - val mockTime = new MockTime() - val log: LocalLog = createLocalLogWithActiveSegment(config = LogTestUtils.createLogConfig()) - - @BeforeEach - def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(0, "127.0.0.1:1", port = -1) - config = KafkaConfig.fromProps(props) - } - - @AfterEach - def tearDown(): Unit = { - try { - log.close() - } catch { - case _: KafkaStorageException => // ignore - } - Utils.delete(tmpDir) - } - - case class KeyValue(key: String, value: String) { - def toRecord(timestamp: => Long = mockTime.milliseconds): SimpleRecord = { - new SimpleRecord(timestamp, key.getBytes, value.getBytes) - } - } - - object KeyValue { - def fromRecord(record: Record): KeyValue = { - val key = - if (record.hasKey) - StandardCharsets.UTF_8.decode(record.key()).toString - else - "" - val value = - if (record.hasValue) - StandardCharsets.UTF_8.decode(record.value()).toString - else - "" - KeyValue(key, value) - } - } - - private def kvsToRecords(keyValues: Iterable[KeyValue]): Iterable[SimpleRecord] = { - keyValues.map(kv => kv.toRecord()) - } - - private def recordsToKvs(records: Iterable[Record]): Iterable[KeyValue] = { - records.map(r => KeyValue.fromRecord(r)) - } - - private def appendRecords(records: Iterable[SimpleRecord], - log: LocalLog = log, - initialOffset: Long = 0L): Unit = { - log.append(initialOffset + records.size - 1, - records.head.timestamp, - initialOffset, - MemoryRecords.withRecords(initialOffset, Compression.NONE, 0, records.toList : _*)) - } - - private def readRecords(log: LocalLog = log, - startOffset: Long = 0L, - maxLength: => Int = log.segments.activeSegment.size, - minOneMessage: Boolean = false, - maxOffsetMetadata: => LogOffsetMetadata = log.logEndOffsetMetadata, - includeAbortedTxns: Boolean = false): FetchDataInfo = { - log.read(startOffset, - maxLength, - minOneMessage, - maxOffsetMetadata, - includeAbortedTxns) - } - - @Test - def testLogDeleteSegmentsSuccess(): Unit = { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - log.roll(0L) - assertEquals(2, log.segments.numberOfSegments) - assertFalse(logDir.listFiles.isEmpty) - val segmentsBeforeDelete = new 
util.ArrayList(log.segments.values) - val deletedSegments = log.deleteAllSegments() - assertTrue(log.segments.isEmpty) - assertEquals(segmentsBeforeDelete, deletedSegments) - assertThrows(classOf[KafkaStorageException], () => log.checkIfMemoryMappedBufferClosed()) - assertTrue(logDir.exists) - } - - @Test - def testRollEmptyActiveSegment(): Unit = { - val oldActiveSegment = log.segments.activeSegment - log.roll(0L) - assertEquals(1, log.segments.numberOfSegments) - assertNotEquals(oldActiveSegment, log.segments.activeSegment) - assertFalse(logDir.listFiles.isEmpty) - assertTrue(oldActiveSegment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)) - } - - @Test - def testLogDeleteDirSuccessWhenEmptyAndFailureWhenNonEmpty(): Unit ={ - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - log.roll(0L) - assertEquals(2, log.segments.numberOfSegments) - assertFalse(logDir.listFiles.isEmpty) - - assertThrows(classOf[IllegalStateException], () => log.deleteEmptyDir()) - assertTrue(logDir.exists) - - log.deleteAllSegments() - log.deleteEmptyDir() - assertFalse(logDir.exists) - } - - @Test - def testUpdateConfig(): Unit = { - val oldConfig = log.config - assertEquals(oldConfig, log.config) - - val newConfig = LogTestUtils.createLogConfig(segmentBytes = oldConfig.segmentSize + 1) - log.updateConfig(newConfig) - assertEquals(newConfig, log.config) - } - - @Test - def testLogDirRenameToNewDir(): Unit = { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - log.roll(0L) - assertEquals(2, log.segments.numberOfSegments) - val newLogDir = TestUtils.randomPartitionLogDir(tmpDir) - assertTrue(log.renameDir(newLogDir.getName)) - assertFalse(logDir.exists()) - assertTrue(newLogDir.exists()) - assertEquals(newLogDir, log.dir) - assertEquals(newLogDir.getParent, log.parentDir) - assertEquals(newLogDir.getParent, log.dir.getParent) - log.segments.values.forEach(segment => assertEquals(newLogDir.getPath, segment.log.file().getParentFile.getPath)) - assertEquals(2, log.segments.numberOfSegments) - } - - @Test - def testLogDirRenameToExistingDir(): Unit = { - assertFalse(log.renameDir(log.dir.getName)) - } - - @Test - def testLogFlush(): Unit = { - assertEquals(0L, log.recoveryPoint) - assertEquals(mockTime.milliseconds, log.lastFlushTime) - - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - mockTime.sleep(1) - val newSegment = log.roll(0L) - log.flush(newSegment.baseOffset) - log.markFlushed(newSegment.baseOffset) - assertEquals(1L, log.recoveryPoint) - assertEquals(mockTime.milliseconds, log.lastFlushTime) - } - - @Test - def testLogAppend(): Unit = { - val fetchDataInfoBeforeAppend = readRecords(maxLength = 1) - assertTrue(fetchDataInfoBeforeAppend.records.records.asScala.isEmpty) - - mockTime.sleep(1) - val keyValues = Seq(KeyValue("abc", "ABC"), KeyValue("de", "DE")) - appendRecords(kvsToRecords(keyValues)) - assertEquals(2L, log.logEndOffset) - assertEquals(0L, log.recoveryPoint) - val fetchDataInfo = readRecords() - assertEquals(2L, fetchDataInfo.records.records.asScala.size) - assertEquals(keyValues, recordsToKvs(fetchDataInfo.records.records.asScala)) - } - - @Test - def testLogCloseSuccess(): Unit = { - val keyValues = Seq(KeyValue("abc", "ABC"), KeyValue("de", "DE")) - appendRecords(kvsToRecords(keyValues)) - log.close() - assertThrows(classOf[ClosedChannelException], () => appendRecords(kvsToRecords(keyValues), initialOffset = 2L)) - } - - @Test - def 
testLogCloseIdempotent(): Unit = { - log.close() - // Check that LocalLog.close() is idempotent - log.close() - } - - @Test - def testLogCloseFailureWhenInMemoryBufferClosed(): Unit = { - val keyValues = Seq(KeyValue("abc", "ABC"), KeyValue("de", "DE")) - appendRecords(kvsToRecords(keyValues)) - log.closeHandlers() - assertThrows(classOf[KafkaStorageException], () => log.close()) - } - - @Test - def testLogCloseHandlers(): Unit = { - val keyValues = Seq(KeyValue("abc", "ABC"), KeyValue("de", "DE")) - appendRecords(kvsToRecords(keyValues)) - log.closeHandlers() - assertThrows(classOf[ClosedChannelException], - () => appendRecords(kvsToRecords(keyValues), initialOffset = 2L)) - } - - @Test - def testLogCloseHandlersIdempotent(): Unit = { - log.closeHandlers() - // Check that LocalLog.closeHandlers() is idempotent - log.closeHandlers() - } - - private def testRemoveAndDeleteSegments(asyncDelete: Boolean): Unit = { - for (offset <- 0 to 8) { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record), initialOffset = offset) - log.roll(0L) - } - - assertEquals(10L, log.segments.numberOfSegments) - - class TestDeletionReason extends SegmentDeletionReason { - private var _deletedSegments: util.Collection[LogSegment] = new util.ArrayList() - - override def logReason(toDelete: util.List[LogSegment]): Unit = { - _deletedSegments = new util.ArrayList(toDelete) - } - - def deletedSegments: util.Collection[LogSegment] = _deletedSegments - } - val reason = new TestDeletionReason() - val toDelete = new util.ArrayList(log.segments.values) - log.removeAndDeleteSegments(toDelete, asyncDelete, reason) - if (asyncDelete) { - mockTime.sleep(log.config.fileDeleteDelayMs + 1) - } - assertTrue(log.segments.isEmpty) - assertEquals(toDelete, reason.deletedSegments) - toDelete.forEach(segment => assertTrue(segment.deleted())) - } - - @Test - def testRemoveAndDeleteSegmentsSync(): Unit = { - testRemoveAndDeleteSegments(asyncDelete = false) - } - - @Test - def testRemoveAndDeleteSegmentsAsync(): Unit = { - testRemoveAndDeleteSegments(asyncDelete = true) - } - - private def testDeleteSegmentFiles(asyncDelete: Boolean): Unit = { - for (offset <- 0 to 8) { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record), initialOffset = offset) - log.roll(0L) - } - - assertEquals(10L, log.segments.numberOfSegments) - - val toDelete = log.segments.values - LocalLog.deleteSegmentFiles(toDelete, asyncDelete, log.dir, log.topicPartition, log.config, log.scheduler, log.logDirFailureChannel, "") - if (asyncDelete) { - toDelete.forEach { - segment => - assertFalse(segment.deleted()) - assertTrue(segment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)) - } - mockTime.sleep(log.config.fileDeleteDelayMs + 1) - } - toDelete.forEach(segment => assertTrue(segment.deleted())) - } - - @Test - def testDeleteSegmentFilesSync(): Unit = { - testDeleteSegmentFiles(asyncDelete = false) - } - - @Test - def testDeleteSegmentFilesAsync(): Unit = { - testDeleteSegmentFiles(asyncDelete = true) - } - - @Test - def testCreateAndDeleteSegment(): Unit = { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - val newOffset = log.segments.activeSegment.baseOffset + 1 - val oldActiveSegment = log.segments.activeSegment - val newActiveSegment = log.createAndDeleteSegment(newOffset, log.segments.activeSegment, true, new LogTruncation(log.logger)) - assertEquals(1, log.segments.numberOfSegments) - assertEquals(newActiveSegment, 
log.segments.activeSegment) - assertNotEquals(oldActiveSegment, log.segments.activeSegment) - assertTrue(oldActiveSegment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)) - assertEquals(newOffset, log.segments.activeSegment.baseOffset) - assertEquals(0L, log.recoveryPoint) - assertEquals(newOffset, log.logEndOffset) - val fetchDataInfo = readRecords(startOffset = newOffset) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - } - - @Test - def testTruncateFullyAndStartAt(): Unit = { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - for (offset <- 0 to 7) { - appendRecords(List(record), initialOffset = offset) - if (offset % 2 != 0) - log.roll(0L) - } - for (offset <- 8 to 12) { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record), initialOffset = offset) - } - assertEquals(5, log.segments.numberOfSegments) - assertNotEquals(10L, log.segments.activeSegment.baseOffset) - val expected = new util.ArrayList(log.segments.values) - val deleted = log.truncateFullyAndStartAt(10L) - assertEquals(expected, deleted) - assertEquals(1, log.segments.numberOfSegments) - assertEquals(10L, log.segments.activeSegment.baseOffset) - assertEquals(0L, log.recoveryPoint) - assertEquals(10L, log.logEndOffset) - val fetchDataInfo = readRecords(startOffset = 10L) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - } - - @Test - def testWhenFetchOffsetHigherThanMaxOffset(): Unit = { - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - for (offset <- 0 to 4) { - appendRecords(List(record), initialOffset = offset) - if (offset % 2 != 0) - log.roll(0L) - } - assertEquals(3, log.segments.numberOfSegments) - - // case-0: valid case, `startOffset` < `maxOffsetMetadata.offset` - var fetchDataInfo = readRecords(startOffset = 3L, maxOffsetMetadata = new LogOffsetMetadata(4L, 4L, 0)) - assertEquals(1, fetchDataInfo.records.records.asScala.size) - assertEquals(new LogOffsetMetadata(3, 2L, 69), fetchDataInfo.fetchOffsetMetadata) - - // case-1: `startOffset` == `maxOffsetMetadata.offset` - fetchDataInfo = readRecords(startOffset = 4L, maxOffsetMetadata = new LogOffsetMetadata(4L, 4L, 0)) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - assertEquals(new LogOffsetMetadata(4L, 4L, 0), fetchDataInfo.fetchOffsetMetadata) - - // case-2: `startOffset` > `maxOffsetMetadata.offset` - fetchDataInfo = readRecords(startOffset = 5L, maxOffsetMetadata = new LogOffsetMetadata(4L, 4L, 0)) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - assertEquals(new LogOffsetMetadata(5L, 4L, 69), fetchDataInfo.fetchOffsetMetadata) - - // case-3: `startOffset` < `maxMessageOffset.offset` but `maxMessageOffset.messageOnlyOffset` is true - fetchDataInfo = readRecords(startOffset = 3L, maxOffsetMetadata = new LogOffsetMetadata(4L, -1L, -1)) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - assertEquals(new LogOffsetMetadata(3L, 2L, 69), fetchDataInfo.fetchOffsetMetadata) - - // case-4: `startOffset` < `maxMessageOffset.offset`, `maxMessageOffset.messageOnlyOffset` is false, but - // `maxOffsetMetadata.segmentBaseOffset` < `startOffset.segmentBaseOffset` - fetchDataInfo = readRecords(startOffset = 3L, maxOffsetMetadata = new LogOffsetMetadata(4L, 0L, 40)) - assertTrue(fetchDataInfo.records.records.asScala.isEmpty) - assertEquals(new LogOffsetMetadata(3L, 2L, 69), fetchDataInfo.fetchOffsetMetadata) - } - - @Test - def testTruncateTo(): Unit = { - for (offset <- 0 to 11) { - val record = new SimpleRecord(mockTime.milliseconds, 
"a".getBytes) - appendRecords(List(record), initialOffset = offset) - if (offset % 3 == 2) - log.roll(0L) - } - assertEquals(5, log.segments.numberOfSegments) - assertEquals(12L, log.logEndOffset) - - val expected = new util.ArrayList(log.segments.values(9L, log.logEndOffset + 1)) - // Truncate to an offset before the base offset of the active segment - val deleted = log.truncateTo(7L) - assertEquals(expected, deleted) - assertEquals(3, log.segments.numberOfSegments) - assertEquals(6L, log.segments.activeSegment.baseOffset) - assertEquals(0L, log.recoveryPoint) - assertEquals(7L, log.logEndOffset) - val fetchDataInfo = readRecords(startOffset = 6L) - assertEquals(1, fetchDataInfo.records.records.asScala.size) - assertEquals(Seq(KeyValue("", "a")), recordsToKvs(fetchDataInfo.records.records.asScala)) - - // Verify that we can still append to the active segment - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record), initialOffset = 7L) - assertEquals(8L, log.logEndOffset) - } - - @Test - def testNonActiveSegmentsFrom(): Unit = { - for (i <- 0 until 5) { - val keyValues = Seq(KeyValue(i.toString, i.toString)) - appendRecords(kvsToRecords(keyValues), initialOffset = i) - log.roll(0L) - } - - def nonActiveBaseOffsetsFrom(startOffset: Long): Seq[Long] = { - log.segments.nonActiveLogSegmentsFrom(startOffset).asScala.map(_.baseOffset).toSeq - } - - assertEquals(5L, log.segments.activeSegment.baseOffset) - assertEquals(0 until 5, nonActiveBaseOffsetsFrom(0L)) - assertEquals(Seq.empty, nonActiveBaseOffsetsFrom(5L)) - assertEquals(2 until 5, nonActiveBaseOffsetsFrom(2L)) - assertEquals(Seq.empty, nonActiveBaseOffsetsFrom(6L)) - } - - private def topicPartitionName(topic: String, partition: String): String = topic + "-" + partition - - @Test - def testParseTopicPartitionName(): Unit = { - val topic = "test_topic" - val partition = "143" - val dir = new File(logDir, topicPartitionName(topic, partition)) - val topicPartition = LocalLog.parseTopicPartitionName(dir) - assertEquals(topic, topicPartition.topic) - assertEquals(partition.toInt, topicPartition.partition) - } - - /** - * Tests that log directories with a period in their name that have been marked for deletion - * are parsed correctly by `Log.parseTopicPartitionName` (see KAFKA-5232 for details). 
- */ - @Test - def testParseTopicPartitionNameWithPeriodForDeletedTopic(): Unit = { - val topic = "foo.bar-testtopic" - val partition = "42" - val dir = new File(logDir, LocalLog.logDeleteDirName(new TopicPartition(topic, partition.toInt))) - val topicPartition = LocalLog.parseTopicPartitionName(dir) - assertEquals(topic, topicPartition.topic, "Unexpected topic name parsed") - assertEquals(partition.toInt, topicPartition.partition, "Unexpected partition number parsed") - } - - @Test - def testParseTopicPartitionNameForEmptyName(): Unit = { - val dir = new File("") - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir.getCanonicalPath) - } - - @Test - def testParseTopicPartitionNameForNull(): Unit = { - val dir: File = null - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir) - } - - @Test - def testParseTopicPartitionNameForMissingSeparator(): Unit = { - val topic = "test_topic" - val partition = "1999" - val dir = new File(logDir, topic + partition) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir.getCanonicalPath) - // also test the "-delete" marker case - val deleteMarkerDir = new File(logDir, topic + partition + "." + LogFileUtils.DELETE_DIR_SUFFIX) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(deleteMarkerDir), - () => "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath) - } - - @Test - def testParseTopicPartitionNameForMissingTopic(): Unit = { - val topic = "" - val partition = "1999" - val dir = new File(logDir, topicPartitionName(topic, partition)) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir.getCanonicalPath) - - // also test the "-delete" marker case - val deleteMarkerDir = new File(logDir, LocalLog.logDeleteDirName(new TopicPartition(topic, partition.toInt))) - - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(deleteMarkerDir), - () => "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath) - } - - @Test - def testParseTopicPartitionNameForMissingPartition(): Unit = { - val topic = "test_topic" - val partition = "" - val dir = new File(logDir.getPath + topicPartitionName(topic, partition)) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir.getCanonicalPath) - - // also test the "-delete" marker case - val deleteMarkerDir = new File(logDir, topicPartitionName(topic, partition) + "." 
+ LogFileUtils.DELETE_DIR_SUFFIX) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(deleteMarkerDir), - () => "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath) - } - - @Test - def testParseTopicPartitionNameForInvalidPartition(): Unit = { - val topic = "test_topic" - val partition = "1999a" - val dir = new File(logDir, topicPartitionName(topic, partition)) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir), - () => "KafkaException should have been thrown for dir: " + dir.getCanonicalPath) - - // also test the "-delete" marker case - val deleteMarkerDir = new File(logDir, topic + partition + "." + LogFileUtils.DELETE_DIR_SUFFIX) - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(deleteMarkerDir), - () => "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath) - } - - @Test - def testParseTopicPartitionNameForExistingInvalidDir(): Unit = { - val dir1 = new File(logDir.getPath + "/non_kafka_dir") - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir1), - () => "KafkaException should have been thrown for dir: " + dir1.getCanonicalPath) - val dir2 = new File(logDir.getPath + "/non_kafka_dir-delete") - assertThrows(classOf[KafkaException], () => LocalLog.parseTopicPartitionName(dir2), - () => "KafkaException should have been thrown for dir: " + dir2.getCanonicalPath) - } - - @Test - def testLogDeleteDirName(): Unit = { - val name1 = LocalLog.logDeleteDirName(new TopicPartition("foo", 3)) - assertTrue(name1.length <= 255) - assertTrue(Pattern.compile("foo-3\\.[0-9a-z]{32}-delete").matcher(name1).matches()) - assertTrue(LocalLog.DELETE_DIR_PATTERN.matcher(name1).matches()) - assertFalse(LocalLog.FUTURE_DIR_PATTERN.matcher(name1).matches()) - val name2 = LocalLog.logDeleteDirName( - new TopicPartition("n" + String.join("", Collections.nCopies(248, "o")), 5)) - assertEquals(255, name2.length) - assertTrue(Pattern.compile("n[o]{212}-5\\.[0-9a-z]{32}-delete").matcher(name2).matches()) - assertTrue(LocalLog.DELETE_DIR_PATTERN.matcher(name2).matches()) - assertFalse(LocalLog.FUTURE_DIR_PATTERN.matcher(name2).matches()) - } - - @Test - def testOffsetFromFile(): Unit = { - val offset = 23423423L - - val logFile = LogFileUtils.logFile(tmpDir, offset) - assertEquals(offset, LogFileUtils.offsetFromFile(logFile)) - - val offsetIndexFile = LogFileUtils.offsetIndexFile(tmpDir, offset) - assertEquals(offset, LogFileUtils.offsetFromFile(offsetIndexFile)) - - val timeIndexFile = LogFileUtils.timeIndexFile(tmpDir, offset) - assertEquals(offset, LogFileUtils.offsetFromFile(timeIndexFile)) - } - - @Test - def testRollSegmentThatAlreadyExists(): Unit = { - assertEquals(1, log.segments.numberOfSegments, "Log begins with a single empty segment.") - - // roll active segment with the same base offset of size zero should recreate the segment - log.roll(0L) - assertEquals(1, log.segments.numberOfSegments, "Expect 1 segment after roll() empty segment with base offset.") - - // should be able to append records to active segment - val keyValues1 = List(KeyValue("k1", "v1")) - appendRecords(kvsToRecords(keyValues1)) - assertEquals(0L, log.segments.activeSegment.baseOffset) - // make sure we can append more records - val keyValues2 = List(KeyValue("k2", "v2")) - appendRecords(keyValues2.map(_.toRecord(mockTime.milliseconds + 10)), initialOffset = 1L) - assertEquals(2, log.logEndOffset, "Expect two records in the log") - val readResult = 
readRecords() - assertEquals(2L, readResult.records.records.asScala.size) - assertEquals(keyValues1 ++ keyValues2, recordsToKvs(readResult.records.records.asScala)) - - // roll so that active segment is empty - log.roll(0L) - assertEquals(2L, log.segments.activeSegment.baseOffset, "Expect base offset of active segment to be LEO") - assertEquals(2, log.segments.numberOfSegments, "Expect two segments.") - assertEquals(2L, log.logEndOffset) - } - - @Test - def testNewSegmentsAfterRoll(): Unit = { - assertEquals(1, log.segments.numberOfSegments, "Log begins with a single empty segment.") - - // roll active segment with the same base offset of size zero should recreate the segment - { - val newSegment = log.roll(0L) - assertEquals(0L, newSegment.baseOffset) - assertEquals(1, log.segments.numberOfSegments) - assertEquals(0L, log.logEndOffset) - } - - appendRecords(List(KeyValue("k1", "v1").toRecord())) - - { - val newSegment = log.roll(0L) - assertEquals(1L, newSegment.baseOffset) - assertEquals(2, log.segments.numberOfSegments) - assertEquals(1L, log.logEndOffset) - } - - appendRecords(List(KeyValue("k2", "v2").toRecord()), initialOffset = 1L) - - { - val newSegment = log.roll(1L) - assertEquals(2L, newSegment.baseOffset) - assertEquals(3, log.segments.numberOfSegments) - assertEquals(2L, log.logEndOffset) - } - } - - @Test - def testRollSegmentErrorWhenNextOffsetIsIllegal(): Unit = { - assertEquals(1, log.segments.numberOfSegments, "Log begins with a single empty segment.") - - val keyValues = List(KeyValue("k1", "v1"), KeyValue("k2", "v2"), KeyValue("k3", "v3")) - appendRecords(kvsToRecords(keyValues)) - assertEquals(0L, log.segments.activeSegment.baseOffset) - assertEquals(3, log.logEndOffset, "Expect two records in the log") - - // roll to create an empty active segment - log.roll(0L) - assertEquals(3L, log.segments.activeSegment.baseOffset) - - // intentionally setup the logEndOffset to introduce an error later - log.updateLogEndOffset(1L) - - // expect an error because of attempt to roll to a new offset (1L) that's lower than the - // base offset (3L) of the active segment - assertThrows(classOf[KafkaException], () => log.roll(0L)) - } - - @Test - def testFlushingNonExistentDir(): Unit = { - val spyLog = spy(log) - - val record = new SimpleRecord(mockTime.milliseconds, "a".getBytes) - appendRecords(List(record)) - mockTime.sleep(1) - val newSegment = log.roll(0L) - - // simulate the directory is renamed concurrently - doReturn(new File("__NON_EXISTENT__"), Nil: _*).when(spyLog).dir - assertDoesNotThrow((() => spyLog.flush(newSegment.baseOffset)): Executable) - } - - private def createLocalLogWithActiveSegment(dir: File = logDir, - config: LogConfig, - segments: LogSegments = new LogSegments(topicPartition), - recoveryPoint: Long = 0L, - nextOffsetMetadata: LogOffsetMetadata = new LogOffsetMetadata(0L, 0L, 0), - scheduler: Scheduler = mockTime.scheduler, - time: Time = mockTime, - topicPartition: TopicPartition = topicPartition, - logDirFailureChannel: LogDirFailureChannel = logDirFailureChannel): LocalLog = { - segments.add(LogSegment.open(dir, - 0L, - config, - time, - config.initFileSize, - config.preallocate)) - new LocalLog(dir, - config, - segments, - recoveryPoint, - nextOffsetMetadata, - scheduler, - time, - topicPartition, - logDirFailureChannel) - } -} diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala index 5b3cc00732d3d..2315acc3fb31c 100644 --- 
a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala @@ -37,7 +37,6 @@ import java.lang.{Long => JLong} import java.util import java.util.concurrent.ConcurrentHashMap import scala.collection.mutable -import scala.jdk.OptionConverters.RichOption /** * Unit tests for the log cleaning logic @@ -110,8 +109,8 @@ class LogCleanerManagerTest extends Logging { val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val segments = new LogSegments(tp) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - tpDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + tpDir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, tpDir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( tpDir, @@ -124,7 +123,7 @@ class LogCleanerManagerTest extends Logging { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -134,7 +133,7 @@ class LogCleanerManagerTest extends Logging { // the exception should be caught and the partition that caused it marked as uncleanable class LogMock extends UnifiedLog(offsets.logStartOffset, localLog, new BrokerTopicStats, producerIdExpirationCheckIntervalMs, leaderEpochCache, - producerStateManager, _topicId = None, keepPartitionMetadataFile = true) { + producerStateManager, _topicId = None) { // Throw an error in getFirstBatchTimestampForSegments since it is called in grabFilthiestLog() override def getFirstBatchTimestampForSegments(segments: util.Collection[LogSegment]): util.Collection[java.lang.Long] = throw new IllegalStateException("Error!") @@ -822,8 +821,7 @@ class LogCleanerManagerTest extends Logging { producerStateManagerConfig = producerStateManagerConfig, producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true) + topicId = None) } private def createLowRetentionLogConfig(segmentSize: Int, cleanupPolicy: String): LogConfig = { @@ -876,8 +874,7 @@ class LogCleanerManagerTest extends Logging { producerStateManagerConfig = producerStateManagerConfig, producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true + topicId = None ) } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala index 2601846b1a066..df461855a9fa9 100755 --- a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala @@ -25,7 +25,7 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.record._ -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_0_IV1, IBP_0_11_0_IV0, IBP_0_9_0} +import org.apache.kafka.common.utils.Time import 
org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile @@ -35,7 +35,6 @@ import org.junit.jupiter.api.extension.ExtensionContext import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, ArgumentsProvider, ArgumentsSource} -import scala.annotation.nowarn import scala.collection._ import scala.jdk.CollectionConverters._ @@ -136,14 +135,13 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati assertEquals(toMap(messages), toMap(read), "Contents of the map shouldn't change") } - @nowarn("cat=deprecation") @ParameterizedTest @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.ExcludeZstd]) - def testCleanerWithMessageFormatV0(compressionType: CompressionType): Unit = { - val codec: Compression = Compression.of(compressionType).build() + def testCleanerWithMessageFormatV0V1V2(compressionType: CompressionType): Unit = { + val compression = Compression.of(compressionType).build() val largeMessageKey = 20 - val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.MAGIC_VALUE_V0, codec) - val maxMessageSize = codec match { + val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.MAGIC_VALUE_V0, compression) + val maxMessageSize = compression match { case Compression.NONE => largeMessageSet.sizeInBytes case _ => // the broker assigns absolute offsets for message format 0 which potentially causes the compressed size to @@ -157,10 +155,11 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati val log = cleaner.logs.get(topicPartitions(0)) val props = logConfigProperties(maxMessageSize = maxMessageSize) - props.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, IBP_0_9_0.version) - log.updateConfig(new LogConfig(props)) + props.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.name) + val logConfig = new LogConfig(props) + log.updateConfig(logConfig) - val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0) + val appends1 = writeDups(numKeys = 100, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V0) val startSize = log.size cleaner.startup() @@ -169,66 +168,96 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati val compactedSize = log.logSegments.asScala.map(_.size).sum assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize") - checkLogAfterAppendingDups(log, startSize, appends) + checkLogAfterAppendingDups(log, startSize, appends1) - val appends2: Seq[(Int, String, Long)] = { - val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0) - val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0) - // move LSO forward to increase compaction bound - log.updateHighWatermark(log.logEndOffset) - val largeMessageOffset = appendInfo.firstOffset - - // also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly - props.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, IBP_0_11_0_IV0.version) - log.updateConfig(new LogConfig(props)) - val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1) - val dupsV2 
= writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V2) - appends ++ dupsV0 ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dupsV1 ++ dupsV2 - } + val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V0) + val appendInfo = log.appendAsLeaderWithRecordVersion(largeMessageSet, leaderEpoch = 0, recordVersion = RecordVersion.V0) + // move LSO forward to increase compaction bound + log.updateHighWatermark(log.logEndOffset) + val largeMessageOffset = appendInfo.firstOffset + + // also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly + val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V1) + val dupsV2 = writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V2) + + val v0RecordKeysWithNoV1V2Updates = (appends1.map(_._1).toSet -- dupsV1.map(_._1) -- dupsV2.map(_._1)).map(_.toString) + val appends2: Seq[(Int, String, Long)] = + appends1 ++ dupsV0 ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dupsV1 ++ dupsV2 + + // roll the log so that all appended messages can be compacted + log.roll() val firstDirty2 = log.activeSegment.baseOffset checkLastCleaned("log", 0, firstDirty2) checkLogAfterAppendingDups(log, startSize, appends2) + checkLogAfterConvertingToV2(compressionType, log, logConfig.messageTimestampType, v0RecordKeysWithNoV1V2Updates) } - @nowarn("cat=deprecation") @ParameterizedTest @ArgumentsSource(classOf[LogCleanerParameterizedIntegrationTest.ExcludeZstd]) - def testCleaningNestedMessagesWithV0AndV1(codec: CompressionType): Unit = { - val compression = Compression.of(codec).build() + def testCleaningNestedMessagesWithV0V1(compressionType: CompressionType): Unit = { + val compression = Compression.of(compressionType).build() val maxMessageSize = 192 cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize, segmentSize = 256) val log = cleaner.logs.get(topicPartitions(0)) - val props = logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256) - props.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, IBP_0_9_0.version) - log.updateConfig(new LogConfig(props)) + val logConfig = new LogConfig(logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256)) + log.updateConfig(logConfig) - // with compression enabled, these messages will be written as a single message containing - // all of the individual messages + // with compression enabled, these messages will be written as a single message containing all the individual messages var appendsV0 = writeDupsSingleMessageSet(numKeys = 2, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V0) appendsV0 ++= writeDupsSingleMessageSet(numKeys = 2, startKey = 3, numDups = 2, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V0) - props.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, IBP_0_10_0_IV1.version) - log.updateConfig(new LogConfig(props)) - var appendsV1 = writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V1) appendsV1 ++= writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V1) appendsV1 ++= 
writeDupsSingleMessageSet(startKey = 6, numKeys = 2, numDups = 2, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V1) val appends = appendsV0 ++ appendsV1 + val v0RecordKeysWithNoV1V2Updates = (appendsV0.map(_._1).toSet -- appendsV1.map(_._1)).map(_.toString) + + // roll the log so that all appended messages can be compacted + log.roll() val startSize = log.size cleaner.startup() val firstDirty = log.activeSegment.baseOffset - assertTrue(firstDirty > appendsV0.size) // ensure we clean data from V0 and V1 + assertTrue(firstDirty >= appends.size) // ensure we clean data from V0 and V1 checkLastCleaned("log", 0, firstDirty) val compactedSize = log.logSegments.asScala.map(_.size).sum assertTrue(startSize > compactedSize, s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize") checkLogAfterAppendingDups(log, startSize, appends) + checkLogAfterConvertingToV2(compressionType, log, logConfig.messageTimestampType, v0RecordKeysWithNoV1V2Updates) + } + + private def checkLogAfterConvertingToV2(compressionType: CompressionType, log: UnifiedLog, timestampType: TimestampType, + keysForV0RecordsWithNoV1V2Updates: Set[String]): Unit = { + for (segment <- log.logSegments.asScala; recordBatch <- segment.log.batches.asScala) { + // Uncompressed v0/v1 records are always converted into single record v2 batches via compaction if they are retained + // Compressed v0/v1 record batches are converted into record batches v2 with one or more records (depending on the + // number of retained records after compaction) + assertEquals(RecordVersion.V2.value, recordBatch.magic) + if (compressionType == CompressionType.NONE) + assertEquals(1, recordBatch.iterator().asScala.size) + else + assertTrue(recordBatch.iterator().asScala.size >= 1) + + val firstRecordKey = TestUtils.readString(recordBatch.iterator().next().key()) + if (keysForV0RecordsWithNoV1V2Updates.contains(firstRecordKey)) + assertEquals(TimestampType.CREATE_TIME, recordBatch.timestampType) + else + assertEquals(timestampType, recordBatch.timestampType) + + recordBatch.iterator.asScala.foreach { record => + val recordKey = TestUtils.readString(record.key) + if (keysForV0RecordsWithNoV1V2Updates.contains(recordKey)) + assertEquals(RecordBatch.NO_TIMESTAMP, record.timestamp, "Record " + recordKey + " with unexpected timestamp ") + else + assertNotEquals(RecordBatch.NO_TIMESTAMP, record.timestamp, "Record " + recordKey + " with unexpected timestamp " + RecordBatch.NO_TIMESTAMP) + } + } } @ParameterizedTest @@ -255,7 +284,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati assertTrue(cleaner.cleanerManager.allCleanerCheckpoints.isEmpty, "Should not have cleaned") def kafkaConfigWithCleanerConfig(cleanerConfig: CleanerConfig): KafkaConfig = { - val props = TestUtils.createBrokerConfig(0, "localhost:2181") + val props = TestUtils.createBrokerConfig(0) props.put(CleanerConfig.LOG_CLEANER_THREADS_PROP, cleanerConfig.numThreads.toString) props.put(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, cleanerConfig.dedupeBufferSize.toString) props.put(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_LOAD_FACTOR_PROP, cleanerConfig.dedupeBufferLoadFactor.toString) @@ -320,10 +349,11 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati } val records = kvs.map { case (key, payload) => - new SimpleRecord(key.toString.getBytes, payload.getBytes) + new SimpleRecord(Time.SYSTEM.milliseconds(), key.toString.getBytes, payload.getBytes) } - val appendInfo = 
log.appendAsLeader(MemoryRecords.withRecords(magicValue, codec, records: _*), leaderEpoch = 0) + val appendInfo = log.appendAsLeaderWithRecordVersion(MemoryRecords.withRecords(magicValue, codec, records: _*), + leaderEpoch = 0, recordVersion = RecordVersion.lookup(magicValue)) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) val offsets = appendInfo.firstOffset to appendInfo.lastOffset diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index 81600b0f20124..b83a36a4b5ddd 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -17,7 +17,6 @@ package kafka.log -import kafka.common._ import kafka.server.KafkaConfig import kafka.utils.{CoreUtils, Logging, Pool, TestUtils} import org.apache.kafka.common.TopicPartition @@ -29,7 +28,7 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogCleaningAbortedException, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.internals.utils.Throttler import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ @@ -46,7 +45,6 @@ import java.util.Properties import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, TimeUnit} import scala.collection._ import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption /** * Unit tests for the log cleaning logic @@ -133,8 +131,8 @@ class LogCleanerTest extends Logging { var nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) assertEquals(0, nonexistent.size, s"$nonexistent should be existent") - logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181")), - new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))) + logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1)), + new KafkaConfig(TestUtils.createBrokerConfig(1))) nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) assertEquals(0, nonexistent.size, s"$nonexistent should be existent") @@ -189,8 +187,8 @@ class LogCleanerTest extends Logging { val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val logSegments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - dir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + dir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new 
ProducerStateManager(topicPartition, dir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( @@ -204,7 +202,7 @@ class LogCleanerTest extends Logging { logSegments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -217,8 +215,7 @@ class LogCleanerTest extends Logging { producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs, leaderEpochCache = leaderEpochCache, producerStateManager = producerStateManager, - _topicId = None, - keepPartitionMetadataFile = true) { + _topicId = None) { override def replaceSegments(newSegments: Seq[LogSegment], oldSegments: Seq[LogSegment]): Unit = { deleteStartLatch.countDown() if (!deleteCompleteLatch.await(5000, TimeUnit.MILLISECONDS)) { @@ -1219,7 +1216,7 @@ class LogCleanerTest extends Logging { def distinctValuesBySegment = log.logSegments.asScala.map(s => s.log.records.asScala.map(record => TestUtils.readString(record.value)).toSet.size).toSeq - val disctinctValuesBySegmentBeforeClean = distinctValuesBySegment + val distinctValuesBySegmentBeforeClean = distinctValuesBySegment assertTrue(distinctValuesBySegment.reverse.tail.forall(_ > N), "Test is not effective unless each segment contains duplicates. Increase segment size or decrease number of keys.") @@ -1227,10 +1224,10 @@ class LogCleanerTest extends Logging { val distinctValuesBySegmentAfterClean = distinctValuesBySegment - assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) + assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .take(numCleanableSegments).forall { case (before, after) => after < before }, "The cleanable segments should have fewer number of values after cleaning") - assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) + assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .slice(numCleanableSegments, numTotalSegments).forall { x => x._1 == x._2 }, "The uncleanable segments should have the same number of values after cleaning") } @@ -1242,9 +1239,9 @@ class LogCleanerTest extends Logging { val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment - def createRecorcs = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) + def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) - log.appendAsLeader(createRecorcs, leaderEpoch = 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) val logToClean = LogToClean(new TopicPartition("test", 0), log, log.activeSegment.baseOffset, log.activeSegment.baseOffset) @@ -1919,7 +1916,10 @@ class LogCleanerTest extends Logging { @Test def testCleanTombstone(): Unit = { - val logConfig = new LogConfig(new Properties()) + val properties = new Properties() + // This test uses future timestamps beyond the default of 1 hour. 
+ properties.put(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.MaxValue.toString) + val logConfig = new LogConfig(properties) val log = makeLog(config = logConfig) val cleaner = makeCleaner(10) @@ -2017,7 +2017,7 @@ class LogCleanerTest extends Logging { @Test def testReconfigureLogCleanerIoMaxBytesPerSecond(): Unit = { - val oldKafkaProps = TestUtils.createBrokerConfig(1, "localhost:2181") + val oldKafkaProps = TestUtils.createBrokerConfig(1) oldKafkaProps.setProperty(CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP, "10000000") val logCleaner = new LogCleaner(LogCleaner.cleanerConfig(new KafkaConfig(oldKafkaProps)), @@ -2034,7 +2034,7 @@ class LogCleanerTest extends Logging { try { assertEquals(10000000, logCleaner.throttler.desiredRatePerSec, s"Throttler.desiredRatePerSec should be initialized from initial `${CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP}` config.") - val newKafkaProps = TestUtils.createBrokerConfig(1, "localhost:2181") + val newKafkaProps = TestUtils.createBrokerConfig(1) newKafkaProps.setProperty(CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP, "20000000") logCleaner.reconfigure(new KafkaConfig(oldKafkaProps), new KafkaConfig(newKafkaProps)) @@ -2045,6 +2045,45 @@ class LogCleanerTest extends Logging { } } + @Test + def testMaxOverCleanerThreads(): Unit = { + val logCleaner = new LogCleaner(new CleanerConfig(true), + logDirs = Array(TestUtils.tempDir(), TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) + + val cleaners = logCleaner.cleaners + + val cleaner1 = new logCleaner.CleanerThread(1) + cleaner1.lastStats = new CleanerStats(time) + cleaner1.lastStats.bufferUtilization = 0.75 + cleaners += cleaner1 + + val cleaner2 = new logCleaner.CleanerThread(2) + cleaner2.lastStats = new CleanerStats(time) + cleaner2.lastStats.bufferUtilization = 0.85 + cleaners += cleaner2 + + val cleaner3 = new logCleaner.CleanerThread(3) + cleaner3.lastStats = new CleanerStats(time) + cleaner3.lastStats.bufferUtilization = 0.65 + cleaners += cleaner3 + + assertEquals(0, logCleaner.maxOverCleanerThreads(_.lastStats.bufferUtilization)) + + cleaners.clear() + + cleaner1.lastStats.bufferUtilization = 5d + cleaners += cleaner1 + cleaner2.lastStats.bufferUtilization = 6d + cleaners += cleaner2 + cleaner3.lastStats.bufferUtilization = 7d + cleaners += cleaner3 + + assertEquals(7, logCleaner.maxOverCleanerThreads(_.lastStats.bufferUtilization)) + } + private def writeToLog(log: UnifiedLog, keysAndValues: Iterable[(Int, Int)], offsetSeq: Iterable[Long]): Iterable[Long] = { for (((key, value), offset) <- keysAndValues.zip(offsetSeq)) yield log.appendAsFollower(messageWithOffset(key, value, offset)).lastOffset @@ -2094,8 +2133,7 @@ class LogCleanerTest extends Logging { producerStateManagerConfig = producerStateManagerConfig, producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true + topicId = None ) } diff --git a/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala b/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala index d6d2b0665064b..342ef145b6ddb 100644 --- a/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala @@ -156,8 +156,7 @@ class LogConcurrencyTest { producerStateManagerConfig = new 
ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true + topicId = None ) } diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index 8126bb08b077e..3a0a450f05a6c 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -28,14 +28,12 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import java.util.{Collections, Properties} -import org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1 import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.storage.internals.log.{LogConfig, ThrottledReplicaListValidator} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ class LogConfigTest { @@ -59,17 +57,15 @@ class LogConfigTest { }) } - @nowarn("cat=deprecation") @Test def testKafkaConfigToProps(): Unit = { val millisInHour = 60L * 60L * 1000L val millisInDay = 24L * millisInHour val bytesInGB: Long = 1024 * 1024 * 1024 - val kafkaProps = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + val kafkaProps = TestUtils.createBrokerConfig(nodeId = 0) kafkaProps.put(ServerLogConfigs.LOG_ROLL_TIME_HOURS_CONFIG, "2") kafkaProps.put(ServerLogConfigs.LOG_ROLL_TIME_JITTER_HOURS_CONFIG, "2") kafkaProps.put(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, "960") // 40 days - kafkaProps.put(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, "0.11.0") kafkaProps.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP, "2592000000") // 30 days kafkaProps.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP, "4294967296") // 4 GB @@ -77,13 +73,10 @@ class LogConfigTest { assertEquals(2 * millisInHour, logProps.get(TopicConfig.SEGMENT_MS_CONFIG)) assertEquals(2 * millisInHour, logProps.get(TopicConfig.SEGMENT_JITTER_MS_CONFIG)) assertEquals(40 * millisInDay, logProps.get(TopicConfig.RETENTION_MS_CONFIG)) - // The message format version should always be 3.0 if the inter-broker protocol version is 3.0 or higher - assertEquals(IBP_3_0_IV1.version, logProps.get(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)) assertEquals(30 * millisInDay, logProps.get(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) assertEquals(4 * bytesInGB, logProps.get(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } - @nowarn("cat=deprecation") @Test def testFromPropsInvalid(): Unit = { LogConfig.configNames.forEach(name => name match { @@ -93,7 +86,6 @@ class LogConfigTest { case TopicConfig.CLEANUP_POLICY_CONFIG => assertPropertyInvalid(name, "true", "foobar") case TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG => assertPropertyInvalid(name, "not_a_number", "-0.1", "1.2") case TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG => assertPropertyInvalid(name, "not_a_number", "0", "-1") - case TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG => assertPropertyInvalid(name, "") case TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG => assertPropertyInvalid(name, "not_a_boolean") case TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG => assertPropertyInvalid(name, "not_a_number", "-3") case TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG => 
assertPropertyInvalid(name, "not_a_number", "-3") @@ -189,7 +181,7 @@ class LogConfigTest { @Test def testOverriddenConfigsAsLoggableString(): Unit = { - val kafkaProps = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + val kafkaProps = TestUtils.createBrokerConfig(nodeId = 0) kafkaProps.put("unknown.broker.password.config", "aaaaa") kafkaProps.put(ServerLogConfigs.LOG_RETENTION_BYTES_CONFIG, "50") kafkaProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "somekeypassword") @@ -470,29 +462,11 @@ class LogConfigTest { "`remote.log.copy.disable` under Zookeeper's mode.")) } - /* Verify that when the deprecated config LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG has non default value the new configs - * LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG and LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG are not changed from the default we are using - * the deprecated config for backward compatibility. - * See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details */ - @nowarn("cat=deprecation") - @Test - def testTimestampBeforeMaxMsUsesDeprecatedConfig(): Unit = { - val oneDayInMillis = 24 * 60 * 60 * 1000L - val kafkaProps = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") - kafkaProps.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, Long.MaxValue.toString) - kafkaProps.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.MaxValue.toString) - kafkaProps.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, oneDayInMillis.toString) - - val logProps = KafkaConfig.fromProps(kafkaProps).extractLogConfigMap - assertEquals(oneDayInMillis, logProps.get(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG)) - assertEquals(oneDayInMillis, logProps.get(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG)) - } - @Test def testValidateWithMetadataVersionJbodSupport(): Unit = { def validate(metadataVersion: MetadataVersion, jbodConfig: Boolean): Unit = KafkaConfig.fromProps( - TestUtils.createBrokerConfig(nodeId = 0, zkConnect = null, logDirCount = if (jbodConfig) 2 else 1) + TestUtils.createBrokerConfig(nodeId = 0, logDirCount = if (jbodConfig) 2 else 1) ).validateWithMetadataVersion(metadataVersion) validate(MetadataVersion.IBP_3_6_IV2, jbodConfig = false) diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala index e7e99852c539c..a2b49685b4357 100644 --- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala @@ -24,11 +24,9 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.KafkaStorageException -import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, RecordVersion, SimpleRecord, TimestampType} +import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, SimpleRecord, TimestampType} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.common.MetadataVersion.IBP_0_11_0_IV0 import org.apache.kafka.server.util.{MockTime, Scheduler} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, EpochEntry, LocalLog, LogConfig, LogDirFailureChannel, 
LogFileUtils, LogLoader, LogOffsetMetadata, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetIndex, ProducerStateManager, ProducerStateManagerConfig, SnapshotFile} @@ -50,11 +48,10 @@ import java.nio.file.{Files, NoSuchFileException, Paths} import java.util import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.{Optional, OptionalLong, Properties} -import scala.annotation.nowarn import scala.collection.mutable.ListBuffer import scala.collection.{Iterable, Map, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional} +import scala.jdk.OptionConverters.RichOptional class LogLoaderTest { var config: KafkaConfig = _ @@ -69,7 +66,7 @@ class LogLoaderTest { @BeforeEach def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(0, "127.0.0.1:1", port = -1) + val props = TestUtils.createBrokerConfig(0, port = -1) config = KafkaConfig.fromProps(props) } @@ -130,7 +127,6 @@ class LogLoaderTest { brokerTopicStats = new BrokerTopicStats(), logDirFailureChannel = logDirFailureChannel, time = time, - keepPartitionMetadataFile = config.usesTopicId, remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), initialTaskDelayMs = config.logInitialTaskDelayMs) { @@ -157,13 +153,13 @@ class LogLoaderTest { val logStartOffset = logStartOffsets.getOrDefault(topicPartition, 0L) val logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, logDir, this.maxTransactionTimeoutMs, this.producerStateManagerConfig, time) val logLoader = new LogLoader(logDir, topicPartition, config, time.scheduler, time, logDirFailureChannel, hadCleanShutdown, segments, logStartOffset, logRecoveryPoint, - leaderEpochCache.toJava, producerStateManager, new ConcurrentHashMap[String, Integer], false) + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false) val offsets = logLoader.load() val localLog = new LocalLog(logDir, logConfig, segments, offsets.recoveryPoint, offsets.nextOffsetMetadata, mockTime.scheduler, mockTime, topicPartition, @@ -248,74 +244,9 @@ class LogLoaderTest { } @Test - def testProducerSnapshotsRecoveryAfterUncleanShutdownV1(): Unit = { - testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.minSupportedFor(RecordVersion.V1).version) - } - - @Test - def testProducerSnapshotsRecoveryAfterUncleanShutdownCurrentMessageFormat(): Unit = { - testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.latestTesting.version) - } - - private def createLog(dir: File, - config: LogConfig, - brokerTopicStats: BrokerTopicStats = brokerTopicStats, - logStartOffset: Long = 0L, - recoveryPoint: Long = 0L, - scheduler: Scheduler = mockTime.scheduler, - time: Time = mockTime, - maxTransactionTimeoutMs: Int = maxTransactionTimeoutMs, - maxProducerIdExpirationMs: Int = producerStateManagerConfig.producerIdExpirationMs, - producerIdExpirationCheckIntervalMs: Int = producerIdExpirationCheckIntervalMs, - lastShutdownClean: Boolean = true): UnifiedLog = { - val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, 
recoveryPoint, - maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), producerIdExpirationCheckIntervalMs, lastShutdownClean) - logsToClose = logsToClose :+ log - log - } - - private def createLogWithOffsetOverflow(logConfig: LogConfig): (UnifiedLog, LogSegment) = { - LogTestUtils.initializeLogDirWithOverflowedSegment(logDir) - - val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue) - val segmentWithOverflow = LogTestUtils.firstOverflowSegment(log).getOrElse { - throw new AssertionError("Failed to create log with a segment which has overflowed offsets") - } - - (log, segmentWithOverflow) - } - - private def recoverAndCheck(config: LogConfig, expectedKeys: Iterable[Long]): UnifiedLog = { - // method is called only in case of recovery from hard reset - val recoveredLog = LogTestUtils.recoverAndCheck(logDir, config, expectedKeys, brokerTopicStats, mockTime, mockTime.scheduler) - logsToClose = logsToClose :+ recoveredLog - recoveredLog - } - - /** - * Wrap a single record log buffer with leader epoch. - */ - private def singletonRecordsWithLeaderEpoch(value: Array[Byte], - key: Array[Byte] = null, - leaderEpoch: Int, - offset: Long, - codec: Compression = Compression.NONE, - timestamp: Long = RecordBatch.NO_TIMESTAMP, - magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = { - val records = Seq(new SimpleRecord(timestamp, key, value)) - - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) - val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset, - mockTime.milliseconds, leaderEpoch) - records.foreach(builder.append) - builder.build() - } - - @nowarn("cat=deprecation") - private def testProducerSnapshotsRecoveryAfterUncleanShutdown(messageFormatVersion: String): Unit = { + def testProducerSnapshotsRecoveryAfterUncleanShutdown(): Unit = { val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "640") - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, messageFormatVersion) val logConfig = new LogConfig(logProps) var log = createLog(logDir, logConfig) assertEquals(OptionalLong.empty(), log.oldestProducerSnapshotOffset) @@ -343,13 +274,8 @@ class LogLoaderTest { val expectedSegmentsWithReads = mutable.Set[Long]() val expectedSnapshotOffsets = mutable.Set[Long]() - if (logConfig.messageFormatVersion.isLessThan(IBP_0_11_0_IV0)) { - expectedSegmentsWithReads += activeSegmentOffset - expectedSnapshotOffsets ++= log.logSegments.asScala.map(_.baseOffset).toVector.takeRight(2) :+ log.logEndOffset - } else { - expectedSegmentsWithReads ++= segOffsetsBeforeRecovery ++ Set(activeSegmentOffset) - expectedSnapshotOffsets ++= log.logSegments.asScala.map(_.baseOffset).toVector.takeRight(4) :+ log.logEndOffset - } + expectedSegmentsWithReads ++= segOffsetsBeforeRecovery ++ Set(activeSegmentOffset) + expectedSnapshotOffsets ++= log.logSegments.asScala.map(_.baseOffset).toVector.takeRight(4) :+ log.logEndOffset def createLogWithInterceptedReads(recoveryPoint: Long): UnifiedLog = { val maxTransactionTimeoutMs = 5 * 60 * 1000 @@ -366,13 +292,13 @@ class LogLoaderTest { }.when(wrapper).read(ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any()) Mockito.doAnswer { in => recoveredSegments += wrapper - segment.recover(in.getArgument(0, classOf[ProducerStateManager]), in.getArgument(1, classOf[Optional[LeaderEpochFileCache]])) + segment.recover(in.getArgument(0, classOf[ProducerStateManager]), in.getArgument(1, 
classOf[LeaderEpochFileCache])) }.when(wrapper).recover(ArgumentMatchers.any(), ArgumentMatchers.any()) super.add(wrapper) } } - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxTransactionTimeoutMs, producerStateManagerConfig, mockTime) val logLoader = new LogLoader( @@ -386,7 +312,7 @@ interceptedLogSegments, 0L, recoveryPoint, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -397,7 +323,7 @@ logDirFailureChannel) new UnifiedLog(offsets.logStartOffset, localLog, brokerTopicStats, producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - None, keepPartitionMetadataFile = true) + None) } // Retain snapshots for the last 2 segments @@ -422,6 +348,60 @@ log.close() } + private def createLog(dir: File, + config: LogConfig, + brokerTopicStats: BrokerTopicStats = brokerTopicStats, + logStartOffset: Long = 0L, + recoveryPoint: Long = 0L, + scheduler: Scheduler = mockTime.scheduler, + time: Time = mockTime, + maxTransactionTimeoutMs: Int = maxTransactionTimeoutMs, + maxProducerIdExpirationMs: Int = producerStateManagerConfig.producerIdExpirationMs, + producerIdExpirationCheckIntervalMs: Int = producerIdExpirationCheckIntervalMs, + lastShutdownClean: Boolean = true): UnifiedLog = { + val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint, + maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), producerIdExpirationCheckIntervalMs, lastShutdownClean) + logsToClose = logsToClose :+ log + log + } + + private def createLogWithOffsetOverflow(logConfig: LogConfig): (UnifiedLog, LogSegment) = { + LogTestUtils.initializeLogDirWithOverflowedSegment(logDir) + + val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue) + val segmentWithOverflow = LogTestUtils.firstOverflowSegment(log).getOrElse { + throw new AssertionError("Failed to create log with a segment which has overflowed offsets") + } + + (log, segmentWithOverflow) + } + + private def recoverAndCheck(config: LogConfig, expectedKeys: Iterable[Long]): UnifiedLog = { + // This method is only called when recovering from a hard reset + val recoveredLog = LogTestUtils.recoverAndCheck(logDir, config, expectedKeys, brokerTopicStats, mockTime, mockTime.scheduler) + logsToClose = logsToClose :+ recoveredLog + recoveredLog + } + + /** + * Wrap a single record log buffer with leader epoch.
+ */ + private def singletonRecordsWithLeaderEpoch(value: Array[Byte], + key: Array[Byte] = null, + leaderEpoch: Int, + offset: Long, + codec: Compression = Compression.NONE, + timestamp: Long = RecordBatch.NO_TIMESTAMP, + magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = { + val records = Seq(new SimpleRecord(timestamp, key, value)) + + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) + val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset, + mockTime.milliseconds, leaderEpoch) + records.foreach(builder.append) + builder.build() + } + @Test def testSkipLoadingIfEmptyProducerStateBeforeTruncation(): Unit = { val maxTransactionTimeoutMs = 60000 @@ -439,8 +419,8 @@ class LogLoaderTest { val logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(1) val config = new LogConfig(new Properties()) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, @@ -452,7 +432,7 @@ class LogLoaderTest { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, stateManager, new ConcurrentHashMap[String, Integer], false @@ -466,8 +446,7 @@ class LogLoaderTest { producerIdExpirationCheckIntervalMs = 30000, leaderEpochCache = leaderEpochCache, producerStateManager = stateManager, - _topicId = None, - keepPartitionMetadataFile = true) + _topicId = None) verify(stateManager).updateMapEndOffset(0L) verify(stateManager).removeStraySnapshots(any()) @@ -533,119 +512,6 @@ class LogLoaderTest { log.close() } - @nowarn("cat=deprecation") - @Test - def testSkipTruncateAndReloadIfOldMessageFormatAndNoCleanShutdown(): Unit = { - val maxTransactionTimeoutMs = 60000 - val producerStateManagerConfig = new ProducerStateManagerConfig(300000, false) - - val stateManager: ProducerStateManager = mock(classOf[ProducerStateManager]) - when(stateManager.isEmpty).thenReturn(true) - when(stateManager.firstUnstableOffset).thenReturn(Optional.empty[LogOffsetMetadata]()) - when(stateManager.producerStateManagerConfig).thenReturn(producerStateManagerConfig) - when(stateManager.maxTransactionTimeoutMs).thenReturn(maxTransactionTimeoutMs) - - val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) - val logProps = new Properties() - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") - val config = new LogConfig(logProps) - val logDirFailureChannel = null - val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) - val offsets = new LogLoader( - logDir, - topicPartition, - config, - mockTime.scheduler, - mockTime, - logDirFailureChannel, - false, - segments, - 0L, - 0L, - leaderEpochCache.toJava, - stateManager, - new ConcurrentHashMap[String, Integer], - false - ).load() - val localLog = new LocalLog(logDir, config, segments, offsets.recoveryPoint, - offsets.nextOffsetMetadata, mockTime.scheduler, mockTime, topicPartition, - logDirFailureChannel) - new UnifiedLog(offsets.logStartOffset, - localLog, - brokerTopicStats = brokerTopicStats, - producerIdExpirationCheckIntervalMs = 30000, - leaderEpochCache = leaderEpochCache, 
- producerStateManager = stateManager, - _topicId = None, - keepPartitionMetadataFile = true) - - verify(stateManager).removeStraySnapshots(any[java.util.List[java.lang.Long]]) - verify(stateManager, times(2)).updateMapEndOffset(0L) - verify(stateManager, times(2)).takeSnapshot() - verify(stateManager).isEmpty - verify(stateManager).firstUnstableOffset - verify(stateManager, times(2)).takeSnapshot() - verify(stateManager, times(2)).updateMapEndOffset(0L) - } - - @nowarn("cat=deprecation") - @Test - def testSkipTruncateAndReloadIfOldMessageFormatAndCleanShutdown(): Unit = { - val maxTransactionTimeoutMs = 60000 - val producerStateManagerConfig = new ProducerStateManagerConfig(300000, false) - - val stateManager: ProducerStateManager = mock(classOf[ProducerStateManager]) - when(stateManager.isEmpty).thenReturn(true) - when(stateManager.firstUnstableOffset).thenReturn(Optional.empty[LogOffsetMetadata]()) - when(stateManager.producerStateManagerConfig).thenReturn(producerStateManagerConfig) - when(stateManager.maxTransactionTimeoutMs).thenReturn(maxTransactionTimeoutMs) - - val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) - val logProps = new Properties() - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") - val config = new LogConfig(logProps) - val logDirFailureChannel = null - val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) - val offsets = new LogLoader( - logDir, - topicPartition, - config, - mockTime.scheduler, - mockTime, - logDirFailureChannel, - true, - segments, - 0L, - 0L, - leaderEpochCache.toJava, - stateManager, - new ConcurrentHashMap[String, Integer], - false - ).load() - val localLog = new LocalLog(logDir, config, segments, offsets.recoveryPoint, - offsets.nextOffsetMetadata, mockTime.scheduler, mockTime, topicPartition, - logDirFailureChannel) - new UnifiedLog(offsets.logStartOffset, - localLog, - brokerTopicStats = brokerTopicStats, - producerIdExpirationCheckIntervalMs = 30000, - leaderEpochCache = leaderEpochCache, - producerStateManager = stateManager, - _topicId = None, - keepPartitionMetadataFile = true) - - verify(stateManager).removeStraySnapshots(any[java.util.List[java.lang.Long]]) - verify(stateManager, times(2)).updateMapEndOffset(0L) - verify(stateManager, times(2)).takeSnapshot() - verify(stateManager).isEmpty - verify(stateManager).firstUnstableOffset - } - - @nowarn("cat=deprecation") @Test def testSkipTruncateAndReloadIfNewMessageFormatAndCleanShutdown(): Unit = { val maxTransactionTimeoutMs = 60000 @@ -659,13 +525,11 @@ class LogLoaderTest { when(stateManager.maxTransactionTimeoutMs).thenReturn(maxTransactionTimeoutMs) val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) - val logProps = new Properties() - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.11.0") - val config = new LogConfig(logProps) + val config = new LogConfig(new Properties()) val logDirFailureChannel = null val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, config.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, @@ -677,7 +541,7 @@ class LogLoaderTest { segments, 0L, 0L, - 
leaderEpochCache.toJava, + leaderEpochCache, stateManager, new ConcurrentHashMap[String, Integer], false @@ -691,8 +555,7 @@ class LogLoaderTest { producerIdExpirationCheckIntervalMs = 30000, leaderEpochCache = leaderEpochCache, producerStateManager = stateManager, - _topicId = None, - keepPartitionMetadataFile = true) + _topicId = None) verify(stateManager).removeStraySnapshots(any[java.util.List[java.lang.Long]]) verify(stateManager, times(2)).updateMapEndOffset(0L) @@ -888,38 +751,6 @@ class LogLoaderTest { log.close() } - /** - * Test that if messages format version of the messages in a segment is before 0.10.0, the time index should be empty. - */ - @nowarn("cat=deprecation") - @Test - def testRebuildTimeIndexForOldMessages(): Unit = { - val numMessages = 200 - val segmentSize = 200 - val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize.toString) - logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.9.0") - val logConfig = new LogConfig(logProps) - var log = createLog(logDir, logConfig) - for (i <- 0 until numMessages) - log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), - timestamp = mockTime.milliseconds + i * 10, magicValue = RecordBatch.MAGIC_VALUE_V1), leaderEpoch = 0) - val timeIndexFiles = log.logSegments.asScala.map(_.timeIndexFile()) - log.close() - - // Delete the time index. - timeIndexFiles.foreach(file => Files.delete(file.toPath)) - - // The rebuilt time index should be empty - log = createLog(logDir, logConfig, recoveryPoint = numMessages + 1, lastShutdownClean = false) - for (segment <- log.logSegments.asScala.init) { - assertEquals(0, segment.timeIndex.entries, "The time index should be empty") - assertEquals(0, segment.timeIndexFile().length, "The time index file size should be 0") - } - } - - /** * Test that if we have corrupted an index segment it is rebuilt when the log is re-opened */ @@ -1116,30 +947,6 @@ class LogLoaderTest { Utils.delete(logDir) } - @nowarn("cat=deprecation") - @Test - def testLeaderEpochCacheClearedAfterStaticMessageFormatDowngrade(): Unit = { - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 65536) - val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) - assertEquals(Some(5), log.latestEpoch) - log.close() - - // reopen the log with an older message format version and check the cache - val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000") - logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") - logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "65536") - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") - val downgradedLogConfig = new LogConfig(logProps) - val reopened = createLog(logDir, downgradedLogConfig, lastShutdownClean = false) - LogTestUtils.assertLeaderEpochCacheEmpty(reopened) - - reopened.appendAsLeader(TestUtils.records(List(new SimpleRecord("bar".getBytes())), - magicValue = RecordVersion.V1.value), leaderEpoch = 5) - LogTestUtils.assertLeaderEpochCacheEmpty(reopened) - } - @Test def testOverCompactedLogRecoveryMultiRecord(): Unit = { // append some messages to create some segments @@ -1395,7 +1202,7 @@ class LogLoaderTest { @Test def testLogRecoversForLeaderEpoch(): Unit = { val log = createLog(logDir, new LogConfig(new Properties)) - val leaderEpochCache = log.leaderEpochCache.get + val 
leaderEpochCache = log.leaderEpochCache val firstBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 1, offset = 0) log.appendAsFollower(records = firstBatch) @@ -1417,7 +1224,7 @@ class LogLoaderTest { // reopen the log and recover from the beginning val recoveredLog = createLog(logDir, new LogConfig(new Properties), lastShutdownClean = false) - val recoveredLeaderEpochCache = recoveredLog.leaderEpochCache.get + val recoveredLeaderEpochCache = recoveredLog.leaderEpochCache // epoch entries should be recovered assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries) @@ -1813,8 +1620,8 @@ class LogLoaderTest { log.logSegments.forEach(segment => segments.add(segment)) assertEquals(5, segments.firstSegment.get.baseOffset) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, @@ -1826,7 +1633,7 @@ class LogLoaderTest { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, stateManager, new ConcurrentHashMap[String, Integer], isRemoteLogEnabled diff --git a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala index 28adaa35fab1d..98edbec7cbbc6 100755 --- a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala @@ -800,7 +800,7 @@ class LogManagerTest { val newProperties = new Properties() newProperties.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) - spyLogManager.updateTopicConfig(topic, newProperties, isRemoteLogStorageSystemEnabled = false, wasRemoteLogEnabled = false, fromZK = false) + spyLogManager.updateTopicConfig(topic, newProperties, isRemoteLogStorageSystemEnabled = false, wasRemoteLogEnabled = false) assertTrue(log0.config.delete) assertTrue(log1.config.delete) @@ -975,7 +975,6 @@ class LogManagerTest { // not clean shutdown lastShutdownClean = false, topicId = None, - keepPartitionMetadataFile = false, // pass mock map for verification later numRemainingSegments = mockMap) @@ -1383,7 +1382,6 @@ class LogManagerTest { time = Time.SYSTEM, brokerTopicStats = new BrokerTopicStats, logDirFailureChannel = new LogDirFailureChannel(1), - keepPartitionMetadataFile = true, interBrokerProtocolVersion = MetadataVersion.latestTesting, remoteStorageSystemEnable = false, initialTaskDelayMs = 0) diff --git a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala index 6e27ea75944fe..bf2b71676faf8 100644 --- a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala +++ b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala @@ -35,7 +35,6 @@ import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler -import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile import org.apache.kafka.storage.internals.log.LogConfig.{DEFAULT_REMOTE_LOG_COPY_DISABLE_CONFIG, DEFAULT_REMOTE_LOG_DELETE_ON_DISABLE_CONFIG} import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, FetchDataInfo, LazyIndex, 
LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogOffsetsListener, LogSegment, ProducerStateManager, ProducerStateManagerConfig, TransactionIndex} import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -104,7 +103,6 @@ object LogTestUtils { producerIdExpirationCheckIntervalMs: Int = TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, lastShutdownClean: Boolean = true, topicId: Option[Uuid] = None, - keepPartitionMetadataFile: Boolean = true, numRemainingSegments: ConcurrentMap[String, Integer] = new ConcurrentHashMap[String, Integer], remoteStorageSystemEnable: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, @@ -123,7 +121,6 @@ object LogTestUtils { logDirFailureChannel = new LogDirFailureChannel(10), lastShutdownClean = lastShutdownClean, topicId = topicId, - keepPartitionMetadataFile = keepPartitionMetadataFile, numRemainingSegments = numRemainingSegments, remoteStorageSystemEnable = remoteStorageSystemEnable, logOffsetsListener = logOffsetsListener @@ -262,12 +259,6 @@ object LogTestUtils { def listProducerSnapshotOffsets(logDir: File): Seq[Long] = ProducerStateManager.listSnapshotFiles(logDir).asScala.map(_.offset).sorted.toSeq - def assertLeaderEpochCacheEmpty(log: UnifiedLog): Unit = { - assertEquals(None, log.leaderEpochCache) - assertEquals(None, log.latestEpoch) - assertFalse(LeaderEpochCheckpointFile.newFile(log.dir).exists()) - } - def appendNonTransactionalAsLeader(log: UnifiedLog, numRecords: Int): Unit = { val simpleRecords = (0 until numRecords).map { seq => new SimpleRecord(s"$seq".getBytes) diff --git a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala index 600cf2d2237ac..edbc8db0fb2e0 100755 --- a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala +++ b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala @@ -17,7 +17,6 @@ package kafka.log -import kafka.common.{OffsetsOutOfOrderException, UnexpectedAppendOffsetException} import kafka.log.remote.RemoteLogManager import kafka.server.{DelayedRemoteListOffsets, KafkaConfig} import kafka.utils.TestUtils @@ -34,15 +33,16 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse} import org.apache.kafka.common.utils.{BufferSupplier, Time, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.server.config.KRaftConfigs import org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig import org.apache.kafka.server.log.remote.storage.{NoOpRemoteLogMetadataManager, NoOpRemoteStorageManager, RemoteLogManagerConfig} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.purgatory.DelayedOperationPurgatory -import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffsetException} import org.apache.kafka.server.util.{KafkaScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, VerificationGuard} +import 
org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, VerificationGuard} import org.apache.kafka.storage.internals.utils.Throttler import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats} import org.junit.jupiter.api.Assertions._ @@ -57,12 +57,10 @@ import java.io._ import java.nio.ByteBuffer import java.nio.file.Files import java.util.concurrent.{Callable, ConcurrentHashMap, Executors, TimeUnit} -import java.util.{Optional, OptionalLong, Properties} -import scala.annotation.nowarn +import java.util.{Optional, OptionalInt, OptionalLong, Properties} import scala.collection.immutable.SortedSet import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOptional, RichOptionalInt} class UnifiedLogTest { var config: KafkaConfig = _ @@ -76,7 +74,7 @@ class UnifiedLogTest { @BeforeEach def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(0, "127.0.0.1:1", port = -1) + val props = TestUtils.createBrokerConfig(0, port = -1) config = KafkaConfig.fromProps(props) } @@ -656,23 +654,20 @@ class UnifiedLogTest { val records = TestUtils.records(List(new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)), baseOffset = 27) appendAsFollower(log, records, leaderEpoch = 19) - assertEquals(Some(new EpochEntry(19, 27)), - log.leaderEpochCache.flatMap(_.latestEntry.toScala)) + assertEquals(Optional.of(new EpochEntry(19, 27)), log.leaderEpochCache.latestEntry) assertEquals(29, log.logEndOffset) def verifyTruncationClearsEpochCache(epoch: Int, truncationOffset: Long): Unit = { // Simulate becoming a leader - log.maybeAssignEpochStartOffset(leaderEpoch = epoch, startOffset = log.logEndOffset) - assertEquals(Some(new EpochEntry(epoch, 29)), - log.leaderEpochCache.flatMap(_.latestEntry.toScala)) + log.assignEpochStartOffset(leaderEpoch = epoch, startOffset = log.logEndOffset) + assertEquals(Optional.of(new EpochEntry(epoch, 29)), log.leaderEpochCache.latestEntry) assertEquals(29, log.logEndOffset) // Now we become the follower and truncate to an offset greater // than or equal to the log end offset. 
The trivial epoch entry // at the end of the log should be gone log.truncateTo(truncationOffset) - assertEquals(Some(new EpochEntry(19, 27)), - log.leaderEpochCache.flatMap(_.latestEntry.toScala)) + assertEquals(Optional.of(new EpochEntry(19, 27)), log.leaderEpochCache.latestEntry) assertEquals(29, log.logEndOffset) } @@ -818,11 +813,11 @@ class UnifiedLogTest { records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) - records.filterTo(new TopicPartition("foo", 0), new RecordFilter(0, 0) { + records.filterTo(new RecordFilter(0, 0) { override def checkBatchRetention(batch: RecordBatch): RecordFilter.BatchRetentionResult = new RecordFilter.BatchRetentionResult(RecordFilter.BatchRetention.DELETE_EMPTY, false) override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = !record.hasKey - }, filtered, Int.MaxValue, BufferSupplier.NO_CACHING) + }, filtered, BufferSupplier.NO_CACHING) filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) @@ -860,11 +855,11 @@ class UnifiedLogTest { records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) - records.filterTo(new TopicPartition("foo", 0), new RecordFilter(0, 0) { + records.filterTo(new RecordFilter(0, 0) { override def checkBatchRetention(batch: RecordBatch): RecordFilter.BatchRetentionResult = new RecordFilter.BatchRetentionResult(RecordFilter.BatchRetention.RETAIN_EMPTY, true) override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = false - }, filtered, Int.MaxValue, BufferSupplier.NO_CACHING) + }, filtered, BufferSupplier.NO_CACHING) filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) @@ -904,11 +899,11 @@ class UnifiedLogTest { records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) - records.filterTo(new TopicPartition("foo", 0), new RecordFilter(0, 0) { + records.filterTo(new RecordFilter(0, 0) { override def checkBatchRetention(batch: RecordBatch): RecordFilter.BatchRetentionResult = new RecordFilter.BatchRetentionResult(RecordFilter.BatchRetention.DELETE_EMPTY, false) override def shouldRetainRecord(recordBatch: RecordBatch, record: Record): Boolean = !record.hasKey - }, filtered, Int.MaxValue, BufferSupplier.NO_CACHING) + }, filtered, BufferSupplier.NO_CACHING) filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) @@ -1969,26 +1964,6 @@ class UnifiedLogTest { log.close() } - @Test - def testNoOpWhenKeepPartitionMetadataFileIsFalse(): Unit = { - val logConfig = LogTestUtils.createLogConfig() - val log = createLog(logDir, logConfig, keepPartitionMetadataFile = false) - - val topicId = Uuid.randomUuid() - log.assignTopicId(topicId) - // We should not write to this file or set the topic ID - assertFalse(log.partitionMetadataFile.get.exists()) - assertEquals(None, log.topicId) - log.close() - - val log2 = createLog(logDir, logConfig, topicId = Some(Uuid.randomUuid()), keepPartitionMetadataFile = false) - - // We should not write to this file or set the topic ID - assertFalse(log2.partitionMetadataFile.get.exists()) - assertEquals(None, log2.topicId) - log2.close() - } - @Test def testLogFailsWhenInconsistentTopicIdSet(): Unit = { val logConfig = LogTestUtils.createLogConfig() @@ -2031,7 +2006,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) val log = createLog(logDir, logConfig) - assertEquals(OffsetResultHolder(None), 
log.fetchOffsetByTimestamp(0L)) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L)) val firstTimestamp = mockTime.milliseconds val firstLeaderEpoch = 0 @@ -2047,23 +2022,23 @@ class UnifiedLogTest { timestamp = secondTimestamp), leaderEpoch = secondLeaderEpoch) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(firstTimestamp)) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(secondTimestamp)) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP)) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. 
- log.maybeAssignEpochStartOffset(2, 2L) + log.assignEpochStartOffset(2, 2L) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)) } @@ -2072,7 +2047,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) val log = createLog(logDir, logConfig) - assertEquals(OffsetResultHolder(None), log.fetchOffsetByTimestamp(0L)) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L)) val firstTimestamp = mockTime.milliseconds val leaderEpoch = 0 @@ -2092,7 +2067,7 @@ class UnifiedLogTest { timestamp = firstTimestamp), leaderEpoch = leaderEpoch) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(leaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(leaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP)) } @@ -2115,7 +2090,7 @@ class UnifiedLogTest { remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) // Note that the log is empty, so remote offset read won't happen - assertEquals(OffsetResultHolder(None), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) val firstTimestamp = mockTime.milliseconds val firstLeaderEpoch = 0 @@ -2137,34 +2112,34 @@ class UnifiedLogTest { .filter(_ == firstTimestamp) .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch))) }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache.get)) + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) log._localLogStartOffset = 1 def assertFetchOffsetByTimestamp(expected: Option[TimestampAndOffset], timestamp: Long): Unit = { val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Some(remoteLogManager)) - assertTrue(offsetResultHolder.futureHolderOpt.isDefined) + assertTrue(offsetResultHolder.futureHolderOpt.isPresent) offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) - assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().isRight) - assertEquals(expected, offsetResultHolder.futureHolderOpt.get.taskFuture.get().getOrElse(null)) + assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().hasTimestampAndOffset) + assertEquals(expected.get, offsetResultHolder.futureHolderOpt.get.taskFuture.get().timestampAndOffset().orElse(null)) } // In the assertions below we test that offset 0 (first timestamp) is in remote and offset 1 (second timestamp) is in local storage. 
assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. - log.maybeAssignEpochStartOffset(2, 2L) - - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2)))), + log.assignEpochStartOffset(2, 2L) + + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) } @@ -2173,7 +2148,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) val log = createLog(logDir, logConfig) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)) val firstTimestamp = mockTime.milliseconds @@ -2189,7 +2164,7 @@ class UnifiedLogTest { timestamp = secondTimestamp), leaderEpoch = leaderEpoch) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)) } @@ -2212,8 +2187,8 @@ class UnifiedLogTest { remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) // Note that the log is empty, so remote offset read won't happen - assertEquals(OffsetResultHolder(None), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0, Optional.empty()))), + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) + assertEquals(new OffsetResultHolder(new 
TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0, Optional.empty())), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) val firstTimestamp = mockTime.milliseconds @@ -2236,43 +2211,48 @@ class UnifiedLogTest { .filter(_ == firstTimestamp) .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch))) }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache.get)) + anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) log._localLogStartOffset = 1 log._highestOffsetInRemoteStorage = 0 def assertFetchOffsetByTimestamp(expected: Option[TimestampAndOffset], timestamp: Long): Unit = { val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Some(remoteLogManager)) - assertTrue(offsetResultHolder.futureHolderOpt.isDefined) + assertTrue(offsetResultHolder.futureHolderOpt.isPresent) offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) - assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().isRight) - assertEquals(expected, offsetResultHolder.futureHolderOpt.get.taskFuture.get().getOrElse(null)) + assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().hasTimestampAndOffset) + assertEquals(expected.get, offsetResultHolder.futureHolderOpt.get.taskFuture.get().timestampAndOffset().orElse(null)) } // In the assertions below we test that offset 0 (first timestamp) is in remote and offset 1 (second timestamp) is in local storage. assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. 
- log.maybeAssignEpochStartOffset(2, 2L) + log.assignEpochStartOffset(2, 2L) - assertEquals(OffsetResultHolder(Some(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2)))), + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) } private def createKafkaConfigWithRLM: KafkaConfig = { val props = new Properties() - props.put("zookeeper.connect", "test") + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "0") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.setProperty("controller.quorum.bootstrap.servers", "localhost:9093") + props.setProperty("listeners", "CONTROLLER://:9093") + props.setProperty("advertised.listeners", "CONTROLLER://127.0.0.1:9093") props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true") props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteStorageManager].getName) props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) @@ -2574,54 +2554,28 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024) val log = createLog(logDir, logConfig) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) - assertEquals(Some(5), log.leaderEpochCache.flatMap(_.latestEpoch.toScala)) + assertEquals(OptionalInt.of(5), log.leaderEpochCache.latestEpoch) log.appendAsFollower(TestUtils.records(List(new SimpleRecord("foo".getBytes())), baseOffset = 1L, magicValue = RecordVersion.V1.value)) - assertEquals(None, log.leaderEpochCache.flatMap(_.latestEpoch.toScala)) + assertEquals(OptionalInt.empty, log.leaderEpochCache.latestEpoch) } - @nowarn("cat=deprecation") - @Test - def testLeaderEpochCacheClearedAfterDynamicMessageFormatDowngrade(): Unit = { - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024) - val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) - assertEquals(Some(5), log.latestEpoch) - - val logProps = new Properties() - logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000") - logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") - logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "65536") - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") - val downgradedLogConfig = new LogConfig(logProps) - log.updateConfig(downgradedLogConfig) - LogTestUtils.assertLeaderEpochCacheEmpty(log) - - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("bar".getBytes())), - magicValue = RecordVersion.V1.value), leaderEpoch = 5) - LogTestUtils.assertLeaderEpochCacheEmpty(log) - } - - @nowarn("cat=deprecation") @Test def testLeaderEpochCacheCreatedAfterMessageFormatUpgrade(): Unit = { val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000") logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "65536") - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") val logConfig = new LogConfig(logProps) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new 
SimpleRecord("bar".getBytes())), - magicValue = RecordVersion.V1.value), leaderEpoch = 5) - LogTestUtils.assertLeaderEpochCacheEmpty(log) + log.appendAsLeaderWithRecordVersion(TestUtils.records(List(new SimpleRecord("bar".getBytes())), + magicValue = RecordVersion.V1.value), leaderEpoch = 5, RecordVersion.V1) + assertEquals(None, log.latestEpoch) - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.11.0") - val upgradedLogConfig = new LogConfig(logProps) - log.updateConfig(upgradedLogConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes())), + magicValue = RecordVersion.V2.value), leaderEpoch = 5) assertEquals(Some(5), log.latestEpoch) } @@ -2710,8 +2664,8 @@ class UnifiedLogTest { for (_ <- 0 until 100) log.appendAsLeader(createRecords, leaderEpoch = 0) - log.maybeAssignEpochStartOffset(0, 40) - log.maybeAssignEpochStartOffset(1, 90) + log.assignEpochStartOffset(0, 40) + log.assignEpochStartOffset(1, 90) // segments are not eligible for deletion if no high watermark has been set val numSegments = log.numberOfSegments @@ -2796,9 +2750,7 @@ class UnifiedLogTest { assertEquals(log.logStartOffset, 15) } - def epochCache(log: UnifiedLog): LeaderEpochFileCache = { - log.leaderEpochCache.get - } + def epochCache(log: UnifiedLog): LeaderEpochFileCache = log.leaderEpochCache @Test def shouldDeleteSizeBasedSegments(): Unit = { @@ -2927,7 +2879,7 @@ class UnifiedLogTest { //Given this partition is on leader epoch 72 val epoch = 72 val log = createLog(logDir, new LogConfig(new Properties)) - log.maybeAssignEpochStartOffset(epoch, records.length) + log.assignEpochStartOffset(epoch, records.length) //When appending messages as a leader (i.e. assignOffsets = true) for (record <- records) @@ -3701,14 +3653,9 @@ class UnifiedLogTest { assertTrue(newDir.exists()) log.renameDir(newDir.getName, false) - assertTrue(log.leaderEpochCache.isEmpty) + assertFalse(log.leaderEpochCache.nonEmpty) assertTrue(log.partitionMetadataFile.isEmpty) assertEquals(0, log.logEndOffset) - // verify that records appending can still succeed - // even with the uninitialized leaderEpochCache and partitionMetadataFile - val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes))) - log.appendAsLeader(records, leaderEpoch = 0) - assertEquals(1, log.logEndOffset) // verify that the background deletion can succeed log.delete() @@ -3899,7 +3846,7 @@ class UnifiedLogTest { var sequence = if (appendOrigin == AppendOrigin.CLIENT) 3 else 0 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) assertFalse(log.verificationGuard(producerId).verify(VerificationGuard.SENTINEL)) @@ -3929,7 +3876,7 @@ class UnifiedLogTest { assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) log.appendAsLeader(idempotentRecords, origin = appendOrigin, leaderEpoch = 0) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) // Since we wrote idempotent records, we keep VerificationGuard. 
assertEquals(verificationGuard, log.verificationGuard(producerId)) @@ -3937,7 +3884,7 @@ class UnifiedLogTest { // Now write the transactional records assertTrue(log.verificationGuard(producerId).verify(verificationGuard)) log.appendAsLeader(transactionalRecords, origin = appendOrigin, leaderEpoch = 0, verificationGuard = verificationGuard) - assertTrue(log.hasOngoingTransaction(producerId)) + assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) // VerificationGuard should be cleared now. assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) @@ -3951,7 +3898,7 @@ class UnifiedLogTest { ) log.appendAsLeader(endTransactionMarkerRecord, origin = AppendOrigin.COORDINATOR, leaderEpoch = 0) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) if (appendOrigin == AppendOrigin.CLIENT) @@ -3983,7 +3930,7 @@ class UnifiedLogTest { ) log.appendAsLeader(endTransactionMarkerRecord, origin = AppendOrigin.COORDINATOR, leaderEpoch = 0) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } @@ -4011,7 +3958,7 @@ class UnifiedLogTest { ) log.appendAsLeader(transactionalRecords, leaderEpoch = 0) - assertTrue(log.hasOngoingTransaction(producerId)) + assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } @@ -4036,14 +3983,14 @@ class UnifiedLogTest { new SimpleRecord("2".getBytes) ) assertThrows(classOf[InvalidTxnStateException], () => log.appendAsLeader(transactionalRecords, leaderEpoch = 0)) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch) assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) log.appendAsLeader(transactionalRecords, leaderEpoch = 0, verificationGuard = verificationGuard) - assertTrue(log.hasOngoingTransaction(producerId)) + assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } @@ -4056,7 +4003,7 @@ class UnifiedLogTest { val sequence = 3 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) - assertFalse(log.hasOngoingTransaction(producerId)) + assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) val transactionalRecords = MemoryRecords.withTransactionalRecords( @@ -4462,8 +4409,8 @@ class UnifiedLogTest { segments.add(seg2) assertEquals(Seq(Long.MaxValue, Long.MaxValue), log.getFirstBatchTimestampForSegments(segments).asScala.toSeq) - seg1.append(1, 1000L, 1, MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord("one".getBytes))) - seg2.append(2, 2000L, 1, MemoryRecords.withRecords(2, Compression.NONE, new SimpleRecord("two".getBytes))) + seg1.append(1, MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord(1000L, "one".getBytes))) + seg2.append(2, MemoryRecords.withRecords(2, Compression.NONE, new SimpleRecord(2000L, 
"two".getBytes))) assertEquals(Seq(1000L, 2000L), log.getFirstBatchTimestampForSegments(segments).asScala.toSeq) seg1.close() @@ -4475,7 +4422,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) val result = log.fetchOffsetByTimestamp(mockTime.milliseconds(), Some(null)) - assertEquals(OffsetResultHolder(None, None), result) + assertEquals(new OffsetResultHolder(Optional.empty(), Optional.empty()), result) } private def appendTransactionalToBuffer(buffer: ByteBuffer, @@ -4532,13 +4479,12 @@ class UnifiedLogTest { producerIdExpirationCheckIntervalMs: Int = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, lastShutdownClean: Boolean = true, topicId: Option[Uuid] = None, - keepPartitionMetadataFile: Boolean = true, remoteStorageSystemEnable: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER): UnifiedLog = { val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint, maxTransactionTimeoutMs, producerStateManagerConfig, producerIdExpirationCheckIntervalMs, - lastShutdownClean, topicId, keepPartitionMetadataFile, new ConcurrentHashMap[String, Integer], + lastShutdownClean, topicId, new ConcurrentHashMap[String, Integer], remoteStorageSystemEnable, remoteLogManager, logOffsetsListener) logsToClose = logsToClose :+ log log diff --git a/core/src/test/scala/unit/kafka/log/VerificationGuardTest.scala b/core/src/test/scala/unit/kafka/log/VerificationGuardTest.scala deleted file mode 100644 index b18b7430b9a96..0000000000000 --- a/core/src/test/scala/unit/kafka/log/VerificationGuardTest.scala +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package unit.kafka.log - -import org.apache.kafka.storage.internals.log.VerificationGuard -import org.apache.kafka.storage.internals.log.VerificationGuard.SENTINEL -import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotEquals, assertTrue} -import org.junit.jupiter.api.Test - -class VerificationGuardTest { - - @Test - def testEqualsAndHashCode(): Unit = { - val verificationGuard1 = new VerificationGuard - val verificationGuard2 = new VerificationGuard - - assertNotEquals(verificationGuard1, verificationGuard2) - assertNotEquals(SENTINEL, verificationGuard1) - assertEquals(SENTINEL, SENTINEL) - - assertNotEquals(verificationGuard1.hashCode, verificationGuard2.hashCode) - assertNotEquals(SENTINEL.hashCode, verificationGuard1.hashCode) - assertEquals(SENTINEL.hashCode, SENTINEL.hashCode) - } - - @Test - def testVerify(): Unit = { - val verificationGuard1 = new VerificationGuard - val verificationGuard2 = new VerificationGuard - - assertFalse(verificationGuard1.verify(verificationGuard2)) - assertFalse(verificationGuard1.verify(SENTINEL)) - assertFalse(SENTINEL.verify(verificationGuard1)) - assertFalse(SENTINEL.verify(SENTINEL)) - assertTrue(verificationGuard1.verify(verificationGuard1)) - } - -} diff --git a/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala b/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala index 6ebcbeb4fcd28..4d55c30b39771 100644 --- a/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala +++ b/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala @@ -377,7 +377,7 @@ class RemoteIndexCacheTest { // Simulate a concurrency situation where one thread is reading the entry already present in the cache (cache hit) // and the other thread is reading an entry which is not available in the cache (cache miss). The expected behaviour // is for the former thread to succeed while latter is fetching from rsm. - // In this this test we simulate the situation using latches. We perform the following operations: + // In this test we simulate the situation using latches. We perform the following operations: // 1. Start the CacheMiss thread and wait until it starts executing the rsm.fetchIndex // 2. Block the CacheMiss thread inside the call to rsm.fetchIndex. // 3. Start the CacheHit thread. Assert that it performs a successful read. 
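Editor's note: the RemoteIndexCacheTest comment above describes coordinating a cache-hit reader and a cache-miss reader with latches. Below is a self-contained sketch of that coordination pattern, deliberately independent of RemoteIndexCache and the RSM; every name in it is illustrative.

```scala
import java.util.concurrent.{Callable, ConcurrentHashMap, CountDownLatch, Executors, TimeUnit}

// Illustrative stand-in: a cache whose "loader" can be blocked, so a concurrent
// cache-hit read can be asserted while a cache-miss load is still in flight.
object LatchCoordinationSketch extends App {
  val cache = new ConcurrentHashMap[String, String]()
  cache.put("hit-key", "cached-value")

  val missStartedLoading = new CountDownLatch(1) // step 1: wait until the miss reaches the loader
  val releaseMissLoader  = new CountDownLatch(1) // step 2: keep the loader blocked until released

  val pool = Executors.newFixedThreadPool(2)

  // Cache-miss thread: simulates a slow remote fetch for an absent key.
  pool.submit(new Runnable {
    override def run(): Unit = {
      missStartedLoading.countDown()
      releaseMissLoader.await(5, TimeUnit.SECONDS) // blocked, standing in for rsm.fetchIndex
      cache.put("miss-key", "loaded-value")
    }
  })

  missStartedLoading.await(5, TimeUnit.SECONDS)

  // Cache-hit thread (step 3): must succeed while the miss is still blocked.
  val hit = pool.submit(new Callable[String] {
    override def call(): String = cache.get("hit-key")
  })
  assert(hit.get(5, TimeUnit.SECONDS) == "cached-value")

  releaseMissLoader.countDown()
  pool.shutdown()
  pool.awaitTermination(5, TimeUnit.SECONDS)
  assert(cache.get("miss-key") == "loaded-value")
}
```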
@@ -624,7 +624,7 @@ class RemoteIndexCacheTest { assertCacheSize(2) verifyEntryIsEvicted(metadataList(0), entry0) - // Reduce cache capacity to only store 1 entries + // Reduce cache capacity to only store 1 entry cache.resizeCacheSize(1 * estimateEntryBytesSize) assertCacheSize(1) verifyEntryIsEvicted(metadataList(1), entry1) diff --git a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala index 1efce4c3d2352..b1bc03b6ff479 100644 --- a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala @@ -20,7 +20,7 @@ package kafka.metrics import java.lang.management.ManagementFactory import java.util.Properties import javax.management.ObjectName -import com.yammer.metrics.core.{Gauge, MetricPredicate} +import com.yammer.metrics.core.MetricPredicate import org.junit.jupiter.api.Assertions._ import kafka.integration.KafkaServerTestHarness import kafka.server._ @@ -33,7 +33,6 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.metrics.JmxReporter import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics @@ -52,7 +51,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging { overridingProps.put(JmxReporter.EXCLUDE_CONFIG, s"$requiredKafkaServerPrefix=ClusterId") def generateConfigs: Seq[KafkaConfig] = - TestUtils.createBrokerConfigs(numNodes, zkConnectOrNull, enableControlledShutdown = false). + TestUtils.createBrokerConfigs(numNodes, enableControlledShutdown = false). 
map(KafkaConfig.fromProps(_, overridingProps)) val nMessages = 2 @@ -63,7 +62,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging { val topic = "test-topic-metric" createTopic(topic) deleteTopic(topic) - TestUtils.verifyTopicDeletion(zkClientOrNull, topic, 1, brokers) + TestUtils.verifyTopicDeletion(topic, 1, brokers) assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } @@ -78,7 +77,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(topicMetricGroups(topic).nonEmpty, "Topic metrics don't exist") brokers.foreach(b => assertNotNull(b.brokerTopicStats.topicStats(topic))) deleteTopic(topic) - TestUtils.verifyTopicDeletion(zkClientOrNull, topic, 1, brokers) + TestUtils.verifyTopicDeletion(topic, 1, brokers) assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } @@ -214,29 +213,6 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(TestUtils.meterCount(bytesOut) > initialBytesOut) } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testZkControllerMetrics(quorum: String): Unit = { - val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics - - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ActiveControllerCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=OfflinePartitionsCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=GlobalTopicCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=GlobalPartitionCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=TopicsToDeleteCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ReplicasToDeleteCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=TopicsIneligibleToDeleteCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ReplicasIneligibleToDeleteCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ActiveBrokerCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=FencedBrokerCount"), 1) - assertEquals(metrics.keySet.asScala.count(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState"), 1) - - val zkStateMetricName = metrics.keySet.asScala.filter(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState").head - val zkStateGauge = metrics.get(zkStateMetricName).asInstanceOf[Gauge[Int]] - assertEquals(ZkMigrationState.ZK.value().intValue(), zkStateGauge.value()) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testKRaftControllerMetrics(quorum: String): Unit = { @@ -252,15 +228,10 @@ class MetricsTest extends KafkaServerTestHarness with Logging { "kafka.controller:type=KafkaController,name=MetadataErrorCount", "kafka.controller:type=KafkaController,name=OfflinePartitionsCount", "kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount", - 
"kafka.controller:type=KafkaController,name=ZkMigrationState", ).foreach(expected => { assertEquals(1, metrics.keySet.asScala.count(_.getMBeanName.equals(expected)), s"Unable to find $expected") }) - - val zkStateMetricName = metrics.keySet.asScala.filter(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState").head - val zkStateGauge = metrics.get(zkStateMetricName).asInstanceOf[Gauge[Int]] - assertEquals(ZkMigrationState.NONE.value().intValue(), zkStateGauge.value()) } /** @@ -271,7 +242,7 @@ class MetricsTest extends KafkaServerTestHarness with Logging { @ValueSource(strings = Array("kraft")) def testSessionExpireListenerMetrics(quorum: String): Unit = { val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics - val expectedNumMetrics = if (isKRaftTest()) 0 else 1 + val expectedNumMetrics = 0 assertEquals(expectedNumMetrics, metrics.keySet.asScala. count(_.getMBeanName == "kafka.server:type=SessionExpireListener,name=SessionState")) assertEquals(expectedNumMetrics, metrics.keySet.asScala. diff --git a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala index 41c08d32968e5..3906011a20380 100644 --- a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala @@ -66,11 +66,12 @@ class ConnectionQuotasTest { } def brokerPropsWithDefaultConnectionLimits: Properties = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val props = TestUtils.createBrokerConfig(0, port = 0) props.put(SocketServerConfigs.LISTENERS_CONFIG, "EXTERNAL://localhost:0,REPLICATION://localhost:1,ADMIN://localhost:2") // ConnectionQuotas does not limit inter-broker listener even when broker-wide connection limit is reached props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "REPLICATION") - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "EXTERNAL:PLAINTEXT,REPLICATION:PLAINTEXT,ADMIN:PLAINTEXT") + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "REPLICATION://localhost:1") + props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,REPLICATION:PLAINTEXT,ADMIN:PLAINTEXT") props.put(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, numQuotaSamples.toString) props.put(QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_CONFIG, quotaWindowSizeSeconds.toString) props diff --git a/core/src/test/scala/unit/kafka/network/ProcessorTest.scala b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala new file mode 100644 index 0000000000000..d42ae11bae64f --- /dev/null +++ b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.network + +import kafka.server.SimpleApiVersionManager +import org.apache.kafka.common.errors.{InvalidRequestException, UnsupportedVersionException} +import org.apache.kafka.common.message.ApiMessageType.ListenerType +import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.common.requests.{RequestHeader, RequestTestUtils} +import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} +import org.junit.jupiter.api.Assertions.assertThrows +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.function.Executable + +import java.util.Collections + +class ProcessorTest { + + @Test + def testParseRequestHeaderWithDisabledApi(): Unit = { + val requestHeader = RequestTestUtils.serializeRequestHeader( + new RequestHeader(ApiKeys.INIT_PRODUCER_ID, 0, "clientid", 0)) + val apiVersionManager = new SimpleApiVersionManager(ListenerType.CONTROLLER, true, + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) + assertThrows(classOf[InvalidRequestException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, + "INIT_PRODUCER_ID with listener type CONTROLLER should throw InvalidRequestException exception") + } + + @Test + def testParseRequestHeaderWithUnsupportedApiVersion(): Unit = { + val requestHeader = RequestTestUtils.serializeRequestHeader( + new RequestHeader(ApiKeys.PRODUCE, 0, "clientid", 0)) + val apiVersionManager = new SimpleApiVersionManager(ListenerType.BROKER, true, + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) + assertThrows(classOf[UnsupportedVersionException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, + "PRODUCE v0 should throw UnsupportedVersionException exception") + } + +} diff --git a/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala b/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala index 40452c128540c..e6f62a52f951b 100644 --- a/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala +++ b/core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala @@ -32,7 +32,6 @@ import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Test import org.mockito.Mockito.mock -import java.util.Collections import scala.jdk.OptionConverters.RichOption class RequestConvertToJsonTest { @@ -51,21 +50,6 @@ class RequestConvertToJsonTest { assertEquals(expectedNode, actualNode) } - @Test - def testRequestHeaderNodeWithDeprecatedApiVersion(): Unit = { - val fetchRequest = FetchRequest.Builder.forConsumer(0, 0, 0, Collections.emptyMap()).build(0) - val req = request(fetchRequest) - val header = req.header - - val expectedNode = RequestHeaderDataJsonConverter.write(header.data, header.headerVersion, false).asInstanceOf[ObjectNode] - expectedNode.set("requestApiKeyName", new TextNode(header.apiKey.toString)) - expectedNode.set("requestApiVersionDeprecated", BooleanNode.TRUE) - - val actualNode = RequestConvertToJson.requestHeaderNode(header) - - assertEquals(expectedNode, actualNode) - } - @Test def testRequestDesc(): Unit = { val alterIsrRequest = new AlterPartitionRequest(new AlterPartitionRequestData(), 0) diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala 
index ca4156eacd242..5ebcfd65ccec2 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -37,7 +37,6 @@ import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.utils._ import org.apache.kafka.network.RequestConvertToJson import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.network.metrics.RequestMetrics import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} import org.apache.kafka.server.config.QuotaConfig @@ -45,7 +44,8 @@ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.network.ConnectionDisconnectListener import org.apache.kafka.server.quota.{ThrottleCallback, ThrottledChannel} import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils} -import org.apache.log4j.Level +import org.apache.logging.log4j.{Level, LogManager} +import org.apache.logging.log4j.core.config.Configurator import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api._ @@ -66,7 +66,7 @@ import scala.jdk.CollectionConverters._ import scala.util.control.ControlThrowable class SocketServerTest { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val props = TestUtils.createBrokerConfig(0, port = 0) props.put("listeners", "PLAINTEXT://localhost:0") props.put("num.network.threads", "1") props.put("socket.send.buffer.bytes", "300000") @@ -88,7 +88,7 @@ class SocketServerTest { var server: SocketServer = _ val sockets = new ArrayBuffer[Socket] - private val kafkaLogger = org.apache.log4j.LogManager.getLogger("kafka") + private val kafkaLogger = LogManager.getLogger("kafka") private var logLevelToRestore: Level = _ def endpoint: EndPoint = { KafkaConfig.fromProps(props, doLog = false).dataPlaneListeners.head @@ -102,9 +102,8 @@ class SocketServerTest { server.enableRequestProcessing(Map.empty).get(1, TimeUnit.MINUTES) // Run the tests with TRACE logging to exercise request logging path logLevelToRestore = kafkaLogger.getLevel - kafkaLogger.setLevel(Level.TRACE) + Configurator.setLevel(kafkaLogger.getName, Level.TRACE) - assertTrue(server.controlPlaneRequestChannelOpt.isEmpty) } @AfterEach @@ -112,7 +111,7 @@ class SocketServerTest { shutdownServerAndMetrics(server) sockets.foreach(_.close()) sockets.clear() - kafkaLogger.setLevel(logLevelToRestore) + Configurator.setLevel(kafkaLogger.getName, logLevelToRestore) TestUtils.clearYammerMetrics() } @@ -120,8 +119,9 @@ class SocketServerTest { val outgoing = new DataOutputStream(socket.getOutputStream) id match { case Some(id) => - outgoing.writeInt(request.length + 2) + outgoing.writeInt(request.length + 4) outgoing.writeShort(id) + outgoing.writeShort(ApiKeys.PRODUCE.oldestVersion) case None => outgoing.writeInt(request.length) } @@ -235,7 +235,7 @@ class SocketServerTest { val clientId = "" val ackTimeoutMs = 10000 - val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + val emptyRequest = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks(ack) .setTimeoutMs(ackTimeoutMs) @@ -313,119 +313,15 @@ class SocketServerTest { ) } - @Test - def testRequestPerSecAndDeprecatedRequestsPerSecMetrics(): Unit = { - val clientName = "apache-kafka-java" - val clientVersion = AppInfoParser.getVersion - - def deprecatedRequestsPerSec(requestVersion: Short): 
Option[Long] = - TestUtils.meterCountOpt(s"${RequestMetrics.DEPRECATED_REQUESTS_PER_SEC},request=Produce,version=$requestVersion," + - s"clientSoftwareName=$clientName,clientSoftwareVersion=$clientVersion") - - def requestsPerSec(requestVersion: Short): Option[Long] = - TestUtils.meterCountOpt(s"${RequestMetrics.REQUESTS_PER_SEC},request=Produce,version=$requestVersion") - - val plainSocket = connect() - val address = plainSocket.getLocalAddress - val clientId = "clientId" - - sendRequest(plainSocket, apiVersionRequestBytes(clientId, ApiKeys.API_VERSIONS.latestVersion)) - var receivedReq = receiveRequest(server.dataPlaneRequestChannel) - server.dataPlaneRequestChannel.sendNoOpResponse(receivedReq) - - var requestVersion = ApiKeys.PRODUCE.latestVersion - sendRequest(plainSocket, producerRequestBytes(requestVersion)) - receivedReq = receiveRequest(server.dataPlaneRequestChannel) - - assertEquals(clientName, receivedReq.context.clientInformation.softwareName) - assertEquals(clientVersion, receivedReq.context.clientInformation.softwareVersion) - - server.dataPlaneRequestChannel.sendNoOpResponse(receivedReq) - TestUtils.waitUntilTrue(() => requestsPerSec(requestVersion).isDefined, "RequestsPerSec metric could not be found") - assertTrue(requestsPerSec(requestVersion).getOrElse(0L) > 0, "RequestsPerSec should be higher than 0") - assertEquals(None, deprecatedRequestsPerSec(requestVersion)) - - requestVersion = 3 - sendRequest(plainSocket, producerRequestBytes(requestVersion)) - receivedReq = receiveRequest(server.dataPlaneRequestChannel) - server.dataPlaneRequestChannel.sendNoOpResponse(receivedReq) - TestUtils.waitUntilTrue(() => deprecatedRequestsPerSec(requestVersion).isDefined, "DeprecatedRequestsPerSec metric could not be found") - assertTrue(deprecatedRequestsPerSec(requestVersion).getOrElse(0L) > 0, "DeprecatedRequestsPerSec should be higher than 0") - - plainSocket.setSoLinger(true, 0) - plainSocket.close() - - TestUtils.waitUntilTrue(() => server.connectionCount(address) == 0, msg = "Connection not closed") - - } - - @Test - def testStagedListenerStartup(): Unit = { - shutdownServerAndMetrics(server) - val testProps = new Properties - testProps ++= props - testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROL_PLANE://localhost:0") - testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROL_PLANE:PLAINTEXT") - testProps.put("control.plane.listener.name", "CONTROL_PLANE") - testProps.put("inter.broker.listener.name", "INTERNAL") - val config = KafkaConfig.fromProps(testProps) - val testableServer = new TestableSocketServer(config) - - val updatedEndPoints = config.effectiveAdvertisedBrokerListeners.map { endpoint => - endpoint.copy(port = testableServer.boundPort(endpoint.listenerName)) - }.map(_.toJava) - - val externalReadyFuture = new CompletableFuture[Void]() - - def controlPlaneListenerStarted() = { - try { - val socket = connect(testableServer, config.controlPlaneListenerName.get, localAddr = InetAddress.getLocalHost) - sendAndReceiveControllerRequest(socket, testableServer) - true - } catch { - case _: Throwable => false - } - } - - def listenerStarted(listenerName: ListenerName) = { - try { - val socket = connect(testableServer, listenerName, localAddr = InetAddress.getLocalHost) - sendAndReceiveRequest(socket, testableServer) - true - } catch { - case _: Throwable => false - } - } - - try { - val externalListener = new ListenerName("EXTERNAL") - val externalEndpoint = updatedEndPoints.find(e => e.listenerName.get == 
externalListener.value).get - val controlPlaneListener = new ListenerName("CONTROL_PLANE") - val controlPlaneEndpoint = updatedEndPoints.find(e => e.listenerName.get == controlPlaneListener.value).get - val futures = Map( - externalEndpoint -> externalReadyFuture, - controlPlaneEndpoint -> CompletableFuture.completedFuture[Void](null)) - val requestProcessingFuture = testableServer.enableRequestProcessing(futures) - TestUtils.waitUntilTrue(() => controlPlaneListenerStarted(), "Control plane listener not started") - assertFalse(listenerStarted(config.interBrokerListenerName)) - assertFalse(listenerStarted(externalListener)) - externalReadyFuture.complete(null) - TestUtils.waitUntilTrue(() => listenerStarted(config.interBrokerListenerName), "Inter-broker listener not started") - TestUtils.waitUntilTrue(() => listenerStarted(externalListener), "External listener not started") - requestProcessingFuture.get(1, TimeUnit.MINUTES) - } finally { - shutdownServerAndMetrics(testableServer) - } - } - @Test def testStagedListenerShutdownWhenConnectionQueueIsFull(): Unit = { shutdownServerAndMetrics(server) val testProps = new Properties testProps ++= props - testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROLLER://localhost:0") + testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0") + testProps.put("advertised.listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0") testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT") - testProps.put("control.plane.listener.name", "CONTROLLER") + testProps.put("controller.listener.names", "CONTROLLER") testProps.put("inter.broker.listener.name", "INTERNAL") val config = KafkaConfig.fromProps(testProps) val connectionQueueSize = 1 @@ -870,7 +766,7 @@ class SocketServerTest { @Test def testZeroMaxConnectionsPerIp(): Unit = { - val newProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val newProps = TestUtils.createBrokerConfig(0, port = 0) newProps.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG, "0") newProps.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, "%s:%s".format("127.0.0.1", "5")) val server = new SocketServer(KafkaConfig.fromProps(newProps), new Metrics(), @@ -909,7 +805,7 @@ class SocketServerTest { @Test def testMaxConnectionsPerIpOverrides(): Unit = { val overrideNum = server.config.maxConnectionsPerIp + 1 - val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val overrideProps = TestUtils.createBrokerConfig(0, port = 0) overrideProps.put(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, s"localhost:$overrideNum") val serverMetrics = new Metrics() val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), serverMetrics, @@ -968,7 +864,7 @@ class SocketServerTest { @Test def testConnectionRatePerIp(): Unit = { val defaultTimeoutMs = 2000 - val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val overrideProps = TestUtils.createBrokerConfig(0, port = 0) overrideProps.remove(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG) overrideProps.put(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, String.valueOf(2)) val connectionRate = 5 @@ -1019,7 +915,7 @@ class SocketServerTest { @Test def testThrottledSocketsClosedOnShutdown(): Unit = { - val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val overrideProps = TestUtils.createBrokerConfig(0, port = 0) 
overrideProps.remove("max.connections.per.ip") overrideProps.put(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, String.valueOf(2)) val connectionRate = 5 @@ -1062,7 +958,7 @@ class SocketServerTest { val clientId = "" val ackTimeoutMs = 10000 val ack = 0: Short - val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + val emptyRequest = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks(ack) .setTimeoutMs(ackTimeoutMs) @@ -1096,6 +992,7 @@ class SocketServerTest { val password = "admin-secret" val reauthMs = 1500 props.setProperty("listeners", "SASL_PLAINTEXT://localhost:0") + props.setProperty("advertised.listeners", "SASL_PLAINTEXT://localhost:0") props.setProperty("security.inter.broker.protocol", "SASL_PLAINTEXT") props.setProperty("listener.name.sasl_plaintext.plain.sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required " + @@ -1104,8 +1001,9 @@ class SocketServerTest { props.setProperty("listener.name.sasl_plaintext.sasl.enabled.mechanisms", "PLAIN") props.setProperty("num.network.threads", "1") props.setProperty("connections.max.reauth.ms", reauthMs.toString) - val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, - saslProperties = Some(props), enableSaslPlaintext = true) + props.setProperty("listener.security.protocol.map", "SASL_PLAINTEXT:SASL_PLAINTEXT,CONTROLLER:PLAINTEXT") + + val overrideProps = TestUtils.createBrokerConfig(0, saslProperties = Some(props), enableSaslPlaintext = true) val time = new MockTime() val overrideServer = new TestableSocketServer(KafkaConfig.fromProps(overrideProps), time = time) try { @@ -1144,7 +1042,7 @@ class SocketServerTest { // ...and now send something to trigger the disconnection val ackTimeoutMs = 10000 val ack = 0: Short - val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + val emptyRequest = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks(ack) .setTimeoutMs(ackTimeoutMs) @@ -1185,7 +1083,7 @@ class SocketServerTest { } private def checkClientDisconnectionUpdatesRequestMetrics(responseBufferSize: Int): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val props = TestUtils.createBrokerConfig(0, port = 0) val overrideServer = new TestableSocketServer(KafkaConfig.fromProps(props)) try { @@ -1218,7 +1116,7 @@ class SocketServerTest { def testServerShutdownWithoutEnable(): Unit = { // The harness server has already been enabled, so it's invalid for this test. 
shutdownServerAndMetrics(server) - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val props = TestUtils.createBrokerConfig(0, port = 0) val overrideServer = new TestableSocketServer(KafkaConfig.fromProps(props)) overrideServer.shutdown() assertFalse(overrideServer.testableAcceptor.isOpen) @@ -1643,8 +1541,6 @@ class SocketServerTest { val testableServer = new TestableSocketServer(time = time) testableServer.enableRequestProcessing(Map.empty).get(1, TimeUnit.MINUTES) - assertTrue(testableServer.controlPlaneRequestChannelOpt.isEmpty) - val proxyServer = new ProxyServer(testableServer) try { val testableSelector = testableServer.testableSelector @@ -1876,33 +1772,13 @@ class SocketServerTest { } } - - @Test - def testControlPlaneAsPrivilegedListener(): Unit = { - val testProps = new Properties - testProps ++= props - testProps.put("listeners", "PLAINTEXT://localhost:0,CONTROLLER://localhost:0") - testProps.put("listener.security.protocol.map", "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT") - testProps.put("control.plane.listener.name", "CONTROLLER") - val config = KafkaConfig.fromProps(testProps) - withTestableServer(config, { testableServer => - val controlPlaneSocket = connect(testableServer, config.controlPlaneListenerName.get, - localAddr = InetAddress.getLocalHost) - val sentRequest = sendAndReceiveControllerRequest(controlPlaneSocket, testableServer) - assertTrue(sentRequest.context.fromPrivilegedListener) - - val plainSocket = connect(testableServer, localAddr = InetAddress.getLocalHost) - val plainRequest = sendAndReceiveRequest(plainSocket, testableServer) - assertFalse(plainRequest.context.fromPrivilegedListener) - }) - } - @Test def testInterBrokerListenerAsPrivilegedListener(): Unit = { val testProps = new Properties testProps ++= props testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0") - testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT") + testProps.put("advertised.listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0") + testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT") testProps.put("inter.broker.listener.name", "INTERNAL") val config = KafkaConfig.fromProps(testProps) withTestableServer(config, { testableServer => @@ -1918,33 +1794,6 @@ class SocketServerTest { }) } - @Test - def testControlPlaneTakePrecedenceOverInterBrokerListenerAsPrivilegedListener(): Unit = { - val testProps = new Properties - testProps ++= props - testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROLLER://localhost:0") - testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT") - testProps.put("control.plane.listener.name", "CONTROLLER") - testProps.put("inter.broker.listener.name", "INTERNAL") - val config = KafkaConfig.fromProps(testProps) - withTestableServer(config, { testableServer => - val controlPlaneSocket = connect(testableServer, config.controlPlaneListenerName.get, - localAddr = InetAddress.getLocalHost) - val controlPlaneRequest = sendAndReceiveControllerRequest(controlPlaneSocket, testableServer) - assertTrue(controlPlaneRequest.context.fromPrivilegedListener) - - val interBrokerSocket = connect(testableServer, config.interBrokerListenerName, - localAddr = InetAddress.getLocalHost) - val interBrokerRequest = sendAndReceiveRequest(interBrokerSocket, testableServer) - assertFalse(interBrokerRequest.context.fromPrivilegedListener) - - val externalSocket = 
connect(testableServer, new ListenerName("EXTERNAL"), - localAddr = InetAddress.getLocalHost) - val externalRequest = sendAndReceiveRequest(externalSocket, testableServer) - assertFalse(externalRequest.context.fromPrivilegedListener) - }) - } - @Test def testListenBacklogSize(): Unit = { val backlogSize = 128 @@ -2074,9 +1923,10 @@ class SocketServerTest { private def sslServerProps: Properties = { val trustStoreFile = TestUtils.tempFile("truststore", ".jks") - val sslProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, interBrokerSecurityProtocol = Some(SecurityProtocol.SSL), + val sslProps = TestUtils.createBrokerConfig(0, interBrokerSecurityProtocol = Some(SecurityProtocol.SSL), trustStoreFile = Some(trustStoreFile)) sslProps.put(SocketServerConfigs.LISTENERS_CONFIG, "SSL://localhost:0") + sslProps.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "SSL://localhost:0") sslProps.put(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG, "1") sslProps } @@ -2098,11 +1948,6 @@ class SocketServerTest { } } - def sendAndReceiveControllerRequest(socket: Socket, server: SocketServer): RequestChannel.Request = { - sendRequest(socket, producerRequestBytes()) - receiveRequest(server.controlPlaneRequestChannelOpt.get) - } - private def assertProcessorHealthy(testableServer: TestableSocketServer, healthySockets: Seq[Socket] = Seq.empty): Unit = { val selector = testableServer.testableSelector selector.reset() @@ -2248,7 +2093,8 @@ class SocketServerTest { time: Time = Time.SYSTEM, connectionDisconnectListeners: Seq[ConnectionDisconnectListener] = Seq.empty ) extends SocketServer( - config, new Metrics, time, credentialProvider, apiVersionManager, connectionDisconnectListeners = connectionDisconnectListeners + config, new Metrics, time, credentialProvider, apiVersionManager, + connectionDisconnectListeners = connectionDisconnectListeners ) { override def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel) : DataPlaneAcceptor = { diff --git a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala index 09b35318818e0..16c9d30b15974 100644 --- a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala +++ b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala @@ -34,7 +34,7 @@ import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.Endpoints import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.ProcessRole -import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ReplicationConfigs, ServerLogConfigs, ZkConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.FaultHandler import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -46,29 +46,6 @@ import scala.util.Using import scala.jdk.CollectionConverters._ class RaftManagerTest { - private def createZkBrokerConfig( - migrationEnabled: Boolean, - nodeId: Int, - logDir: Seq[Path], - metadataDir: Option[Path] - ): KafkaConfig = { - val props = new Properties - logDir.foreach { value => - props.setProperty(ServerLogConfigs.LOG_DIR_CONFIG, value.toString) - } - if (migrationEnabled) { - metadataDir.foreach { value => - props.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, value.toString) - } - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$nodeId@localhost:9093") - 
props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - } - - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - props.setProperty(ServerConfigs.BROKER_ID_CONFIG, nodeId.toString) - new KafkaConfig(props) - } private def createConfig( processRoles: Set[ProcessRole], @@ -245,80 +222,6 @@ class RaftManagerTest { } } - @Test - def testMigratingZkBrokerDeletesMetadataLog(): Unit = { - val logDirs = Seq(TestUtils.tempDir().toPath) - val metadataLogDir = Some(TestUtils.tempDir().toPath) - val nodeId = 1 - val config = createZkBrokerConfig(migrationEnabled = true, nodeId, logDirs, metadataLogDir) - createMetadataLog(config) - - KafkaRaftManager.maybeDeleteMetadataLogDir(config) - assertLogDirsExist(logDirs, metadataLogDir, expectMetadataLog = false) - } - - @Test - def testNonMigratingZkBrokerDoesNotDeleteMetadataLog(): Unit = { - val logDirs = Seq(TestUtils.tempDir().toPath) - val metadataLogDir = Some(TestUtils.tempDir().toPath) - val nodeId = 1 - - val config = createZkBrokerConfig(migrationEnabled = false, nodeId, logDirs, metadataLogDir) - - // Create the metadata log dir directly as if the broker was previously in migration mode. - // This simulates a misconfiguration after downgrade - Files.createDirectory(metadataLogDir.get.resolve("__cluster_metadata-0")) - - val err = assertThrows(classOf[RuntimeException], () => KafkaRaftManager.maybeDeleteMetadataLogDir(config), - "Should have not deleted the metadata log") - assertEquals("Not deleting metadata log dir since migrations are not enabled.", err.getMessage) - - assertLogDirsExist(logDirs, metadataLogDir, expectMetadataLog = true) - } - - @Test - def testZkBrokerDoesNotDeleteSeparateLogDirs(): Unit = { - val logDirs = Seq(TestUtils.tempDir().toPath, TestUtils.tempDir().toPath) - val metadataLogDir = Some(TestUtils.tempDir().toPath) - val nodeId = 1 - val config = createZkBrokerConfig(migrationEnabled = true, nodeId, logDirs, metadataLogDir) - createMetadataLog(config) - - KafkaRaftManager.maybeDeleteMetadataLogDir(config) - assertLogDirsExist(logDirs, metadataLogDir, expectMetadataLog = false) - } - - @Test - def testZkBrokerDoesNotDeleteSameLogDir(): Unit = { - val logDirs = Seq(TestUtils.tempDir().toPath, TestUtils.tempDir().toPath) - val metadataLogDir = logDirs.headOption - val nodeId = 1 - val config = createZkBrokerConfig(migrationEnabled = true, nodeId, logDirs, metadataLogDir) - createMetadataLog(config) - - KafkaRaftManager.maybeDeleteMetadataLogDir(config) - assertLogDirsExist(logDirs, metadataLogDir, expectMetadataLog = false) - } - - @Test - def testKRaftBrokerDoesNotDeleteMetadataLog(): Unit = { - val logDirs = Seq(TestUtils.tempDir().toPath) - val metadataLogDir = Some(TestUtils.tempDir().toPath) - val nodeId = 1 - val config = createConfig( - Set(ProcessRole.BrokerRole), - nodeId, - logDirs, - metadataLogDir - ) - createMetadataLog(config) - - assertThrows(classOf[RuntimeException], () => KafkaRaftManager.maybeDeleteMetadataLogDir(config), - "Should not have deleted metadata log") - assertLogDirsExist(logDirs, metadataLogDir, expectMetadataLog = true) - - } - private def fileLocked(path: Path): Boolean = { Using.resource(FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE)) { channel => try { diff --git a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala index a614c92b780e4..833cae0672d67 100644 --- a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala 
+++ b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala @@ -89,7 +89,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } def properties: Properties = { - val props = TestUtils.createBrokerConfig(0, null) + val props = TestUtils.createBrokerConfig(0) props.put(StandardAuthorizer.SUPER_USERS_CONFIG, superUsers) props } diff --git a/core/src/test/scala/unit/kafka/security/authorizer/BaseAuthorizerTest.scala b/core/src/test/scala/unit/kafka/security/authorizer/BaseAuthorizerTest.scala index dd8337ff3c95a..c7726ff52454f 100644 --- a/core/src/test/scala/unit/kafka/security/authorizer/BaseAuthorizerTest.scala +++ b/core/src/test/scala/unit/kafka/security/authorizer/BaseAuthorizerTest.scala @@ -20,7 +20,6 @@ package kafka.security.authorizer import java.net.InetAddress import java.util.UUID import kafka.server.KafkaConfig -import kafka.zookeeper.ZooKeeperClient import org.apache.kafka.common.acl.AclOperation.{ALL, READ, WRITE} import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} import org.apache.kafka.common.acl.{AccessControlEntry, AccessControlEntryFilter, AclBinding, AclBindingFilter, AclOperation} @@ -50,7 +49,6 @@ trait BaseAuthorizerTest { val requestContext: RequestContext = newRequestContext(principal, InetAddress.getByName("192.168.0.1")) val superUserName = "superuser1" var config: KafkaConfig = _ - var zooKeeperClient: ZooKeeperClient = _ var resource: ResourcePattern = _ @Test diff --git a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala index 05c3bc2eadeac..15fb0ac2a2f72 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala @@ -23,10 +23,9 @@ import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion import org.apache.kafka.common.message.ApiMessageType import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.ApiKeys -import org.apache.kafka.common.record.RecordVersion import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse, RequestUtils} import org.apache.kafka.common.utils.Utils -import org.apache.kafka.server.common.{GroupVersion, MetadataVersion, TransactionVersion} +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, TransactionVersion} import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Tag @@ -64,12 +63,12 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { clientTelemetryEnabled: Boolean = false, apiVersion: Short = ApiKeys.API_VERSIONS.latestVersion ): Unit = { - if (cluster.isKRaftTest && apiVersion >= 3) { - assertEquals(3, apiVersionsResponse.data().finalizedFeatures().size()) + if (apiVersion >= 3) { + assertEquals(4, apiVersionsResponse.data().finalizedFeatures().size()) assertEquals(MetadataVersion.latestTesting().featureLevel(), apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).minVersionLevel()) assertEquals(MetadataVersion.latestTesting().featureLevel(), apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).maxVersionLevel()) - assertEquals(4, apiVersionsResponse.data().supportedFeatures().size()) + assertEquals(5, apiVersionsResponse.data().supportedFeatures().size()) 
assertEquals(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(MetadataVersion.FEATURE_NAME).minVersion()) if (apiVersion < 4) { assertEquals(1, apiVersionsResponse.data().supportedFeatures().find("kraft.version").minVersion()) @@ -83,13 +82,11 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(GroupVersion.FEATURE_NAME).minVersion()) assertEquals(GroupVersion.GV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(GroupVersion.FEATURE_NAME).maxVersion()) + + assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(EligibleLeaderReplicasVersion.FEATURE_NAME).minVersion()) + assertEquals(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(EligibleLeaderReplicasVersion.FEATURE_NAME).maxVersion()) } - val expectedApis = if (!cluster.isKRaftTest) { - ApiVersionsResponse.collectApis( - ApiKeys.apisForListener(ApiMessageType.ListenerType.ZK_BROKER), - enableUnstableLastVersion - ) - } else if (cluster.controllerListenerName().toScala.contains(listenerName)) { + val expectedApis = if (cluster.controllerListenerName().toScala.contains(listenerName)) { ApiVersionsResponse.collectApis( ApiKeys.apisForListener(ApiMessageType.ListenerType.CONTROLLER), enableUnstableLastVersion @@ -97,7 +94,6 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { } else { ApiVersionsResponse.intersectForwardableApis( ApiMessageType.ListenerType.BROKER, - RecordVersion.current, NodeApiVersions.create(ApiKeys.controllerApis().asScala.map(ApiVersionsResponse.toApiVersion).asJava).allSupportedApiVersions(), enableUnstableLastVersion, clientTelemetryEnabled @@ -107,9 +103,7 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(expectedApis.size, apiVersionsResponse.data.apiKeys.size, "API keys in ApiVersionsResponse must match API keys supported by broker.") - val defaultApiVersionsResponse = if (!cluster.isKRaftTest) { - TestUtils.defaultApiVersionsResponse(0, ListenerType.ZK_BROKER, enableUnstableLastVersion) - } else if (cluster.controllerListenerName().toScala.contains(listenerName)) { + val defaultApiVersionsResponse = if (cluster.controllerListenerName().toScala.contains(listenerName)) { TestUtils.defaultApiVersionsResponse(0, ListenerType.CONTROLLER, enableUnstableLastVersion) } else { TestUtils.createApiVersionsResponse(0, expectedApis) diff --git a/core/src/test/scala/unit/kafka/server/AbstractCreateTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AbstractCreateTopicsRequestTest.scala index 6901933bff0ac..4a119cdd5e6a6 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractCreateTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractCreateTopicsRequestTest.scala @@ -137,10 +137,6 @@ abstract class AbstractCreateTopicsRequestTest extends BaseRequestTest { } } - if (!isKRaftTest()) { - // Verify controller broker has the correct metadata - verifyMetadata(controllerSocketServer) - } if (!request.data.validateOnly) { // Wait until metadata is propagated and validate non-controller broker has the correct metadata TestUtils.waitForPartitionMetadata(brokers, topic.name(), 0) diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala index 4fd92d3e7f50c..7528eefc420ea 100644 --- 
a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala @@ -343,8 +343,6 @@ class AbstractFetcherManagerTest { override protected def logEndOffset(topicPartition: TopicPartition): Long = 1 override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = Some(new OffsetAndEpoch(1, 0)) - - override protected val isOffsetForLeaderEpochSupported: Boolean = false } } diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala index b98c1ddfd0327..5f01458ffa7f7 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala @@ -390,43 +390,6 @@ class AbstractFetcherThreadTest { assertEquals(leaderState.highWatermark, replicaState.highWatermark) } - @Test - def testTruncateToHighWatermarkIfLeaderEpochRequestNotSupported(): Unit = { - val highWatermark = 2L - val partition = new TopicPartition("topic", 0) - val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { - override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = - throw new UnsupportedOperationException - - override val isTruncationOnFetchSupported: Boolean = false - } - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) - val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) { - override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { - assertEquals(highWatermark, truncationState.offset) - assertTrue(truncationState.truncationCompleted) - super.truncate(topicPartition, truncationState) - } - override protected val isOffsetForLeaderEpochSupported: Boolean = false - } - - val replicaLog = Seq( - mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)), - mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)), - mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes))) - - val replicaState = PartitionState(replicaLog, leaderEpoch = 5, highWatermark) - fetcher.setReplicaState(partition, replicaState) - fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), highWatermark, leaderEpoch = 5))) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - - assertEquals(highWatermark, replicaState.logEndOffset) - assertEquals(highWatermark, fetcher.fetchState(partition).get.fetchOffset) - assertTrue(fetcher.fetchState(partition).get.isReadyForFetch) - } - @Test def testTruncateToHighWatermarkIfLeaderEpochInfoNotAvailable(): Unit = { val highWatermark = 2L diff --git a/core/src/test/scala/unit/kafka/server/AbstractMetadataRequestTest.scala b/core/src/test/scala/unit/kafka/server/AbstractMetadataRequestTest.scala index ebe4bd05f3ffb..b0f0f74e88dc8 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractMetadataRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractMetadataRequestTest.scala @@ -54,17 +54,9 @@ abstract class AbstractMetadataRequestTest extends BaseRequestTest { } protected def checkAutoCreatedTopic(autoCreatedTopic: String, response: MetadataResponse): Unit = { - if (isKRaftTest()) { - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors.get(autoCreatedTopic)) 
- for (i <- 0 until brokers.head.config.numPartitions) { - TestUtils.waitForPartitionMetadata(brokers, autoCreatedTopic, i) - } - } else { - assertEquals(Errors.LEADER_NOT_AVAILABLE, response.errors.get(autoCreatedTopic)) - assertEquals(Some(brokers.head.config.numPartitions), zkClient.getTopicPartitionCount(autoCreatedTopic)) - for (i <- 0 until brokers.head.config.numPartitions) { - TestUtils.waitForPartitionMetadata(brokers, autoCreatedTopic, i) - } + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors.get(autoCreatedTopic)) + for (i <- 0 until brokers.head.config.numPartitions) { + TestUtils.waitForPartitionMetadata(brokers, autoCreatedTopic, i) } } } diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala index 20d9b70a1ae73..701ff8f079ce2 100644 --- a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala @@ -34,6 +34,8 @@ import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.RequestAndCompletionHandler import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import org.mockito.ArgumentMatchers import org.mockito.ArgumentMatchers.{any, anyLong, anyString} import org.mockito.MockedConstruction.Context @@ -70,9 +72,9 @@ class AddPartitionsToTxnManagerTest { private val authenticationErrorResponse = clientResponse(null, authException = new SaslAuthenticationException("")) private val versionMismatchResponse = clientResponse(null, mismatchException = new UnsupportedVersionException("")) private val disconnectedResponse = clientResponse(null, disconnected = true) - private val transactionSupportedOperation = genericError + private val transactionSupportedOperation = genericErrorSupported - private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")) + private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) @BeforeEach def setup(): Unit = { @@ -94,8 +96,10 @@ class AddPartitionsToTxnManagerTest { callbackErrors.foreachEntry(errors.put) } - @Test - def testAddTxnData(): Unit = { + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testAddTxnData(isAddPartition: Boolean): Unit = { + val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported when(partitionFor.apply(transactionalId1)).thenReturn(0) when(partitionFor.apply(transactionalId2)).thenReturn(1) when(partitionFor.apply(transactionalId3)).thenReturn(0) @@ -106,9 +110,9 @@ class AddPartitionsToTxnManagerTest { val transaction2Errors = mutable.Map[TopicPartition, Errors]() val transaction3Errors = mutable.Map[TopicPartition, Errors]() - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) - addPartitionsToTxnManager.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) - addPartitionsToTxnManager.verifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transaction3Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, 
producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transaction3Errors), transactionSupportedOperation) // We will try to add transaction1 3 more times (retries). One will have the same epoch, one will have a newer epoch, and one will have an older epoch than the new one we just added. val transaction1RetryWithSameEpochErrors = mutable.Map[TopicPartition, Errors]() @@ -116,17 +120,17 @@ class AddPartitionsToTxnManagerTest { val transaction1RetryWithOldEpochErrors = mutable.Map[TopicPartition, Errors]() // Trying to add more transactional data for the same transactional ID, producer ID, and epoch should simply replace the old data and send a retriable response. - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithSameEpochErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithSameEpochErrors), transactionSupportedOperation) val expectedNetworkErrors = topicPartitions.map(_ -> Errors.NETWORK_EXCEPTION).toMap assertEquals(expectedNetworkErrors, transaction1Errors) // Trying to add more transactional data for the same transactional ID and producer ID, but new epoch should replace the old data and send an error response for it. - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 1, topicPartitions, setErrors(transaction1RetryWithNewerEpochErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 1, topicPartitions, setErrors(transaction1RetryWithNewerEpochErrors), transactionSupportedOperation) val expectedEpochErrors = topicPartitions.map(_ -> Errors.INVALID_PRODUCER_EPOCH).toMap assertEquals(expectedEpochErrors, transaction1RetryWithSameEpochErrors) // Trying to add more transactional data for the same transactional ID and producer ID, but an older epoch should immediately return with error and keep the old data queued to send. 
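Editor's note (not part of the patch): the hunks above migrate testAddTxnData from a plain @Test to a JUnit 5 parameterized test whose boolean selects between the add-partition and verify-only paths, with the renamed addOrVerifyTransaction call and the expected verifyOnly flag flipping accordingly. A standalone sketch of that parameterization pattern only, where the class and helper below are hypothetical stand-ins for the real manager calls:

```scala
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource

// Standalone sketch of the boolean parameterization pattern only;
// `requestIsVerifyOnly` is a hypothetical stand-in for the behaviour the real
// test exercises through AddPartitionsToTxnManager.addOrVerifyTransaction.
class VerifyOnlyFlagExampleTest {

  private def requestIsVerifyOnly(isAddPartition: Boolean): Boolean = !isAddPartition

  @ParameterizedTest
  @ValueSource(booleans = Array(true, false))
  def testVerifyOnlyTracksOperation(isAddPartition: Boolean): Unit = {
    // Adding partitions implies verifyOnly = false; pure verification implies true.
    assertEquals(!isAddPartition, requestIsVerifyOnly(isAddPartition))
  }
}
```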
- addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithOldEpochErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithOldEpochErrors), transactionSupportedOperation) assertEquals(expectedEpochErrors, transaction1RetryWithOldEpochErrors) val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala @@ -136,45 +140,44 @@ class AddPartitionsToTxnManagerTest { assertEquals( AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection(Seq( - transactionData(transactionalId3, producerId3), - transactionData(transactionalId1, producerId1, producerEpoch = 1) + transactionData(transactionalId3, producerId3, verifyOnly = !isAddPartition), + transactionData(transactionalId1, producerId1, producerEpoch = 1, verifyOnly = !isAddPartition) ).iterator.asJava) ).data, requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data // insertion order ) } else { - verifyRequest(node1, transactionalId2, producerId2, requestAndHandler) + verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) } } } - @Test - def testGenerateRequests(): Unit = { + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testGenerateRequests(isAddPartition: Boolean): Unit = { when(partitionFor.apply(transactionalId1)).thenReturn(0) when(partitionFor.apply(transactionalId2)).thenReturn(1) when(partitionFor.apply(transactionalId3)).thenReturn(2) mockTransactionStateMetadata(0, 0, Some(node0)) mockTransactionStateMetadata(1, 1, Some(node1)) mockTransactionStateMetadata(2, 2, Some(node2)) + val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported val transactionErrors = mutable.Map[TopicPartition, Errors]() - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - addPartitionsToTxnManager.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala assertEquals(2, requestsAndHandlers.size) // Note: handlers are tested in testAddPartitionsToTxnHandlerErrorHandling requestsAndHandlers.foreach { requestAndHandler => - if (requestAndHandler.destination == node0) { - verifyRequest(node0, transactionalId1, producerId1, requestAndHandler) - } else { - verifyRequest(node1, transactionalId2, producerId2, requestAndHandler) - } + if (requestAndHandler.destination == node0) verifyRequest(node0, transactionalId1, producerId1, !isAddPartition, requestAndHandler) + else verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) } - addPartitionsToTxnManager.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - 
addPartitionsToTxnManager.verifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) // Test creationTimeMs increases too. time.sleep(10) @@ -183,7 +186,7 @@ class AddPartitionsToTxnManagerTest { // The request for node1 should not be added because one request is already inflight. assertEquals(1, requestsAndHandlers2.size) requestsAndHandlers2.foreach { requestAndHandler => - verifyRequest(node2, transactionalId3, producerId3, requestAndHandler) + verifyRequest(node2, transactionalId3, producerId3, !isAddPartition, requestAndHandler) } // Complete the request for node1 so the new one can go through. @@ -191,7 +194,7 @@ class AddPartitionsToTxnManagerTest { val requestsAndHandlers3 = addPartitionsToTxnManager.generateRequests().asScala assertEquals(1, requestsAndHandlers3.size) requestsAndHandlers3.foreach { requestAndHandler => - verifyRequest(node1, transactionalId2, producerId2, requestAndHandler) + verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) } } @@ -202,7 +205,7 @@ class AddPartitionsToTxnManagerTest { def checkError(): Unit = { val errors = mutable.Map[TopicPartition, Errors]() - addPartitionsToTxnManager.verifyTransaction( + addPartitionsToTxnManager.addOrVerifyTransaction( transactionalId1, producerId1, producerEpoch = 0, @@ -241,16 +244,16 @@ class AddPartitionsToTxnManagerTest { transaction1Errors.clear() transaction2Errors.clear() - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) - addPartitionsToTxnManager.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) } def addTransactionsToVerifyRequestVersion(operationExpected: TransactionSupportedOperation): Unit = { transaction1Errors.clear() transaction2Errors.clear() - addPartitionsToTxnManager.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), operationExpected) - addPartitionsToTxnManager.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), operationExpected) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), operationExpected) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), operationExpected) } val expectedAuthErrors = topicPartitions.map(_ -> Errors.SASL_AUTHENTICATION_FAILED).toMap @@ -319,7 +322,7 @@ class AddPartitionsToTxnManagerTest { 
assertEquals(expectedTransactionAbortableErrorsTxn1LowerVersion, transaction1Errors) assertEquals(expectedTransactionAbortableErrorsTxn2LowerVersion, transaction2Errors) - addTransactionsToVerifyRequestVersion(genericError) + addTransactionsToVerifyRequestVersion(genericErrorSupported) receiveResponse(mixedAbortableErrorsResponse) assertEquals(expectedTransactionAbortableErrorsTxn1HigherVersion, transaction1Errors) assertEquals(expectedTransactionAbortableErrorsTxn2HigherVersion, transaction2Errors) @@ -360,8 +363,8 @@ class AddPartitionsToTxnManagerTest { ) try { - addPartitionsManagerWithMockedMetrics.verifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) - addPartitionsManagerWithMockedMetrics.verifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) time.sleep(100) @@ -424,13 +427,14 @@ class AddPartitionsToTxnManagerTest { private def transactionData( transactionalId: String, producerId: Long, - producerEpoch: Short = 0 + producerEpoch: Short = 0, + verifyOnly: Boolean, ): AddPartitionsToTxnTransaction = { new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(producerEpoch) - .setVerifyOnly(true) + .setVerifyOnly(verifyOnly) .setTopics(new AddPartitionsToTxnTopicCollection( Seq(new AddPartitionsToTxnTopic() .setName(topic) @@ -445,6 +449,7 @@ class AddPartitionsToTxnManagerTest { expectedDestination: Node, transactionalId: String, producerId: Long, + verifyOnly: Boolean, requestAndHandler: RequestAndCompletionHandler ): Unit = { assertEquals(time.milliseconds(), requestAndHandler.creationTimeMs) @@ -452,7 +457,7 @@ class AddPartitionsToTxnManagerTest { assertEquals( AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - Seq(transactionData(transactionalId, producerId)).iterator.asJava + Seq(transactionData(transactionalId, producerId, verifyOnly = verifyOnly)).iterator.asJava ) ).data, requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala index 74a378995b16b..1b35f93961946 100644 --- a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala @@ -32,9 +32,9 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, FindCoordinatorRequest, FindCoordinatorResponse, InitProducerIdRequest, InitProducerIdResponse} import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{Arguments, 
MethodSource} +import org.junit.jupiter.params.provider.{Arguments, MethodSource, ValueSource} import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -109,9 +109,10 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertTrue(errors.containsKey(nonExistentTopic)) assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, errors.get(nonExistentTopic)) } - - @Test - def testOneSuccessOneErrorInBatchedRequest(): Unit = { + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testOneSuccessOneErrorInBatchedRequest(quorum: String): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId1 = "foobar" val transactionalId2 = "barfoo" // "barfoo" maps to the same transaction coordinator @@ -148,8 +149,9 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertEquals(expectedErrors, errors) } - @Test - def testVerifyOnly(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testVerifyOnly(quorum: String): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId = "foobar" @@ -172,6 +174,7 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { // First find coordinator request creates the state topic, then wait for transactional topics to be created. connectAndReceive[FindCoordinatorResponse](findCoordinatorRequest, brokerSocketServer(brokers.head.config.brokerId)) TestUtils.waitForAllPartitionsMetadata(brokers, "__transaction_state", 50) + TestUtils.ensureConsistentKRaftMetadata(brokers, controllerServer) val findCoordinatorResponse = connectAndReceive[FindCoordinatorResponse](findCoordinatorRequest, brokerSocketServer(brokers.head.config.brokerId)) val coordinatorId = findCoordinatorResponse.data().coordinators().get(0).nodeId() diff --git a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala index b461d39c1d819..6c882d3877d83 100644 --- a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala @@ -21,7 +21,6 @@ import kafka.server.{BrokerServer, ControllerServer, IntegrationTestUtils} import org.apache.kafka.common.test.api.ClusterInstance import org.apache.kafka.common.test.api.{ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.test.api.ClusterTestExtensions -import org.apache.kafka.common.test.api.RaftClusterInvocationContext.RaftClusterInstance import org.apache.kafka.common.message.AllocateProducerIdsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ @@ -35,11 +34,10 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { @ClusterTest def testAllocateProducersIdSentToController(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val sourceBroker = raftCluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] + val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt - val controllerServer = raftCluster.controllers.values().stream() + val controllerServer = cluster.controllers.values().stream() .filter(_.config.nodeId == controllerId) .findFirst() .get() @@ -52,11 +50,10 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { @ClusterTest(controllers = 3) def testAllocateProducersIdSentToNonController(): Unit = { - 
val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val sourceBroker = raftCluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] + val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt - val controllerServer = raftCluster.controllers().values().stream() + val controllerServer = cluster.controllers().values().stream() .filter(_.config.nodeId != controllerId) .findFirst() .get() diff --git a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala index 38825ef417409..9455eaf7bfd5a 100644 --- a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala @@ -19,11 +19,10 @@ package kafka.server import java.util.Collections import java.util.stream.{Stream => JStream} -import kafka.zk.KafkaZkClient import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.TopicIdPartition import org.apache.kafka.common.Uuid -import org.apache.kafka.common.errors.{AuthenticationException, InvalidUpdateVersionException, OperationNotAttemptedException, UnknownServerException, UnsupportedVersionException} +import org.apache.kafka.common.errors.{AuthenticationException, OperationNotAttemptedException, UnknownServerException, UnsupportedVersionException} import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState import org.apache.kafka.common.message.{AlterPartitionRequestData, AlterPartitionResponseData} import org.apache.kafka.common.metrics.Metrics @@ -33,7 +32,7 @@ import org.apache.kafka.common.requests.RequestHeader import org.apache.kafka.common.requests.{AbstractRequest, AlterPartitionRequest, AlterPartitionResponse} import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, MetadataVersion, NodeToControllerChannelManager} -import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV2, IBP_3_2_IV0, IBP_3_5_IV1} +import org.apache.kafka.server.common.MetadataVersion.{IBP_3_0_IV1, IBP_3_2_IV0, IBP_3_5_IV1} import org.apache.kafka.server.util.{MockScheduler, MockTime} import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ @@ -43,7 +42,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.Arguments import org.junit.jupiter.params.provider.MethodSource import org.mockito.ArgumentMatcher -import org.mockito.ArgumentMatchers.{any, anyString} +import org.mockito.ArgumentMatchers.any import org.mockito.Mockito.{mock, reset, times, verify} import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} @@ -76,7 +75,7 @@ class AlterPartitionManagerTest { val scheduler = new MockScheduler(time) val alterPartitionManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, () => metadataVersion) alterPartitionManager.start() - alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) verify(brokerToController).start() verify(brokerToController).sendRequest(any(), any()) } @@ -91,7 +90,7 @@ class AlterPartitionManagerTest { for (ii <- 1 to 3) { 
isrWithBrokerEpoch += new BrokerState().setBrokerId(ii).setBrokerEpoch(100 + ii) } - alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, LeaderRecoveryState.RECOVERED, isrWithBrokerEpoch.toList.asJava, 10), 0) + alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, LeaderRecoveryState.RECOVERED, isrWithBrokerEpoch.toList.asJava, 10)) val expectedAlterPartitionData = new AlterPartitionRequestData() .setBrokerId(brokerId) @@ -100,23 +99,15 @@ class AlterPartitionManagerTest { .setTopicName(topic) .setTopicId(topicId) - if (metadataVersion.isTopicIdsSupported) { - val newIsrWithBrokerEpoch = new ListBuffer[BrokerState]() - newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(1).setBrokerEpoch(101)) - newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(2).setBrokerEpoch(102)) - newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(3).setBrokerEpoch(103)) - topicData.partitions.add(new AlterPartitionRequestData.PartitionData() - .setPartitionIndex(0) - .setLeaderEpoch(1) - .setPartitionEpoch(10) - .setNewIsrWithEpochs(newIsrWithBrokerEpoch.toList.asJava)) - } else { - topicData.partitions.add(new AlterPartitionRequestData.PartitionData() - .setPartitionIndex(0) - .setLeaderEpoch(1) - .setPartitionEpoch(10) - .setNewIsr(List(1, 2, 3).map(Integer.valueOf).asJava)) - } + val newIsrWithBrokerEpoch = new ListBuffer[BrokerState]() + newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(1).setBrokerEpoch(101)) + newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(2).setBrokerEpoch(102)) + newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(3).setBrokerEpoch(103)) + topicData.partitions.add(new AlterPartitionRequestData.PartitionData() + .setPartitionIndex(0) + .setLeaderEpoch(1) + .setPartitionEpoch(10) + .setNewIsrWithEpochs(newIsrWithBrokerEpoch.toList.asJava)) expectedAlterPartitionData.topics.add(topicData) @@ -137,7 +128,7 @@ class AlterPartitionManagerTest { val scheduler = new MockScheduler(time) val alterPartitionManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, () => metadataVersion) alterPartitionManager.start() - alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1).map(Int.box).asJava, leaderRecoveryState, 10), 0) + alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1).map(Int.box).asJava, leaderRecoveryState, 10)) verify(brokerToController).start() verify(brokerToController).sendRequest(requestCapture.capture(), any()) @@ -149,7 +140,6 @@ class AlterPartitionManagerTest { @ParameterizedTest @MethodSource(Array("provideMetadataVersions")) def testOverwriteWithinBatch(metadataVersion: MetadataVersion): Unit = { - val canUseTopicIds = metadataVersion.isAtLeast(MetadataVersion.IBP_2_8_IV0) val capture: ArgumentCaptor[AbstractRequest.Builder[AlterPartitionRequest]] = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[AlterPartitionRequest]]) val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) @@ -158,10 +148,10 @@ class AlterPartitionManagerTest { alterPartitionManager.start() // Only send one ISR update for a given topic+partition - val firstSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + val firstSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) assertFalse(firstSubmitFuture.isDone) - val 
failedSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + val failedSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) assertTrue(failedSubmitFuture.isCompletedExceptionally) assertFutureThrows(failedSubmitFuture, classOf[OperationNotAttemptedException]) @@ -169,13 +159,13 @@ class AlterPartitionManagerTest { val alterPartitionResp = partitionResponse() val resp = makeClientResponse( response = alterPartitionResp, - version = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1 + version = ApiKeys.ALTER_PARTITION.latestVersion ) verify(brokerToController).sendRequest(capture.capture(), callbackCapture.capture()) callbackCapture.getValue.onComplete(resp) // Now we can submit this partition again - val newSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + val newSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) assertFalse(newSubmitFuture.isDone) verify(brokerToController).start() @@ -203,12 +193,12 @@ class AlterPartitionManagerTest { // First request will send batch of one alterPartitionManager.submit(new TopicIdPartition(topicId, 0, topic), - new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) // Other submissions will queue up until a response for (i <- 1 to 9) { alterPartitionManager.submit(new TopicIdPartition(topicId, i, topic), - new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) } // Simulate response, omitting partition 0 will allow it to stay in unsent queue @@ -245,12 +235,12 @@ class AlterPartitionManagerTest { val scheduler = new MockScheduler(time) val alterPartitionManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, () => IBP_3_2_IV0) alterPartitionManager.start() - val future = alterPartitionManager.submit(tp0, leaderAndIsr, 0) + val future = alterPartitionManager.submit(tp0, leaderAndIsr) val finalFuture = new CompletableFuture[LeaderAndIsr]() future.whenComplete { (_, e) => if (e != null) { // Retry when error. 
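Editor's note (not part of the patch): the retry logic above chains two submit calls through CompletableFuture.whenComplete, completing a single final future from whichever attempt succeeds or from the second failure. A minimal sketch of that shape in isolation, assuming a generic attempt supplier rather than the real AlterPartitionManager.submit:

```scala
import java.util.concurrent.CompletableFuture

// Sketch of the retry shape used in the test: run `attempt` once, and on an
// exceptional completion run it exactly one more time, funnelling whichever
// outcome arrives into a single final future.
object SubmitWithOneRetry {
  def apply[T](attempt: () => CompletableFuture[T]): CompletableFuture[T] = {
    val finalFuture = new CompletableFuture[T]()
    attempt().whenComplete { (result: T, error: Throwable) =>
      if (error != null) {
        // Retry when error, mirroring the second submit in the test.
        attempt().whenComplete { (retryResult: T, retryError: Throwable) =>
          if (retryError != null) finalFuture.completeExceptionally(retryError)
          else finalFuture.complete(retryResult)
        }
      } else {
        finalFuture.complete(result)
      }
    }
    finalFuture
  }
}
```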
- alterPartitionManager.submit(tp0, leaderAndIsr, 0).whenComplete { (result, e) => + alterPartitionManager.submit(tp0, leaderAndIsr).whenComplete { (result, e) => if (e != null) { finalFuture.completeExceptionally(e) } else { @@ -319,7 +309,7 @@ class AlterPartitionManagerTest { val scheduler = new MockScheduler(time) val alterPartitionManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, () => IBP_3_2_IV0) alterPartitionManager.start() - alterPartitionManager.submit(tp0, leaderAndIsr, 0) + alterPartitionManager.submit(tp0, leaderAndIsr) verify(brokerToController).start() verify(brokerToController).sendRequest(any(), callbackCapture.capture()) @@ -367,7 +357,7 @@ class AlterPartitionManagerTest { private def checkPartitionError(error: Errors): Unit = { val alterPartitionManager = testPartitionError(tp0, error) // Any partition-level error should clear the item from the pending queue allowing for future updates - val future = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + val future = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) assertFalse(future.isDone) } @@ -379,7 +369,7 @@ class AlterPartitionManagerTest { val alterPartitionManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, () => IBP_3_2_IV0) alterPartitionManager.start() - val future = alterPartitionManager.submit(tp, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + val future = alterPartitionManager.submit(tp, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) verify(brokerToController).start() verify(brokerToController).sendRequest(any(), callbackCapture.capture()) @@ -403,11 +393,11 @@ class AlterPartitionManagerTest { alterPartitionManager.start() // First submit will send the request - alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) // These will become pending unsent items - alterPartitionManager.submit(tp1, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) - alterPartitionManager.submit(tp2, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10), 0) + alterPartitionManager.submit(tp1, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) + alterPartitionManager.submit(tp2, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) verify(brokerToController).start() verify(brokerToController).sendRequest(any(), callbackCapture.capture()) @@ -423,13 +413,8 @@ class AlterPartitionManagerTest { @ParameterizedTest @MethodSource(Array("provideMetadataVersions")) def testPartitionMissingInResponse(metadataVersion: MetadataVersion): Unit = { - val expectedVersion = if (metadataVersion.isTopicIdsSupported) { - ApiKeys.ALTER_PARTITION.latestVersion - } else { - 1.toShort - } + val expectedVersion = ApiKeys.ALTER_PARTITION.latestVersion val leaderAndIsr = new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10) - val controlledEpoch = 0 val brokerEpoch = 2 val scheduler = new 
MockScheduler(time) val brokerToController = Mockito.mock(classOf[NodeToControllerChannelManager]) @@ -444,15 +429,15 @@ class AlterPartitionManagerTest { alterPartitionManager.start() // The first `submit` will send the `AlterIsr` request - val future1 = alterPartitionManager.submit(tp0, leaderAndIsr, controlledEpoch) + val future1 = alterPartitionManager.submit(tp0, leaderAndIsr) val callback1 = verifySendRequest(brokerToController, alterPartitionRequestMatcher( expectedTopicPartitions = Set(tp0), expectedVersion = expectedVersion )) // Additional calls while the `AlterIsr` request is inflight will be queued - val future2 = alterPartitionManager.submit(tp1, leaderAndIsr, controlledEpoch) - val future3 = alterPartitionManager.submit(tp2, leaderAndIsr, controlledEpoch) + val future2 = alterPartitionManager.submit(tp1, leaderAndIsr) + val future3 = alterPartitionManager.submit(tp2, leaderAndIsr) // Respond to the first request, which will also allow the next request to get sent callback1.onComplete(makeClientResponse( @@ -487,74 +472,6 @@ class AlterPartitionManagerTest { assertFutureThrows(future2, classOf[UnknownServerException]) } - @ParameterizedTest - @MethodSource(Array("provideMetadataVersions")) - def testPartialTopicIds(metadataVersion: MetadataVersion): Unit = { - val canUseTopicIds = metadataVersion.isAtLeast(MetadataVersion.IBP_2_8_IV0) - val foo = new TopicIdPartition(Uuid.ZERO_UUID, 0, "foo") - val bar = new TopicIdPartition(Uuid.randomUuid(), 0, "bar") - val zar = new TopicIdPartition(Uuid.randomUuid(), 0, "zar") - - val leaderAndIsr = new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10) - val controlledEpoch = 0 - val brokerEpoch = 2 - val scheduler = new MockScheduler(time) - val brokerToController = Mockito.mock(classOf[NodeToControllerChannelManager]) - val alterPartitionManager = new DefaultAlterPartitionManager( - brokerToController, - scheduler, - time, - brokerId, - () => brokerEpoch, - () => metadataVersion - ) - alterPartitionManager.start() - - // Submits an alter isr update with zar, which has a topic id. - val future1 = alterPartitionManager.submit(zar, leaderAndIsr, controlledEpoch) - - // The latest version is expected if all the submitted partitions - // have topic ids and IBP >= 2.8; version 1 should be used otherwise. - val callback1 = verifySendRequest(brokerToController, alterPartitionRequestMatcher( - expectedTopicPartitions = Set(zar), - expectedVersion = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1 - )) - - // Submits two additional alter isr changes with foo and bar while the previous one - // is still inflight. foo has no topic id, bar has one. - val future2 = alterPartitionManager.submit(foo, leaderAndIsr, controlledEpoch) - val future3 = alterPartitionManager.submit(bar, leaderAndIsr, controlledEpoch) - - // Completes the first request. That triggers the next one. - callback1.onComplete(makeClientResponse( - response = makeAlterPartition(Seq(makeAlterPartitionTopicData(zar, Errors.NONE))), - version = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1 - )) - - assertTrue(future1.isDone) - assertFalse(future2.isDone) - assertFalse(future3.isDone) - - // Version 1 is expected because foo does not have a topic id. - val callback2 = verifySendRequest(brokerToController, alterPartitionRequestMatcher( - expectedTopicPartitions = Set(foo, bar), - expectedVersion = 1 - )) - - // Completes the second request. 
- callback2.onComplete(makeClientResponse( - response = makeAlterPartition(Seq( - makeAlterPartitionTopicData(foo, Errors.NONE), - makeAlterPartitionTopicData(bar, Errors.NONE), - )), - version = 1 - )) - - assertTrue(future1.isDone) - assertTrue(future2.isDone) - assertTrue(future3.isDone) - } - private def verifySendRequest( brokerToController: NodeToControllerChannelManager, expectedRequest: ArgumentMatcher[AbstractRequest.Builder[_ <: AbstractRequest]] @@ -610,52 +527,6 @@ class AlterPartitionManagerTest { ) } - private def makeAlterPartition( - topics: Seq[AlterPartitionResponseData.TopicData] - ): AlterPartitionResponse = { - new AlterPartitionResponse(new AlterPartitionResponseData().setTopics(topics.asJava)) - } - - private def makeAlterPartitionTopicData( - topicIdPartition: TopicIdPartition, - error: Errors - ): AlterPartitionResponseData.TopicData = { - new AlterPartitionResponseData.TopicData() - .setTopicName(topicIdPartition.topic) - .setTopicId(topicIdPartition.topicId) - .setPartitions(Collections.singletonList( - new AlterPartitionResponseData.PartitionData() - .setPartitionIndex(topicIdPartition.partition) - .setErrorCode(error.code))) - } - - @Test - def testZkBasic(): Unit = { - val scheduler = new MockScheduler(time) - scheduler.startup() - - val kafkaZkClient = Mockito.mock(classOf[KafkaZkClient]) - Mockito.doAnswer(_ => (true, 2)) - .when(kafkaZkClient) - .conditionalUpdatePath(anyString(), any(), ArgumentMatchers.eq(1), any()) - Mockito.doAnswer(_ => (false, 2)) - .when(kafkaZkClient) - .conditionalUpdatePath(anyString(), any(), ArgumentMatchers.eq(3), any()) - - val zkIsrManager = new ZkAlterPartitionManager(scheduler, time, kafkaZkClient) - zkIsrManager.start() - - // Correct ZK version - val future1 = zkIsrManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 1), 0) - assertTrue(future1.isDone) - assertEquals(new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 2), future1.get) - - // Wrong ZK version - val future2 = zkIsrManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 3), 0) - assertTrue(future2.isCompletedExceptionally) - assertFutureThrows(future2, classOf[InvalidUpdateVersionException]) - } - private def partitionResponse( tp: TopicIdPartition = tp0, error: Errors = Errors.NONE, @@ -688,7 +559,7 @@ object AlterPartitionManagerTest { // Supports KIP-704: unclean leader recovery IBP_3_2_IV0, // Supports KIP-497: alter partition - IBP_2_7_IV2 + IBP_3_0_IV1 ) } diff --git a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala index 4b3e170191506..8e2698b0842cf 100644 --- a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala @@ -27,7 +27,8 @@ import org.apache.kafka.common.requests.{AlterReplicaLogDirsRequest, AlterReplic import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.storage.internals.log.LogFileUtils import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.Properties import scala.jdk.CollectionConverters._ @@ -51,12 +52,13 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { .find(p => p.partitionIndex == 
tp.partition).get.errorCode) } - @Test - def testAlterReplicaLogDirsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequest(quorum: String): Unit = { val partitionNum = 5 // Alter replica dir before topic creation - val logDir1 = new File(servers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) @@ -64,16 +66,16 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { (0 until partitionNum).foreach { partition => val tp = new TopicPartition(topic, partition) assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, findErrorForPartition(alterReplicaLogDirsResponse1, tp)) - assertTrue(servers.head.logManager.getLog(tp).isEmpty) + assertTrue(brokers.head.logManager.getLog(tp).isEmpty) } createTopic(topic, partitionNum) (0 until partitionNum).foreach { partition => - assertEquals(logDir1, servers.head.logManager.getLog(new TopicPartition(topic, partition)).get.dir.getParent) + assertEquals(logDir1, brokers.head.logManager.getLog(new TopicPartition(topic, partition)).get.dir.getParent) } // Alter replica dir again after topic creation - val logDir2 = new File(servers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs2 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir2).toMap val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(partitionDirs2) // The response should succeed for all partitions @@ -81,17 +83,18 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { val tp = new TopicPartition(topic, partition) assertEquals(Errors.NONE, findErrorForPartition(alterReplicaLogDirsResponse2, tp)) TestUtils.waitUntilTrue(() => { - logDir2 == servers.head.logManager.getLog(new TopicPartition(topic, partition)).get.dir.getParent + logDir2 == brokers.head.logManager.getLog(new TopicPartition(topic, partition)).get.dir.getParent }, "timed out waiting for replica movement") } } - @Test - def testAlterReplicaLogDirsRequestErrorCode(): Unit = { - val offlineDir = new File(servers.head.config.logDirs.tail.head).getAbsolutePath - val validDir1 = new File(servers.head.config.logDirs(1)).getAbsolutePath - val validDir2 = new File(servers.head.config.logDirs(2)).getAbsolutePath - val validDir3 = new File(servers.head.config.logDirs(3)).getAbsolutePath + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequestErrorCode(quorum: String): Unit = { + val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath + val validDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath + val validDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath + val validDir3 = new File(brokers.head.config.logDirs(3)).getAbsolutePath // Test AlterReplicaDirRequest before topic creation val partitionDirs1 = mutable.Map.empty[TopicPartition, String] @@ -112,8 +115,8 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { assertEquals(Errors.NONE, findErrorForPartition(alterReplicaDirResponse2, new TopicPartition(topic, 1))) // Test AlterReplicaDirRequest after topic creation and log directory 
failure - servers.head.logDirFailureChannel.maybeAddOfflineLogDir(offlineDir, "", new java.io.IOException()) - TestUtils.waitUntilTrue(() => !servers.head.logManager.isLogDirOnline(offlineDir), s"timed out waiting for $offlineDir to be offline", 3000) + brokers.head.logDirFailureChannel.maybeAddOfflineLogDir(offlineDir, "", new java.io.IOException()) + TestUtils.waitUntilTrue(() => !brokers.head.logManager.isLogDirOnline(offlineDir), s"timed out waiting for $offlineDir to be offline", 3000) val partitionDirs3 = mutable.Map.empty[TopicPartition, String] partitionDirs3.put(new TopicPartition(topic, 0), "invalidDir") partitionDirs3.put(new TopicPartition(topic, 1), validDir3) @@ -124,19 +127,20 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { assertEquals(Errors.KAFKA_STORAGE_ERROR, findErrorForPartition(alterReplicaDirResponse3, new TopicPartition(topic, 2))) } - @Test - def testAlterReplicaLogDirsRequestWithRetention(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequestWithRetention(quorum: String): Unit = { val partitionNum = 1 // Alter replica dir before topic creation - val logDir1 = new File(servers.head.config.logDirs(1)).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) // The response should show error UNKNOWN_TOPIC_OR_PARTITION for all partitions val tp = new TopicPartition(topic, 0) assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, findErrorForPartition(alterReplicaLogDirsResponse1, tp)) - assertTrue(servers.head.logManager.getLog(tp).isEmpty) + assertTrue(brokers.head.logManager.getLog(tp).isEmpty) val topicProperties = new Properties() topicProperties.put(TopicConfig.RETENTION_BYTES_CONFIG, "1024") @@ -147,13 +151,13 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { topicProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1024") createTopic(topic, partitionNum, 1, topicProperties) - assertEquals(logDir1, servers.head.logManager.getLog(tp).get.dir.getParent) + assertEquals(logDir1, brokers.head.logManager.getLog(tp).get.dir.getParent) // send enough records to trigger log rolling (0 until 20).foreach { _ => - TestUtils.generateAndProduceMessages(servers, topic, 10, 1) + TestUtils.generateAndProduceMessages(brokers, topic, 10, 1) } - TestUtils.waitUntilTrue(() => servers.head.logManager.getLog(new TopicPartition(topic, 0)).get.numberOfSegments > 1, + TestUtils.waitUntilTrue(() => brokers.head.logManager.getLog(new TopicPartition(topic, 0)).get.numberOfSegments > 1, "timed out waiting for log segment to roll") // Wait for log segment retention in original dir. 
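Editor's note (not part of the patch): these AlterReplicaLogDirsRequestTest hunks follow the same recipe applied across the patch — the `servers` accessor becomes `brokers`, and each @Test becomes a @ParameterizedTest pinned to the "kraft" quorum via @ValueSource. Shown in isolation (illustrative only; the class name and assertion below are not from the patch):

```scala
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource

// Illustrative only: the quorum-parameterized shape the patch applies to the
// former @Test methods, now always invoked with the single value "kraft".
class QuorumParameterizedShapeTest {

  @ParameterizedTest
  @ValueSource(strings = Array("kraft"))
  def testQuorumParameterIsKRaft(quorum: String): Unit = {
    assertEquals("kraft", quorum)
  }
}
```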
@@ -162,12 +166,12 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { }, "timed out waiting for log segment to retention") // Alter replica dir again after topic creation - val logDir2 = new File(servers.head.config.logDirs(2)).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(Map(tp -> logDir2)) // The response should succeed for all partitions assertEquals(Errors.NONE, findErrorForPartition(alterReplicaLogDirsResponse2, tp)) TestUtils.waitUntilTrue(() => { - logDir2 == servers.head.logManager.getLog(tp).get.dir.getParent + logDir2 == brokers.head.logManager.getLog(tp).get.dir.getParent }, "timed out waiting for replica movement") // Make sure the deleted log segment is removed @@ -191,7 +195,6 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { val data = new AlterReplicaLogDirsRequestData() .setDirs(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirCollection(logDirs.asJava.iterator)) val request = new AlterReplicaLogDirsRequest.Builder(data).build() - connectAndReceive[AlterReplicaLogDirsResponse](request, destination = controllerSocketServer) + connectAndReceive[AlterReplicaLogDirsResponse](request, anySocketServer) } - } diff --git a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala index 4797d849beaa6..ced7887351082 100644 --- a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala @@ -34,7 +34,7 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.config.ServerConfigs -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -238,19 +238,6 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { checkAllErrorsAlteringCredentials(results, Errors.RESOURCE_NOT_FOUND, "when deleting a non-existing credential") } - @Test - def testAlterNotController(): Unit = { - val request = new AlterUserScramCredentialsRequest.Builder( - new AlterUserScramCredentialsRequestData() - .setDeletions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) - .setUpsertions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user2).setMechanism(ScramMechanism.SCRAM_SHA_512.`type`)))).build() - val response = sendAlterUserScramCredentialsRequest(request, notControllerSocketServer) - - val results = response.data.results - assertEquals(2, results.size) - checkAllErrorsAlteringCredentials(results, Errors.NOT_CONTROLLER, "when routed incorrectly to a non-Controller broker") - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testAlterAndDescribe(quorum: String): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala index a7a18085bbbda..fcfd8a05ae649 100644 --- 
a/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala @@ -16,12 +16,11 @@ */ package kafka.server -import kafka.server.metadata.ZkMetadataCache import org.apache.kafka.clients.NodeApiVersions import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.KRaftVersion import org.junit.jupiter.api.{Disabled, Test} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest @@ -32,7 +31,7 @@ import scala.jdk.CollectionConverters._ class ApiVersionManagerTest { private val brokerFeatures = BrokerFeatures.createDefault(true) - private val metadataCache = new ZkMetadataCache(1, MetadataVersion.latestTesting(), brokerFeatures) + private val metadataCache = MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.LATEST_PRODUCTION) @ParameterizedTest @EnumSource(classOf[ListenerType]) @@ -73,7 +72,7 @@ class ApiVersionManagerTest { @Test def testControllerApiIntersection(): Unit = { - val controllerMinVersion: Short = 1 + val controllerMinVersion: Short = 3 val controllerMaxVersion: Short = 5 val forwardingManager = Mockito.mock(classOf[ForwardingManager]) diff --git a/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala b/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala deleted file mode 100644 index d86c450ea0a73..0000000000000 --- a/core/src/test/scala/unit/kafka/server/AutoTopicCreationManagerTest.scala +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import java.net.InetAddress -import java.nio.ByteBuffer -import java.util.concurrent.atomic.AtomicBoolean -import java.util.{Collections, Optional, Properties} -import kafka.controller.KafkaController -import kafka.coordinator.transaction.TransactionCoordinator -import kafka.utils.TestUtils -import org.apache.kafka.clients.{ClientResponse, NodeApiVersions, RequestCompletionHandler} -import org.apache.kafka.common.Node -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, SHARE_GROUP_STATE_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME} -import org.apache.kafka.common.message.{ApiVersionsResponseData, CreateTopicsRequestData} -import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic -import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic -import org.apache.kafka.common.network.{ClientInformation, ListenerName} -import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests._ -import org.apache.kafka.common.security.auth.{KafkaPrincipal, KafkaPrincipalSerde, SecurityProtocol} -import org.apache.kafka.common.utils.{SecurityUtils, Utils} -import org.apache.kafka.coordinator.group.{GroupCoordinator, GroupCoordinatorConfig} -import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.server.config.{ServerConfigs, ShareCoordinatorConfig} -import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} -import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} -import org.junit.jupiter.api.{BeforeEach, Test} -import org.mockito.ArgumentMatchers.any -import org.mockito.invocation.InvocationOnMock -import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} - -import scala.collection.{Map, Seq} - -class AutoTopicCreationManagerTest { - - private val requestTimeout = 100 - private var config: KafkaConfig = _ - private val metadataCache = Mockito.mock(classOf[MetadataCache]) - private val brokerToController = Mockito.mock(classOf[NodeToControllerChannelManager]) - private val adminManager = Mockito.mock(classOf[ZkAdminManager]) - private val controller = Mockito.mock(classOf[KafkaController]) - private val groupCoordinator = Mockito.mock(classOf[GroupCoordinator]) - private val transactionCoordinator = Mockito.mock(classOf[TransactionCoordinator]) - private val shareCoordinator = Mockito.mock(classOf[ShareCoordinator]) - private var autoTopicCreationManager: AutoTopicCreationManager = _ - - private val internalTopicPartitions = 2 - private val internalTopicReplicationFactor: Short = 2 - - @BeforeEach - def setup(): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost") - props.setProperty(ServerConfigs.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout.toString) - - props.setProperty(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, internalTopicPartitions.toString) - props.setProperty(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, internalTopicPartitions.toString) - props.setProperty(ShareCoordinatorConfig.STATE_TOPIC_REPLICATION_FACTOR_CONFIG , internalTopicPartitions.toString) - - props.setProperty(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) - props.setProperty(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) - 
props.setProperty(ShareCoordinatorConfig.STATE_TOPIC_NUM_PARTITIONS_CONFIG, internalTopicReplicationFactor.toString) - - config = KafkaConfig.fromProps(props) - val aliveBrokers = Seq(new Node(0, "host0", 0), new Node(1, "host1", 1)) - - Mockito.reset(metadataCache, controller, brokerToController, groupCoordinator, transactionCoordinator, shareCoordinator) - - Mockito.when(metadataCache.getAliveBrokerNodes(any(classOf[ListenerName]))).thenReturn(aliveBrokers) - } - - @Test - def testCreateOffsetTopic(): Unit = { - Mockito.when(groupCoordinator.groupMetadataTopicConfigs).thenReturn(new Properties) - testCreateTopic(GROUP_METADATA_TOPIC_NAME, isInternal = true, internalTopicPartitions, internalTopicReplicationFactor) - } - - @Test - def testCreateTxnTopic(): Unit = { - Mockito.when(transactionCoordinator.transactionTopicConfigs).thenReturn(new Properties) - testCreateTopic(TRANSACTION_STATE_TOPIC_NAME, isInternal = true, internalTopicPartitions, internalTopicReplicationFactor) - } - - @Test - def testCreateShareStateTopic(): Unit = { - Mockito.when(shareCoordinator.shareGroupStateTopicConfigs()).thenReturn(new Properties) - testCreateTopic(SHARE_GROUP_STATE_TOPIC_NAME, isInternal = true, internalTopicPartitions, internalTopicReplicationFactor) - } - - @Test - def testCreateNonInternalTopic(): Unit = { - testCreateTopic("topic", isInternal = false) - } - - private def testCreateTopic(topicName: String, - isInternal: Boolean, - numPartitions: Int = 1, - replicationFactor: Short = 1): Unit = { - autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, - Some(brokerToController), - Some(adminManager), - Some(controller), - groupCoordinator, - transactionCoordinator, - Some(shareCoordinator)) - - val topicsCollection = new CreateTopicsRequestData.CreatableTopicCollection - topicsCollection.add(getNewTopic(topicName, numPartitions, replicationFactor)) - val requestBody = new CreateTopicsRequest.Builder( - new CreateTopicsRequestData() - .setTopics(topicsCollection) - .setTimeoutMs(requestTimeout)) - - Mockito.when(controller.isActive).thenReturn(false) - - // Calling twice with the same topic will only trigger one forwarding. 
- createTopicAndVerifyResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicName, isInternal) - createTopicAndVerifyResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicName, isInternal) - - Mockito.verify(brokerToController).sendRequest( - ArgumentMatchers.eq(requestBody), - any(classOf[ControllerRequestCompletionHandler])) - } - - @Test - def testCreateTopicsWithForwardingDisabled(): Unit = { - autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, - None, - Some(adminManager), - Some(controller), - groupCoordinator, - transactionCoordinator, - Some(shareCoordinator)) - - val topicName = "topic" - - Mockito.when(controller.isActive).thenReturn(false) - - createTopicAndVerifyResult(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicName, isInternal = false) - - Mockito.verify(adminManager).createTopics( - ArgumentMatchers.eq(0), - ArgumentMatchers.eq(false), - ArgumentMatchers.eq(Map(topicName -> getNewTopic(topicName))), - ArgumentMatchers.eq(Map.empty), - any(classOf[ControllerMutationQuota]), - any(classOf[Map[String, ApiError] => Unit])) - } - - @Test - def testInvalidReplicationFactorForNonInternalTopics(): Unit = { - testErrorWithCreationInZk(Errors.INVALID_REPLICATION_FACTOR, "topic", isInternal = false) - } - - @Test - def testInvalidReplicationFactorForConsumerOffsetsTopic(): Unit = { - Mockito.when(groupCoordinator.groupMetadataTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.INVALID_REPLICATION_FACTOR, Topic.GROUP_METADATA_TOPIC_NAME, isInternal = true) - } - - @Test - def testInvalidReplicationFactorForTxnOffsetTopic(): Unit = { - Mockito.when(transactionCoordinator.transactionTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.INVALID_REPLICATION_FACTOR, Topic.TRANSACTION_STATE_TOPIC_NAME, isInternal = true) - } - - @Test - def testTopicExistsErrorSwapForNonInternalTopics(): Unit = { - testErrorWithCreationInZk(Errors.TOPIC_ALREADY_EXISTS, "topic", isInternal = false, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - @Test - def testTopicExistsErrorSwapForConsumerOffsetsTopic(): Unit = { - Mockito.when(groupCoordinator.groupMetadataTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.TOPIC_ALREADY_EXISTS, Topic.GROUP_METADATA_TOPIC_NAME, isInternal = true, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - @Test - def testTopicExistsErrorSwapForTxnOffsetTopic(): Unit = { - Mockito.when(transactionCoordinator.transactionTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.TOPIC_ALREADY_EXISTS, Topic.TRANSACTION_STATE_TOPIC_NAME, isInternal = true, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - @Test - def testRequestTimeoutErrorSwapForNonInternalTopics(): Unit = { - testErrorWithCreationInZk(Errors.REQUEST_TIMED_OUT, "topic", isInternal = false, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - @Test - def testRequestTimeoutErrorSwapForConsumerOffsetTopic(): Unit = { - Mockito.when(groupCoordinator.groupMetadataTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.REQUEST_TIMED_OUT, Topic.GROUP_METADATA_TOPIC_NAME, isInternal = true, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - @Test - def testRequestTimeoutErrorSwapForTxnOffsetTopic(): Unit = { - Mockito.when(transactionCoordinator.transactionTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.REQUEST_TIMED_OUT, Topic.TRANSACTION_STATE_TOPIC_NAME, isInternal = true, - expectedError = Some(Errors.LEADER_NOT_AVAILABLE)) - } - - 
@Test - def testUnknownTopicPartitionForNonIntervalTopic(): Unit = { - testErrorWithCreationInZk(Errors.UNKNOWN_TOPIC_OR_PARTITION, "topic", isInternal = false) - } - - @Test - def testUnknownTopicPartitionForConsumerOffsetTopic(): Unit = { - Mockito.when(groupCoordinator.groupMetadataTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.UNKNOWN_TOPIC_OR_PARTITION, Topic.GROUP_METADATA_TOPIC_NAME, isInternal = true) - } - - @Test - def testUnknownTopicPartitionForTxnOffsetTopic(): Unit = { - Mockito.when(transactionCoordinator.transactionTopicConfigs).thenReturn(new Properties) - testErrorWithCreationInZk(Errors.UNKNOWN_TOPIC_OR_PARTITION, Topic.TRANSACTION_STATE_TOPIC_NAME, isInternal = true) - } - - @Test - def testTopicCreationWithMetadataContextPassPrincipal(): Unit = { - val topicName = "topic" - - val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "user") - val serializeIsCalled = new AtomicBoolean(false) - val principalSerde = new KafkaPrincipalSerde { - override def serialize(principal: KafkaPrincipal): Array[Byte] = { - assertEquals(principal, userPrincipal) - serializeIsCalled.set(true) - Utils.utf8(principal.toString) - } - override def deserialize(bytes: Array[Byte]): KafkaPrincipal = SecurityUtils.parseKafkaPrincipal(Utils.utf8(bytes)) - } - - val requestContext = initializeRequestContext(topicName, userPrincipal, Optional.of(principalSerde)) - - autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) - - assertTrue(serializeIsCalled.get()) - - val argumentCaptor = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]) - Mockito.verify(brokerToController).sendRequest( - argumentCaptor.capture(), - any(classOf[ControllerRequestCompletionHandler])) - val capturedRequest = argumentCaptor.getValue.asInstanceOf[EnvelopeRequest.Builder].build(ApiKeys.ENVELOPE.latestVersion()) - assertEquals(userPrincipal, SecurityUtils.parseKafkaPrincipal(Utils.utf8(capturedRequest.requestPrincipal))) - } - - @Test - def testTopicCreationWithMetadataContextWhenPrincipalSerdeNotDefined(): Unit = { - val topicName = "topic" - - val requestContext = initializeRequestContext(topicName, KafkaPrincipal.ANONYMOUS, Optional.empty()) - - // Throw upon undefined principal serde when building the forward request - assertThrows(classOf[IllegalArgumentException], () => autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext))) - } - - @Test - def testTopicCreationWithMetadataContextNoRetryUponUnsupportedVersion(): Unit = { - val topicName = "topic" - - val principalSerde = new KafkaPrincipalSerde { - override def serialize(principal: KafkaPrincipal): Array[Byte] = { - Utils.utf8(principal.toString) - } - override def deserialize(bytes: Array[Byte]): KafkaPrincipal = SecurityUtils.parseKafkaPrincipal(Utils.utf8(bytes)) - } - - val requestContext = initializeRequestContext(topicName, KafkaPrincipal.ANONYMOUS, Optional.of(principalSerde)) - autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) - autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) - - // Should only trigger once - val argumentCaptor = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) - Mockito.verify(brokerToController).sendRequest( - any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), - argumentCaptor.capture()) - - // Complete with 
unsupported version will not trigger a retry, but cleanup the inflight topics instead - val header = new RequestHeader(ApiKeys.ENVELOPE, 0, "client", 1) - val response = new EnvelopeResponse(ByteBuffer.allocate(0), Errors.UNSUPPORTED_VERSION) - val clientResponse = new ClientResponse(header, null, null, - 0, 0, false, null, null, response) - argumentCaptor.getValue.asInstanceOf[RequestCompletionHandler].onComplete(clientResponse) - Mockito.verify(brokerToController, Mockito.times(1)).sendRequest( - any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), - argumentCaptor.capture()) - - // Could do the send again as inflight topics are cleared. - autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, Some(requestContext)) - Mockito.verify(brokerToController, Mockito.times(2)).sendRequest( - any(classOf[AbstractRequest.Builder[_ <: AbstractRequest]]), - argumentCaptor.capture()) - } - - private def initializeRequestContext(topicName: String, - kafkaPrincipal: KafkaPrincipal, - principalSerde: Optional[KafkaPrincipalSerde]): RequestContext = { - - autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, - Some(brokerToController), - Some(adminManager), - Some(controller), - groupCoordinator, - transactionCoordinator, - Some(shareCoordinator)) - - val topicsCollection = new CreateTopicsRequestData.CreatableTopicCollection - topicsCollection.add(getNewTopic(topicName)) - val createTopicApiVersion = new ApiVersionsResponseData.ApiVersion() - .setApiKey(ApiKeys.CREATE_TOPICS.id) - .setMinVersion(0) - .setMaxVersion(0) - Mockito.when(brokerToController.controllerApiVersions()) - .thenReturn(Optional.of(NodeApiVersions.create(Collections.singleton(createTopicApiVersion)))) - - Mockito.when(controller.isActive).thenReturn(false) - - val requestHeader = new RequestHeader(ApiKeys.METADATA, ApiKeys.METADATA.latestVersion, - "clientId", 0) - new RequestContext(requestHeader, "1", InetAddress.getLocalHost, Optional.empty(), - kafkaPrincipal, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), - SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY, false, principalSerde) - } - - private def testErrorWithCreationInZk(error: Errors, - topicName: String, - isInternal: Boolean, - expectedError: Option[Errors] = None): Unit = { - autoTopicCreationManager = new DefaultAutoTopicCreationManager( - config, - None, - Some(adminManager), - Some(controller), - groupCoordinator, - transactionCoordinator, - Some(shareCoordinator)) - - Mockito.when(controller.isActive).thenReturn(false) - val newTopic = if (isInternal) { - topicName match { - case Topic.GROUP_METADATA_TOPIC_NAME => getNewTopic(topicName, - numPartitions = config.groupCoordinatorConfig.offsetsTopicPartitions, replicationFactor = config.groupCoordinatorConfig.offsetsTopicReplicationFactor) - case Topic.TRANSACTION_STATE_TOPIC_NAME => getNewTopic(topicName, - numPartitions = config.transactionLogConfig.transactionTopicPartitions, replicationFactor = config.transactionLogConfig.transactionTopicReplicationFactor) - } - } else { - getNewTopic(topicName) - } - - val topicErrors = if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) null else - Map(topicName -> new ApiError(error)) - Mockito.when(adminManager.createTopics( - ArgumentMatchers.eq(0), - ArgumentMatchers.eq(false), - ArgumentMatchers.eq(Map(topicName -> newTopic)), - ArgumentMatchers.eq(Map.empty), - any(classOf[ControllerMutationQuota]), - any(classOf[Map[String, ApiError] => Unit]))).thenAnswer((invocation: InvocationOnMock) => { - 
invocation.getArgument(5).asInstanceOf[Map[String, ApiError] => Unit] - .apply(topicErrors) - }) - - createTopicAndVerifyResult(expectedError.getOrElse(error), topicName, isInternal = isInternal) - } - - private def createTopicAndVerifyResult(error: Errors, - topicName: String, - isInternal: Boolean, - metadataContext: Option[RequestContext] = None): Unit = { - val topicResponses = autoTopicCreationManager.createTopics( - Set(topicName), UnboundedControllerMutationQuota, metadataContext) - - val expectedResponses = Seq(new MetadataResponseTopic() - .setErrorCode(error.code()) - .setIsInternal(isInternal) - .setName(topicName)) - - assertEquals(expectedResponses, topicResponses) - } - - private def getNewTopic(topicName: String, numPartitions: Int = 1, replicationFactor: Short = 1): CreatableTopic = { - new CreatableTopic() - .setName(topicName) - .setNumPartitions(numPartitions) - .setReplicationFactor(replicationFactor) - } -} diff --git a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala index 4c1494380ea60..3a0ffe1b4779f 100644 --- a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala @@ -56,25 +56,9 @@ abstract class BaseRequestTest extends IntegrationTestHarness { }.map(_.socketServer).getOrElse(throw new IllegalStateException("No live broker is available")) } - def controllerSocketServer: SocketServer = { - if (isKRaftTest()) { - controllerServer.socketServer - } else { - servers.find { server => - server.kafkaController.isActive - }.map(_.socketServer).getOrElse(throw new IllegalStateException("No controller broker is available")) - } - } + def controllerSocketServer: SocketServer = controllerServer.socketServer - def notControllerSocketServer: SocketServer = { - if (isKRaftTest()) { - anySocketServer - } else { - servers.find { server => - !server.kafkaController.isActive - }.map(_.socketServer).getOrElse(throw new IllegalStateException("No non-controller broker is available")) - } - } + def notControllerSocketServer: SocketServer = anySocketServer def brokerSocketServer(brokerId: Int): SocketServer = { brokers.find { broker => @@ -85,16 +69,9 @@ abstract class BaseRequestTest extends IntegrationTestHarness { /** * Return the socket server where admin request to be sent. * - * For KRaft clusters that is any broker as the broker will forward the request to the active - * controller. For Legacy clusters that is the controller broker. + * KRaft clusters that is any broker as the broker will forward the request to the active controller. 
*/ - def adminSocketServer: SocketServer = { - if (isKRaftTest()) { - anySocketServer - } else { - controllerSocketServer - } - } + def adminSocketServer: SocketServer = anySocketServer def connect(socketServer: SocketServer = anySocketServer, listenerName: ListenerName = listenerName): Socket = { diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index 0f5d3f027e5e4..71bfbefa307f4 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -24,16 +24,11 @@ import org.apache.kafka.common.Uuid import org.apache.kafka.common.message.{BrokerHeartbeatResponseData, BrokerRegistrationResponseData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, BrokerHeartbeatRequest, BrokerHeartbeatResponse, BrokerRegistrationRequest, BrokerRegistrationResponse} -import org.apache.kafka.metadata.{BrokerState, VersionRange} +import org.apache.kafka.metadata.BrokerState import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.common.{Features, KRaftVersion, MetadataVersion} -import org.apache.kafka.server.common.MetadataVersion.{IBP_3_8_IV0, IBP_3_9_IV0} -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs, ZkConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test, Timeout} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource import java.util.concurrent.{CompletableFuture, Future} import scala.jdk.CollectionConverters._ @@ -61,15 +56,6 @@ class BrokerLifecycleManagerTest { properties } - def migrationConfigProperties(ibp: MetadataVersion) = { - val migrationConfigProperties = configProperties - migrationConfigProperties.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - migrationConfigProperties.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - migrationConfigProperties.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "") - migrationConfigProperties.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, ibp.toString) - migrationConfigProperties - } - @Test def testCreateAndClose(): Unit = { val context = new RegistrationTestContext(configProperties) @@ -113,42 +99,6 @@ class BrokerLifecycleManagerTest { } } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testSuccessfulRegistrationDuringMigration(nonInitialKraftVersion: Boolean): Unit = { - val ibp = if (nonInitialKraftVersion) IBP_3_9_IV0 else IBP_3_8_IV0 - val context = new RegistrationTestContext(migrationConfigProperties(ibp)) - manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) - val controllerNode = new Node(3000, "localhost", 8021) - context.controllerNodeProvider.node.set(controllerNode) - val features = BrokerFeatures.createDefaultFeatureMap(BrokerFeatures.createDefault(true)).asScala - - // Even though ZK brokers don't use "metadata.version" feature, we need to overwrite it with our IBP as part of registration - // so the KRaft controller can verify that all brokers are on the same IBP before starting the migration. 
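[Editorial note] The removed migration test's comment describes why registration pinned metadata.version: each ZK broker advertised its IBP as both the minimum and maximum supported level so the KRaft controller could confirm a uniform IBP before starting the migration. A small, self-contained sketch of that "pin one feature to a single level" remapping, using plain (min, max) tuples in place of Kafka's VersionRange; all names and values below are illustrative:

object PinMetadataVersionDemo extends App {
  // (min, max) tuples stand in for VersionRange; the map contents are made up.
  val MetadataVersionFeature = "metadata.version"

  def pinToIbp(features: Map[String, (Short, Short)], ibpLevel: Short): Map[String, (Short, Short)] =
    // Overwrite (or add) metadata.version so min == max == the broker's IBP level.
    features + (MetadataVersionFeature -> (ibpLevel, ibpLevel))

  val defaultFeatures = Map("kraft.version" -> (0.toShort, 1.toShort))
  val remapped = pinToIbp(defaultFeatures, 20.toShort)
  val expected = (20.toShort, 20.toShort)
  assert(remapped(MetadataVersionFeature) == expected)
  println(remapped)
}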
- val featuresRemapped = features + (MetadataVersion.FEATURE_NAME -> VersionRange.of(ibp.featureLevel(), ibp.featureLevel())) - - manager.start(() => context.highestMetadataOffset.get(), - context.mockChannelManager, context.clusterId, context.advertisedListeners, - featuresRemapped.asJava, OptionalLong.of(10L)) - TestUtils.retry(60000) { - assertEquals(1, context.mockChannelManager.unsentQueue.size) - val sentBrokerRegistrationData = context.mockChannelManager.unsentQueue.getFirst.request.build().asInstanceOf[BrokerRegistrationRequest].data() - assertEquals(10L, sentBrokerRegistrationData.previousBrokerEpoch()) - assertEquals(ibp.featureLevel(), sentBrokerRegistrationData.features().find(MetadataVersion.FEATURE_NAME).maxSupportedVersion()) - if (nonInitialKraftVersion) { - val sentKraftVersion = sentBrokerRegistrationData.features().find(KRaftVersion.FEATURE_NAME) - assertEquals(Features.KRAFT_VERSION.minimumProduction(), sentKraftVersion.minSupportedVersion()) - assertEquals(Features.KRAFT_VERSION.latestTesting(), sentKraftVersion.maxSupportedVersion()) - } - } - context.mockClient.prepareResponseFrom(new BrokerRegistrationResponse( - new BrokerRegistrationResponseData().setBrokerEpoch(1000)), controllerNode) - TestUtils.retry(10000) { - context.poll() - assertEquals(1000L, manager.brokerEpoch) - } - } - @Test def testRegistrationTimeout(): Unit = { val context = new RegistrationTestContext(configProperties) diff --git a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala index e697eede58132..7d40f34fd9dd9 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala @@ -17,7 +17,7 @@ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterInstance, ClusterTest, ClusterTestExtensions, Type} +import org.apache.kafka.common.test.api.{ClusterInstance, ClusterTest, ClusterTestExtensions, Type} import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.{BrokerRegistrationRequestData, CreateTopicsRequestData} @@ -28,7 +28,7 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{Node, Uuid} -import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, Features, MetadataVersion, NodeToControllerChannelManager} +import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, Feature, MetadataVersion, NodeToControllerChannelManager} import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.extension.ExtendWith @@ -57,10 +57,8 @@ class BrokerRegistrationRequestTest { val saslMechanism: String = "" - def isZkController: Boolean = !clusterInstance.isKRaftTest - override def getControllerInfo(): ControllerInformation = - ControllerInformation(node, listenerName, securityProtocol, saslMechanism, isZkController) + ControllerInformation(node, listenerName, securityProtocol, saslMechanism) }, Time.SYSTEM, new Metrics(), @@ -102,7 +100,7 @@ class BrokerRegistrationRequestTest { .setMaxSupportedVersion(max.featureLevel()) ) } - Features.PRODUCTION_FEATURES.stream().filter(_.featureName != MetadataVersion.FEATURE_NAME).forEach { + Feature.PRODUCTION_FEATURES.stream().filter(_.featureName != 
MetadataVersion.FEATURE_NAME).forEach { feature => features.add(new BrokerRegistrationRequestData.Feature() .setName(feature.featureName) @@ -143,36 +141,7 @@ class BrokerRegistrationRequestTest { Errors.forCode(resp.topics().find(topicName).errorCode()) } - @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_4_IV0, - serverProperties = Array(new ClusterConfigProperty(key = "zookeeper.metadata.migration.enable", value = "false"))) - def testRegisterZkWithKRaftMigrationDisabled(clusterInstance: ClusterInstance): Unit = { - val clusterId = clusterInstance.clusterId() - val channelManager = brokerToControllerChannelManager(clusterInstance) - try { - channelManager.start() - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_3_IV0, MetadataVersion.IBP_3_3_IV0)))) - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), None)) - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_4_IV0, MetadataVersion.IBP_3_4_IV0)))) - - assertEquals( - Errors.NONE, - registerBroker(channelManager, clusterId, 100, None, Some((MetadataVersion.IBP_3_4_IV0, MetadataVersion.IBP_3_4_IV0)))) - } finally { - channelManager.shutdown() - } - } - - @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3, - serverProperties = Array(new ClusterConfigProperty(key = "zookeeper.metadata.migration.enable", value = "false"))) + @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3) def testRegisterZkWith33Controller(clusterInstance: ClusterInstance): Unit = { // Verify that a controller running an old metadata.version cannot register a ZK broker val clusterId = clusterInstance.clusterId() diff --git a/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala b/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala index 608f4d1426d61..a56914c724859 100644 --- a/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala @@ -171,23 +171,28 @@ class ClientQuotasRequestTest(cluster: ClusterInstance) { def testClientQuotasForScramUsers(): Unit = { val userName = "user" - val results = cluster.createAdminClient().alterUserScramCredentials(util.Arrays.asList( - new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "password"))) - results.all.get - - val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> userName).asJava) - - verifyDescribeEntityQuotas(entity, Map.empty) - - alterEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) - ), validateOnly = false) - - verifyDescribeEntityQuotas(entity, Map( - QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, - QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 - )) + val admin = cluster.admin() + try { + val results = admin.alterUserScramCredentials(util.Arrays.asList( + new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "password"))) + results.all.get + + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> userName).asJava) + + 
verifyDescribeEntityQuotas(entity, Map.empty) + + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + } finally { + admin.close() + } } @ClusterTest diff --git a/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala b/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala index 34753acabbdc3..a4494c5f1e776 100644 --- a/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala @@ -47,7 +47,7 @@ class ConfigAdminManagerTest { val logger = LoggerFactory.getLogger(classOf[ConfigAdminManagerTest]) def newConfigAdminManager(brokerId: Integer): ConfigAdminManager = { - val config = TestUtils.createBrokerConfig(nodeId = brokerId, zkConnect = null) + val config = TestUtils.createBrokerConfig(nodeId = brokerId) new ConfigAdminManager(brokerId, new KafkaConfig(config), new MockConfigRepository()) } diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala index d94d319438728..8753ceb78dc81 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala @@ -20,7 +20,9 @@ import org.apache.kafka.common.test.api.ClusterInstance import org.apache.kafka.common.test.api._ import org.apache.kafka.common.test.api.ClusterTestExtensions import kafka.utils.TestUtils -import org.apache.kafka.common.{ConsumerGroupState, Uuid} +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol +import org.apache.kafka.common.{ConsumerGroupState, TopicPartition, Uuid} import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.{Assignment, DescribedGroup, TopicPartitions} import org.apache.kafka.common.message.{ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} @@ -29,11 +31,12 @@ import org.apache.kafka.common.resource.ResourceType import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.common.Features -import org.junit.jupiter.api.Assertions.assertEquals +import org.apache.kafka.server.common.Feature +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse} import org.junit.jupiter.api.extension.ExtendWith import java.lang.{Byte => JByte} +import java.util.Collections import scala.jdk.CollectionConverters._ @ExtendWith(value = Array(classOf[ClusterTestExtensions])) @@ -47,7 +50,7 @@ class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") ), features = Array( - new ClusterFeature(feature = Features.GROUP_VERSION, version = 0) + new ClusterFeature(feature = Feature.GROUP_VERSION, version = 0) ) ) def testConsumerGroupDescribeWhenFeatureFlagNotEnabled(): Unit = { @@ -83,130 +86,241 @@ class 
ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo // in this test because it does not use FindCoordinator API. createOffsetsTopic() - val admin = cluster.createAdminClient() - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - val timeoutMs = 5 * 60 * 1000 - val clientId = "client-id" - val clientHost = "/127.0.0.1" - val authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) - - // Add first group with one member. - var grp1Member1Response: ConsumerGroupHeartbeatResponseData = null - TestUtils.waitUntilTrue(() => { - grp1Member1Response = consumerGroupHeartbeat( - groupId = "grp-1", - memberId = Uuid.randomUuid().toString, - rebalanceTimeoutMs = timeoutMs, - subscribedTopicNames = List("bar"), - topicPartitions = List.empty + val admin = cluster.admin() + try { + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 ) - grp1Member1Response.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $grp1Member1Response.") - - // Add second group with two members. For the first member, we - // wait until it receives an assignment. We use 'range` in this - // case to validate the assignor selection logic. - var grp2Member1Response: ConsumerGroupHeartbeatResponseData = null - TestUtils.waitUntilTrue(() => { - grp2Member1Response = consumerGroupHeartbeat( - memberId = "member-1", + + val timeoutMs = 5 * 60 * 1000 + val clientId = "client-id" + val clientHost = "/127.0.0.1" + val authorizedOperationsInt = Utils.to32BitField( + AclEntry.supportedOperations(ResourceType.GROUP).asScala + .map(_.code.asInstanceOf[JByte]).asJava) + + // Add first group with one member. + var grp1Member1Response: ConsumerGroupHeartbeatResponseData = null + TestUtils.waitUntilTrue(() => { + grp1Member1Response = consumerGroupHeartbeat( + groupId = "grp-1", + memberId = Uuid.randomUuid().toString, + rebalanceTimeoutMs = timeoutMs, + subscribedTopicNames = List("bar"), + topicPartitions = List.empty + ) + grp1Member1Response.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $grp1Member1Response.") + + // Add second group with two members. For the first member, we + // wait until it receives an assignment. We use 'range` in this + // case to validate the assignor selection logic. + var grp2Member1Response: ConsumerGroupHeartbeatResponseData = null + TestUtils.waitUntilTrue(() => { + grp2Member1Response = consumerGroupHeartbeat( + memberId = "member-1", + groupId = "grp-2", + serverAssignor = "range", + rebalanceTimeoutMs = timeoutMs, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty + ) + grp2Member1Response.assignment != null && !grp2Member1Response.assignment.topicPartitions.isEmpty + }, msg = s"Could not join the group successfully. Last response $grp2Member1Response.") + + val grp2Member2Response = consumerGroupHeartbeat( + memberId = "member-2", groupId = "grp-2", serverAssignor = "range", rebalanceTimeoutMs = timeoutMs, subscribedTopicNames = List("foo"), topicPartitions = List.empty ) - grp2Member1Response.assignment != null && !grp2Member1Response.assignment.topicPartitions.isEmpty - }, msg = s"Could not join the group successfully. 
Last response $grp2Member1Response.") - val grp2Member2Response = consumerGroupHeartbeat( - memberId = "member-2", - groupId = "grp-2", - serverAssignor = "range", - rebalanceTimeoutMs = timeoutMs, - subscribedTopicNames = List("foo"), - topicPartitions = List.empty + for (version <- ApiKeys.CONSUMER_GROUP_DESCRIBE.oldestVersion() to ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { + val expected = List( + new DescribedGroup() + .setGroupId("grp-1") + .setGroupState(ConsumerGroupState.STABLE.toString) + .setGroupEpoch(1) + .setAssignmentEpoch(1) + .setAssignorName("uniform") + .setAuthorizedOperations(authorizedOperationsInt) + .setMembers(List( + new ConsumerGroupDescribeResponseData.Member() + .setMemberId(grp1Member1Response.memberId) + .setMemberEpoch(grp1Member1Response.memberEpoch) + .setClientId(clientId) + .setClientHost(clientHost) + .setSubscribedTopicRegex("") + .setSubscribedTopicNames(List("bar").asJava) + .setMemberType(if (version == 0) -1.toByte else 1.toByte) + ).asJava), + new DescribedGroup() + .setGroupId("grp-2") + .setGroupState(ConsumerGroupState.RECONCILING.toString) + .setGroupEpoch(grp2Member2Response.memberEpoch) + .setAssignmentEpoch(grp2Member2Response.memberEpoch) + .setAssignorName("range") + .setAuthorizedOperations(authorizedOperationsInt) + .setMembers(List( + new ConsumerGroupDescribeResponseData.Member() + .setMemberId(grp2Member2Response.memberId) + .setMemberEpoch(grp2Member2Response.memberEpoch) + .setClientId(clientId) + .setClientHost(clientHost) + .setSubscribedTopicRegex("") + .setSubscribedTopicNames(List("foo").asJava) + .setAssignment(new Assignment()) + .setTargetAssignment(new Assignment() + .setTopicPartitions(List( + new TopicPartitions() + .setTopicId(topicId) + .setTopicName("foo") + .setPartitions(List[Integer](2).asJava) + ).asJava)) + .setMemberType(if (version == 0) -1.toByte else 1.toByte), + new ConsumerGroupDescribeResponseData.Member() + .setMemberId(grp2Member1Response.memberId) + .setMemberEpoch(grp2Member1Response.memberEpoch) + .setClientId(clientId) + .setClientHost(clientHost) + .setSubscribedTopicRegex("") + .setSubscribedTopicNames(List("foo").asJava) + .setAssignment(new Assignment() + .setTopicPartitions(List( + new TopicPartitions() + .setTopicId(topicId) + .setTopicName("foo") + .setPartitions(List[Integer](0, 1, 2).asJava) + ).asJava)) + .setTargetAssignment(new Assignment() + .setTopicPartitions(List( + new TopicPartitions() + .setTopicId(topicId) + .setTopicName("foo") + .setPartitions(List[Integer](0, 1).asJava) + ).asJava)) + .setMemberType(if (version == 0) -1.toByte else 1.toByte), + ).asJava), + ) + + val actual = consumerGroupDescribe( + groupIds = List("grp-1", "grp-2"), + includeAuthorizedOperations = true, + version = version.toShort, + ) + + assertEquals(expected, actual) + } + } finally { + admin.close() + } + } + + @ClusterTest( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") ) + ) + def testConsumerGroupDescribeWithMigrationMember(): Unit = { + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + + // Create the topic. 
+ val topicName = "foo" + createTopic( + topic = topicName, + numPartitions = 3 + ) + + val groupId = "grp" + + // Classic member 1 joins the classic group. + val memberId1 = joinDynamicConsumerGroupWithOldProtocol( + groupId = groupId, + metadata = ConsumerProtocol.serializeSubscription( + new ConsumerPartitionAssignor.Subscription( + Collections.singletonList(topicName), + null, + List().asJava + ) + ).array, + assignment = ConsumerProtocol.serializeAssignment( + new ConsumerPartitionAssignor.Assignment( + List(0, 1, 2).map(p => new TopicPartition(topicName, p)).asJava + ) + ).array + )._1 + + // The joining request with a consumer group member 2 is accepted. + val memberId2 = consumerGroupHeartbeat( + groupId = groupId, + memberId = "member-2", + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List(topicName), + topicPartitions = List.empty, + expectedError = Errors.NONE + ).memberId for (version <- ApiKeys.CONSUMER_GROUP_DESCRIBE.oldestVersion() to ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { - val expected = List( - new DescribedGroup() - .setGroupId("grp-1") - .setGroupState(ConsumerGroupState.STABLE.toString) - .setGroupEpoch(1) - .setAssignmentEpoch(1) - .setAssignorName("uniform") - .setAuthorizedOperations(authorizedOperationsInt) - .setMembers(List( - new ConsumerGroupDescribeResponseData.Member() - .setMemberId(grp1Member1Response.memberId) - .setMemberEpoch(grp1Member1Response.memberEpoch) - .setClientId(clientId) - .setClientHost(clientHost) - .setSubscribedTopicRegex("") - .setSubscribedTopicNames(List("bar").asJava) - ).asJava), - new DescribedGroup() - .setGroupId("grp-2") - .setGroupState(ConsumerGroupState.RECONCILING.toString) - .setGroupEpoch(grp2Member2Response.memberEpoch) - .setAssignmentEpoch(grp2Member2Response.memberEpoch) - .setAssignorName("range") - .setAuthorizedOperations(authorizedOperationsInt) - .setMembers(List( - new ConsumerGroupDescribeResponseData.Member() - .setMemberId(grp2Member2Response.memberId) - .setMemberEpoch(grp2Member2Response.memberEpoch) - .setClientId(clientId) - .setClientHost(clientHost) - .setSubscribedTopicRegex("") - .setSubscribedTopicNames(List("foo").asJava) - .setAssignment(new Assignment()) - .setTargetAssignment(new Assignment() - .setTopicPartitions(List( - new TopicPartitions() - .setTopicId(topicId) - .setTopicName("foo") - .setPartitions(List[Integer](2).asJava) - ).asJava)), - new ConsumerGroupDescribeResponseData.Member() - .setMemberId(grp2Member1Response.memberId) - .setMemberEpoch(grp2Member1Response.memberEpoch) - .setClientId(clientId) - .setClientHost(clientHost) - .setSubscribedTopicRegex("") - .setSubscribedTopicNames(List("foo").asJava) - .setAssignment(new Assignment() - .setTopicPartitions(List( - new TopicPartitions() - .setTopicId(topicId) - .setTopicName("foo") - .setPartitions(List[Integer](0, 1, 2).asJava) - ).asJava)) - .setTargetAssignment(new Assignment() - .setTopicPartitions(List( - new TopicPartitions() - .setTopicId(topicId) - .setTopicName("foo") - .setPartitions(List[Integer](0, 1).asJava) - ).asJava)), - ).asJava), + val actual = consumerGroupDescribe( + groupIds = List(groupId), + includeAuthorizedOperations = true, + version = version.toShort, ) + assertEquals(1, actual.size) + val group = actual.head + val member1 = group.members.asScala.find(_.memberId == memberId1) + assertFalse(member1.isEmpty) + // Version 0 doesn't have memberType field, so memberType field on member 1 is -1 (unknown). 
+ // After version 1, there is memberType field and it should be +1 (classic) for member 1. + assertEquals(if (version == 0) -1.toByte else 0.toByte, member1.get.memberType) + + val member2 = group.members.asScala.find(_.memberId == memberId2) + assertFalse(member2.isEmpty) + assertEquals(if (version == 0) -1.toByte else 1.toByte, member2.get.memberType) + } + // Classic member 1 leaves group. + leaveGroup( + groupId = groupId, + memberId = memberId1, + useNewProtocol = false, + version = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) + ) + + // Member 1 joins as consumer group member. + consumerGroupHeartbeat( + groupId = groupId, + memberId = memberId1, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List(topicName), + topicPartitions = List.empty, + expectedError = Errors.NONE + ) + + // There is no classic member in the group. + for (version <- ApiKeys.CONSUMER_GROUP_DESCRIBE.oldestVersion() to ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { val actual = consumerGroupDescribe( - groupIds = List("grp-1", "grp-2"), + groupIds = List(groupId), includeAuthorizedOperations = true, version = version.toShort, ) + assertEquals(1, actual.size) + val group = actual.head + val member1 = group.members.asScala.find(_.memberId == memberId1) + assertFalse(member1.isEmpty) + assertEquals(if (version == 0) -1.toByte else 1.toByte, member1.get.memberType) - assertEquals(expected, actual) + val member2 = group.members.asScala.find(_.memberId == memberId2) + assertFalse(member2.isEmpty) + assertEquals(if (version == 0) -1.toByte else 1.toByte, member2.get.memberType) } } } diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala index 63004b99d2ae7..23b5589225dd7 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala @@ -17,7 +17,6 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterInstance, ClusterTest, ClusterTestDefaults, ClusterTestExtensions, Type} -import org.apache.kafka.common.test.api.RaftClusterInvocationContext.RaftClusterInstance import kafka.utils.TestUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} @@ -27,7 +26,7 @@ import org.apache.kafka.common.message.{ConsumerGroupHeartbeatRequestData, Consu import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse} import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} -import org.apache.kafka.server.common.Features +import org.apache.kafka.server.common.Feature import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotEquals, assertNotNull} import org.junit.jupiter.api.extension.ExtendWith @@ -51,8 +50,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { ) def testConsumerGroupHeartbeatIsInaccessibleWhenDisabledByStaticConfig(): Unit = { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData(), - true + new ConsumerGroupHeartbeatRequestData() ).build() val consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) @@ -62,13 +60,12 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) 
{ @ClusterTest( features = Array( - new ClusterFeature(feature = Features.GROUP_VERSION, version = 0) + new ClusterFeature(feature = Feature.GROUP_VERSION, version = 0) ) ) def testConsumerGroupHeartbeatIsInaccessibleWhenFeatureFlagNotEnabled(): Unit = { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData(), - true + new ConsumerGroupHeartbeatRequestData() ).build() val consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) @@ -78,284 +75,375 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { @ClusterTest def testConsumerGroupHeartbeatIsAccessibleWhenNewGroupCoordinatorIsEnabled(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() - + val admin = cluster.admin() + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // Prepare the next heartbeat. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch) + ).build() + + // This is the expected assignment. 
+ val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + + // Heartbeats until the partitions are assigned. + consumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + consumerGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + + // Leave the group. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(-1) + ).build() - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - // Prepare the next heartbeat. - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch), - true - ).build() - // This is the expected assignment. - val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - - // Heartbeats until the partitions are assigned. - consumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - consumerGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) - - // Leave the group. 
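[Editorial note] The member epoch doubles as the protocol's control field in these tests: 0 joins, a positive epoch echoes the epoch from the last successful response, -1 leaves for good (as in the leave heartbeat here), and -2 — used by the static-member tests below — leaves only temporarily so another member with the same instance id can take over the assignment. A small, self-contained sketch of choosing the epoch to send; the enum and helper are illustrative, not part of the Kafka client API:

object MemberEpochDemo extends App {
  sealed trait HeartbeatIntent
  case object Join extends HeartbeatIntent                        // first heartbeat for a new member
  final case class Ack(currentEpoch: Int) extends HeartbeatIntent // steady-state heartbeat
  case object Leave extends HeartbeatIntent                       // leave the group permanently
  case object LeaveTemporarily extends HeartbeatIntent            // static member shutting down

  def memberEpochFor(intent: HeartbeatIntent): Int = intent match {
    case Join              => 0
    case Ack(currentEpoch) => currentEpoch // echo the epoch from the last successful response
    case Leave             => -1
    case LeaveTemporarily  => -2
  }

  assert(memberEpochFor(Join) == 0)
  assert(memberEpochFor(Ack(2)) == 2)
  assert(memberEpochFor(Leave) == -1)
  assert(memberEpochFor(LeaveTemporarily) == -2)
}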
- consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(-1), - true - ).build() - - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - - // Verify the response. - assertEquals(-1, consumerGroupHeartbeatResponse.data.memberEpoch) + // Verify the response. + assertEquals(-1, consumerGroupHeartbeatResponse.data.memberEpoch) + } finally { + admin.close() + } } @ClusterTest def testConsumerGroupHeartbeatWithRegularExpression(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid().toString) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicRegex("foo") - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid().toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicRegex("foo*") + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. 
+ assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // Prepare the next heartbeat. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch) + ).build() + + // This is the expected assignment. + val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + + // Heartbeats until the partitions are assigned. + consumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + consumerGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + } finally { + admin.close() + } } @ClusterTest def testConsumerGroupHeartbeatWithInvalidRegularExpression(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid().toString) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicRegex("[") - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REGULAR_EXPRESSION.code - }, msg = s"Did not receive the expected error. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(Errors.INVALID_REGULAR_EXPRESSION.code, consumerGroupHeartbeatResponse.data.errorCode) + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. 
Note that the member subscribes + // to an nonexistent topic. + val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid().toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicRegex("[") + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REGULAR_EXPRESSION.code + }, msg = s"Did not receive the expected error. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(Errors.INVALID_REGULAR_EXPRESSION.code, consumerGroupHeartbeatResponse.data.errorCode) + } finally { + admin.close() + } } @ClusterTest - def testRejoiningStaticMemberGetsAssignmentsBackWhenNewGroupCoordinatorIsEnabled(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() - val instanceId = "instanceId" + def testConsumerGroupHeartbeatWithEmptySubscription(): Unit = { + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid().toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicRegex("") + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Did not receive the expected successful response. Last response $consumerGroupHeartbeatResponse.") + + // Heartbeat request to join the group. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid().toString) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List.empty.asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. 
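[Editorial note] Because the group coordinator loads its state in the background after the cluster starts, these tests never assert on the first response; the polling that follows retries with TestUtils.waitUntilTrue until the expected outcome appears or a deadline passes. A self-contained sketch of that retry-until-deadline pattern — the helper below is illustrative and its timeout/backoff values are assumptions, not the real TestUtils defaults:

object WaitUntilTrueDemo extends App {
  // Poll `condition` until it returns true or `timeoutMs` elapses; fail with `msg` otherwise.
  def waitUntilTrue(condition: () => Boolean, msg: => String,
                    timeoutMs: Long = 15000L, pauseMs: Long = 100L): Unit = {
    val deadline = System.currentTimeMillis() + timeoutMs
    while (!condition()) {
      if (System.currentTimeMillis() > deadline)
        throw new AssertionError(msg)
      Thread.sleep(pauseMs)
    }
  }

  // Example: a "coordinator" that only becomes ready after a few polls.
  var polls = 0
  waitUntilTrue(() => { polls += 1; polls >= 3 }, msg = s"Never became ready after $polls polls")
  println(s"Ready after $polls polls")
}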
+ consumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Did not receive the expected successful response. Last response $consumerGroupHeartbeatResponse.") + } finally { + admin.close() + } + } - // Heartbeat request so that a static member joins the group - var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setInstanceId(instanceId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() + @ClusterTest + def testRejoiningStaticMemberGetsAssignmentsBackWhenNewGroupCoordinatorIsEnabled(): Unit = { + val admin = cluster.admin() + try { + val instanceId = "instanceId" + + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request so that a static member joins the group + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setInstanceId(instanceId) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Static member could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // Prepare the next heartbeat. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setInstanceId(instanceId) + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch) + ).build() + + // This is the expected assignment. + val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + + // Heartbeats until the partitions are assigned. 
+ consumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + consumerGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Static member could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + + val oldMemberId = consumerGroupHeartbeatResponse.data.memberId + + // Leave the group temporarily + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setInstanceId(instanceId) + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(-2) + ).build() - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Static member could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - // Prepare the next heartbeat. - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setInstanceId(instanceId) - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch), - true - ).build() - // This is the expected assignment. - val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + // Verify the response. + assertEquals(-2, consumerGroupHeartbeatResponse.data.memberEpoch) + + // Another static member replaces the above member. It gets the same assignments back + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setInstanceId(instanceId) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() - // Heartbeats until the partitions are assigned. - consumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - consumerGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Static member could not get partitions assigned. 
Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) - - val oldMemberId = consumerGroupHeartbeatResponse.data.memberId - // Leave the group temporarily - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setInstanceId(instanceId) - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(-2), - true - ).build() - - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - - // Verify the response. - assertEquals(-2, consumerGroupHeartbeatResponse.data.memberEpoch) - - // Another static member replaces the above member. It gets the same assignments back - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setInstanceId(instanceId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) - // The 2 member IDs should be different - assertNotEquals(oldMemberId, consumerGroupHeartbeatResponse.data.memberId) + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + // The 2 member IDs should be different + assertNotEquals(oldMemberId, consumerGroupHeartbeatResponse.data.memberId) + } finally { + admin.close() + } } @ClusterTest( @@ -365,109 +453,109 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { ) ) def testStaticMemberRemovedAfterSessionTimeoutExpiryWhenNewGroupCoordinatorIsEnabled(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() - val instanceId = "instanceId" - - // Creates the __consumer_offsets topics because it won't be created automatically - // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setInstanceId(instanceId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. 
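Editor's note: the static-member flow above leans on the ConsumerGroupHeartbeat memberEpoch sentinels from KIP-848: 0 joins, -1 leaves for good, and -2 (the "leave the group temporarily" step) lets a static member drop out while its assignment stays reserved for the next member that presents the same instanceId. A hedged sketch that makes the distinction explicit; the helper name is illustrative only.

def buildStaticLeaveRequest(
  groupId: String,
  memberId: String,
  instanceId: String,
  temporary: Boolean
): ConsumerGroupHeartbeatRequest = {
  new ConsumerGroupHeartbeatRequest.Builder(
    new ConsumerGroupHeartbeatRequestData()
      .setGroupId(groupId)
      .setInstanceId(instanceId)
      .setMemberId(memberId)
      // -2 parks the assignment for the instanceId; -1 releases it entirely.
      .setMemberEpoch(if (temporary) -2 else -1)
  ).build()
}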
- var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - // Prepare the next heartbeat. - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setInstanceId(instanceId) - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch), - true - ).build() - - // This is the expected assignment. - val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - - // Heartbeats until the partitions are assigned. - consumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - consumerGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) - - // A new static member tries to join the group with an inuse instanceid. - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setInstanceId(instanceId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Validating that trying to join with an in-use instanceId would throw an UnreleasedInstanceIdException. - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - assertEquals(Errors.UNRELEASED_INSTANCE_ID.code, consumerGroupHeartbeatResponse.data.errorCode) - - // The new static member join group will keep failing with an UnreleasedInstanceIdException - // until eventually it gets through because the existing member will be kicked out - // because of not sending a heartbeat till session timeout expiry. - TestUtils.waitUntilTrue(() => { + val admin = cluster.admin() + try { + val instanceId = "instanceId" + + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. 
Note that the member subscribes + // to an nonexistent topic. + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setInstanceId(instanceId) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ConsumerGroupHeartbeatResponseData.Assignment(), consumerGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // Prepare the next heartbeat. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setInstanceId(instanceId) + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch) + ).build() + + // This is the expected assignment. + val expectedAssignment = new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + + // Heartbeats until the partitions are assigned. + consumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + consumerGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(2, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + + // A new static member tries to join the group with an inuse instanceid. + consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setInstanceId(instanceId) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Validating that trying to join with an in-use instanceId would throw an UnreleasedInstanceIdException. consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - consumerGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not re-join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. 
The group epoch bumps upto 4 which eventually reflects in the new member epoch. - assertEquals(4, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + assertEquals(Errors.UNRELEASED_INSTANCE_ID.code, consumerGroupHeartbeatResponse.data.errorCode) + + // The new static member join group will keep failing with an UnreleasedInstanceIdException + // until eventually it gets through because the existing member will be kicked out + // because of not sending a heartbeat till session timeout expiry. + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + consumerGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not re-join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. The group epoch bumps upto 4 which eventually reflects in the new member epoch. + assertEquals(4, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, consumerGroupHeartbeatResponse.data.assignment) + } finally { + admin.close() + } } @ClusterTest( @@ -476,114 +564,116 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { ) ) def testUpdateConsumerGroupHeartbeatConfigSuccessful(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() - val newHeartbeatIntervalMs = 10000 - val instanceId = "instanceId" - val consumerGroupId = "grp" - - // Creates the __consumer_offsets topics because it won't be created automatically - // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(consumerGroupId) - .setMemberId(Uuid.randomUuid.toString) - .setInstanceId(instanceId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. 
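Editor's note: because old and new code are interleaved above, the intent of the instanceId takeover assertion is easy to lose. Condensed, it is two-phase, using the request, response variable, helpers and expectedAssignment already defined in the surrounding test: the join is fenced while the original member's session is still open, and only succeeds once that session expires.

// Phase 1: the immediate attempt is rejected while the old member still holds the instanceId.
consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest)
assertEquals(Errors.UNRELEASED_INSTANCE_ID.code, consumerGroupHeartbeatResponse.data.errorCode)

// Phase 2: keep retrying; after the session timeout the assignment is handed over.
TestUtils.waitUntilTrue(() => {
  consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest)
  consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code &&
    consumerGroupHeartbeatResponse.data.assignment == expectedAssignment
}, msg = s"Takeover did not complete. Last response $consumerGroupHeartbeatResponse.")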
- assertNotNull(consumerGroupHeartbeatResponse.data.memberId) - assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) - assertEquals(5000, consumerGroupHeartbeatResponse.data.heartbeatIntervalMs) - - // Alter consumer heartbeat interval config - val resource = new ConfigResource(ConfigResource.Type.GROUP, consumerGroupId) - val op = new AlterConfigOp( - new ConfigEntry(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, newHeartbeatIntervalMs.toString), - OpType.SET - ) - admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get - - // Prepare the next heartbeat. - consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(consumerGroupId) - .setInstanceId(instanceId) - .setMemberId(consumerGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch), - true - ).build() - - // Verify the response. The heartbeat interval was updated. - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - newHeartbeatIntervalMs == consumerGroupHeartbeatResponse.data.heartbeatIntervalMs - }, msg = s"Dynamic update consumer group config failed. Last response $consumerGroupHeartbeatResponse.") + val admin = cluster.admin() + try { + val newHeartbeatIntervalMs = 10000 + val instanceId = "instanceId" + val consumerGroupId = "grp" + + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(consumerGroupId) + .setMemberId(Uuid.randomUuid.toString) + .setInstanceId(instanceId) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(consumerGroupHeartbeatResponse.data.memberId) + assertEquals(1, consumerGroupHeartbeatResponse.data.memberEpoch) + assertEquals(5000, consumerGroupHeartbeatResponse.data.heartbeatIntervalMs) + + // Alter consumer heartbeat interval config + val resource = new ConfigResource(ConfigResource.Type.GROUP, consumerGroupId) + val op = new AlterConfigOp( + new ConfigEntry(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, newHeartbeatIntervalMs.toString), + OpType.SET + ) + admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get + + // Prepare the next heartbeat. 
+ consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(consumerGroupId) + .setInstanceId(instanceId) + .setMemberId(consumerGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(consumerGroupHeartbeatResponse.data.memberEpoch) + ).build() + + // Verify the response. The heartbeat interval was updated. + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + newHeartbeatIntervalMs == consumerGroupHeartbeatResponse.data.heartbeatIntervalMs + }, msg = s"Dynamic update consumer group config failed. Last response $consumerGroupHeartbeatResponse.") + } finally { + admin.close() + } } @ClusterTest def testConsumerGroupHeartbeatFailureIfMemberIdMissingForVersionsAbove0(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REQUEST.code - }, msg = "Should fail due to invalid member id.") + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( + new ConsumerGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5 * 60 * 1000) + .setSubscribedTopicNames(List("foo").asJava) + .setTopicPartitions(List.empty.asJava) + ).build() + + var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REQUEST.code + }, msg = "Should fail due to invalid member id.") + } finally { + admin.close() + } } @ClusterTest def testMemberIdGeneratedOnServerWhenApiVersionIs0(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. 
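Editor's note: the dynamic group-config test above boils down to a single incrementalAlterConfigs call against a GROUP ConfigResource; the new interval then shows up in the heartbeatIntervalMs field of subsequent heartbeat responses, which is what the waitUntilTrue loop asserts. A minimal sketch of just that call, assuming an Admin client named admin and the group id "grp" are already in hand:

import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry}
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.coordinator.group.GroupConfig
import scala.jdk.CollectionConverters._

val resource = new ConfigResource(ConfigResource.Type.GROUP, "grp")
val op = new AlterConfigOp(
  new ConfigEntry(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "10000"),
  OpType.SET
)
// Apply the per-group override and block until the controller accepts it.
admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get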
TestUtils.createOffsetsTopicWithAdmin( admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq ) val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( @@ -592,8 +682,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setMemberEpoch(0) .setRebalanceTimeoutMs(5 * 60 * 1000) .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true + .setTopicPartitions(List.empty.asJava) ).build(0) var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null @@ -605,6 +694,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { val memberId = consumerGroupHeartbeatResponse.data().memberId() assertNotNull(memberId) assertFalse(memberId.isEmpty) + admin.close() } private def connectAndReceive(request: ConsumerGroupHeartbeatRequest): ConsumerGroupHeartbeatResponse = { diff --git a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala index 31821ceb3a35d..b5ef943df844e 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala @@ -486,6 +486,121 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ) } + /** + * The test method checks the following scenario: + * 1. Creating a classic group with member 1, whose assignment has non-empty user data. + * 2. Member 2 using consumer protocol joins. The group cannot be upgraded and the join is + * rejected. + * 3. Member 1 leaves. + * 4. Member 2 using consumer protocol joins. The group is upgraded. + */ + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") + ) + ) + def testOnlineMigrationWithNonEmptyUserDataInAssignment(): Unit = { + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + + // Create the topic. + createTopic( + topic = "foo", + numPartitions = 3 + ) + + // Classic member 1 joins the classic group. + val groupId = "grp" + + val memberId1 = joinDynamicConsumerGroupWithOldProtocol( + groupId = groupId, + metadata = metadata(List.empty), + assignment = assignment(List(0, 1, 2), ByteBuffer.allocate(1)) + )._1 + + // The joining request with a consumer group member 2 is rejected. + val errorMessage = consumerGroupHeartbeat( + groupId = groupId, + memberId = Uuid.randomUuid.toString, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + expectedError = Errors.GROUP_ID_NOT_FOUND + ).errorMessage + + assertEquals( + "Cannot upgrade classic group grp to consumer group because an unsupported custom assignor is in use. " + + "Please refer to the documentation or switch to a default assignor before re-attempting the upgrade.", + errorMessage + ) + + // The group is still a classic group. 
+ assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ClassicGroupState.STABLE.toString) + .setGroupType(Group.GroupType.CLASSIC.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CLASSIC.toString) + ) + ) + + // Classic member 1 leaves the group. + leaveGroup( + groupId = groupId, + memberId = memberId1, + useNewProtocol = false, + version = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) + ) + + // Verify that the group is empty. + assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ClassicGroupState.EMPTY.toString) + .setGroupType(Group.GroupType.CLASSIC.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CLASSIC.toString) + ) + ) + + // The joining request with a consumer group member is accepted. + consumerGroupHeartbeat( + groupId = groupId, + memberId = Uuid.randomUuid.toString, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + expectedError = Errors.NONE + ) + + // The group has become a consumer group. + assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ConsumerGroupState.STABLE.toString) + .setGroupType(Group.GroupType.CONSUMER.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CONSUMER.toString) + ) + ) + } + private def testUpgradeFromEmptyClassicToConsumerGroup(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. @@ -782,7 +897,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ) // Member 1 commits offset. Start from version 1 because version 0 goes to ZK. - for (version <- 1 to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + for (version <- ApiKeys.OFFSET_COMMIT.oldestVersion to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { for (partitionId <- 0 to 2) { commitOffset( groupId = groupId, @@ -1081,7 +1196,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ) // Member 1 commits offset. Start from version 1 because version 0 goes to ZK. 
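Editor's note: the scenario above hinges on member 1 owning a classic assignment with non-empty userData, produced by the assignment helper that this patch extends with a userData parameter further down. A sketch of how such a blob is serialized with the classic consumer protocol, using only classes already referenced in this file:

import java.nio.ByteBuffer
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.TopicPartition
import scala.jdk.CollectionConverters._

val partitions = List(0, 1, 2).map(new TopicPartition("foo", _)).asJava
// A single opaque byte of user data is enough to look like a custom assignor.
val userData = ByteBuffer.allocate(1)
val assignmentBytes: Array[Byte] =
  ConsumerProtocol.serializeAssignment(
    new ConsumerPartitionAssignor.Assignment(partitions, userData)
  ).array

Handing this blob to member 1 is what makes the coordinator treat the classic group as using an unsupported custom assignor, so the consumer-protocol join is rejected with GROUP_ID_NOT_FOUND until member 1 leaves, exactly as the assertions above verify.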
- for (version <- 1 to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + for (version <- ApiKeys.OFFSET_COMMIT.oldestVersion to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { for (partitionId <- 0 to 2) { commitOffset( groupId = groupId, @@ -1262,10 +1377,11 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).array } - private def assignment(assignedPartitions: List[Int]): Array[Byte] = { + private def assignment(assignedPartitions: List[Int], userData: ByteBuffer = null): Array[Byte] = { ConsumerProtocol.serializeAssignment( new ConsumerPartitionAssignor.Assignment( - assignedPartitions.map(new TopicPartition("foo", _)).asJava + assignedPartitions.map(new TopicPartition("foo", _)).asJava, + userData ) ).array } diff --git a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala index ec09e10840d95..b7e42c2399727 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala @@ -52,6 +52,7 @@ import org.apache.kafka.common.{ElectionType, Uuid} import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.apache.kafka.controller.{Controller, ControllerRequestContext, ResultOrError} import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher +import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.network.metrics.RequestChannelMetrics import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult, Authorizer} @@ -155,7 +156,8 @@ class ControllerApisTest { throttle: Boolean = false): ControllerApis = { props.put(KRaftConfigs.NODE_ID_CONFIG, nodeId: java.lang.Integer) props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") - props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "PLAINTEXT") + props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$nodeId@localhost:9092") new ControllerApis( requestChannel, @@ -409,7 +411,7 @@ class ControllerApisTest { assertThrows(classOf[ClusterAuthorizationException], () => { controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build()) controllerApis.handleAlterPartitionRequest(buildRequest(new AlterPartitionRequest.Builder( - new AlterPartitionRequestData(), false).build(0))) + new AlterPartitionRequestData()).build(0))) }) } @@ -1284,20 +1286,22 @@ class ControllerApisTest { @Test def testUnauthorizedControllerRegistrationRequest(): Unit = { - assertThrows(classOf[ClusterAuthorizationException], () => { + val exception = assertThrows(classOf[ClusterAuthorizationException], () => { controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build()) controllerApis.handleControllerRegistration(buildRequest( new ControllerRegistrationRequest(new ControllerRegistrationRequestData(), 0.toShort))) }) + assertTrue(exception.getMessage.contains("needs CLUSTER_ACTION permission")) } @Test def testUnauthorizedDescribeClusterRequest(): Unit = { - assertThrows(classOf[ClusterAuthorizationException], () => { + val exception = assertThrows(classOf[ClusterAuthorizationException], () => { controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build()) 
controllerApis.handleDescribeCluster(buildRequest( new DescribeClusterRequest(new DescribeClusterRequestData(), 1.toShort))) }) + assertTrue(exception.getMessage.contains("needs ALTER permission")) } @AfterEach diff --git a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala index de2e80aace0e3..f63434a256166 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala @@ -387,24 +387,17 @@ class ControllerMutationQuotaTest extends BaseRequestTest { private def waitUserQuota(user: String, expectedQuota: Double): Unit = { val quotaManager = brokers.head.quotaManagers.controllerMutation - val controllerQuotaManager = - if (isKRaftTest()) Option(controllerServers.head.quotaManagers.controllerMutation) - else Option.empty + val controllerQuotaManager = controllerServers.head.quotaManagers.controllerMutation var actualQuota = Double.MinValue TestUtils.waitUntilTrue(() => { actualQuota = quotaManager.quota(user, "").bound() - if (controllerQuotaManager.isDefined) - expectedQuota == actualQuota && expectedQuota == controllerQuotaManager.get.quota(user, "").bound() - else - expectedQuota == actualQuota + expectedQuota == actualQuota && expectedQuota == controllerQuotaManager.quota(user, "").bound() }, s"Quota of $user is not $expectedQuota but $actualQuota") } private def quotaMetric(user: String): Option[KafkaMetric] = { - val metrics = - if (isKRaftTest()) controllerServers.head.metrics - else brokers.head.metrics + val metrics = controllerServers.head.metrics val metricName = metrics.metricName( "tokens", QuotaType.CONTROLLER_MUTATION.toString, @@ -449,7 +442,7 @@ class ControllerMutationQuotaTest extends BaseRequestTest { connectAndReceive[AlterClientQuotasResponse]( request, destination = controllerSocketServer, - if (isKRaftTest()) ListenerName.normalised("CONTROLLER") else listenerName + ListenerName.normalised("CONTROLLER") ) } } diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala index 1dfc33e43bdfd..f4d2916986f36 100644 --- a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala @@ -17,7 +17,6 @@ package kafka.server -import kafka.utils._ import org.apache.kafka.common.Uuid import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.CreateTopicsRequestData @@ -103,36 +102,6 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { validateTopicExists("partial-none") } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testCreateTopicsWithVeryShortTimeouts(quorum: String): Unit = { - // When using ZooKeeper, we don't expect a request to ever complete within 1ms. - // A timeout of 1 ms allows us to test the purgatory timeout logic. - // - // Note: we do not test KRaft here because its behavior is different. Server-side - // timeouts are much less likely to happen with KRaft since the operation is much - // faster. Additionally, if a server side timeout does happen, the operation is - // usually not performed. 
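Editor's note: the ControllerApisTest tweaks above all follow one pattern, keep the assertThrows but capture its return value so the authorizer's error message can be checked too. The pattern in isolation (JUnit 5 assertions, with a stand-in body so the snippet is self-contained; the real tests invoke the denied controller API instead):

import org.apache.kafka.common.errors.ClusterAuthorizationException
import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue}

val exception = assertThrows(classOf[ClusterAuthorizationException], () => {
  // stand-in for the controller API call rejected by the deny-all authorizer
  throw new ClusterAuthorizationException("Request needs CLUSTER_ACTION permission")
})
assertTrue(exception.getMessage.contains("needs CLUSTER_ACTION permission"))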
- validateErrorCreateTopicsRequests(topicsReq(Seq( - topicReq("error-timeout", numPartitions = 10, replicationFactor = 3)), timeout = 1), - Map("error-timeout" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false) - validateErrorCreateTopicsRequests(topicsReq(Seq( - topicReq("error-timeout-zero", numPartitions = 10, replicationFactor = 3)), timeout = 0), - Map("error-timeout-zero" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false) - // Negative timeouts are treated the same as 0 - validateErrorCreateTopicsRequests(topicsReq(Seq( - topicReq("error-timeout-negative", numPartitions = 10, replicationFactor = 3)), timeout = -1), - Map("error-timeout-negative" -> error(Errors.REQUEST_TIMED_OUT)), checkErrorMessage = false) - // The topics should still get created eventually - TestUtils.waitForPartitionMetadata(servers, "error-timeout", 0) - TestUtils.waitForPartitionMetadata(servers, "error-timeout-zero", 0) - TestUtils.waitForPartitionMetadata(servers, "error-timeout-negative", 0) - validateTopicExists("error-timeout") - validateTopicExists("error-timeout-zero") - validateTopicExists("error-timeout-negative") - } - - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testInvalidCreateTopicsRequests(quorum: String): Unit = { @@ -149,21 +118,8 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { } @ParameterizedTest - @ValueSource(strings = Array("zk", "zkMigration")) - def testNotController(quorum: String): Unit = { - // Note: we don't run this test when in KRaft mode, because KRaft doesn't have this - // behavior of returning NOT_CONTROLLER. Instead, the request is forwarded. - val req = topicsReq(Seq(topicReq("topic1"))) - val response = sendCreateTopicRequest(req, notControllerSocketServer) - val error = if (isZkMigrationTest()) Errors.NONE else Errors.NOT_CONTROLLER - assertEquals(1, response.errorCounts().get(error)) - } - - @ParameterizedTest - @ValueSource(strings = Array("zk")) + @ValueSource(strings = Array("kraft")) def testCreateTopicsRequestVersions(quorum: String): Unit = { - // Note: we don't run this test when in KRaft mode, because kraft does not yet support returning topic - // configs from CreateTopics. for (version <- ApiKeys.CREATE_TOPICS.oldestVersion to ApiKeys.CREATE_TOPICS.latestVersion) { val topic = s"topic_$version" val data = new CreateTopicsRequestData() @@ -175,7 +131,7 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { ).asJava.iterator())) val request = new CreateTopicsRequest.Builder(data).build(version.asInstanceOf[Short]) - val response = sendCreateTopicRequest(request) + val response = sendCreateTopicRequest(request, adminSocketServer) val topicResponse = response.data.topics.find(topic) assertNotNull(topicResponse) diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala index 09d69b9bfce26..96ebfd66683b6 100644 --- a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala +++ b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala @@ -104,31 +104,19 @@ class CreateTopicsRequestWithPolicyTest extends AbstractCreateTopicsRequestTest Map(existingTopic -> error(Errors.TOPIC_ALREADY_EXISTS, Some("Topic 'existing-topic' already exists.")))) - var errorMsg = if (isKRaftTest()) { - "Unable to replicate the partition 4 time(s): The target replication factor of 4 cannot be reached because only 3 broker(s) are registered." 
- } else { - "Replication factor: 4 larger than available brokers: 3." - } + var errorMsg = "Unable to replicate the partition 4 time(s): The target replication factor of 4 cannot be reached because only 3 broker(s) are registered." validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication", numPartitions = 10, replicationFactor = brokerCount + 1)), validateOnly = true), Map("error-replication" -> error(Errors.INVALID_REPLICATION_FACTOR, Some(errorMsg)))) - errorMsg = if (isKRaftTest()) { - "Replication factor must be larger than 0, or -1 to use the default value." - } else { - "Replication factor must be larger than 0." - } + errorMsg = "Replication factor must be larger than 0, or -1 to use the default value." validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication2", numPartitions = 10, replicationFactor = -2)), validateOnly = true), Map("error-replication2" -> error(Errors.INVALID_REPLICATION_FACTOR, Some(errorMsg)))) - errorMsg = if (isKRaftTest()) { - "Number of partitions was set to an invalid non-positive value." - } else { - "Number of partitions must be larger than 0." - } + errorMsg = "Number of partitions was set to an invalid non-positive value." validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-partitions", numPartitions = -2, replicationFactor = 1)), validateOnly = true), Map("error-partitions" -> error(Errors.INVALID_PARTITIONS, diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala index 75d77a01460f7..2c211eb042a11 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala @@ -16,7 +16,7 @@ */ package kafka.server -import kafka.api.{IntegrationTestHarness, KafkaSasl, SaslSetup} +import kafka.api.{IntegrationTestHarness, SaslSetup} import kafka.security.JaasTestUtils import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, CreateDelegationTokenOptions, DescribeDelegationTokenOptions} @@ -52,7 +52,7 @@ class DelegationTokenRequestsTest extends IntegrationTestHarness with SaslSetup @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) } @@ -151,7 +151,7 @@ class DelegationTokenRequestsTest extends IntegrationTestHarness with SaslSetup // Create a DelegationToken with a short lifetime to validate the expire code val createResult5 = adminClient.createDelegationToken(new CreateDelegationTokenOptions() .renewers(renewer1) - .maxlifeTimeMs(1 * 1000)) + .maxLifetimeMs(1 * 1000)) val token5 = createResult5.delegationToken().get() TestUtils.waitUntilTrue(() => brokers.forall(server => server.tokenCache.tokens().size() == 1), diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala index 43694ebb8551e..c380816f769fe 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala @@ -16,7 +16,7 @@ */ package 
kafka.server -import kafka.api.{KafkaSasl, SaslSetup} +import kafka.api.SaslSetup import kafka.security.JaasTestUtils import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.common.errors.DelegationTokenDisabledException @@ -42,7 +42,7 @@ class DelegationTokenRequestsWithDisableTokenFeatureTest extends BaseRequestTest @BeforeEach override def setUp(testInfo: TestInfo): Unit = { - startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) super.setUp(testInfo) } diff --git a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala index 2485f09409d51..0fab872363bd8 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala @@ -123,6 +123,8 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator List(new DescribedGroup() .setGroupId("grp") .setGroupState(ClassicGroupState.DEAD.toString) + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code) + .setErrorMessage("Group grp not found.") ), describeGroups(List("grp")) ) diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala index 235461c396c13..4a4bc73a33205 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala @@ -18,13 +18,11 @@ package kafka.server import java.util -import java.util.Collections import kafka.network.SocketServer import kafka.utils.{Logging, TestUtils} -import org.apache.kafka.common.Uuid import org.apache.kafka.common.message.DeleteTopicsRequestData import org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.DeleteTopicsRequest import org.apache.kafka.common.requests.DeleteTopicsResponse import org.apache.kafka.common.requests.MetadataRequest @@ -41,7 +39,7 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { @ParameterizedTest @ValueSource(strings = Array("kraft")) def testTopicDeletionClusterHasOfflinePartitions(quorum: String): Unit = { - // Create a two topics with one partition/replica. Make one of them offline. + // Create two topics with one partition/replica. Make one of them offline. val offlineTopic = "topic-1" val onlineTopic = "topic-2" createTopicWithAssignment(offlineTopic, Map[Int, Seq[Int]](0 -> Seq(0))) @@ -126,114 +124,6 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { } } - /* - * Only run this test against ZK cluster. The KRaft controller doesn't perform operations that have timed out. 
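Editor's note: the delegation-token change above is essentially an options-API rename (maxlifeTimeMs becomes maxLifetimeMs). For reference, creating a deliberately short-lived token through the Admin client now reads as below; adminClient is assumed to be an Admin already connected to the SASL cluster, as in the test.

import java.util.Collections
import org.apache.kafka.clients.admin.CreateDelegationTokenOptions
import org.apache.kafka.common.security.auth.KafkaPrincipal

val renewers = Collections.singletonList(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "renewer1"))
val createResult = adminClient.createDelegationToken(
  new CreateDelegationTokenOptions()
    .renewers(renewers)
    .maxLifetimeMs(1000L) // one second, so the expiry path can be exercised quickly
)
val token = createResult.delegationToken().get()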
- */ - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testErrorDeleteTopicRequests(quorum: String): Unit = { - val timeout = 30000 - val timeoutTopic = "invalid-timeout" - - // Basic - validateErrorDeleteTopicRequests(new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopicNames(util.Arrays.asList("invalid-topic")) - .setTimeoutMs(timeout)).build(), - Map("invalid-topic" -> Errors.UNKNOWN_TOPIC_OR_PARTITION)) - - // Partial - createTopic("partial-topic-1") - validateErrorDeleteTopicRequests(new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopicNames(util.Arrays.asList("partial-topic-1", "partial-invalid-topic")) - .setTimeoutMs(timeout)).build(), - Map( - "partial-topic-1" -> Errors.NONE, - "partial-invalid-topic" -> Errors.UNKNOWN_TOPIC_OR_PARTITION - ) - ) - - // Topic IDs - createTopic("topic-id-1") - val validId = getTopicIds()("topic-id-1") - val invalidId = Uuid.randomUuid - validateErrorDeleteTopicRequestsWithIds(new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopics(util.Arrays.asList(new DeleteTopicState().setTopicId(invalidId), - new DeleteTopicState().setTopicId(validId))) - .setTimeoutMs(timeout)).build(), - Map( - invalidId -> Errors.UNKNOWN_TOPIC_ID, - validId -> Errors.NONE - ) - ) - - // Timeout - createTopic(timeoutTopic, 5, 2) - // Must be a 0ms timeout to avoid transient test failures. Even a timeout of 1ms has succeeded in the past. - validateErrorDeleteTopicRequests(new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopicNames(util.Arrays.asList(timeoutTopic)) - .setTimeoutMs(0)).build(), - Map(timeoutTopic -> Errors.REQUEST_TIMED_OUT)) - // The topic should still get deleted eventually - TestUtils.waitUntilTrue(() => !brokers.head.metadataCache.contains(timeoutTopic), s"Topic $timeoutTopic is never deleted") - validateTopicIsDeleted(timeoutTopic) - } - - private def validateErrorDeleteTopicRequests(request: DeleteTopicsRequest, expectedResponse: Map[String, Errors]): Unit = { - val response = sendDeleteTopicsRequest(request) - val errors = response.data.responses - - val errorCount = response.errorCounts().asScala.foldLeft(0)(_+_._2) - assertEquals(expectedResponse.size, errorCount, "The response size should match") - - expectedResponse.foreach { case (topic, expectedError) => - assertEquals(expectedResponse(topic).code, errors.find(topic).errorCode, "The response error should match") - // If no error validate the topic was deleted - if (expectedError == Errors.NONE) { - validateTopicIsDeleted(topic) - } - } - } - - private def validateErrorDeleteTopicRequestsWithIds(request: DeleteTopicsRequest, expectedResponse: Map[Uuid, Errors]): Unit = { - val response = sendDeleteTopicsRequest(request) - val responses = response.data.responses - val errors = responses.asScala.map(result => result.topicId() -> result.errorCode()).toMap - val names = responses.asScala.map(result => result.topicId() -> result.name()).toMap - - val errorCount = response.errorCounts().asScala.foldLeft(0)(_+_._2) - assertEquals(expectedResponse.size, errorCount, "The response size should match") - - expectedResponse.foreach { case (topic, expectedError) => - assertEquals(expectedResponse(topic).code, errors(topic), "The response error should match") - // If no error validate the topic was deleted - if (expectedError == Errors.NONE) { - validateTopicIsDeleted(names(topic)) - } - } - } - - /* - * Only run this test against ZK clusters. KRaft doesn't have this behavior of returning NOT_CONTROLLER. 
- * Instead, the request is forwarded. - */ - @ParameterizedTest - @ValueSource(strings = Array("zk", "zkMigration")) - def testNotController(quorum: String): Unit = { - val request = new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopicNames(Collections.singletonList("not-controller")) - .setTimeoutMs(1000)).build() - val response = sendDeleteTopicsRequest(request, notControllerSocketServer) - - val expectedError = if (isZkMigrationTest()) Errors.NONE else Errors.NOT_CONTROLLER - val error = response.data.responses.find("not-controller").errorCode() - assertEquals(expectedError.code(), error) - } - private def validateTopicIsDeleted(topic: String): Unit = { val metadata = connectAndReceive[MetadataResponse](new MetadataRequest.Builder( List(topic).asJava, true).build).topicMetadata.asScala @@ -249,28 +139,24 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { } @ParameterizedTest - @ValueSource(strings = Array("zk")) + @ValueSource(strings = Array("kraft")) def testDeleteTopicsVersions(quorum: String): Unit = { - // This test assumes that the current valid versions are 0-6 please adjust the test if there are changes. - assertEquals(0, DeleteTopicsRequestData.LOWEST_SUPPORTED_VERSION) - assertEquals(6, DeleteTopicsRequestData.HIGHEST_SUPPORTED_VERSION) - val timeout = 10000 - DeleteTopicsRequestData.SCHEMAS.indices.foreach { version => + for (version <- ApiKeys.DELETE_TOPICS.oldestVersion to ApiKeys.DELETE_TOPICS.latestVersion) { info(s"Creating and deleting tests for version $version") val topicName = s"topic-$version" - createTopic(topicName) - val data = new DeleteTopicsRequestData().setTimeoutMs(timeout) + createTopic(topicName) + val data = new DeleteTopicsRequestData().setTimeoutMs(timeout) - if (version < 6) { - data.setTopicNames(util.Arrays.asList(topicName)) - } else { - data.setTopics(util.Arrays.asList(new DeleteTopicState().setName(topicName))) - } + if (version < 6) { + data.setTopicNames(util.Arrays.asList(topicName)) + } else { + data.setTopics(util.Arrays.asList(new DeleteTopicState().setName(topicName))) + } - validateValidDeleteTopicRequests(new DeleteTopicsRequest.Builder(data).build(version.toShort)) + validateValidDeleteTopicRequests(new DeleteTopicsRequest.Builder(data).build(version.toShort)) } } } diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala index bb0a96e1c34d6..4232030634cb8 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala @@ -40,7 +40,7 @@ class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { } override def generateConfigs = { - val props = TestUtils.createBrokerConfigs(brokerCount, zkConnectOrNull, + val props = TestUtils.createBrokerConfigs(brokerCount, enableControlledShutdown = false, enableDeleteTopic = false, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties, logDirCount = logDirCount) @@ -71,7 +71,7 @@ class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { connectAndReceive[DeleteTopicsResponse]( request, controllerSocketServer, - if (isKRaftTest()) ListenerName.normalised("CONTROLLER") else listenerName + ListenerName.normalised("CONTROLLER") ) } diff --git 
a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala index f053d22df12cd..6e43f904c11c7 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala @@ -69,11 +69,6 @@ class DescribeClusterRequestTest extends BaseRequestTest { .setRack(server.config.rack.orNull) }.toSet - var expectedControllerId = 0 - if (!isKRaftTest()) { - // in KRaft mode DescribeClusterRequest will return a random broker id as the controllerId (KIP-590) - expectedControllerId = servers.filter(_.kafkaController.isActive).last.config.brokerId - } val expectedClusterId = brokers.last.clusterId val expectedClusterAuthorizedOperations = if (includeClusterAuthorizedOperations) { @@ -92,11 +87,7 @@ class DescribeClusterRequestTest extends BaseRequestTest { .build(version.toShort) val describeClusterResponse = sentDescribeClusterRequest(describeClusterRequest) - if (isKRaftTest()) { - assertTrue(0 to brokerCount contains describeClusterResponse.data.controllerId) - } else { - assertEquals(expectedControllerId, describeClusterResponse.data.controllerId) - } + assertTrue(0 to brokerCount contains describeClusterResponse.data.controllerId) assertEquals(expectedClusterId, describeClusterResponse.data.clusterId) assertEquals(expectedClusterAuthorizedOperations, describeClusterResponse.data.clusterAuthorizedOperations) assertEquals(expectedBrokers, describeClusterResponse.data.brokers.asScala.toSet) diff --git a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala index 4a822048e1c42..de8044ce2c113 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala @@ -20,7 +20,7 @@ import org.apache.kafka.common.test.api.ClusterInstance import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.test.api.ClusterTestExtensions import org.apache.kafka.common.message.DescribeGroupsResponseData.{DescribedGroup, DescribedGroupMember} -import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.classic.ClassicGroupState import org.junit.jupiter.api.Assertions.assertEquals @@ -106,6 +106,8 @@ class DescribeGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinat new DescribedGroup() .setGroupId("grp-unknown") .setGroupState(ClassicGroupState.DEAD.toString) // Return DEAD group when the group does not exist. + .setErrorCode(if (version >= 6) Errors.GROUP_ID_NOT_FOUND.code() else Errors.NONE.code()) + .setErrorMessage(if (version >= 6) "Group grp-unknown not found." 
else null) ), describeGroups( groupIds = List("grp-1", "grp-2", "grp-unknown"), diff --git a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala index 5a0a5d8dc180a..d22b53c0cb67c 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala @@ -25,7 +25,8 @@ import org.apache.kafka.common.message.DescribeLogDirsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ @@ -38,16 +39,17 @@ class DescribeLogDirsRequestTest extends BaseRequestTest { val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) - @Test - def testDescribeLogDirsRequest(): Unit = { - val onlineDir = new File(servers.head.config.logDirs.head).getAbsolutePath - val offlineDir = new File(servers.head.config.logDirs.tail.head).getAbsolutePath - servers.head.replicaManager.handleLogDirFailure(offlineDir) + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeLogDirsRequest(quorum: String): Unit = { + val onlineDir = new File(brokers.head.config.logDirs.head).getAbsolutePath + val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath + brokers.head.replicaManager.handleLogDirFailure(offlineDir) createTopic(topic, partitionNum, 1) - TestUtils.generateAndProduceMessages(servers, topic, 10) + TestUtils.generateAndProduceMessages(brokers, topic, 10) val request = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null)).build() - val response = connectAndReceive[DescribeLogDirsResponse](request, destination = controllerSocketServer) + val response = connectAndReceive[DescribeLogDirsResponse](request, destination = anySocketServer) assertEquals(logDirCount, response.data.results.size) val offlineResult = response.data.results.asScala.find(logDirResult => logDirResult.logDir == offlineDir).get @@ -67,14 +69,14 @@ class DescribeLogDirsRequestTest extends BaseRequestTest { }.toMap val replicaInfo0 = onlinePartitionsMap(tp0) val replicaInfo1 = onlinePartitionsMap(tp1) - val log0 = servers.head.logManager.getLog(tp0).get - val log1 = servers.head.logManager.getLog(tp1).get + val log0 = brokers.head.logManager.getLog(tp0).get + val log1 = brokers.head.logManager.getLog(tp1).get assertEquals(log0.size, replicaInfo0.partitionSize) assertEquals(log1.size, replicaInfo1.partitionSize) - val logEndOffset = servers.head.logManager.getLog(tp0).get.logEndOffset + val logEndOffset = brokers.head.logManager.getLog(tp0).get.logEndOffset assertTrue(logEndOffset > 0, s"LogEndOffset '$logEndOffset' should be > 0") - assertEquals(servers.head.replicaManager.getLogEndOffsetLag(tp0, log0.logEndOffset, isFuture = false), replicaInfo0.offsetLag) - assertEquals(servers.head.replicaManager.getLogEndOffsetLag(tp1, log1.logEndOffset, isFuture = false), replicaInfo1.offsetLag) + assertEquals(brokers.head.replicaManager.getLogEndOffsetLag(tp0, log0.logEndOffset, isFuture = false), replicaInfo0.offsetLag) + assertEquals(brokers.head.replicaManager.getLogEndOffsetLag(tp1, log1.logEndOffset, isFuture = false), replicaInfo1.offsetLag) } } diff --git a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala 
b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala index 768be4786bd07..10b42f96b4e54 100755 --- a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala @@ -21,21 +21,18 @@ import java.{lang, util} import java.util.{Optional, Properties, Map => JMap} import java.util.concurrent.{CompletionStage, TimeUnit} import java.util.concurrent.atomic.AtomicReference -import kafka.controller.KafkaController import kafka.log.LogManager import kafka.log.remote.RemoteLogManager import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.utils.TestUtils -import kafka.zk.KafkaZkClient import org.apache.kafka.common.{Endpoint, Reconfigurable} import org.apache.kafka.common.acl.{AclBinding, AclBindingFilter} -import org.apache.kafka.common.config.types.Password -import org.apache.kafka.common.config.{ConfigException, SaslConfigs, SslConfigs} +import org.apache.kafka.common.config.{ConfigException, SslConfigs} import org.apache.kafka.common.metrics.{JmxReporter, Metrics} import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.security.PasswordEncoderConfigs import org.apache.kafka.server.authorizer._ import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ZkConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig @@ -49,7 +46,6 @@ import org.mockito.ArgumentMatchers.anyString import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} import org.mockito.Mockito.{mock, verify, verifyNoMoreInteractions, when} -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ import scala.collection.Set @@ -57,12 +53,12 @@ class DynamicBrokerConfigTest { @Test def testConfigUpdate(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val oldKeystore = "oldKs.jks" props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, oldKeystore) val config = KafkaConfig(props) val dynamicConfig = config.dynamicConfig - dynamicConfig.initialize(None, None) + dynamicConfig.initialize(None) assertEquals(config, dynamicConfig.currentKafkaConfig) assertEquals(oldKeystore, config.values.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG)) @@ -100,40 +96,9 @@ class DynamicBrokerConfigTest { } } - @Test - def testEnableDefaultUncleanLeaderElection(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) - origProps.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "false") - - val config = KafkaConfig(origProps) - val serverMock = Mockito.mock(classOf[KafkaServer]) - val controllerMock = Mockito.mock(classOf[KafkaController]) - val logManagerMock = Mockito.mock(classOf[LogManager]) - - Mockito.when(serverMock.config).thenReturn(config) - Mockito.when(serverMock.kafkaController).thenReturn(controllerMock) - Mockito.when(serverMock.logManager).thenReturn(logManagerMock) - Mockito.when(logManagerMock.allLogs).thenReturn(Iterable.empty) - - val currentDefaultLogConfig = new AtomicReference(new LogConfig(new Properties)) - Mockito.when(logManagerMock.currentDefaultConfig).thenAnswer(_ => currentDefaultLogConfig.get()) - Mockito.when(logManagerMock.reconfigureDefaultLogConfig(ArgumentMatchers.any(classOf[LogConfig]))) - .thenAnswer(invocation => 
currentDefaultLogConfig.set(invocation.getArgument(0))) - - config.dynamicConfig.initialize(None, None) - config.dynamicConfig.addBrokerReconfigurable(new DynamicLogConfig(logManagerMock, serverMock)) - - val props = new Properties() - - props.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") - config.dynamicConfig.updateDefaultConfig(props) - assertTrue(config.uncleanLeaderElectionEnable) - Mockito.verify(controllerMock).enableDefaultUncleanLeaderElection() - } - @Test def testUpdateDynamicThreadPool(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(ServerConfigs.NUM_IO_THREADS_CONFIG, "4") origProps.put(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG, "2") origProps.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "1") @@ -159,7 +124,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.logManager).thenReturn(logManagerMock) Mockito.when(serverMock.kafkaScheduler).thenReturn(schedulerMock) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(new BrokerDynamicThreadPool(serverMock)) config.dynamicConfig.addReconfigurable(acceptorMock) @@ -202,13 +167,87 @@ class DynamicBrokerConfigTest { ) } - @nowarn("cat=deprecation") + @Test + def testUpdateRemoteLogManagerDynamicThreadPool(): Unit = { + val origProps = TestUtils.createBrokerConfig(0, port = 8181) + val config = KafkaConfig(origProps) + assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()) + assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()) + assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_READER_THREADS, config.remoteLogManagerConfig.remoteLogReaderThreads()) + + val serverMock = mock(classOf[KafkaBroker]) + val remoteLogManager = mock(classOf[RemoteLogManager]) + when(serverMock.config).thenReturn(config) + when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) + + config.dynamicConfig.initialize(None) + config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) + + // Test dynamic update with valid values + val props = new Properties() + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, "8") + config.dynamicConfig.validate(props, perBrokerConfig = true) + config.dynamicConfig.updateDefaultConfig(props) + assertEquals(8, config.remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()) + verify(remoteLogManager).resizeCopierThreadPool(8) + + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, "7") + config.dynamicConfig.validate(props, perBrokerConfig = false) + config.dynamicConfig.updateDefaultConfig(props) + assertEquals(7, config.remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()) + verify(remoteLogManager).resizeExpirationThreadPool(7) + + props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, "6") + config.dynamicConfig.validate(props, perBrokerConfig = true) + config.dynamicConfig.updateDefaultConfig(props) + assertEquals(6, config.remoteLogManagerConfig.remoteLogReaderThreads()) + verify(remoteLogManager).resizeReaderThreadPool(6) + props.clear() + verifyNoMoreInteractions(remoteLogManager) + } + + @Test + def 
testRemoteLogDynamicThreadPoolWithInvalidValues(): Unit = { + val origProps = TestUtils.createBrokerConfig(0, port = 8181) + val config = KafkaConfig(origProps) + + val serverMock = mock(classOf[KafkaBroker]) + val remoteLogManager = mock(classOf[RemoteLogManager]) + when(serverMock.config).thenReturn(config) + when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) + + config.dynamicConfig.initialize(None) + config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) + + // Test dynamic update with invalid values + val props = new Properties() + props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, "0") + val err = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = true)) + assertTrue(err.getMessage.contains("Value must be at least 1")) + + val props1 = new Properties() + props1.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, "-1") + val err1 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props1, perBrokerConfig = false)) + assertTrue(err1.getMessage.contains("Value must be at least 1")) + + val props2 = new Properties() + props2.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, "2") + val err2 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props2, perBrokerConfig = false)) + assertTrue(err2.getMessage.contains("value should be at least half the current value")) + + val props3 = new Properties() + props3.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, "-1") + val err3 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = true)) + assertTrue(err3.getMessage.contains("Value must be at least 1")) + verifyNoMoreInteractions(remoteLogManager) + } + @Test def testConfigUpdateWithSomeInvalidConfigs(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS") val config = KafkaConfig(origProps) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val validProps = Map(s"listener.name.external.${SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG}" -> "ks.p12") @@ -220,17 +259,14 @@ class DynamicBrokerConfigTest { // Test update of configs with invalid type val invalidProps = Map(CleanerConfig.LOG_CLEANER_THREADS_PROP -> "invalid") verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, invalidProps) - - val excludedTopicConfig = Map(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG -> "0.10.2") - verifyConfigUpdateWithInvalidConfig(config, origProps, validProps, excludedTopicConfig) } @Test def testConfigUpdateWithReconfigurableValidationFailure(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "100000000") val config = KafkaConfig(origProps) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val validProps = Map.empty[String, String] val invalidProps = Map(CleanerConfig.LOG_CLEANER_THREADS_PROP -> "20") @@ -261,7 +297,7 @@ class DynamicBrokerConfigTest { @Test def testReconfigurableValidation(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) val config = 
KafkaConfig(origProps) val invalidReconfigurableProps = Set(CleanerConfig.LOG_CLEANER_THREADS_PROP, ServerConfigs.BROKER_ID_CONFIG, "some.prop") val validReconfigurableProps = Set(CleanerConfig.LOG_CLEANER_THREADS_PROP, CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "some.prop") @@ -331,10 +367,9 @@ class DynamicBrokerConfigTest { } private def verifyConfigUpdate(name: String, value: Object, perBrokerConfig: Boolean, expectFailure: Boolean): Unit = { - val configProps = TestUtils.createBrokerConfig(0, null, port = 8181) - configProps.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "broker.secret") + val configProps = TestUtils.createBrokerConfig(0, port = 8181) val config = KafkaConfig(configProps) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val props = new Properties props.put(name, value) @@ -365,7 +400,7 @@ class DynamicBrokerConfigTest { validProps.foreach { case (k, v) => props.put(k, v) } invalidProps.foreach { case (k, v) => props.put(k, v) } - // DynamicBrokerConfig#validate is used by AdminClient to validate the configs provided in + // DynamicBrokerConfig#validate is used by AdminClient to validate the configs provided // in an AlterConfigs request. Validation should fail with an exception if any of the configs are invalid. assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = true)) @@ -379,74 +414,17 @@ class DynamicBrokerConfigTest { } } - @Test - def testPasswordConfigEncryption(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) - val configWithoutSecret = KafkaConfig(props) - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "config-encoder-secret") - val configWithSecret = KafkaConfig(props) - val dynamicProps = new Properties - dynamicProps.put(SaslConfigs.SASL_JAAS_CONFIG, "myLoginModule required;") - - try { - configWithoutSecret.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true) - } catch { - case _: ConfigException => // expected exception - } - val persistedProps = configWithSecret.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true) - assertFalse(persistedProps.getProperty(SaslConfigs.SASL_JAAS_CONFIG).contains("myLoginModule"), - "Password not encoded") - val decodedProps = configWithSecret.dynamicConfig.fromPersistentProps(persistedProps, perBrokerConfig = true) - assertEquals("myLoginModule required;", decodedProps.getProperty(SaslConfigs.SASL_JAAS_CONFIG)) - } - - @Test - def testPasswordConfigEncoderSecretChange(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) - props.put(SaslConfigs.SASL_JAAS_CONFIG, "staticLoginModule required;") - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "config-encoder-secret") - val config = KafkaConfig(props) - config.dynamicConfig.initialize(None, None) - val dynamicProps = new Properties - dynamicProps.put(SaslConfigs.SASL_JAAS_CONFIG, "dynamicLoginModule required;") - - val persistedProps = config.dynamicConfig.toPersistentProps(dynamicProps, perBrokerConfig = true) - assertFalse(persistedProps.getProperty(SaslConfigs.SASL_JAAS_CONFIG).contains("LoginModule"), - "Password not encoded") - config.dynamicConfig.updateBrokerConfig(0, persistedProps) - assertEquals("dynamicLoginModule required;", config.values.get(SaslConfigs.SASL_JAAS_CONFIG).asInstanceOf[Password].value) - - // New config with same secret should use the dynamic password config - val newConfigWithSameSecret = 
KafkaConfig(props) - newConfigWithSameSecret.dynamicConfig.initialize(None, None) - newConfigWithSameSecret.dynamicConfig.updateBrokerConfig(0, persistedProps) - assertEquals("dynamicLoginModule required;", newConfigWithSameSecret.values.get(SaslConfigs.SASL_JAAS_CONFIG).asInstanceOf[Password].value) - - // New config with new secret should use the dynamic password config if new and old secrets are configured in KafkaConfig - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "new-encoder-secret") - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_CONFIG, "config-encoder-secret") - val newConfigWithNewAndOldSecret = KafkaConfig(props) - newConfigWithNewAndOldSecret.dynamicConfig.updateBrokerConfig(0, persistedProps) - assertEquals("dynamicLoginModule required;", newConfigWithSameSecret.values.get(SaslConfigs.SASL_JAAS_CONFIG).asInstanceOf[Password].value) - - // New config with new secret alone should revert to static password config since dynamic config cannot be decoded - props.put(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, "another-new-encoder-secret") - val newConfigWithNewSecret = KafkaConfig(props) - newConfigWithNewSecret.dynamicConfig.updateBrokerConfig(0, persistedProps) - assertEquals("staticLoginModule required;", newConfigWithNewSecret.values.get(SaslConfigs.SASL_JAAS_CONFIG).asInstanceOf[Password].value) - } - @Test def testDynamicListenerConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 9092) + val props = TestUtils.createBrokerConfig(0, port = 9092) val oldConfig = KafkaConfig.fromProps(props) val kafkaServer: KafkaBroker = mock(classOf[kafka.server.KafkaBroker]) when(kafkaServer.config).thenReturn(oldConfig) - props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://hostname:9092,SASL_PLAINTEXT://hostname:9093") + props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://hostname:9092") new DynamicListenerConfig(kafkaServer).validateReconfiguration(KafkaConfig(props)) - // it is illegal to update non-reconfiguable configs of existent listeners + // it is illegal to update non-reconfigurable configs of existent listeners props.put("listener.name.plaintext.you.should.not.pass", "failure") val dynamicListenerConfig = new DynamicListenerConfig(kafkaServer) assertThrows(classOf[ConfigException], () => dynamicListenerConfig.validateReconfiguration(KafkaConfig(props))) @@ -480,9 +458,9 @@ class DynamicBrokerConfigTest { @Test def testAuthorizerConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 9092) + val props = TestUtils.createBrokerConfig(0, port = 9092) val oldConfig = KafkaConfig.fromProps(props) - oldConfig.dynamicConfig.initialize(None, None) + oldConfig.dynamicConfig.initialize(None) val kafkaServer: KafkaBroker = mock(classOf[kafka.server.KafkaBroker]) when(kafkaServer.config).thenReturn(oldConfig) @@ -514,7 +492,6 @@ class DynamicBrokerConfigTest { port: Int ): Properties = { val retval = TestUtils.createBrokerConfig(nodeId, - zkConnect = null, enableControlledShutdown = true, enableDeleteTopic = true, port) @@ -529,7 +506,7 @@ class DynamicBrokerConfigTest { def testCombinedControllerAuthorizerConfig(): Unit = { val props = createCombinedControllerConfig(0, 9092) val oldConfig = KafkaConfig.fromProps(props) - oldConfig.dynamicConfig.initialize(None, None) + oldConfig.dynamicConfig.initialize(None) val controllerServer: ControllerServer = mock(classOf[kafka.server.ControllerServer]) when(controllerServer.config).thenReturn(oldConfig) @@ -557,7 +534,6 @@ 
class DynamicBrokerConfigTest { port: Int ): Properties = { val retval = TestUtils.createBrokerConfig(nodeId, - zkConnect = null, enableControlledShutdown = true, enableDeleteTopic = true, port @@ -575,7 +551,7 @@ class DynamicBrokerConfigTest { def testIsolatedControllerAuthorizerConfig(): Unit = { val props = createIsolatedControllerConfig(0, port = 9092) val oldConfig = KafkaConfig.fromProps(props) - oldConfig.dynamicConfig.initialize(None, None) + oldConfig.dynamicConfig.initialize(None) val controllerServer: ControllerServer = mock(classOf[kafka.server.ControllerServer]) when(controllerServer.config).thenReturn(oldConfig) @@ -610,29 +586,11 @@ class DynamicBrokerConfigTest { DynamicBrokerConfig.brokerConfigSynonyms(ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG, matchListenerOverride = true)) } - @Test - def testDynamicConfigInitializationWithoutConfigsInZK(): Unit = { - val zkClient: KafkaZkClient = mock(classOf[KafkaZkClient]) - when(zkClient.getEntityConfigs(anyString(), anyString())).thenReturn(new java.util.Properties()) - - val initialProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 9092) - initialProps.remove(ServerConfigs.BACKGROUND_THREADS_CONFIG) - val oldConfig = KafkaConfig.fromProps(initialProps) - val dynamicBrokerConfig = new DynamicBrokerConfig(oldConfig) - dynamicBrokerConfig.initialize(Some(zkClient), None) - dynamicBrokerConfig.addBrokerReconfigurable(new TestDynamicThreadPool) - - val newprops = new Properties() - newprops.put(ServerConfigs.NUM_IO_THREADS_CONFIG, "10") - newprops.put(ServerConfigs.BACKGROUND_THREADS_CONFIG, "100") - dynamicBrokerConfig.updateBrokerConfig(0, newprops) - } - @Test def testImproperConfigsAreRemoved(): Unit = { - val props = TestUtils.createBrokerConfig(0, null) + val props = TestUtils.createBrokerConfig(0) val config = KafkaConfig(props) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) assertEquals(SocketServerConfigs.MAX_CONNECTIONS_DEFAULT, config.maxConnections) assertEquals(LogConfig.DEFAULT_MAX_MESSAGE_BYTES, config.messageMaxBytes) @@ -659,7 +617,7 @@ class DynamicBrokerConfigTest { @Test def testUpdateMetricReporters(): Unit = { val brokerId = 0 - val origProps = TestUtils.createBrokerConfig(brokerId, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(brokerId, port = 8181) val config = KafkaConfig(origProps) val serverMock = Mockito.mock(classOf[KafkaBroker]) @@ -667,7 +625,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val m = new DynamicMetricsReporters(brokerId, config, metrics, "clusterId") config.dynamicConfig.addReconfigurable(m) assertEquals(1, m.currentReporters.size) @@ -683,7 +641,7 @@ class DynamicBrokerConfigTest { @Test def testUpdateMetricReportersNoJmxReporter(): Unit = { val brokerId = 0 - val origProps = TestUtils.createBrokerConfig(brokerId, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(brokerId, port = 8181) origProps.put(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, "") val config = KafkaConfig(origProps) @@ -692,7 +650,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val m = new DynamicMetricsReporters(brokerId, config, metrics, "clusterId") config.dynamicConfig.addReconfigurable(m) assertTrue(m.currentReporters.isEmpty) @@ -710,7 +668,7 @@ class 
DynamicBrokerConfigTest { @Test def testNonInternalValuesDoesNotExposeInternalConfigs(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG, "1024") val config = new KafkaConfig(props) assertFalse(config.nonInternalValues.containsKey(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG)) @@ -720,11 +678,11 @@ class DynamicBrokerConfigTest { @Test def testDynamicLogLocalRetentionMsConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "2592000000") val config = KafkaConfig(props) val dynamicLogConfig = new DynamicLogConfig(mock(classOf[LogManager]), mock(classOf[KafkaBroker])) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(dynamicLogConfig) val newProps = new Properties() @@ -743,11 +701,11 @@ class DynamicBrokerConfigTest { @Test def testDynamicLogLocalRetentionSizeConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(ServerLogConfigs.LOG_RETENTION_BYTES_CONFIG, "4294967296") val config = KafkaConfig(props) val dynamicLogConfig = new DynamicLogConfig(mock(classOf[LogManager]), mock(classOf[KafkaBroker])) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(dynamicLogConfig) val newProps = new Properties() @@ -766,11 +724,11 @@ class DynamicBrokerConfigTest { @Test def testDynamicLogLocalRetentionSkipsOnInvalidConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP, "1000") props.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP, "1024") val config = KafkaConfig(props) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) // Check for invalid localRetentionMs < -2 verifyConfigUpdateWithInvalidConfig(config, props, Map.empty, Map(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_MS_PROP -> "-3")) @@ -792,7 +750,7 @@ class DynamicBrokerConfigTest { @Test def testDynamicRemoteFetchMaxWaitMsConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val config = KafkaConfig(props) val kafkaBroker = mock(classOf[KafkaBroker]) when(kafkaBroker.config).thenReturn(config) @@ -800,7 +758,7 @@ class DynamicBrokerConfigTest { assertEquals(500, config.remoteLogManagerConfig.remoteFetchMaxWaitMs) val dynamicRemoteLogConfig = new DynamicRemoteLogConfig(kafkaBroker) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(dynamicRemoteLogConfig) val newProps = new Properties() @@ -826,7 +784,7 @@ class DynamicBrokerConfigTest { @Test def testDynamicRemoteListOffsetsRequestTimeoutMsConfig(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val config = KafkaConfig(props) val kafkaBroker = mock(classOf[KafkaBroker]) when(kafkaBroker.config).thenReturn(config) @@ -835,7 +793,7 @@ class DynamicBrokerConfigTest { 
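// Editor's note: the remote-log thread pool and quota tests in this file all follow the same
// validate-then-update flow. A minimal sketch of that flow, distilled from the hunks in this
// diff and not a verbatim excerpt (the KafkaBroker/RemoteLogManager mocks, config names, and
// helper calls are taken from the tests above; imports are as in the surrounding test file):

  val origProps = TestUtils.createBrokerConfig(0, port = 8181)
  val config = KafkaConfig(origProps)
  val serverMock = mock(classOf[KafkaBroker])
  val remoteLogManager = mock(classOf[RemoteLogManager])
  when(serverMock.config).thenReturn(config)
  when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager))

  config.dynamicConfig.initialize(None)  // single-argument form; the ZK client parameter is gone
  config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock))

  val props = new Properties()
  props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, "6")
  config.dynamicConfig.validate(props, perBrokerConfig = true)  // rejects invalid values with a ConfigException
  config.dynamicConfig.updateDefaultConfig(props)
  verify(remoteLogManager).resizeReaderThreadPool(6)  // the reconfigurable propagates the new pool size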
config.remoteLogManagerConfig.remoteListOffsetsRequestTimeoutMs) val dynamicRemoteLogConfig = new DynamicRemoteLogConfig(kafkaBroker) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(dynamicRemoteLogConfig) val newProps = new Properties() @@ -861,7 +819,7 @@ class DynamicBrokerConfigTest { @Test def testUpdateDynamicRemoteLogManagerConfig(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP, "2") val config = KafkaConfig(origProps) @@ -871,7 +829,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) Mockito.when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) val props = new Properties() @@ -886,7 +844,7 @@ class DynamicBrokerConfigTest { @Test def testRemoteLogManagerCopyQuotaUpdates(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 9092) + val props = TestUtils.createBrokerConfig(0, port = 9092) val config = KafkaConfig.fromProps(props) val serverMock: KafkaBroker = mock(classOf[KafkaBroker]) val remoteLogManager = mock(classOf[RemoteLogManager]) @@ -894,7 +852,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) Mockito.when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND, @@ -917,7 +875,7 @@ class DynamicBrokerConfigTest { @Test def testRemoteLogManagerFetchQuotaUpdates(): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 9092) + val props = TestUtils.createBrokerConfig(0, port = 9092) val config = KafkaConfig.fromProps(props) val serverMock: KafkaBroker = mock(classOf[KafkaBroker]) val remoteLogManager = mock(classOf[RemoteLogManager]) @@ -925,7 +883,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) Mockito.when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND, @@ -952,7 +910,7 @@ class DynamicBrokerConfigTest { val copyQuotaProp = RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP val fetchQuotaProp = RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP - val props = TestUtils.createBrokerConfig(0, null, port = 9092) + val props = TestUtils.createBrokerConfig(0, port = 9092) val config = KafkaConfig.fromProps(props) val serverMock: KafkaBroker = mock(classOf[KafkaBroker]) val remoteLogManager = Mockito.mock(classOf[RemoteLogManager]) @@ -960,7 +918,7 @@ class DynamicBrokerConfigTest { Mockito.when(serverMock.config).thenReturn(config) Mockito.when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) 
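// Editor's note: the DynamicConfigChangeTest diff further down drops the adminZkClient code paths
// and routes every config change through the Admin client instead. A minimal sketch of that
// incremental-alter pattern, using only names that appear in those hunks (tp is a TopicPartition
// from the surrounding test, createAdminClient() is the harness helper shown in the diff, and
// newVal is an arbitrary example value for this sketch):

  val newVal: java.lang.Long = 200000L
  val admin = createAdminClient()
  try {
    val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic())
    val op = new AlterConfigOp(
      new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, newVal.toString), OpType.SET)
    // .all.get surfaces any broker-side validation failure as an ExecutionException
    admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get
  } finally {
    admin.close()
  }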
config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) // Default values @@ -1002,12 +960,12 @@ class DynamicBrokerConfigTest { retentionMs: Long, logLocalRetentionBytes: Long, retentionBytes: Long): Unit = { - val props = TestUtils.createBrokerConfig(0, null, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, retentionMs.toString) props.put(ServerLogConfigs.LOG_RETENTION_BYTES_CONFIG, retentionBytes.toString) val config = KafkaConfig(props) val dynamicLogConfig = new DynamicLogConfig(mock(classOf[LogManager]), mock(classOf[KafkaBroker])) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(dynamicLogConfig) val newProps = new Properties() @@ -1033,13 +991,13 @@ class DynamicBrokerConfigTest { Mockito.when(logManagerMock.reconfigureDefaultLogConfig(ArgumentMatchers.any(classOf[LogConfig]))) .thenAnswer(invocation => currentDefaultLogConfig.set(invocation.getArgument(0))) - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) config.dynamicConfig.addBrokerReconfigurable(new DynamicLogConfig(logManagerMock, serverMock)) } @Test def testDynamicLogConfigHandlesSynonymsCorrectly(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(ServerLogConfigs.LOG_RETENTION_TIME_MINUTES_CONFIG, "1") val ctx = new DynamicLogConfigContext(origProps) assertEquals(TimeUnit.MINUTES.toMillis(1), ctx.config.logRetentionTimeMillis) @@ -1052,7 +1010,7 @@ class DynamicBrokerConfigTest { @Test def testLogRetentionTimeMinutesIsNotDynamicallyReconfigurable(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, null, port = 8181) + val origProps = TestUtils.createBrokerConfig(0, port = 8181) origProps.put(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, "1") val ctx = new DynamicLogConfigContext(origProps) assertEquals(TimeUnit.HOURS.toMillis(1), ctx.config.logRetentionTimeMillis) @@ -1063,9 +1021,24 @@ class DynamicBrokerConfigTest { assertEquals(TimeUnit.HOURS.toMillis(1), ctx.config.logRetentionTimeMillis) assertFalse(ctx.currentDefaultLogConfig.get().originals().containsKey(ServerLogConfigs.LOG_RETENTION_TIME_MINUTES_CONFIG)) } + + @Test + def testAdvertisedListenersIsNotDynamicallyReconfigurable(): Unit = { + val origProps = TestUtils.createBrokerConfig(0, port = 8181) + val ctx = new DynamicLogConfigContext(origProps) + + // update advertised listeners should not work + val props = new Properties() + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "SASL_PLAINTEXT://localhost:8181") + ctx.config.dynamicConfig.updateDefaultConfig(props) + ctx.config.effectiveAdvertisedBrokerListeners.foreach(e => + assertEquals(SecurityProtocol.PLAINTEXT.name, e.listenerName.value) + ) + assertFalse(ctx.currentDefaultLogConfig.get().originals().containsKey(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG)) + } } -class TestDynamicThreadPool() extends BrokerReconfigurable { +class TestDynamicThreadPool extends BrokerReconfigurable { override def reconfigurableConfigs: Set[String] = { DynamicThreadPool.ReconfigurableConfigs diff --git a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala index f827d981e295e..27f746a6b6d61 100644 --- a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala @@ -22,55 +22,47 @@ import kafka.log.UnifiedLog import kafka.log.remote.RemoteLogManager import kafka.utils.TestUtils.random import kafka.utils._ -import kafka.zk.ConfigEntityChangeNotificationZNode import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.admin.AlterConfigOp.OpType -import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, Config, ConfigEntry} +import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, ConfigEntry} import org.apache.kafka.common.config.{ConfigResource, TopicConfig} import org.apache.kafka.common.errors.{InvalidRequestException, UnknownTopicOrPartitionException} import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.quota.ClientQuotaAlteration.Op import org.apache.kafka.common.quota.ClientQuotaEntity.{CLIENT_ID, IP, USER} import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} -import org.apache.kafka.common.record.{CompressionType, RecordVersion} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.coordinator.group.GroupConfig -import org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1 -import org.apache.kafka.server.config.{ConfigType, QuotaConfig, ServerLogConfigs, ZooKeeperInternals} +import org.apache.kafka.server.config.{QuotaConfig, ServerLogConfigs} import org.apache.kafka.storage.internals.log.LogConfig +import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{Test, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource import org.mockito.ArgumentCaptor -import org.mockito.ArgumentMatchers.{any, anyString} +import org.mockito.ArgumentMatchers.any import org.mockito.Mockito._ import java.net.InetAddress -import java.nio.charset.StandardCharsets import java.util import java.util.Collections.{singletonList, singletonMap} import java.util.concurrent.ExecutionException import java.util.{Collections, Properties} -import scala.annotation.nowarn import scala.collection.{Map, Seq} import scala.jdk.CollectionConverters._ @Timeout(100) class DynamicConfigChangeTest extends KafkaServerTestHarness { override def generateConfigs: Seq[KafkaConfig] = { - val cfg = TestUtils.createBrokerConfig(0, zkConnectOrNull) + val cfg = TestUtils.createBrokerConfig(0) List(KafkaConfig.fromProps(cfg)) } @ParameterizedTest @ValueSource(strings = Array("kraft")) def testConfigChange(quorum: String): Unit = { - if (!isKRaftTest()) { - assertTrue(this.servers.head.dynamicConfigHandlers.contains(ConfigType.TOPIC), - "Should contain a ConfigHandler for topics") - } val oldVal: java.lang.Long = 100000L val newVal: java.lang.Long = 200000L val tp = new TopicPartition("test", 0) @@ -82,26 +74,20 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { assertTrue(logOpt.isDefined) assertEquals(oldVal, logOpt.get.config.flushInterval) } - if (isKRaftTest()) { - val admin = createAdminClient() - try { - val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) - val op = new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, newVal.toString), - OpType.SET) - val resource2 = new ConfigResource(ConfigResource.Type.BROKER, "") - val op2 = new AlterConfigOp(new ConfigEntry(ServerLogConfigs.LOG_FLUSH_INTERVAL_MS_CONFIG, 
newVal.toString), - OpType.SET) - admin.incrementalAlterConfigs(Map( - resource -> List(op).asJavaCollection, - resource2 -> List(op2).asJavaCollection, - ).asJava).all.get - } finally { - admin.close() - } - } else { - val newProps = new Properties() - newProps.setProperty(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, newVal.toString) - adminZkClient.changeTopicConfig(tp.topic, newProps) + val admin = createAdminClient() + try { + val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) + val op = new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, newVal.toString), + OpType.SET) + val resource2 = new ConfigResource(ConfigResource.Type.BROKER, "") + val op2 = new AlterConfigOp(new ConfigEntry(ServerLogConfigs.LOG_FLUSH_INTERVAL_MS_CONFIG, newVal.toString), + OpType.SET) + admin.incrementalAlterConfigs(Map( + resource -> List(op).asJavaCollection, + resource2 -> List(op2).asJavaCollection, + ).asJava).all.get + } finally { + admin.close() } TestUtils.retry(10000) { assertEquals(newVal, this.brokers.head.logManager.getLog(tp).get.config.flushInterval) @@ -123,22 +109,15 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } val newSegmentSize = 2000 - if (isKRaftTest()) { - val admin = createAdminClient() - try { - val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) - val op = new AlterConfigOp(new ConfigEntry(TopicConfig.SEGMENT_BYTES_CONFIG, newSegmentSize.toString), - OpType.SET) - admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get - } finally { - admin.close() - } - } else { - val newProps = new Properties() - newProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, newSegmentSize.toString) - adminZkClient.changeTopicConfig(tp.topic, newProps) + val admin = createAdminClient() + try { + val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) + val op = new AlterConfigOp(new ConfigEntry(TopicConfig.SEGMENT_BYTES_CONFIG, newSegmentSize.toString), + OpType.SET) + admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all.get + } finally { + admin.close() } - val log = brokers.head.logManager.getLog(tp).get TestUtils.retry(10000) { assertEquals(newSegmentSize, log.config.segmentSize) @@ -149,35 +128,6 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { assertTrue(log.logSegments.stream.allMatch(_.size > 1000), "Log segment size change not applied") } - @nowarn("cat=deprecation") - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testMessageFormatVersionChange(quorum: String): Unit = { - val tp = new TopicPartition("test", 0) - val logProps = new Properties() - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.10.2") - createTopic(tp.topic, 1, 1, logProps) - val server = servers.head - TestUtils.waitUntilTrue(() => server.logManager.getLog(tp).isDefined, - "Topic metadata propagation failed") - val log = server.logManager.getLog(tp).get - // message format version should always be 3.0 if inter-broker protocol is 3.0 or higher - assertEquals(IBP_3_0_IV1, log.config.messageFormatVersion) - assertEquals(RecordVersion.V2, log.config.recordVersion) - - val compressionType = CompressionType.LZ4 - logProps.put(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG, "0.11.0") - // set compression type so that we can detect when the config change has propagated - logProps.put(TopicConfig.COMPRESSION_TYPE_CONFIG, compressionType.name) - adminZkClient.changeTopicConfig(tp.topic, logProps) - TestUtils.waitUntilTrue(() => - 
server.logManager.getLog(tp).get.config.compression.isPresent && - server.logManager.getLog(tp).get.config.compression.get.`type` == compressionType, - "Topic config change propagation failed") - assertEquals(IBP_3_0_IV1, log.config.messageFormatVersion) - assertEquals(RecordVersion.V2, log.config.recordVersion) - } - private def testQuotaConfigChange(entity: ClientQuotaEntity, user: KafkaPrincipal, clientId: String): Unit = { @@ -279,70 +229,20 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testQuotaInitialization(quorum: String): Unit = { - val server = servers.head - val clientIdProps = new Properties() - server.shutdown() - clientIdProps.put(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, "1000") - clientIdProps.put(QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, "2000") - val userProps = new Properties() - userProps.put(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, "10000") - userProps.put(QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, "20000") - val userClientIdProps = new Properties() - userClientIdProps.put(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, "100000") - userClientIdProps.put(QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, "200000") - - adminZkClient.changeClientIdConfig("overriddenClientId", clientIdProps) - adminZkClient.changeUserOrUserClientIdConfig("overriddenUser", userProps) - adminZkClient.changeUserOrUserClientIdConfig("ANONYMOUS/clients/overriddenUserClientId", userClientIdProps) - - // Remove config change znodes to force quota initialization only through loading of user/client quotas - zkClient.getChildren(ConfigEntityChangeNotificationZNode.path).foreach { p => zkClient.deletePath(ConfigEntityChangeNotificationZNode.path + "/" + p) } - server.startup() - val quotaManagers = server.dataPlaneRequestProcessor.quotas - - assertEquals(Quota.upperBound(1000), quotaManagers.produce.quota("someuser", "overriddenClientId")) - assertEquals(Quota.upperBound(2000), quotaManagers.fetch.quota("someuser", "overriddenClientId")) - assertEquals(Quota.upperBound(10000), quotaManagers.produce.quota("overriddenUser", "someclientId")) - assertEquals(Quota.upperBound(20000), quotaManagers.fetch.quota("overriddenUser", "someclientId")) - assertEquals(Quota.upperBound(100000), quotaManagers.produce.quota("ANONYMOUS", "overriddenUserClientId")) - assertEquals(Quota.upperBound(200000), quotaManagers.fetch.quota("ANONYMOUS", "overriddenUserClientId")) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testIpQuotaInitialization(quorum: String): Unit = { val broker = brokers.head - if (isKRaftTest()) { - val admin = createAdminClient() - try { - val alterations = util.Arrays.asList( - new ClientQuotaAlteration(new ClientQuotaEntity(singletonMap(IP, null)), - singletonList(new Op(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, 20))), - new ClientQuotaAlteration(new ClientQuotaEntity(singletonMap(IP, "1.2.3.4")), - singletonList(new Op(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, 10)))) - admin.alterClientQuotas(alterations).all().get() - } finally { - admin.close() - } - } else { - broker.shutdown() - - val ipDefaultProps = new Properties() - ipDefaultProps.put(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, "20") - adminZkClient.changeIpConfig(ZooKeeperInternals.DEFAULT_STRING, ipDefaultProps) - - val ipOverrideProps = new Properties() - 
ipOverrideProps.put(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, "10") - adminZkClient.changeIpConfig("1.2.3.4", ipOverrideProps) - - // Remove config change znodes to force quota initialization only through loading of ip quotas - zkClient.getChildren(ConfigEntityChangeNotificationZNode.path).foreach { p => - zkClient.deletePath(ConfigEntityChangeNotificationZNode.path + "/" + p) - } - broker.startup() + val admin = createAdminClient() + try { + val alterations = util.Arrays.asList( + new ClientQuotaAlteration(new ClientQuotaEntity(singletonMap(IP, null)), + singletonList(new Op(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, 20))), + new ClientQuotaAlteration(new ClientQuotaEntity(singletonMap(IP, "1.2.3.4")), + singletonList(new Op(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, 10)))) + admin.alterClientQuotas(alterations).all().get() + } finally { + admin.close() } TestUtils.retry(10000) { val connectionQuotas = broker.socketServer.connectionQuotas @@ -393,15 +293,6 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testConfigChangeOnNonExistingTopic(quorum: String): Unit = { - val topic = tempTopic() - val logProps = new Properties() - logProps.put(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, 10000: java.lang.Integer) - assertThrows(classOf[UnknownTopicOrPartitionException], () => adminZkClient.changeTopicConfig(topic, logProps)) - } - private def tempTopic() : String = "testTopic" + random.nextInt(1000000) @ParameterizedTest @@ -422,39 +313,6 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testProcessNotification(quorum: String): Unit = { - val props = new Properties() - props.put("a.b", "10") - - // Create a mock ConfigHandler to record config changes it is asked to process - val handler: ConfigHandler = mock(classOf[ConfigHandler]) - - val configManager = new ZkConfigManager(zkClient, Map(ConfigType.TOPIC -> handler)) - // Notifications created using the old TopicConfigManager are ignored. - configManager.ConfigChangedNotificationHandler.processNotification("not json".getBytes(StandardCharsets.UTF_8)) - - // Incorrect Map. No version - var jsonMap: Map[String, Any] = Map("v" -> 1, "x" -> 2) - - assertThrows(classOf[Throwable], () => configManager.ConfigChangedNotificationHandler.processNotification(Json.encodeAsBytes(jsonMap.asJava))) - // Version is provided. 
EntityType is incorrect - jsonMap = Map("version" -> 1, "entity_type" -> "garbage", "entity_name" -> "x") - assertThrows(classOf[Throwable], () => configManager.ConfigChangedNotificationHandler.processNotification(Json.encodeAsBytes(jsonMap.asJava))) - - // EntityName isn't provided - jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.TOPIC) - assertThrows(classOf[Throwable], () => configManager.ConfigChangedNotificationHandler.processNotification(Json.encodeAsBytes(jsonMap.asJava))) - - // Everything is provided - jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.TOPIC, "entity_name" -> "x") - configManager.ConfigChangedNotificationHandler.processNotification(Json.encodeAsBytes(jsonMap.asJava)) - - // Verify that processConfigChanges was only called once - verify(handler).processConfigChanges(anyString, any[Properties]) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testIncrementalAlterDefaultTopicConfig(quorum: String): Unit = { @@ -463,56 +321,30 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { val resource = new ConfigResource(ConfigResource.Type.TOPIC, "") val op = new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, "200000"), OpType.SET) val future = admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all - TestUtils.assertFutureExceptionTypeEquals(future, classOf[InvalidRequestException]) + assertFutureThrows(future, classOf[InvalidRequestException]) } finally { admin.close() } } - @nowarn("cat=deprecation") - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testAlterDefaultTopicConfig(quorum: String): Unit = { + private def setBrokerConfigs(brokerId: String, newValue: Long): Unit = alterBrokerConfigs(brokerId, newValue, OpType.SET) + private def deleteBrokerConfigs(brokerId: String): Unit = alterBrokerConfigs(brokerId, 0, OpType.DELETE) + private def alterBrokerConfigs(brokerId: String, newValue: Long, op: OpType): Unit = { val admin = createAdminClient() try { - val resource = new ConfigResource(ConfigResource.Type.TOPIC, "") - val config = new Config(Collections.singleton(new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, "200000"))) - val future = admin.alterConfigs(Map(resource -> config).asJava).all - TestUtils.assertFutureExceptionTypeEquals(future, classOf[InvalidRequestException]) + val resource = new ConfigResource(ConfigResource.Type.BROKER, brokerId) + val configOp = new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString), op) + val configOp2 = new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString), op) + val configOp3 = new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, newValue.toString), op) + val configOps = List(configOp, configOp2, configOp3).asJavaCollection + admin.incrementalAlterConfigs(Map( + resource -> configOps, + ).asJava).all.get } finally { admin.close() } } - private def setBrokerConfigs(brokerId: String, newValue: Long): Unit = alterBrokerConfigs(brokerId, newValue, OpType.SET) - private def deleteBrokerConfigs(brokerId: String): Unit = alterBrokerConfigs(brokerId, 0, OpType.DELETE) - private def alterBrokerConfigs(brokerId: String, newValue: Long, op: OpType): Unit = { - if (isKRaftTest()) { - val admin = createAdminClient() - try { - val resource = new ConfigResource(ConfigResource.Type.BROKER, brokerId) - val configOp = new AlterConfigOp(new 
ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString), op) - val configOp2 = new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString), op) - val configOp3 = new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, newValue.toString), op) - val configOps = List(configOp, configOp2, configOp3).asJavaCollection - admin.incrementalAlterConfigs(Map( - resource -> configOps, - ).asJava).all.get - } finally { - admin.close() - } - } else { - val newProps = new Properties() - if (op == OpType.SET) { - newProps.put(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString) - newProps.put(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, newValue.toString) - newProps.put(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, newValue.toString) - } - val brokerIdOption = if (brokerId != "") Option(brokerId.toInt) else None - adminZkClient.changeBrokerConfig(brokerIdOption, newProps) - } - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testBrokerIdConfigChangeAndDelete(quorum: String): Unit = { @@ -640,7 +472,7 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { val resource = new ConfigResource(ConfigResource.Type.GROUP, "") val op = new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "200000"), OpType.SET) val future = admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all - TestUtils.assertFutureExceptionTypeEquals(future, classOf[InvalidRequestException]) + assertFutureThrows(future, classOf[InvalidRequestException]) } finally { admin.close() } @@ -665,7 +497,7 @@ class DynamicConfigChangeUnitTest { @Test def shouldParseReplicationQuotaProperties(): Unit = { - val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null, null) + val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null) val props: Properties = new Properties() //Given @@ -678,7 +510,7 @@ class DynamicConfigChangeUnitTest { @Test def shouldParseWildcardReplicationQuotaProperties(): Unit = { - val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null, null) + val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null) val props: Properties = new Properties() //Given @@ -698,7 +530,7 @@ class DynamicConfigChangeUnitTest { CoreUtils.propsWith(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, value), 102, QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) } - val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null, null) + val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null) assertEquals(ReplicationQuotaManager.ALL_REPLICAS.asScala.map(_.toInt).toSeq, parse(configHandler, "* ")) assertEquals(Seq(), parse(configHandler, " ")) assertEquals(Seq(6), parse(configHandler, "6:102")) @@ -708,7 +540,7 @@ class DynamicConfigChangeUnitTest { @Test def shouldParseReplicationQuotaReset(): Unit = { - val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null, null) + val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null) val props: Properties = new Properties() //Given @@ -755,7 +587,7 @@ class DynamicConfigChangeUnitTest { doNothing().when(rlm).onLeadershipChange(leaderPartitionsArg.capture(), followerPartitionsArg.capture(), any()) val isRemoteLogEnabledBeforeUpdate = false - val configHandler: 
TopicConfigHandler = new TopicConfigHandler(replicaManager, null, null, None) + val configHandler: TopicConfigHandler = new TopicConfigHandler(replicaManager, null, null) configHandler.maybeUpdateRemoteLogComponents(topic, Seq(log0, log1), isRemoteLogEnabledBeforeUpdate, false) assertEquals(Collections.singleton(partition0), leaderPartitionsArg.getValue) assertEquals(Collections.singleton(partition1), followerPartitionsArg.getValue) @@ -779,7 +611,7 @@ class DynamicConfigChangeUnitTest { when(partition.isLeader).thenReturn(true) val isRemoteLogEnabledBeforeUpdate = true - val configHandler: TopicConfigHandler = new TopicConfigHandler(replicaManager, null, null, None) + val configHandler: TopicConfigHandler = new TopicConfigHandler(replicaManager, null, null) configHandler.maybeUpdateRemoteLogComponents(topic, Seq(log0), isRemoteLogEnabledBeforeUpdate, false) verify(rlm, never()).onLeadershipChange(any(), any(), any()) } diff --git a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala index f2be521fca4d7..85ae9121843a4 100755 --- a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala @@ -44,7 +44,7 @@ import scala.jdk.CollectionConverters._ class EdgeCaseRequestTest extends KafkaServerTestHarness { def generateConfigs = { - val props = TestUtils.createBrokerConfig(1, zkConnectOrNull) + val props = TestUtils.createBrokerConfig(1) props.setProperty(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false") List(KafkaConfig.fromProps(props)) } @@ -129,7 +129,7 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { val version = ApiKeys.PRODUCE.latestVersion: Short val (serializedBytes, responseHeaderVersion) = { val headerBytes = requestHeaderBytes(ApiKeys.PRODUCE.id, version, "", correlationId) - val request = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + val request = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(topicPartition.topic()).setPartitionData(Collections.singletonList( diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestDownConversionConfigTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestDownConversionConfigTest.scala deleted file mode 100644 index 813ee7ae4aafb..0000000000000 --- a/core/src/test/scala/unit/kafka/server/FetchRequestDownConversionConfigTest.scala +++ /dev/null @@ -1,272 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.server - -import java.util -import java.util.{Optional, Properties} -import kafka.utils.TestUtils -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} -import org.apache.kafka.common.config.TopicConfig -import org.apache.kafka.common.message.FetchResponseData -import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} -import org.apache.kafka.common.serialization.StringSerializer -import org.apache.kafka.network.metrics.RequestMetrics -import org.apache.kafka.server.config.ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG -import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -import scala.jdk.CollectionConverters._ - -class FetchRequestDownConversionConfigTest extends BaseRequestTest { - private var producer: KafkaProducer[String, String] = _ - override def brokerCount: Int = 2 - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - super.setUp(testInfo) - initProducer() - } - - @AfterEach - override def tearDown(): Unit = { - if (producer != null) - producer.close() - super.tearDown() - } - - override protected def brokerPropertyOverrides(properties: Properties): Unit = { - super.brokerPropertyOverrides(properties) - properties.put(LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, "false") - } - - private def initProducer(): Unit = { - producer = TestUtils.createProducer(bootstrapServers(), - keySerializer = new StringSerializer, valueSerializer = new StringSerializer) - } - - private def createTopics(numTopics: Int, numPartitions: Int, - configs: Map[String, String] = Map.empty, topicSuffixStart: Int = 0): Map[TopicPartition, Int] = { - val topics = (0 until numTopics).map(t => s"topic${t + topicSuffixStart}") - val topicConfig = new Properties - topicConfig.setProperty(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, 1.toString) - configs.foreach { case (k, v) => topicConfig.setProperty(k, v) } - topics.flatMap { topic => - val partitionToLeader = createTopic( - topic, - numPartitions = numPartitions, - replicationFactor = 2, - topicConfig = topicConfig - ) - partitionToLeader.map { case (partition, leader) => new TopicPartition(topic, partition) -> leader } - }.toMap - } - - private def createPartitionMap(maxPartitionBytes: Int, topicPartitions: Seq[TopicPartition], - topicIds: Map[String, Uuid], - offsetMap: Map[TopicPartition, Long] = Map.empty): util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] = { - val partitionMap = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] - topicPartitions.foreach { tp => - partitionMap.put(tp, new FetchRequest.PartitionData(topicIds.getOrElse(tp.topic, Uuid.ZERO_UUID), offsetMap.getOrElse(tp, 0), 0L, - maxPartitionBytes, Optional.empty())) - } - partitionMap - } - - private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse = { - connectAndReceive[FetchResponse](request, destination = brokerSocketServer(leaderId)) - } - - /** - * Tests that fetch request that require down-conversion returns with an error response when down-conversion is disabled on broker. 
- */ - @Test - def testV1FetchWithDownConversionDisabled(): Unit = { - val topicMap = createTopics(numTopics = 5, numPartitions = 1) - val topicPartitions = topicMap.keySet.toSeq - val topicIds = servers.head.kafkaController.controllerContext.topicIds - val topicNames = topicIds.map(_.swap) - topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get()) - val fetchRequest = FetchRequest.Builder.forConsumer(1, Int.MaxValue, 0, createPartitionMap(1024, - topicPartitions, topicIds.toMap)).build(1) - val fetchResponse = sendFetchRequest(topicMap.head._2, fetchRequest) - val fetchResponseData = fetchResponse.responseData(topicNames.asJava, 1) - topicPartitions.foreach(tp => assertEquals(Errors.UNSUPPORTED_VERSION, Errors.forCode(fetchResponseData.get(tp).errorCode))) - } - - /** - * Tests that "message.downconversion.enable" has no effect when down-conversion is not required. - */ - @Test - def testLatestFetchWithDownConversionDisabled(): Unit = { - val topicMap = createTopics(numTopics = 5, numPartitions = 1) - val topicPartitions = topicMap.keySet.toSeq - val topicIds = servers.head.kafkaController.controllerContext.topicIds - val topicNames = topicIds.map(_.swap) - topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get()) - val fetchRequest = FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, Int.MaxValue, 0, createPartitionMap(1024, - topicPartitions, topicIds.toMap)).build() - val fetchResponse = sendFetchRequest(topicMap.head._2, fetchRequest) - val fetchResponseData = fetchResponse.responseData(topicNames.asJava, ApiKeys.FETCH.latestVersion) - topicPartitions.foreach(tp => assertEquals(Errors.NONE, Errors.forCode(fetchResponseData.get(tp).errorCode))) - } - - /** - * Tests that "message.downconversion.enable" has no effect when down-conversion is not required on last version before topic IDs. - */ - @Test - def testV12WithDownConversionDisabled(): Unit = { - val topicMap = createTopics(numTopics = 5, numPartitions = 1) - val topicPartitions = topicMap.keySet.toSeq - val topicIds = servers.head.kafkaController.controllerContext.topicIds - val topicNames = topicIds.map(_.swap) - topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get()) - val fetchRequest = FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, Int.MaxValue, 0, createPartitionMap(1024, - topicPartitions, topicIds.toMap)).build(12) - val fetchResponse = sendFetchRequest(topicMap.head._2, fetchRequest) - val fetchResponseData = fetchResponse.responseData(topicNames.asJava, 12) - topicPartitions.foreach(tp => assertEquals(Errors.NONE, Errors.forCode(fetchResponseData.get(tp).errorCode))) - } - - /** - * Tests that "message.downconversion.enable" can be set at topic level, and its configuration is obeyed for client - * fetch requests. - */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testV1FetchFromConsumer(quorum: String): Unit = { - testV1Fetch(isFollowerFetch = false) - } - - /** - * Tests that "message.downconversion.enable" has no effect on fetch requests from replicas. 
- */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testV1FetchFromReplica(quorum: String): Unit = { - testV1Fetch(isFollowerFetch = true) - } - - def testV1Fetch(isFollowerFetch: Boolean): Unit = { - val fetchRequest = "request=Fetch" - val fetchTemporaryMemoryBytesMetricName = s"${RequestMetrics.TEMPORARY_MEMORY_BYTES},$fetchRequest" - val fetchMessageConversionsTimeMsMetricName = s"${RequestMetrics.MESSAGE_CONVERSIONS_TIME_MS},$fetchRequest" - val initialFetchMessageConversionsPerSec = TestUtils.metersCount(BrokerTopicMetrics.FETCH_MESSAGE_CONVERSIONS_PER_SEC) - val initialFetchMessageConversionsTimeMs = TestUtils.metersCount(fetchMessageConversionsTimeMsMetricName) - val initialFetchTemporaryMemoryBytes = TestUtils.metersCount(fetchTemporaryMemoryBytesMetricName) - val topicWithDownConversionEnabled = "foo" - val topicWithDownConversionDisabled = "bar" - val replicaIds = brokers.map(_.config.brokerId) - val leaderId = replicaIds.head - val followerId = replicaIds.last - - val admin = createAdminClient() - - val topicWithDownConversionDisabledId = TestUtils.createTopicWithAdminRaw( - admin, - topicWithDownConversionDisabled, - replicaAssignment = Map(0 -> replicaIds) - ) - - val topicConfig = new Properties - topicConfig.put(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, "true") - val topicWithDownConversionEnabledId = TestUtils.createTopicWithAdminRaw( - admin, - topicWithDownConversionEnabled, - replicaAssignment = Map(0 -> replicaIds), - topicConfig = topicConfig - ) - - val partitionWithDownConversionEnabled = new TopicPartition(topicWithDownConversionEnabled, 0) - val partitionWithDownConversionDisabled = new TopicPartition(topicWithDownConversionDisabled, 0) - - val allTopicPartitions = Seq( - partitionWithDownConversionEnabled, - partitionWithDownConversionDisabled - ) - - allTopicPartitions.foreach { tp => - producer.send(new ProducerRecord(tp.topic, "key", "value")).get() - } - - val topicIdMap = Map( - topicWithDownConversionEnabled -> topicWithDownConversionEnabledId, - topicWithDownConversionDisabled -> topicWithDownConversionDisabledId - ) - - val fetchResponseData = sendFetch( - leaderId, - allTopicPartitions, - topicIdMap, - fetchVersion = 1, - replicaIdOpt = if (isFollowerFetch) Some(followerId) else None - ) - - def error(tp: TopicPartition): Errors = { - Errors.forCode(fetchResponseData.get(tp).errorCode) - } - - def verifyMetrics(): Unit = { - TestUtils.waitUntilTrue(() => TestUtils.metersCount(BrokerTopicMetrics.FETCH_MESSAGE_CONVERSIONS_PER_SEC) > initialFetchMessageConversionsPerSec, - s"The `FetchMessageConversionsPerSec` metric count is not incremented after 5 seconds. " + - s"init: $initialFetchMessageConversionsPerSec final: ${TestUtils.metersCount(BrokerTopicMetrics.FETCH_MESSAGE_CONVERSIONS_PER_SEC)}", 5000) - - TestUtils.waitUntilTrue(() => TestUtils.metersCount(fetchMessageConversionsTimeMsMetricName) > initialFetchMessageConversionsTimeMs, - s"The `MessageConversionsTimeMs` in fetch request metric count is not incremented after 5 seconds. " + - s"init: $initialFetchMessageConversionsTimeMs final: ${TestUtils.metersCount(fetchMessageConversionsTimeMsMetricName)}", 5000) - - TestUtils.waitUntilTrue(() => TestUtils.metersCount(fetchTemporaryMemoryBytesMetricName) > initialFetchTemporaryMemoryBytes, - s"The `TemporaryMemoryBytes` in fetch request metric count is not incremented after 5 seconds. 
" + - s"init: $initialFetchTemporaryMemoryBytes final: ${TestUtils.metersCount(fetchTemporaryMemoryBytesMetricName)}", 5000) - } - - assertEquals(Errors.NONE, error(partitionWithDownConversionEnabled)) - if (isFollowerFetch) { - assertEquals(Errors.NONE, error(partitionWithDownConversionDisabled)) - } else { - assertEquals(Errors.UNSUPPORTED_VERSION, error(partitionWithDownConversionDisabled)) - } - - verifyMetrics() - } - - private def sendFetch( - leaderId: Int, - partitions: Seq[TopicPartition], - topicIdMap: Map[String, Uuid], - fetchVersion: Short, - replicaIdOpt: Option[Int] - ): util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData] = { - val topicNameMap = topicIdMap.map(_.swap) - val partitionMap = createPartitionMap(1024, partitions, topicIdMap) - - val fetchRequest = replicaIdOpt.map { replicaId => - FetchRequest.Builder.forReplica(fetchVersion, replicaId, -1, Int.MaxValue, 0, partitionMap) - .build(fetchVersion) - }.getOrElse { - FetchRequest.Builder.forConsumer(fetchVersion, Int.MaxValue, 0, partitionMap) - .build(fetchVersion) - } - - val fetchResponse = sendFetchRequest(leaderId, fetchRequest) - fetchResponse.responseData(topicNameMap.asJava, fetchVersion) - } -} diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala index 3357306d1c4fd..a446bc9036098 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala @@ -116,11 +116,12 @@ class FetchRequestMaxBytesTest extends BaseRequestTest { private def expectNextRecords(expected: IndexedSeq[Array[Byte]], fetchOffset: Long): Unit = { + val requestVersion = 4: Short val response = sendFetchRequest(0, - FetchRequest.Builder.forConsumer(3, Int.MaxValue, 0, + FetchRequest.Builder.forConsumer(requestVersion, Int.MaxValue, 0, Map(testTopicPartition -> - new PartitionData(Uuid.ZERO_UUID, fetchOffset, 0, Integer.MAX_VALUE, Optional.empty())).asJava).build(3)) - val records = FetchResponse.recordsOrFail(response.responseData(getTopicNames().asJava, 3).get(testTopicPartition)).records() + new PartitionData(Uuid.ZERO_UUID, fetchOffset, 0, Integer.MAX_VALUE, Optional.empty())).asJava).build(requestVersion)) + val records = FetchResponse.recordsOrFail(response.responseData(getTopicNames().asJava, requestVersion).get(testTopicPartition)).records() assertNotNull(records) val recordsList = records.asScala.toList assertEquals(expected.size, recordsList.size) diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala index a81a0d2c46eee..f96b2ceca3159 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala @@ -20,16 +20,15 @@ import kafka.utils.TestUtils import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.record.{CompressionType, RecordBatch} +import org.apache.kafka.common.record.CompressionType import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata} -import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer} +import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.{IsolationLevel, TopicIdPartition, TopicPartition, Uuid} import 
org.apache.kafka.server.record.BrokerCompressionType import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import java.io.DataInputStream import java.util import java.util.Optional import scala.collection.Seq @@ -363,156 +362,6 @@ class FetchRequestTest extends BaseFetchRequestTest { assertResponseErrorForEpoch(Errors.UNKNOWN_LEADER_EPOCH, 2, Optional.of(leaderEpoch + 1)) } - /** - * Tests that down-conversions don't leak memory. Large down conversions are triggered - * in the server. The client closes its connection after reading partial data when the - * channel is muted in the server. If buffers are not released this will result in OOM. - */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDownConversionWithConnectionFailure(quorum: String): Unit = { - val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head - val topicIds = getTopicIds().asJava - val topicNames = topicIds.asScala.map(_.swap).asJava - - val msgValueLen = 100 * 1000 - val batchSize = 4 * msgValueLen - val producer = TestUtils.createProducer( - bootstrapServers(), - lingerMs = Int.MaxValue, - deliveryTimeoutMs = Int.MaxValue, - batchSize = batchSize, - keySerializer = new StringSerializer, - valueSerializer = new ByteArraySerializer) - val bytes = new Array[Byte](msgValueLen) - val futures = try { - (0 to 1000).map { _ => - producer.send(new ProducerRecord(topicPartition.topic, topicPartition.partition, "key", bytes)) - } - } finally { - producer.close() - } - // Check futures to ensure sends succeeded, but do this after close since the last - // batch is not complete, but sent when the producer is closed - futures.foreach(_.get) - - def fetch(version: Short, maxPartitionBytes: Int, closeAfterPartialResponse: Boolean): Option[FetchResponse] = { - val fetchRequest = FetchRequest.Builder.forConsumer(version, Int.MaxValue, 0, createPartitionMap(maxPartitionBytes, - Seq(topicPartition))).build(version) - - val socket = connect(brokerSocketServer(leaderId)) - try { - send(fetchRequest, socket) - if (closeAfterPartialResponse) { - // read some data to ensure broker has muted this channel and then close socket - val size = new DataInputStream(socket.getInputStream).readInt() - // Check that we have received almost `maxPartitionBytes` (minus a tolerance) since in - // the case of OOM, the size will be significantly smaller. We can't check for exactly - // maxPartitionBytes since we use approx message sizes that include only the message value. 
- assertTrue(size > maxPartitionBytes - batchSize, - s"Fetch size too small $size, broker may have run out of memory") - None - } else { - Some(receive[FetchResponse](socket, ApiKeys.FETCH, version)) - } - } finally { - socket.close() - } - } - - val version = 1.toShort - (0 to 15).foreach(_ => fetch(version, maxPartitionBytes = msgValueLen * 1000, closeAfterPartialResponse = true)) - - val response = fetch(version, maxPartitionBytes = batchSize, closeAfterPartialResponse = false) - val fetchResponse = response.getOrElse(throw new IllegalStateException("No fetch response")) - val partitionData = fetchResponse.responseData(topicNames, version).get(topicPartition) - assertEquals(Errors.NONE.code, partitionData.errorCode) - val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala.toBuffer - assertEquals(3, batches.size) // size is 3 (not 4) since maxPartitionBytes=msgValueSize*4, excluding key and headers - } - - /** - * Ensure that we respect the fetch offset when returning records that were converted from an uncompressed v2 - * record batch to multiple v0/v1 record batches with size 1. If the fetch offset points to inside the record batch, - * some records have to be dropped during the conversion. - */ - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testDownConversionFromBatchedToUnbatchedRespectsOffset(quorum: String): Unit = { - // Increase linger so that we have control over the batches created - producer = TestUtils.createProducer(bootstrapServers(), - retries = 5, - keySerializer = new StringSerializer, - valueSerializer = new StringSerializer, - lingerMs = 30 * 1000, - deliveryTimeoutMs = 60 * 1000) - - val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head - val topic = topicPartition.topic - val topicIds = getTopicIds().asJava - val topicNames = topicIds.asScala.map(_.swap).asJava - - val firstBatchFutures = (0 until 10).map(i => producer.send(new ProducerRecord(topic, s"key-$i", s"value-$i"))) - producer.flush() - val secondBatchFutures = (10 until 25).map(i => producer.send(new ProducerRecord(topic, s"key-$i", s"value-$i"))) - producer.flush() - - firstBatchFutures.foreach(_.get) - secondBatchFutures.foreach(_.get) - - def check(fetchOffset: Long, requestVersion: Short, expectedOffset: Long, expectedNumBatches: Int, expectedMagic: Byte): Unit = { - var batchesReceived = 0 - var currentFetchOffset = fetchOffset - var currentExpectedOffset = expectedOffset - - // With KIP-283, we might not receive all batches in a single fetch request so loop through till we have consumed - // all batches we are interested in. 
- while (batchesReceived < expectedNumBatches) { - val fetchRequest = FetchRequest.Builder.forConsumer(requestVersion, Int.MaxValue, 0, createPartitionMap(Int.MaxValue, - Seq(topicPartition), Map(topicPartition -> currentFetchOffset))).build(requestVersion) - val fetchResponse = sendFetchRequest(leaderId, fetchRequest) - - // validate response - val partitionData = fetchResponse.responseData(topicNames, requestVersion).get(topicPartition) - assertEquals(Errors.NONE.code, partitionData.errorCode) - assertTrue(partitionData.highWatermark > 0) - val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala.toBuffer - val batch = batches.head - assertEquals(expectedMagic, batch.magic) - assertEquals(currentExpectedOffset, batch.baseOffset) - - currentFetchOffset = batches.last.lastOffset + 1 - currentExpectedOffset += (batches.last.lastOffset - batches.head.baseOffset + 1) - batchesReceived += batches.size - } - - assertEquals(expectedNumBatches, batchesReceived) - } - - // down conversion to message format 0, batches of 1 message are returned so we receive the exact offset we requested - check(fetchOffset = 3, expectedOffset = 3, requestVersion = 1, expectedNumBatches = 22, - expectedMagic = RecordBatch.MAGIC_VALUE_V0) - check(fetchOffset = 15, expectedOffset = 15, requestVersion = 1, expectedNumBatches = 10, - expectedMagic = RecordBatch.MAGIC_VALUE_V0) - - // down conversion to message format 1, batches of 1 message are returned so we receive the exact offset we requested - check(fetchOffset = 3, expectedOffset = 3, requestVersion = 3, expectedNumBatches = 22, - expectedMagic = RecordBatch.MAGIC_VALUE_V1) - check(fetchOffset = 15, expectedOffset = 15, requestVersion = 3, expectedNumBatches = 10, - expectedMagic = RecordBatch.MAGIC_VALUE_V1) - - // no down conversion, we receive a single batch so the received offset won't necessarily be the same - check(fetchOffset = 3, expectedOffset = 0, requestVersion = 4, expectedNumBatches = 2, - expectedMagic = RecordBatch.MAGIC_VALUE_V2) - check(fetchOffset = 15, expectedOffset = 10, requestVersion = 4, expectedNumBatches = 1, - expectedMagic = RecordBatch.MAGIC_VALUE_V2) - - // no down conversion, we receive a single batch and the exact offset we requested because it happens to be the - // offset of the first record in the batch - check(fetchOffset = 10, expectedOffset = 10, requestVersion = 4, expectedNumBatches = 1, - expectedMagic = RecordBatch.MAGIC_VALUE_V2) - } - /** * Test that when an incremental fetch session contains partitions with an error, * those partitions are returned in all incremental fetch requests. @@ -654,7 +503,7 @@ class FetchRequestTest extends BaseFetchRequestTest { val res0 = sendFetchRequest(leaderId, req0) val data0 = res0.responseData(topicNames, 9).get(topicPartition) - assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data0.errorCode) + assertEquals(Errors.NONE.code, data0.errorCode) // fetch request with version 10: works fine! val req1= new FetchRequest.Builder(0, 10, -1, -1, Int.MaxValue, 0, @@ -702,63 +551,34 @@ class FetchRequestTest extends BaseFetchRequestTest { "key3", "value3")).get producer2.close() - // fetch request with fetch version v1 (magic 0): - // gzip compressed record is returned with down-conversion. - // zstd compressed record raises UNSUPPORTED_COMPRESSION_TYPE error. 
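/*
 * [Editor's note: illustrative sketch only, not part of this patch. It condenses the behaviour
 * change in the surrounding hunk, reusing the FetchRequest.Builder, createPartitionMap,
 * sendFetchRequest and records helpers that FetchRequestTest/BaseFetchRequestTest already define
 * in this diff. The deleted comments above describe the old expectation: fetch v1/v3 forced a
 * down-conversion path that returned gzip data but rejected zstd with UNSUPPORTED_COMPRESSION_TYPE.
 * The replacement code below fetches at v4, v10 and the latest version and expects plain success,
 * roughly as in this helper (the parameters stand in for the locals of the rewritten test):]
 */
def assertZstdFetchSucceeds(version: Short, leaderId: Int, topicPartition: TopicPartition,
                            topicNames: java.util.Map[Uuid, String]): Unit = {
  val request = new FetchRequest.Builder(0, version, -1, -1, Int.MaxValue, 0,
    createPartitionMap(300, Seq(topicPartition), Map.empty))
    .setMaxBytes(800)
    .build()
  val data = sendFetchRequest(leaderId, request)
    .responseData(topicNames, version)
    .get(topicPartition)
  assertEquals(Errors.NONE.code, data.errorCode) // previously UNSUPPORTED_COMPRESSION_TYPE on v1/v3
  assertEquals(3, records(data).size)            // all three produced records are returned unconverted
}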
- val req0 = new FetchRequest.Builder(0, 1, -1, -1, Int.MaxValue, 0, + // fetch request with version 4: even though zstd is officially only supported from v10, this actually succeeds + // since the server validation is only active when zstd is configured via a topic config, the server doesn't + // check the record batches and hence has no mechanism to detect the case where the producer sent record batches + // compressed with zstd and the topic config for compression is the default + val req0 = new FetchRequest.Builder(0, 4, -1, -1, Int.MaxValue, 0, createPartitionMap(300, Seq(topicPartition), Map.empty)) - .setMaxBytes(800) - .build() - + .setMaxBytes(800).build() val res0 = sendFetchRequest(leaderId, req0) - val data0 = res0.responseData(topicNames, 1).get(topicPartition) + val data0 = res0.responseData(topicNames, 10).get(topicPartition) assertEquals(Errors.NONE.code, data0.errorCode) - assertEquals(1, records(data0).size) + assertEquals(3, records(data0).size) - val req1 = new FetchRequest.Builder(0, 1, -1, -1, Int.MaxValue, 0, - createPartitionMap(300, Seq(topicPartition), Map(topicPartition -> 1L))) + // fetch request with version 10: works fine! + val req1 = new FetchRequest.Builder(0, 10, -1, -1, Int.MaxValue, 0, + createPartitionMap(300, Seq(topicPartition), Map.empty)) .setMaxBytes(800).build() - val res1 = sendFetchRequest(leaderId, req1) - val data1 = res1.responseData(topicNames, 1).get(topicPartition) - assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data1.errorCode) + val data1 = res1.responseData(topicNames, 10).get(topicPartition) + assertEquals(Errors.NONE.code, data1.errorCode) + assertEquals(3, records(data1).size) - // fetch request with fetch version v3 (magic 1): - // gzip compressed record is returned with down-conversion. - // zstd compressed record raises UNSUPPORTED_COMPRESSION_TYPE error. - val req2 = new FetchRequest.Builder(2, 3, -1, -1, Int.MaxValue, 0, + val req2 = new FetchRequest.Builder(0, ApiKeys.FETCH.latestVersion(), -1, -1, Int.MaxValue, 0, createPartitionMap(300, Seq(topicPartition), Map.empty)) .setMaxBytes(800).build() - val res2 = sendFetchRequest(leaderId, req2) - val data2 = res2.responseData(topicNames, 3).get(topicPartition) + val data2 = res2.responseData(topicNames, ApiKeys.FETCH.latestVersion()).get(topicPartition) assertEquals(Errors.NONE.code, data2.errorCode) - assertEquals(1, records(data2).size) - - val req3 = new FetchRequest.Builder(0, 1, -1, -1, Int.MaxValue, 0, - createPartitionMap(300, Seq(topicPartition), Map(topicPartition -> 1L))) - .setMaxBytes(800).build() - - val res3 = sendFetchRequest(leaderId, req3) - val data3 = res3.responseData(topicNames, 1).get(topicPartition) - assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data3.errorCode) - - // fetch request with version 10: works fine! 
- val req4 = new FetchRequest.Builder(0, 10, -1, -1, Int.MaxValue, 0, - createPartitionMap(300, Seq(topicPartition), Map.empty)) - .setMaxBytes(800).build() - val res4 = sendFetchRequest(leaderId, req4) - val data4 = res4.responseData(topicNames, 10).get(topicPartition) - assertEquals(Errors.NONE.code, data4.errorCode) - assertEquals(3, records(data4).size) - - val req5 = new FetchRequest.Builder(0, ApiKeys.FETCH.latestVersion(), -1, -1, Int.MaxValue, 0, - createPartitionMap(300, Seq(topicPartition), Map.empty)) - .setMaxBytes(800).build() - val res5 = sendFetchRequest(leaderId, req5) - val data5 = res5.responseData(topicNames, ApiKeys.FETCH.latestVersion()).get(topicPartition) - assertEquals(Errors.NONE.code, data5.errorCode) - assertEquals(3, records(data5).size) + assertEquals(3, records(data2).size) } private def checkFetchResponse(expectedPartitions: Seq[TopicPartition], fetchResponse: FetchResponse, diff --git a/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala b/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala index 787945024852d..8e4df446d8867 100755 --- a/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala @@ -210,7 +210,7 @@ class FetchSessionTest { .setLastStableOffset(5) .setLogStartOffset(5)) - val sessionId = context1.updateAndGenerateResponseData(response).sessionId() + val sessionId = context1.updateAndGenerateResponseData(response, Seq.empty.asJava).sessionId() // With no changes, the cached epochs should remain the same val requestData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -227,7 +227,7 @@ class FetchSessionTest { assertEquals(Optional.empty(), epochs1(tp0)) assertEquals(Optional.of(1), epochs2(tp1)) assertEquals(Optional.of(2), epochs2(tp2)) - context2.updateAndGenerateResponseData(response).sessionId() + context2.updateAndGenerateResponseData(response, Seq.empty.asJava).sessionId() // Now verify we can change the leader epoch and the context is updated val requestData3 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -310,7 +310,7 @@ class FetchSessionTest { .setLastStableOffset(5) .setLogStartOffset(5)) - val sessionId = context1.updateAndGenerateResponseData(response).sessionId() + val sessionId = context1.updateAndGenerateResponseData(response, Seq.empty.asJava).sessionId() // With no changes, the cached epochs should remain the same val requestData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -326,7 +326,7 @@ class FetchSessionTest { assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.of(1), tp2 -> Optional.of(2)), cachedLeaderEpochs(context2)) assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.empty, tp2 -> Optional.of(1)), cachedLastFetchedEpochs(context2)) - context2.updateAndGenerateResponseData(response).sessionId() + context2.updateAndGenerateResponseData(response, Seq.empty.asJava).sessionId() // Now verify we can change the leader epoch and the context is updated val requestData3 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -411,7 +411,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val resp2 = context2.updateAndGenerateResponseData(respData2) + val resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(Errors.NONE, resp2.error()) assertTrue(resp2.sessionId() != INVALID_SESSION_ID) assertEquals(respData2.asScala.map { case (tp, data) => 
(tp.topicPartition, data)}.toMap.asJava, resp2.responseData(topicNames, request2.version)) @@ -428,7 +428,7 @@ class FetchSessionTest { ) assertEquals(classOf[SessionErrorContext], context3.getClass) assertEquals(Errors.INVALID_FETCH_SESSION_EPOCH, - context3.updateAndGenerateResponseData(respData2).error()) + context3.updateAndGenerateResponseData(respData2, Seq.empty.asJava).error()) // Test trying to create a new session with a non-existent session id val request4 = createRequest(new JFetchMetadata(resp2.sessionId() + 1, 1), reqData2, EMPTY_PART_LIST, isFromFollower = false) @@ -441,7 +441,7 @@ class FetchSessionTest { topicNames ) assertEquals(Errors.FETCH_SESSION_ID_NOT_FOUND, - context4.updateAndGenerateResponseData(respData2).error()) + context4.updateAndGenerateResponseData(respData2, Seq.empty.asJava).error()) // Continue the first fetch session we created. val reqData5 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -463,7 +463,7 @@ class FetchSessionTest { assertEquals(entry.getValue, data) }) assertEquals(10, context5.getFetchOffset(tp1).get) - val resp5 = context5.updateAndGenerateResponseData(respData2) + val resp5 = context5.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(Errors.NONE, resp5.error()) assertEquals(resp2.sessionId(), resp5.sessionId()) assertEquals(0, resp5.responseData(topicNames, request5.version).size()) @@ -480,7 +480,7 @@ class FetchSessionTest { ) assertEquals(classOf[SessionErrorContext], context6.getClass) assertEquals(Errors.INVALID_FETCH_SESSION_EPOCH, - context6.updateAndGenerateResponseData(respData2).error()) + context6.updateAndGenerateResponseData(respData2, Seq.empty.asJava).error()) // Test generating a throttled response for the incremental fetch session val reqData7 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] @@ -493,7 +493,7 @@ class FetchSessionTest { request7.forgottenTopics(topicNames), topicNames ) - val resp7 = context7.getThrottledResponse(100) + val resp7 = context7.getThrottledResponse(100, Seq.empty.asJava) assertEquals(Errors.NONE, resp7.error()) assertEquals(resp2.sessionId(), resp7.sessionId()) assertEquals(100, resp7.throttleTimeMs()) @@ -531,7 +531,7 @@ class FetchSessionTest { .setHighWatermark(100) .setLastStableOffset(100) .setLogStartOffset(100)) - val resp8 = context8.updateAndGenerateResponseData(respData8) + val resp8 = context8.updateAndGenerateResponseData(respData8, Seq.empty.asJava) assertEquals(Errors.NONE, resp8.error) nextSessionId = resp8.sessionId } while (nextSessionId == prevSessionId) @@ -579,7 +579,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) assertEquals(2, resp1.responseData(topicNames, request1.version).size()) @@ -620,7 +620,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val resp2 = context2.updateAndGenerateResponseData(respData2) + val resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(Errors.NONE, resp2.error) assertEquals(1, resp2.responseData(topicNames, request2.version).size) assertTrue(resp2.sessionId > 0) @@ -667,7 +667,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val resp1 = 
context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) // Since we are ignoring IDs, we should have no errors. assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) @@ -723,7 +723,7 @@ class FetchSessionTest { respData1.put(emptyZar0, new FetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) // On the latest request version, we should have unknown topic ID errors. assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) @@ -767,7 +767,7 @@ class FetchSessionTest { respData2.put(emptyZar0, new FetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code)) - val resp2 = context2.updateAndGenerateResponseData(respData2) + val resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) // Since we are ignoring IDs, we should have no errors. assertEquals(Errors.NONE, resp2.error()) assertTrue(resp2.sessionId() != INVALID_SESSION_ID) @@ -813,7 +813,7 @@ class FetchSessionTest { .setHighWatermark(100) .setLastStableOffset(100) .setLogStartOffset(100)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) @@ -835,7 +835,7 @@ class FetchSessionTest { assertEquals(classOf[SessionErrorContext], context2.getClass) val respData2 = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] assertEquals(Errors.FETCH_SESSION_TOPIC_ID_ERROR, - context2.updateAndGenerateResponseData(respData2).error()) + context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava).error()) } @Test @@ -869,7 +869,7 @@ class FetchSessionTest { .setHighWatermark(100) .setLastStableOffset(100) .setLogStartOffset(100)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) @@ -890,7 +890,7 @@ class FetchSessionTest { assertEquals(classOf[SessionErrorContext], context2.getClass) val respData2 = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] assertEquals(Errors.FETCH_SESSION_TOPIC_ID_ERROR, - context2.updateAndGenerateResponseData(respData2).error()) + context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava).error()) } // This test simulates a session where the topic ID changes broker side (the one handling the request) in both the metadata cache and the log @@ -934,7 +934,7 @@ class FetchSessionTest { .setLastStableOffset(-1) .setLogStartOffset(-1) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error()) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) assertEquals(2, resp1.responseData(topicNames, request1.version).size) @@ -961,7 +961,7 @@ class FetchSessionTest { .setLastStableOffset(-1) .setLogStartOffset(-1) .setErrorCode(Errors.INCONSISTENT_TOPIC_ID.code)) - val resp2 = context2.updateAndGenerateResponseData(respData2) + val 
resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(Errors.NONE, resp2.error) assertTrue(resp2.sessionId > 0) @@ -1031,7 +1031,7 @@ class FetchSessionTest { noErrorResponse ) } - context.updateAndGenerateResponseData(data).sessionId + context.updateAndGenerateResponseData(data, Seq.empty.asJava).sessionId } val foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) @@ -1154,7 +1154,7 @@ class FetchSessionTest { noErrorResponse ) } - context.updateAndGenerateResponseData(data).sessionId + context.updateAndGenerateResponseData(data, Seq.empty.asJava).sessionId } val foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) @@ -1254,7 +1254,7 @@ class FetchSessionTest { errorResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) ) } - context.updateAndGenerateResponseData(data) + context.updateAndGenerateResponseData(data, Seq.empty.asJava) } val foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) @@ -1357,7 +1357,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session1resp = session1context1.updateAndGenerateResponseData(respData1) + val session1resp = session1context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, session1resp.error()) assertTrue(session1resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session1resp.responseData(topicNames, session1request1.version).size) @@ -1394,7 +1394,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session2resp = session2context.updateAndGenerateResponseData(respData1) + val session2resp = session2context.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, session2resp.error()) assertTrue(session2resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session2resp.responseData(topicNames, session2request1.version()).size()) @@ -1452,7 +1452,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session3resp = session3context.updateAndGenerateResponseData(respData3) + val session3resp = session3context.updateAndGenerateResponseData(respData3, Seq.empty.asJava) assertEquals(Errors.NONE, session3resp.error()) assertTrue(session3resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session3resp.responseData(topicNames, session3request1.version).size) @@ -1500,7 +1500,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session1resp = session1context.updateAndGenerateResponseData(respData1) + val session1resp = session1context.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, session1resp.error()) assertTrue(session1resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session1resp.responseData(topicNames, session1request.version).size) @@ -1538,7 +1538,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session2resp = session2context.updateAndGenerateResponseData(session2RespData) + val session2resp = session2context.updateAndGenerateResponseData(session2RespData, Seq.empty.asJava) assertEquals(Errors.NONE, session2resp.error()) assertTrue(session2resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session2resp.responseData(topicNames, session2request.version).size) @@ -1578,7 +1578,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) 
.setLogStartOffset(10)) - val session3resp = session3context.updateAndGenerateResponseData(respData3) + val session3resp = session3context.updateAndGenerateResponseData(respData3, Seq.empty.asJava) assertEquals(Errors.NONE, session3resp.error()) assertTrue(session3resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session3resp.responseData(topicNames, session3request.version).size) @@ -1621,7 +1621,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val session4resp = session3context.updateAndGenerateResponseData(respData4) + val session4resp = session3context.updateAndGenerateResponseData(respData4, Seq.empty.asJava) assertEquals(Errors.NONE, session4resp.error()) assertTrue(session4resp.sessionId() != INVALID_SESSION_ID) assertEquals(2, session4resp.responseData(topicNames, session4request.version).size) @@ -1669,7 +1669,7 @@ class FetchSessionTest { .setHighWatermark(10) .setLastStableOffset(10) .setLogStartOffset(10)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error) assertTrue(resp1.sessionId() != INVALID_SESSION_ID) assertEquals(2, resp1.responseData(topicNames, request1.version).size) @@ -1691,7 +1691,7 @@ class FetchSessionTest { ) assertEquals(classOf[SessionlessFetchContext], context2.getClass) val respData2 = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] - val resp2 = context2.updateAndGenerateResponseData(respData2) + val resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(INVALID_SESSION_ID, resp2.sessionId) assertTrue(resp2.responseData(topicNames, request2.version).isEmpty) assertEquals(0, cacheShard.size) @@ -1735,7 +1735,7 @@ class FetchSessionTest { .setLastStableOffset(105) .setLogStartOffset(0) .setDivergingEpoch(divergingEpoch)) - val resp1 = context1.updateAndGenerateResponseData(respData) + val resp1 = context1.updateAndGenerateResponseData(respData, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error) assertNotEquals(INVALID_SESSION_ID, resp1.sessionId) assertEquals(util.Set.of(tp1.topicPartition, tp2.topicPartition), resp1.responseData(topicNames, request1.version).keySet) @@ -1752,7 +1752,7 @@ class FetchSessionTest { topicNames ) assertEquals(classOf[IncrementalFetchContext], context2.getClass) - val resp2 = context2.updateAndGenerateResponseData(respData) + val resp2 = context2.updateAndGenerateResponseData(respData, Seq.empty.asJava) assertEquals(Errors.NONE, resp2.error) assertEquals(resp1.sessionId, resp2.sessionId) assertEquals(Collections.singleton(tp2.topicPartition), resp2.responseData(topicNames, request2.version).keySet) @@ -1764,7 +1764,7 @@ class FetchSessionTest { .setLastStableOffset(105) .setLogStartOffset(0) .setDivergingEpoch(divergingEpoch)) - val resp3 = context2.updateAndGenerateResponseData(respData) + val resp3 = context2.updateAndGenerateResponseData(respData, Seq.empty.asJava) assertEquals(Errors.NONE, resp3.error) assertEquals(resp1.sessionId, resp3.sessionId) assertEquals(util.Set.of(tp1.topicPartition, tp2.topicPartition), resp3.responseData(topicNames, request2.version).keySet) @@ -1776,7 +1776,7 @@ class FetchSessionTest { .setHighWatermark(110) .setLastStableOffset(110) .setLogStartOffset(0)) - val resp4 = context2.updateAndGenerateResponseData(respData) + val resp4 = context2.updateAndGenerateResponseData(respData, Seq.empty.asJava) assertEquals(Errors.NONE, resp4.error) 
assertEquals(resp1.sessionId, resp4.sessionId) assertEquals(util.Set.of(tp1.topicPartition, tp2.topicPartition), resp4.responseData(topicNames, request2.version).keySet) @@ -1820,7 +1820,7 @@ class FetchSessionTest { .setLastStableOffset(50) .setLogStartOffset(0)) - val resp1 = context1.updateAndGenerateResponseData(respData1) + val resp1 = context1.updateAndGenerateResponseData(respData1, Seq.empty.asJava) assertEquals(Errors.NONE, resp1.error) assertNotEquals(INVALID_SESSION_ID, resp1.sessionId) assertEquals(util.Set.of(tp1.topicPartition, tp2.topicPartition, tp3.topicPartition), resp1.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet()) @@ -1836,7 +1836,7 @@ class FetchSessionTest { // Response is empty val respData2 = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] - val resp2 = context2.updateAndGenerateResponseData(respData2) + val resp2 = context2.updateAndGenerateResponseData(respData2, Seq.empty.asJava) assertEquals(Errors.NONE, resp2.error) assertEquals(resp1.sessionId, resp2.sessionId) assertEquals(Collections.emptySet(), resp2.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet) @@ -1860,7 +1860,7 @@ class FetchSessionTest { .setHighWatermark(50) .setLastStableOffset(50) .setLogStartOffset(0)) - val resp3 = context2.updateAndGenerateResponseData(respData3) + val resp3 = context2.updateAndGenerateResponseData(respData3, Seq.empty.asJava) assertEquals(Errors.NONE, resp3.error) assertEquals(resp1.sessionId, resp3.sessionId) assertEquals(util.Set.of(tp1.topicPartition, tp2.topicPartition), resp3.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet) diff --git a/core/src/test/scala/unit/kafka/server/FinalizedFeatureCacheTest.scala b/core/src/test/scala/unit/kafka/server/FinalizedFeatureCacheTest.scala deleted file mode 100644 index d98bcd23b259d..0000000000000 --- a/core/src/test/scala/unit/kafka/server/FinalizedFeatureCacheTest.scala +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.server - -import kafka.server.metadata.{FeatureCacheUpdateException, ZkMetadataCache} -import org.apache.kafka.common.feature.{Features, SupportedVersionRange} -import org.apache.kafka.server.BrokerFeatures -import org.apache.kafka.server.common.MetadataVersion -import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} -import org.junit.jupiter.api.Test - -import scala.jdk.CollectionConverters._ - -class FinalizedFeatureCacheTest { - - @Test - def testEmpty(): Unit = { - assertTrue(new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, BrokerFeatures.createDefault(true)).getFeatureOption.isEmpty) - } - - def asJava(input: Map[String, Short]): java.util.Map[String, java.lang.Short] = { - input.map(kv => kv._1 -> kv._2.asInstanceOf[java.lang.Short]).asJava - } - - @Test - def testUpdateOrThrowFailedDueToInvalidEpoch(): Unit = { - val supportedFeatures = Map[String, SupportedVersionRange]( - "feature_1" -> new SupportedVersionRange(1, 4)) - val brokerFeatures = BrokerFeatures.createDefault(true, Features.supportedFeatures(supportedFeatures.asJava)) - - val finalizedFeatures = Map[String, Short]("feature_1" -> 4) - - val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures) - cache.updateFeaturesOrThrow(finalizedFeatures, 10) - assertTrue(cache.getFeatureOption.isDefined) - assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures()) - assertEquals(10, cache.getFeatureOption.get.finalizedFeaturesEpoch()) - - assertThrows(classOf[FeatureCacheUpdateException], () => cache.updateFeaturesOrThrow(finalizedFeatures, 9)) - - // Check that the failed updateOrThrow call did not make any mutations. - assertTrue(cache.getFeatureOption.isDefined) - assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures()) - assertEquals(10, cache.getFeatureOption.get.finalizedFeaturesEpoch()) - } - - @Test - def testUpdateOrThrowFailedDueToInvalidFeatures(): Unit = { - val supportedFeatures = - Map[String, SupportedVersionRange]("feature_1" -> new SupportedVersionRange(1, 1)) - val brokerFeatures = BrokerFeatures.createDefault(true, Features.supportedFeatures(supportedFeatures.asJava)) - - val finalizedFeatures = Map[String, Short]("feature_1" -> 2) - - val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures) - assertThrows(classOf[FeatureCacheUpdateException], () => cache.updateFeaturesOrThrow(finalizedFeatures, 12)) - - // Check that the failed updateOrThrow call did not make any mutations. 
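/*
 * [Editor's note: illustrative aside, not part of this patch. FinalizedFeatureCacheTest covered the
 * ZooKeeper-backed ZkMetadataCache, which this patch deletes together with the rest of the ZK path.
 * The KRaft-backed cache that the surviving tests switch to is built as in the
 * HighwatermarkPersistenceTest hunk later in this diff; a minimal sketch, assuming a KafkaConfig
 * value named `config` and the import org.apache.kafka.server.common.KRaftVersion added there:]
 */
val kraftMetadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0)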
- assertTrue(cache.getFeatureOption.isEmpty) - } - - @Test - def testUpdateOrThrowSuccess(): Unit = { - val supportedFeatures = - Map[String, SupportedVersionRange]("feature_1" -> new SupportedVersionRange(1, 4)) - val brokerFeatures = BrokerFeatures.createDefault(true, Features.supportedFeatures(supportedFeatures.asJava)) - - val finalizedFeatures = Map[String, Short]("feature_1" -> 3) - - val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures) - cache.updateFeaturesOrThrow(finalizedFeatures, 12) - assertTrue(cache.getFeatureOption.isDefined) - assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures()) - assertEquals(12, cache.getFeatureOption.get.finalizedFeaturesEpoch()) - } - - @Test - def testClear(): Unit = { - val supportedFeatures = - Map[String, SupportedVersionRange]("feature_1" -> new SupportedVersionRange(1, 4)) - val brokerFeatures = BrokerFeatures.createDefault(true, Features.supportedFeatures(supportedFeatures.asJava)) - - val finalizedFeatures = Map[String, Short]("feature_1" -> 3) - - val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures) - cache.updateFeaturesOrThrow(finalizedFeatures, 12) - assertTrue(cache.getFeatureOption.isDefined) - assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures()) - assertEquals(12, cache.getFeatureOption.get.finalizedFeaturesEpoch()) - - cache.clearFeatures() - assertTrue(cache.getFeatureOption.isEmpty) - } -} diff --git a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala index 26e91d4d3db0f..d2d8d3e0382c3 100644 --- a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala @@ -66,11 +66,11 @@ class ForwardingManagerTest { } private def controllerInfo = { - ControllerInformation(Some(new Node(0, "host", 1234)), new ListenerName(""), SecurityProtocol.PLAINTEXT, "", isZkController = true) + ControllerInformation(Some(new Node(0, "host", 1234)), new ListenerName(""), SecurityProtocol.PLAINTEXT, "") } private def emptyControllerInfo = { - ControllerInformation(None, new ListenerName(""), SecurityProtocol.PLAINTEXT, "", isZkController = true) + ControllerInformation(None, new ListenerName(""), SecurityProtocol.PLAINTEXT, "") } @Test diff --git a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala index f3375cd79c60a..916c244c2954d 100644 --- a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala @@ -25,17 +25,17 @@ import org.apache.kafka.common.message.DeleteGroupsResponseData.{DeletableGroupR import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment -import org.apache.kafka.common.message.{ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, HeartbeatRequestData, HeartbeatResponseData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, 
OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData} +import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData, ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, EndTxnRequestData, HeartbeatRequestData, HeartbeatResponseData, InitProducerIdRequestData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, HeartbeatRequest, HeartbeatResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse} +import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, AddOffsetsToTxnRequest, AddOffsetsToTxnResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, EndTxnRequest, EndTxnResponse, HeartbeatRequest, HeartbeatResponse, InitProducerIdRequest, InitProducerIdResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse, TxnOffsetCommitRequest, TxnOffsetCommitResponse} import org.apache.kafka.common.serialization.StringSerializer +import org.apache.kafka.common.utils.ProducerIdAndEpoch import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.junit.jupiter.api.Assertions.{assertEquals, fail} import java.util.{Comparator, Properties} import java.util.stream.Collectors import scala.collection.Seq -import scala.collection.convert.ImplicitConversions.{`collection AsScalaIterable`, `map AsScala`} import scala.jdk.CollectionConverters._ import scala.reflect.ClassTag @@ -47,24 +47,47 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected var producer: KafkaProducer[String, 
String] = _ protected def createOffsetsTopic(): Unit = { - TestUtils.createOffsetsTopicWithAdmin( - admin = cluster.createAdminClient(), - brokers = brokers(), - controllers = controllerServers() - ) + val admin = cluster.admin() + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = brokers(), + controllers = controllerServers() + ) + } finally { + admin.close() + } + } + + protected def createTransactionStateTopic(): Unit = { + val admin = cluster.admin() + try { + TestUtils.createTransactionStateTopicWithAdmin( + admin = admin, + brokers = brokers(), + controllers = controllerServers() + ) + } finally { + admin.close() + } } protected def createTopic( topic: String, numPartitions: Int ): Unit = { - TestUtils.createTopicWithAdmin( - admin = cluster.createAdminClient(), - brokers = brokers(), - controllers = controllerServers(), - topic = topic, - numPartitions = numPartitions - ) + val admin = cluster.admin() + try { + TestUtils.createTopicWithAdmin( + admin = admin, + brokers = brokers(), + controllers = controllerServers(), + topic = topic, + numPartitions = numPartitions + ) + } finally { + admin.close() + } } protected def createTopicAndReturnLeaders( @@ -73,16 +96,21 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { replicationFactor: Int = 1, topicConfig: Properties = new Properties ): Map[TopicIdPartition, Int] = { - val partitionToLeader = TestUtils.createTopicWithAdmin( - admin = cluster.createAdminClient(), - topic = topic, - brokers = brokers(), - controllers = controllerServers(), - numPartitions = numPartitions, - replicationFactor = replicationFactor, - topicConfig = topicConfig - ) - partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds(topic), new TopicPartition(topic, partition)) -> leader } + val admin = cluster.admin() + try { + val partitionToLeader = TestUtils.createTopicWithAdmin( + admin = admin, + topic = topic, + brokers = brokers(), + controllers = controllerServers(), + numPartitions = numPartitions, + replicationFactor = replicationFactor, + topicConfig = topicConfig + ) + partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds(topic), new TopicPartition(topic, partition)) -> leader } + } finally { + admin.close() + } } protected def isUnstableApiEnabled: Boolean = { @@ -94,11 +122,11 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { } protected def getTopicIds: Map[String, Uuid] = { - cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().toMap + cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap } protected def getBrokers: Seq[KafkaBroker] = { - cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).toSeq + cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).asScala.toSeq } protected def bootstrapServers(): String = { @@ -179,6 +207,114 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { assertEquals(expectedResponse, response.data) } + protected def commitTxnOffset( + groupId: String, + memberId: String, + generationId: Int, + producerId: Long, + producerEpoch: Short, + transactionalId: String, + topic: String, + partition: Int, + offset: Long, + expectedError: Errors, + version: Short = ApiKeys.TXN_OFFSET_COMMIT.latestVersion(isUnstableApiEnabled) + ): Unit = { + val request = new TxnOffsetCommitRequest.Builder( + new 
TxnOffsetCommitRequestData() + .setGroupId(groupId) + .setMemberId(memberId) + .setGenerationId(generationId) + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setTransactionalId(transactionalId) + .setTopics(List( + new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() + .setName(topic) + .setPartitions(List( + new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() + .setPartitionIndex(partition) + .setCommittedOffset(offset) + ).asJava) + ).asJava) + ).build(version) + + val expectedResponse = new TxnOffsetCommitResponseData() + .setTopics(List( + new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() + .setName(topic) + .setPartitions(List( + new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() + .setPartitionIndex(partition) + .setErrorCode(expectedError.code) + ).asJava) + ).asJava) + + val response = connectAndReceive[TxnOffsetCommitResponse](request) + assertEquals(expectedResponse, response.data) + } + + protected def addOffsetsToTxn( + groupId: String, + producerId: Long, + producerEpoch: Short, + transactionalId: String, + version: Short = ApiKeys.ADD_OFFSETS_TO_TXN.latestVersion(isUnstableApiEnabled) + ): Unit = { + val request = new AddOffsetsToTxnRequest.Builder( + new AddOffsetsToTxnRequestData() + .setTransactionalId(transactionalId) + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setGroupId(groupId) + ).build(version) + + val response = connectAndReceive[AddOffsetsToTxnResponse](request) + assertEquals(new AddOffsetsToTxnResponseData(), response.data) + } + + protected def initProducerId( + transactionalId: String, + transactionTimeoutMs: Int = 60000, + producerIdAndEpoch: ProducerIdAndEpoch, + expectedError: Errors, + version: Short = ApiKeys.INIT_PRODUCER_ID.latestVersion(isUnstableApiEnabled) + ): ProducerIdAndEpoch = { + val request = new InitProducerIdRequest.Builder( + new InitProducerIdRequestData() + .setTransactionalId(transactionalId) + .setTransactionTimeoutMs(transactionTimeoutMs) + .setProducerId(producerIdAndEpoch.producerId) + .setProducerEpoch(producerIdAndEpoch.epoch)) + .build(version) + + val response = connectAndReceive[InitProducerIdResponse](request).data + assertEquals(expectedError.code, response.errorCode) + new ProducerIdAndEpoch(response.producerId, response.producerEpoch) + } + + protected def endTxn( + producerId: Long, + producerEpoch: Short, + transactionalId: String, + isTransactionV2Enabled: Boolean, + committed: Boolean, + expectedError: Errors, + version: Short = ApiKeys.END_TXN.latestVersion(isUnstableApiEnabled) + ): Unit = { + val request = new EndTxnRequest.Builder( + new EndTxnRequestData() + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setTransactionalId(transactionalId) + .setCommitted(committed), + isUnstableApiEnabled, + isTransactionV2Enabled + ).build(version) + + assertEquals(expectedError, connectAndReceive[EndTxnResponse](request).error) + } + protected def fetchOffsets( groupId: String, memberId: String, @@ -517,7 +653,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected def consumerGroupDescribe( groupIds: List[String], - includeAuthorizedOperations: Boolean, + includeAuthorizedOperations: Boolean = false, version: Short = ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled) ): List[ConsumerGroupDescribeResponseData.DescribedGroup] = { val consumerGroupDescribeRequest = new ConsumerGroupDescribeRequest.Builder( @@ -591,8 +727,7 @@ class GroupCoordinatorBaseRequestTest(cluster: 
ClusterInstance) { .setRebalanceTimeoutMs(rebalanceTimeoutMs) .setSubscribedTopicNames(subscribedTopicNames.asJava) .setServerAssignor(serverAssignor) - .setTopicPartitions(topicPartitions.asJava), - true + .setTopicPartitions(topicPartitions.asJava) ).build(version) // Send the request until receiving a successful response. There is a delay @@ -646,14 +781,12 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { ) } - protected def leaveGroupWithOldProtocol( + protected def classicLeaveGroup( groupId: String, memberIds: List[String], groupInstanceIds: List[String] = null, - expectedLeaveGroupError: Errors, - expectedMemberErrors: List[Errors], version: Short = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) - ): Unit = { + ): LeaveGroupResponseData = { val leaveGroupRequest = new LeaveGroupRequest.Builder( groupId, List.tabulate(memberIds.length) { i => @@ -663,6 +796,24 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { }.asJava ).build(version) + connectAndReceive[LeaveGroupResponse](leaveGroupRequest).data + } + + protected def leaveGroupWithOldProtocol( + groupId: String, + memberIds: List[String], + groupInstanceIds: List[String] = null, + expectedLeaveGroupError: Errors, + expectedMemberErrors: List[Errors], + version: Short = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) + ): Unit = { + val leaveGroupResponse = classicLeaveGroup( + groupId, + memberIds, + groupInstanceIds, + version + ) + val expectedResponseData = new LeaveGroupResponseData() if (expectedLeaveGroupError != Errors.NONE) { expectedResponseData.setErrorCode(expectedLeaveGroupError.code) @@ -676,8 +827,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { }.asJava) } - val leaveGroupResponse = connectAndReceive[LeaveGroupResponse](leaveGroupRequest) - assertEquals(expectedResponseData, leaveGroupResponse.data) + assertEquals(expectedResponseData, leaveGroupResponse) } protected def leaveGroup( diff --git a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala index 506ceac074bd2..f625afa1fa719 100755 --- a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala +++ b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala @@ -27,12 +27,13 @@ import kafka.cluster.Partition import kafka.server.metadata.MockConfigRepository import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record.SimpleRecord +import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.log.{CleanerConfig, LogDirFailureChannel} class HighwatermarkPersistenceTest { - val configs = TestUtils.createBrokerConfigs(2, TestUtils.MockZkConnect).map(KafkaConfig.fromProps) + val configs = TestUtils.createBrokerConfigs(2).map(KafkaConfig.fromProps) val topic = "foo" val configRepository = new MockConfigRepository() val logManagers = configs map { config => @@ -69,7 +70,7 @@ class HighwatermarkPersistenceTest { scheduler = scheduler, logManager = logManagers.head, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(configs.head.brokerId, configs.head.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = logDirFailureChannels.head, alterPartitionManager = alterIsrManager) replicaManager.startup() @@ -127,7 +128,7 @@ class 
HighwatermarkPersistenceTest { scheduler = scheduler, logManager = logManagers.head, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(configs.head.brokerId, configs.head.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = logDirFailureChannels.head, alterPartitionManager = alterIsrManager) replicaManager.startup() diff --git a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala index d34e72148b5d5..2f11690bacdc4 100644 --- a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala +++ b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala @@ -27,6 +27,7 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.utils.Time import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.log.{LogDirFailureChannel, LogOffsetMetadata} @@ -47,7 +48,7 @@ class IsrExpirationTest { val overridingProps = new Properties() overridingProps.put(ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_CONFIG, replicaLagTimeMaxMs.toString) overridingProps.put(ReplicationConfigs.REPLICA_FETCH_WAIT_MAX_MS_CONFIG, replicaFetchWaitMaxMs.toString) - val configs = TestUtils.createBrokerConfigs(2, TestUtils.MockZkConnect).map(KafkaConfig.fromProps(_, overridingProps)) + val configs = TestUtils.createBrokerConfigs(2).map(KafkaConfig.fromProps(_, overridingProps)) val topic = "foo" val time = new MockTime @@ -72,7 +73,7 @@ class IsrExpirationTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(configs.head.brokerId, configs.head.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(configs.head.logDirs.size), alterPartitionManager = alterIsrManager) } diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index 2e6ffdf400b81..bfa19917a6f60 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -17,16 +17,14 @@ package kafka.server -import kafka.cluster.{Broker, Partition} -import kafka.controller.{ControllerContext, KafkaController} +import kafka.cluster.Partition import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator} import kafka.log.UnifiedLog import kafka.network.RequestChannel import kafka.server.QuotaFactory.QuotaManagers -import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache, MockConfigRepository, ZkMetadataCache} +import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache, MockConfigRepository} import kafka.server.share.SharePartitionManager import kafka.utils.{CoreUtils, Log4jController, Logging, TestUtils} -import kafka.zk.KafkaZkClient import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} import org.apache.kafka.common._ @@ -35,7 +33,7 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.ConfigResource import 
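// Illustrative only: the construction these broker-side tests now share. The removed
// ZK variant took an inter-broker protocol version; the KRaft variant takes the broker
// id plus a () => KRaftVersion supplier (assumes the kafka core test classpath).
val kraftCache: MetadataCache =
  MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0)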
org.apache.kafka.common.config.ConfigResource.Type.{BROKER, BROKER_LOGGER} import org.apache.kafka.common.errors.{ClusterAuthorizationException, UnsupportedVersionException} -import org.apache.kafka.common.internals.{KafkaFutureImpl, Topic} +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResult @@ -43,8 +41,7 @@ import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterConfigsReso import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => LAlterConfigsResourceResponse} import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.DescribedGroup -import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic -import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicCollection} +import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.{AlterConfigsResource => IAlterConfigsResource, AlterConfigsResourceCollection => IAlterConfigsResourceCollection, AlterableConfig => IAlterableConfig, AlterableConfigCollection => IAlterableConfigCollection} import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.{AlterConfigsResourceResponse => IAlterConfigsResourceResponse} @@ -57,13 +54,13 @@ import org.apache.kafka.common.message.OffsetDeleteRequestData.{OffsetDeleteRequ import org.apache.kafka.common.message.OffsetDeleteResponseData.{OffsetDeleteResponsePartition, OffsetDeleteResponsePartitionCollection, OffsetDeleteResponseTopic, OffsetDeleteResponseTopicCollection} import org.apache.kafka.common.message.ShareFetchRequestData.{AcknowledgementBatch, ForgottenTopic} import org.apache.kafka.common.message.ShareFetchResponseData.{AcquiredRecords, PartitionData, ShareFetchableTopicResponse} -import org.apache.kafka.common.message.StopReplicaRequestData.{StopReplicaPartitionState, StopReplicaTopicState} -import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataBroker, UpdateMetadataEndpoint, UpdateMetadataPartitionState} +import org.apache.kafka.common.metadata.{TopicRecord, PartitionRecord, RegisterBrokerRecord} +import org.apache.kafka.common.metadata.RegisterBrokerRecord.{BrokerEndpoint, BrokerEndpointCollection} +import org.apache.kafka.common.protocol.ApiMessage import org.apache.kafka.common.message._ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.{ClientInformation, ListenerName} import org.apache.kafka.common.protocol.{ApiKeys, Errors, MessageUtil} -import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType import org.apache.kafka.common.requests.MetadataResponse.TopicMetadata @@ -77,17 +74,15 @@ import org.apache.kafka.common.utils.{ImplicitLinkedHashCollection, ProducerIdAn import org.apache.kafka.coordinator.group.GroupConfig.{CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, 
CONSUMER_SESSION_TIMEOUT_MS_CONFIG, SHARE_AUTO_OFFSET_RESET_CONFIG, SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, SHARE_RECORD_LOCK_DURATION_MS_CONFIG, SHARE_SESSION_TIMEOUT_MS_CONFIG} import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinator, GroupCoordinatorConfig} -import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorConfigTest} +import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorTestConfig} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.network.metrics.{RequestChannelMetrics, RequestMetrics} import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.{BrokerFeatures, ClientMetricsManager} import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer} -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_2_IV0, IBP_2_2_IV1} import org.apache.kafka.server.common.{FeatureVersion, FinalizedFeatures, GroupVersion, KRaftVersion, MetadataVersion, RequestLocal, TransactionVersion} -import org.apache.kafka.server.config.{ConfigType, KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.metrics.ClientMetricsTestUtils import org.apache.kafka.server.share.{CachedSharePartition, ErroneousAndValidPartitionData} import org.apache.kafka.server.quota.ThrottleCallback @@ -124,9 +119,7 @@ class KafkaApisTest extends Logging { private val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) private val groupCoordinator: GroupCoordinator = mock(classOf[GroupCoordinator]) private val shareCoordinator: ShareCoordinator = mock(classOf[ShareCoordinator]) - private val adminManager: ZkAdminManager = mock(classOf[ZkAdminManager]) private val txnCoordinator: TransactionCoordinator = mock(classOf[TransactionCoordinator]) - private val controller: KafkaController = mock(classOf[KafkaController]) private val forwardingManager: ForwardingManager = mock(classOf[ForwardingManager]) private val autoTopicCreationManager: AutoTopicCreationManager = mock(classOf[AutoTopicCreationManager]) @@ -134,12 +127,9 @@ class KafkaApisTest extends Logging { override def serialize(principal: KafkaPrincipal): Array[Byte] = Utils.utf8(principal.toString) override def deserialize(bytes: Array[Byte]): KafkaPrincipal = SecurityUtils.parseKafkaPrincipal(Utils.utf8(bytes)) } - private val zkClient: KafkaZkClient = mock(classOf[KafkaZkClient]) private val metrics = new Metrics() private val brokerId = 1 - // KRaft tests should override this with a KRaftMetadataCache - private var metadataCache: MetadataCache = MetadataCache.zkMetadataCache(brokerId, MetadataVersion.latestTesting()) - private var brokerEpochManager: ZkBrokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) + private var metadataCache: MetadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) private val clientQuotaManager: ClientQuotaManager = mock(classOf[ClientQuotaManager]) private val clientRequestQuotaManager: ClientRequestQuotaManager = mock(classOf[ClientRequestQuotaManager]) private val clientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) @@ -167,67 +157,36 @@ class KafkaApisTest 
extends Logging { def createKafkaApis(interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latestTesting, authorizer: Option[Authorizer] = None, - enableForwarding: Boolean = false, configRepository: ConfigRepository = new MockConfigRepository(), - raftSupport: Boolean = false, overrideProperties: Map[String, String] = Map.empty, featureVersions: Seq[FeatureVersion] = Seq.empty): KafkaApis = { - val properties = if (raftSupport) { - val properties = TestUtils.createBrokerConfig(brokerId, "") - properties.put(KRaftConfigs.NODE_ID_CONFIG, brokerId.toString) - properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") - val voterId = brokerId + 1 - properties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$voterId@localhost:9093") - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - properties - } else { - TestUtils.createBrokerConfig(brokerId, "zk") - } + + val properties = TestUtils.createBrokerConfig(brokerId) + properties.put(KRaftConfigs.NODE_ID_CONFIG, brokerId.toString) + properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + val voterId = brokerId + 1 + properties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$voterId@localhost:9093") + overrideProperties.foreach( p => properties.put(p._1, p._2)) - TestUtils.setIbpAndMessageFormatVersions(properties, interBrokerProtocolVersion) + TestUtils.setIbpVersion(properties, interBrokerProtocolVersion) val config = new KafkaConfig(properties) - val forwardingManagerOpt = if (enableForwarding) - Some(this.forwardingManager) - else - None - - val metadataSupport = if (raftSupport) { - // it will be up to the test to replace the default ZkMetadataCache implementation - // with a KRaftMetadataCache instance - metadataCache match { - case cache: KRaftMetadataCache => RaftSupport(forwardingManager, cache) - case _ => throw new IllegalStateException("Test must set an instance of KRaftMetadataCache") - } - } else { - metadataCache match { - case zkMetadataCache: ZkMetadataCache => - ZkSupport(adminManager, controller, zkClient, forwardingManagerOpt, zkMetadataCache, brokerEpochManager) - case _ => throw new IllegalStateException("Test must set an instance of ZkMetadataCache") - } - } + val listenerType = ListenerType.BROKER + val enabledApis = ApiKeys.apisForListener(listenerType).asScala - val listenerType = if (raftSupport) ListenerType.BROKER else ListenerType.ZK_BROKER - val enabledApis = if (enableForwarding) { - ApiKeys.apisForListener(listenerType).asScala ++ Set(ApiKeys.ENVELOPE) - } else { - ApiKeys.apisForListener(listenerType).asScala.toSet - } val apiVersionManager = new SimpleApiVersionManager( listenerType, enabledApis, BrokerFeatures.defaultSupportedFeatures(true), true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, raftSupport)) - - val clientMetricsManagerOpt = if (raftSupport) Some(clientMetricsManager) else None + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) when(groupCoordinator.isNewGroupCoordinator).thenReturn(config.isNewGroupCoordinatorEnabled) setupFeatures(featureVersions) new KafkaApis( requestChannel = requestChannel, - metadataSupport = metadataSupport, + forwardingManager = forwardingManager, replicaManager = replicaManager, groupCoordinator = groupCoordinator, txnCoordinator = txnCoordinator, @@ -241,13 +200,13 @@ class KafkaApisTest extends Logging { authorizer = authorizer, quotas = quotas, fetchManager = fetchManager, - sharePartitionManager = 
Some(sharePartitionManager), + sharePartitionManager = sharePartitionManager, brokerTopicStats = brokerTopicStats, clusterId = clusterId, time = time, tokenManager = null, apiVersionManager = apiVersionManager, - clientMetricsManager = clientMetricsManagerOpt) + clientMetricsManager = clientMetricsManager) } private def setupFeatures(featureVersions: Seq[FeatureVersion]): Unit = { @@ -296,7 +255,7 @@ class KafkaApisTest extends Logging { topicConfigs.put(propName, propValue) when(configRepository.topicConfig(resourceName)).thenReturn(topicConfigs) - metadataCache = mock(classOf[ZkMetadataCache]) + metadataCache = mock(classOf[KRaftMetadataCache]) when(metadataCache.contains(resourceName)).thenReturn(true) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() @@ -305,10 +264,8 @@ class KafkaApisTest extends Logging { .setResourceName(resourceName) .setResourceType(ConfigResource.Type.TOPIC.id)).asJava)) .build(requestHeader.apiVersion) - val request = buildRequest(describeConfigsRequest, - requestHeader = Option(requestHeader)) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) + val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) + kafkaApis = createKafkaApis(authorizer = Some(authorizer), configRepository = configRepository) kafkaApis.handleDescribeConfigsRequest(request) @@ -326,205 +283,6 @@ class KafkaApisTest extends Logging { assertEquals(propValue, describeConfigsResponseData.value) } - @Test - def testEnvelopeRequestHandlingAsController(): Unit = { - testEnvelopeRequestWithAlterConfig( - alterConfigHandler = () => ApiError.NONE, - expectedError = Errors.NONE - ) - } - - @Test - def testEnvelopeRequestWithAlterConfigUnhandledError(): Unit = { - testEnvelopeRequestWithAlterConfig( - alterConfigHandler = () => throw new IllegalStateException(), - expectedError = Errors.UNKNOWN_SERVER_ERROR - ) - } - - private def testEnvelopeRequestWithAlterConfig( - alterConfigHandler: () => ApiError, - expectedError: Errors - ): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - authorizeResource(authorizer, AclOperation.CLUSTER_ACTION, ResourceType.CLUSTER, Resource.CLUSTER_NAME, AuthorizationResult.ALLOWED) - - val operation = AclOperation.ALTER_CONFIGS - val resourceName = "topic-1" - val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, - clientId, 0) - - when(controller.isActive).thenReturn(true) - - authorizeResource(authorizer, operation, ResourceType.TOPIC, resourceName, AuthorizationResult.ALLOWED) - - val configResource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName) - when(adminManager.alterConfigs(any(), ArgumentMatchers.eq(false))) - .thenAnswer(_ => { - Map(configResource -> alterConfigHandler.apply()) - }) - - val configs = Map( - configResource -> new AlterConfigsRequest.Config( - Seq(new AlterConfigsRequest.ConfigEntry("foo", "bar")).asJava)) - val alterConfigsRequest = new AlterConfigsRequest.Builder(configs.asJava, false).build(requestHeader.apiVersion) - - val startTimeNanos = time.nanoseconds() - val queueDurationNanos = 5 * 1000 * 1000 - val request = TestUtils.buildEnvelopeRequest( - alterConfigsRequest, kafkaPrincipalSerde, requestChannelMetrics, startTimeNanos, startTimeNanos + queueDurationNanos) - - val capturedResponse: ArgumentCaptor[AlterConfigsResponse] = ArgumentCaptor.forClass(classOf[AlterConfigsResponse]) - val capturedRequest: 
ArgumentCaptor[RequestChannel.Request] = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) - kafkaApis = createKafkaApis(authorizer = Some(authorizer), enableForwarding = true) - kafkaApis.handle(request, RequestLocal.withThreadConfinedCaching) - - verify(requestChannel).sendResponse( - capturedRequest.capture(), - capturedResponse.capture(), - any() - ) - assertEquals(Some(request), capturedRequest.getValue.envelope) - // the dequeue time of forwarded request should equals to envelop request - assertEquals(request.requestDequeueTimeNanos, capturedRequest.getValue.requestDequeueTimeNanos) - val innerResponse = capturedResponse.getValue - val responseMap = innerResponse.data.responses().asScala.map { resourceResponse => - resourceResponse.resourceName -> Errors.forCode(resourceResponse.errorCode) - }.toMap - - assertEquals(Map(resourceName -> expectedError), responseMap) - - verify(controller).isActive - verify(adminManager).alterConfigs(any(), ArgumentMatchers.eq(false)) - } - - @Test - def testInvalidEnvelopeRequestWithNonForwardableAPI(): Unit = { - val requestHeader = new RequestHeader(ApiKeys.LEAVE_GROUP, ApiKeys.LEAVE_GROUP.latestVersion, - clientId, 0) - val leaveGroupRequest = new LeaveGroupRequest.Builder("group", - Collections.singletonList(new MemberIdentity())).build(requestHeader.apiVersion) - - when(controller.isActive).thenReturn(true) - - val request = TestUtils.buildEnvelopeRequest( - leaveGroupRequest, kafkaPrincipalSerde, requestChannelMetrics, time.nanoseconds()) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(enableForwarding = true) - kafkaApis.handle(request, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[EnvelopeResponse](request) - assertEquals(Errors.INVALID_REQUEST, response.error()) - } - - @Test - def testEnvelopeRequestWithNotFromPrivilegedListener(): Unit = { - testInvalidEnvelopeRequest(Errors.NONE, fromPrivilegedListener = false, - shouldCloseConnection = true) - } - - @Test - def testEnvelopeRequestNotAuthorized(): Unit = { - testInvalidEnvelopeRequest(Errors.CLUSTER_AUTHORIZATION_FAILED, - performAuthorize = true, authorizeResult = AuthorizationResult.DENIED) - } - - @Test - def testEnvelopeRequestNotControllerHandling(): Unit = { - testInvalidEnvelopeRequest(Errors.NOT_CONTROLLER, performAuthorize = true, isActiveController = false) - } - - private def testInvalidEnvelopeRequest(expectedError: Errors, - fromPrivilegedListener: Boolean = true, - shouldCloseConnection: Boolean = false, - performAuthorize: Boolean = false, - authorizeResult: AuthorizationResult = AuthorizationResult.ALLOWED, - isActiveController: Boolean = true): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - if (performAuthorize) { - authorizeResource(authorizer, AclOperation.CLUSTER_ACTION, ResourceType.CLUSTER, Resource.CLUSTER_NAME, authorizeResult) - } - - val resourceName = "topic-1" - val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, - clientId, 0) - - when(controller.isActive).thenReturn(isActiveController) - - val configResource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName) - - val configs = Map( - configResource -> new AlterConfigsRequest.Config( - Seq(new AlterConfigsRequest.ConfigEntry("foo", "bar")).asJava)) - val alterConfigsRequest = new AlterConfigsRequest.Builder(configs.asJava, false) - .build(requestHeader.apiVersion) - - val request = 
TestUtils.buildEnvelopeRequest( - alterConfigsRequest, kafkaPrincipalSerde, requestChannelMetrics, time.nanoseconds(), fromPrivilegedListener = fromPrivilegedListener) - - val capturedResponse: ArgumentCaptor[AbstractResponse] = ArgumentCaptor.forClass(classOf[AbstractResponse]) - kafkaApis = createKafkaApis(authorizer = Some(authorizer), enableForwarding = true) - kafkaApis.handle(request, RequestLocal.withThreadConfinedCaching) - - if (shouldCloseConnection) { - verify(requestChannel).closeConnection( - ArgumentMatchers.eq(request), - ArgumentMatchers.eq(java.util.Collections.emptyMap()) - ) - } else { - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None)) - val response = capturedResponse.getValue.asInstanceOf[EnvelopeResponse] - assertEquals(expectedError, response.error) - } - if (performAuthorize) { - verify(authorizer).authorize(any(), any()) - } - } - - @Test - def testAlterConfigsWithAuthorizer(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - val authorizedTopic = "authorized-topic" - val unauthorizedTopic = "unauthorized-topic" - val (authorizedResource, unauthorizedResource) = - createConfigsWithAuthorization(authorizer, authorizedTopic, unauthorizedTopic) - - val configs = Map( - authorizedResource -> new AlterConfigsRequest.Config( - Seq(new AlterConfigsRequest.ConfigEntry("foo", "bar")).asJava), - unauthorizedResource -> new AlterConfigsRequest.Config( - Seq(new AlterConfigsRequest.ConfigEntry("foo-1", "bar-1")).asJava) - ) - - val topicHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, - clientId, 0) - - val alterConfigsRequest = new AlterConfigsRequest.Builder(configs.asJava, false) - .build(topicHeader.apiVersion) - val request = buildRequest(alterConfigsRequest) - - when(controller.isActive).thenReturn(false) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(adminManager.alterConfigs(any(), ArgumentMatchers.eq(false))) - .thenReturn(Map(authorizedResource -> ApiError.NONE)) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handleAlterConfigsRequest(request) - - val response = verifyNoThrottling[AlterConfigsResponse](request) - verifyAlterConfigResult(response, Map(authorizedTopic -> Errors.NONE, - unauthorizedTopic -> Errors.TOPIC_AUTHORIZATION_FAILED)) - verify(authorizer, times(2)).authorize(any(), any()) - verify(adminManager).alterConfigs(any(), anyBoolean()) - } - @Test def testElectLeadersForwarding(): Unit = { val requestBuilder = new ElectLeadersRequest.Builder(ElectionType.PREFERRED, null, 30000) @@ -546,20 +304,15 @@ class KafkaApisTest extends Logging { val incrementalAlterConfigsRequest = getIncrementalAlterConfigRequestBuilder( Seq(resource), "consumer.session.timeout.ms", "45000").build(requestHeader.apiVersion) - val request = buildRequest(incrementalAlterConfigsRequest, - fromPrivilegedListener = true, requestHeader = Option(requestHeader)) - - when(controller.isActive).thenReturn(true) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(adminManager.incrementalAlterConfigs(any(), ArgumentMatchers.eq(false))) - .thenReturn(Map(resource -> ApiError.NONE)) + val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => 
KRaftVersion.LATEST_PRODUCTION) createKafkaApis(authorizer = Some(authorizer)).handleIncrementalAlterConfigsRequest(request) - val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) - verifyIncrementalAlterConfigResult(response, Map(consumerGroupId -> Errors.NONE)) - verify(authorizer, times(1)).authorize(any(), any()) - verify(adminManager).incrementalAlterConfigs(any(), anyBoolean()) + verify(forwardingManager, times(1)).forwardRequest( + any(), + any(), + any() + ) } @Test @@ -585,7 +338,7 @@ class KafkaApisTest extends Logging { cgConfigs.put(SHARE_SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.SHARE_GROUP_SESSION_TIMEOUT_MS_DEFAULT.toString) cgConfigs.put(SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, GroupCoordinatorConfig.SHARE_GROUP_HEARTBEAT_INTERVAL_MS_DEFAULT.toString) cgConfigs.put(SHARE_RECORD_LOCK_DURATION_MS_CONFIG, ShareGroupConfig.SHARE_GROUP_RECORD_LOCK_DURATION_MS_DEFAULT.toString) - cgConfigs.put(SHARE_AUTO_OFFSET_RESET_CONFIG, GroupConfig.defaultShareAutoOffsetReset.toString) + cgConfigs.put(SHARE_AUTO_OFFSET_RESET_CONFIG, GroupConfig.SHARE_AUTO_OFFSET_RESET_DEFAULT) when(configRepository.groupConfig(consumerGroupId)).thenReturn(cgConfigs) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() @@ -620,10 +373,6 @@ class KafkaApisTest extends Logging { val subscriptionName = "client_metric_subscription_1" val authorizedResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName) - val authorizer: Authorizer = mock(classOf[Authorizer]) - authorizeResource(authorizer, AclOperation.ALTER_CONFIGS, ResourceType.CLUSTER, - Resource.CLUSTER_NAME, AuthorizationResult.ALLOWED) - val props = ClientMetricsTestUtils.defaultProperties val configEntries = new util.ArrayList[AlterConfigsRequest.ConfigEntry]() props.forEach((x, y) => @@ -632,51 +381,39 @@ class KafkaApisTest extends Logging { val configs = Map(authorizedResource -> new AlterConfigsRequest.Config(configEntries)) val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, clientId, 0) - val request = buildRequest( - new AlterConfigsRequest.Builder(configs.asJava, false).build(requestHeader.apiVersion)) + val apiRequest = new AlterConfigsRequest.Builder(configs.asJava, false).build(requestHeader.apiVersion) + val request = buildRequest(apiRequest) - when(controller.isActive).thenReturn(false) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(adminManager.alterConfigs(any(), ArgumentMatchers.eq(false))) - .thenReturn(Map(authorizedResource -> ApiError.NONE)) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) - val response = verifyNoThrottling[AlterConfigsResponse](request) - verifyAlterConfigResult(response, Map(subscriptionName -> Errors.NONE)) - verify(authorizer, times(1)).authorize(any(), any()) - verify(adminManager).alterConfigs(any(), anyBoolean()) + verify(forwardingManager, times(1)).forwardRequest( + any(), + any(), + any() + ) } @Test def testIncrementalClientMetricAlterConfigs(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val subscriptionName = "client_metric_subscription_1" val resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName) - authorizeResource(authorizer, 
AclOperation.ALTER_CONFIGS, ResourceType.CLUSTER, - Resource.CLUSTER_NAME, AuthorizationResult.ALLOWED) - val requestHeader = new RequestHeader(ApiKeys.INCREMENTAL_ALTER_CONFIGS, ApiKeys.INCREMENTAL_ALTER_CONFIGS.latestVersion, clientId, 0) val incrementalAlterConfigsRequest = getIncrementalAlterConfigRequestBuilder( Seq(resource), "metrics", "foo.bar").build(requestHeader.apiVersion) - val request = buildRequest(incrementalAlterConfigsRequest, - fromPrivilegedListener = true, requestHeader = Option(requestHeader)) + val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) - when(controller.isActive).thenReturn(true) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(adminManager.incrementalAlterConfigs(any(), ArgumentMatchers.eq(false))) - .thenReturn(Map(resource -> ApiError.NONE)) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) - val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) - verifyIncrementalAlterConfigResult(response, Map(subscriptionName -> Errors.NONE )) - verify(authorizer, times(1)).authorize(any(), any()) - verify(adminManager).incrementalAlterConfigs(any(), anyBoolean()) + verify(forwardingManager, times(1)).forwardRequest( + any(), + any(), + any() + ) } private def getIncrementalAlterConfigRequestBuilder(configResources: Seq[ConfigResource], @@ -710,7 +447,7 @@ class KafkaApisTest extends Logging { val cmConfigs = ClientMetricsTestUtils.defaultProperties when(configRepository.config(resource)).thenReturn(cmConfigs) - metadataCache = mock(classOf[ZkMetadataCache]) + metadataCache = mock(classOf[KRaftMetadataCache]) when(metadataCache.contains(subscriptionName)).thenReturn(true) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() @@ -721,8 +458,7 @@ class KafkaApisTest extends Logging { .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) + kafkaApis = createKafkaApis(authorizer = Some(authorizer), configRepository = configRepository) kafkaApis.handleDescribeConfigsRequest(request) @@ -739,28 +475,12 @@ class KafkaApisTest extends Logging { assertEquals(cmConfigs.size, configs.size) } - @Test - def testDescribeQuorumNotAllowedForZkClusters(): Unit = { - val requestData = DescribeQuorumRequest.singletonRequest(KafkaRaftServer.MetadataPartition) - val requestBuilder = new DescribeQuorumRequest.Builder(requestData) - val request = buildRequest(requestBuilder.build(DescribeQuorumRequestData.HIGHEST_SUPPORTED_VERSION)) - - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(enableForwarding = true) - kafkaApis.handle(request, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[DescribeQuorumResponse](request) - assertEquals(Errors.UNKNOWN_SERVER_ERROR, Errors.forCode(response.data.errorCode)) - assertEquals(Errors.UNKNOWN_SERVER_ERROR.message(), response.data.errorMessage) - } - @Test def testDescribeQuorumForwardedForKRaftClusters(): Unit = { val requestData = 
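// Illustrative only: the shape these rewritten KafkaApisTest cases now share. Broker-side
// KafkaApis no longer applies (incremental) alter-config changes through a ZK admin
// manager; the tests install a KRaft metadata cache, build the handler without the old
// raftSupport/enableForwarding flags, and assert that the request was forwarded.
metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION)
kafkaApis = createKafkaApis()
kafkaApis.handleIncrementalAlterConfigsRequest(request)
verify(forwardingManager, times(1)).forwardRequest(any(), any(), any())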
DescribeQuorumRequest.singletonRequest(KafkaRaftServer.MetadataPartition) val requestBuilder = new DescribeQuorumRequest.Builder(requestData) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() testForwardableApi(kafkaApis = kafkaApis, ApiKeys.DESCRIBE_QUORUM, requestBuilder @@ -772,15 +492,7 @@ class KafkaApisTest extends Logging { requestBuilder: AbstractRequest.Builder[_ <: AbstractRequest] ): Unit = { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(enableForwarding = true, raftSupport = true) - testForwardableApi(kafkaApis = kafkaApis, - apiKey, - requestBuilder - ) - } - - private def testForwardableApi(apiKey: ApiKeys, requestBuilder: AbstractRequest.Builder[_ <: AbstractRequest]): Unit = { - kafkaApis = createKafkaApis(enableForwarding = true) + kafkaApis = createKafkaApis() testForwardableApi(kafkaApis = kafkaApis, apiKey, requestBuilder @@ -798,13 +510,6 @@ class KafkaApisTest extends Logging { val apiRequest = requestBuilder.build(topicHeader.apiVersion) val request = buildRequest(apiRequest) - if (kafkaApis.metadataSupport.isInstanceOf[ZkSupport]) { - // The controller check only makes sense for ZK clusters. For KRaft, - // controller requests are handled on a separate listener, so there - // is no choice but to forward them. - when(controller.isActive).thenReturn(false) - } - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) val forwardCallback: ArgumentCaptor[Option[AbstractResponse] => Unit] = ArgumentCaptor.forClass(classOf[Option[AbstractResponse] => Unit]) @@ -822,10 +527,6 @@ class KafkaApisTest extends Logging { val capturedResponse = verifyNoThrottling[AbstractResponse](request) assertEquals(expectedResponse.data, capturedResponse.data) - - if (kafkaApis.metadataSupport.isInstanceOf[ZkSupport]) { - verify(controller).isActive - } } private def authorizeResource(authorizer: Authorizer, @@ -848,207 +549,41 @@ class KafkaApisTest extends Logging { .thenReturn(Seq(result).asJava) } - private def verifyAlterConfigResult(response: AlterConfigsResponse, - expectedResults: Map[String, Errors]): Unit = { - val responseMap = response.data.responses().asScala.map { resourceResponse => - resourceResponse.resourceName -> Errors.forCode(resourceResponse.errorCode) - }.toMap - - assertEquals(expectedResults, responseMap) - } - - private def createConfigsWithAuthorization(authorizer: Authorizer, - authorizedTopic: String, - unauthorizedTopic: String): (ConfigResource, ConfigResource) = { - val authorizedResource = new ConfigResource(ConfigResource.Type.TOPIC, authorizedTopic) - - val unauthorizedResource = new ConfigResource(ConfigResource.Type.TOPIC, unauthorizedTopic) - - createTopicAuthorization(authorizer, AclOperation.ALTER_CONFIGS, authorizedTopic, unauthorizedTopic) - (authorizedResource, unauthorizedResource) - } - @Test def testIncrementalAlterConfigsWithAuthorizer(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizedTopic = "authorized-topic" - val unauthorizedTopic = "unauthorized-topic" - val (authorizedResource, unauthorizedResource) = - createConfigsWithAuthorization(authorizer, authorizedTopic, unauthorizedTopic) + val localResource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "localResource") + val forwardedResource = new ConfigResource(ConfigResource.Type.GROUP, 
"forwardedResource") val requestHeader = new RequestHeader(ApiKeys.INCREMENTAL_ALTER_CONFIGS, ApiKeys.INCREMENTAL_ALTER_CONFIGS.latestVersion, clientId, 0) - val incrementalAlterConfigsRequest = getIncrementalAlterConfigRequestBuilder(Seq(authorizedResource, unauthorizedResource)) + val incrementalAlterConfigsRequest = getIncrementalAlterConfigRequestBuilder(Seq(localResource, forwardedResource)) .build(requestHeader.apiVersion) - val request = buildRequest(incrementalAlterConfigsRequest, - fromPrivilegedListener = true, requestHeader = Option(requestHeader)) + val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) - when(controller.isActive).thenReturn(true) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(adminManager.incrementalAlterConfigs(any(), ArgumentMatchers.eq(false))) - .thenReturn(Map(authorizedResource -> ApiError.NONE)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleIncrementalAlterConfigsRequest(request) - val capturedResponse = verifyNoThrottling[IncrementalAlterConfigsResponse](request) - verifyIncrementalAlterConfigResult(capturedResponse, Map( - authorizedTopic -> Errors.NONE, - unauthorizedTopic -> Errors.TOPIC_AUTHORIZATION_FAILED - )) - - verify(authorizer, times(2)).authorize(any(), any()) - verify(adminManager).incrementalAlterConfigs(any(), anyBoolean()) + verify(authorizer, times(1)).authorize(any(), any()) + verify(forwardingManager, times(1)).forwardRequest( + any(), + any(), + any() + ) } private def getIncrementalAlterConfigRequestBuilder(configResources: Seq[ConfigResource]): IncrementalAlterConfigsRequest.Builder = { val resourceMap = configResources.map(configResource => { configResource -> Set( new AlterConfigOp(new ConfigEntry("foo", "bar"), - OpType.forId(configResource.`type`.id))).asJavaCollection + OpType.SET)).asJavaCollection }).toMap.asJava new IncrementalAlterConfigsRequest.Builder(resourceMap, false) } - private def verifyIncrementalAlterConfigResult(response: IncrementalAlterConfigsResponse, - expectedResults: Map[String, Errors]): Unit = { - val responseMap = response.data.responses.asScala.map { resourceResponse => - resourceResponse.resourceName -> Errors.forCode(resourceResponse.errorCode) - }.toMap - assertEquals(expectedResults, responseMap) - } - - @Test - def testAlterClientQuotasWithAuthorizer(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - authorizeResource(authorizer, AclOperation.ALTER_CONFIGS, ResourceType.CLUSTER, - Resource.CLUSTER_NAME, AuthorizationResult.DENIED) - - val quotaEntity = new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.USER, "user")) - val quotas = Seq(new ClientQuotaAlteration(quotaEntity, Seq.empty.asJavaCollection)) - - val requestHeader = new RequestHeader(ApiKeys.ALTER_CLIENT_QUOTAS, ApiKeys.ALTER_CLIENT_QUOTAS.latestVersion, clientId, 0) - - val alterClientQuotasRequest = new AlterClientQuotasRequest.Builder(quotas.asJavaCollection, false) - .build(requestHeader.apiVersion) - val request = buildRequest(alterClientQuotasRequest, - fromPrivilegedListener = true, requestHeader = Option(requestHeader)) - - when(controller.isActive).thenReturn(true) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - anyLong)).thenReturn(0) - kafkaApis = createKafkaApis(authorizer = 
Some(authorizer)) - kafkaApis.handleAlterClientQuotasRequest(request) - - val capturedResponse = verifyNoThrottling[AlterClientQuotasResponse](request) - verifyAlterClientQuotaResult(capturedResponse, Map(quotaEntity -> Errors.CLUSTER_AUTHORIZATION_FAILED)) - - verify(authorizer).authorize(any(), any()) - verify(clientRequestQuotaManager).maybeRecordAndGetThrottleTimeMs(any(), anyLong) - } - - @Test - def testAlterClientQuotasWithForwarding(): Unit = { - val requestBuilder = new AlterClientQuotasRequest.Builder(List.empty.asJava, false) - testForwardableApi(ApiKeys.ALTER_CLIENT_QUOTAS, requestBuilder) - } - - private def verifyAlterClientQuotaResult(response: AlterClientQuotasResponse, - expected: Map[ClientQuotaEntity, Errors]): Unit = { - val futures = expected.keys.map(quotaEntity => quotaEntity -> new KafkaFutureImpl[Void]()).toMap - response.complete(futures.asJava) - futures.foreach { - case (entity, future) => - future.whenComplete((_, thrown) => - assertEquals(thrown, expected(entity).exception()) - ).isDone - } - } - - @Test - def testCreateTopicsWithAuthorizer(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - val authorizedTopic = "authorized-topic" - val unauthorizedTopic = "unauthorized-topic" - - authorizeResource(authorizer, AclOperation.CREATE, ResourceType.CLUSTER, - Resource.CLUSTER_NAME, AuthorizationResult.DENIED, logIfDenied = false) - - createCombinedTopicAuthorization(authorizer, AclOperation.CREATE, - authorizedTopic, unauthorizedTopic) - - createCombinedTopicAuthorization(authorizer, AclOperation.DESCRIBE_CONFIGS, - authorizedTopic, unauthorizedTopic, logIfDenied = false) - - val requestHeader = new RequestHeader(ApiKeys.CREATE_TOPICS, ApiKeys.CREATE_TOPICS.latestVersion, clientId, 0) - - when(controller.isActive).thenReturn(true) - - val topics = new CreateTopicsRequestData.CreatableTopicCollection(3) - val topicToCreate = new CreateTopicsRequestData.CreatableTopic() - .setName(authorizedTopic) - topics.add(topicToCreate) - - val topicToFilter = new CreateTopicsRequestData.CreatableTopic() - .setName(unauthorizedTopic) - topics.add(topicToFilter) - - val topicToProhibited = new CreateTopicsRequestData.CreatableTopic() - .setName(Topic.CLUSTER_METADATA_TOPIC_NAME) - topics.add(topicToProhibited) - - val timeout = 10 - val createTopicsRequest = new CreateTopicsRequest.Builder( - new CreateTopicsRequestData() - .setTimeoutMs(timeout) - .setValidateOnly(false) - .setTopics(topics)) - .build(requestHeader.apiVersion) - val request = buildRequest(createTopicsRequest, - fromPrivilegedListener = true, requestHeader = Option(requestHeader)) - - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - when(clientControllerQuotaManager.newQuotaFor( - ArgumentMatchers.eq(request), ArgumentMatchers.eq(6))).thenReturn(UnboundedControllerMutationQuota) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handleCreateTopicsRequest(request) - - val capturedCallback: ArgumentCaptor[Map[String, ApiError] => Unit] = ArgumentCaptor.forClass(classOf[Map[String, ApiError] => Unit]) - - verify(adminManager).createTopics( - ArgumentMatchers.eq(timeout), - ArgumentMatchers.eq(false), - ArgumentMatchers.eq(Map(authorizedTopic -> topicToCreate)), - any(), - ArgumentMatchers.eq(UnboundedControllerMutationQuota), - capturedCallback.capture()) - capturedCallback.getValue.apply(Map(authorizedTopic -> ApiError.NONE)) - - val capturedResponse = 
verifyNoThrottling[CreateTopicsResponse](request) - verifyCreateTopicsResult(capturedResponse, - Map(authorizedTopic -> Errors.NONE, - unauthorizedTopic -> Errors.TOPIC_AUTHORIZATION_FAILED, - Topic.CLUSTER_METADATA_TOPIC_NAME -> Errors.INVALID_REQUEST), - Map(authorizedTopic -> Errors.NONE, - unauthorizedTopic -> Errors.TOPIC_AUTHORIZATION_FAILED, - Topic.CLUSTER_METADATA_TOPIC_NAME -> Errors.NONE)) - } - - @Test - def testCreateTopicsWithForwarding(): Unit = { - val requestBuilder = new CreateTopicsRequest.Builder( - new CreateTopicsRequestData().setTopics( - new CreatableTopicCollection(Collections.singleton( - new CreatableTopic().setName("topic").setNumPartitions(1). - setReplicationFactor(1.toShort)).iterator()))) - testForwardableApi(ApiKeys.CREATE_TOPICS, requestBuilder) - } - @ParameterizedTest @CsvSource(value = Array("0,1500", "1500,0", "3000,1000")) def testKRaftControllerThrottleTimeEnforced( @@ -1068,7 +603,7 @@ class KafkaApisTest extends Logging { val requestBuilder = new CreateTopicsRequest.Builder(requestData).build() val request = buildRequest(requestBuilder) - kafkaApis = createKafkaApis(enableForwarding = true, raftSupport = true) + kafkaApis = createKafkaApis() val forwardCallback: ArgumentCaptor[Option[AbstractResponse] => Unit] = ArgumentCaptor.forClass(classOf[Option[AbstractResponse] => Unit]) @@ -1100,180 +635,6 @@ class KafkaApisTest extends Logging { assertEquals(expectedThrottleTimeMs, responseData.throttleTimeMs) } - @Test - def testCreatePartitionsAuthorization(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - - val timeoutMs = 35000 - val requestData = new CreatePartitionsRequestData() - .setTimeoutMs(timeoutMs) - .setValidateOnly(false) - val fooCreatePartitionsData = new CreatePartitionsTopic().setName("foo").setAssignments(null).setCount(2) - val barCreatePartitionsData = new CreatePartitionsTopic().setName("bar").setAssignments(null).setCount(10) - requestData.topics().add(fooCreatePartitionsData) - requestData.topics().add(barCreatePartitionsData) - - val fooResource = new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.LITERAL) - val fooAction = new Action(AclOperation.ALTER, fooResource, 1, true, true) - - val barResource = new ResourcePattern(ResourceType.TOPIC, "bar", PatternType.LITERAL) - val barAction = new Action(AclOperation.ALTER, barResource, 1, true, true) - - when(authorizer.authorize( - any[RequestContext](), - any[util.List[Action]]() - )).thenAnswer { invocation => - val actions = invocation.getArgument[util.List[Action]](1).asScala - val results = actions.map { action => - if (action == fooAction) AuthorizationResult.ALLOWED - else if (action == barAction) AuthorizationResult.DENIED - else throw new AssertionError(s"Unexpected action $action") - } - new util.ArrayList[AuthorizationResult](results.asJava) - } - - val request = buildRequest(new CreatePartitionsRequest.Builder(requestData).build()) - - when(controller.isActive).thenReturn(true) - when(controller.isTopicQueuedForDeletion("foo")).thenReturn(false) - when(clientControllerQuotaManager.newQuotaFor( - ArgumentMatchers.eq(request), ArgumentMatchers.anyShort()) - ).thenReturn(UnboundedControllerMutationQuota) - when(adminManager.createPartitions( - timeoutMs = ArgumentMatchers.eq(timeoutMs), - newPartitions = ArgumentMatchers.eq(Seq(fooCreatePartitionsData)), - validateOnly = ArgumentMatchers.eq(false), - controllerMutationQuota = ArgumentMatchers.eq(UnboundedControllerMutationQuota), - 
callback = ArgumentMatchers.any[Map[String, ApiError] => Unit]() - )).thenAnswer { invocation => - val callback = invocation.getArgument[Map[String, ApiError] => Unit](4) - callback.apply(Map("foo" -> ApiError.NONE)) - } - - kafkaApis.handle(request, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[CreatePartitionsResponse](request) - val results = response.data.results.asScala - assertEquals(Some(Errors.NONE), results.find(_.name == "foo").map(result => Errors.forCode(result.errorCode))) - assertEquals(Some(Errors.TOPIC_AUTHORIZATION_FAILED), results.find(_.name == "bar").map(result => Errors.forCode(result.errorCode))) - } - - private def createTopicAuthorization(authorizer: Authorizer, - operation: AclOperation, - authorizedTopic: String, - unauthorizedTopic: String, - logIfAllowed: Boolean = true, - logIfDenied: Boolean = true): Unit = { - authorizeResource(authorizer, operation, ResourceType.TOPIC, - authorizedTopic, AuthorizationResult.ALLOWED, logIfAllowed, logIfDenied) - authorizeResource(authorizer, operation, ResourceType.TOPIC, - unauthorizedTopic, AuthorizationResult.DENIED, logIfAllowed, logIfDenied) - } - - private def createCombinedTopicAuthorization(authorizer: Authorizer, - operation: AclOperation, - authorizedTopic: String, - unauthorizedTopic: String, - logIfAllowed: Boolean = true, - logIfDenied: Boolean = true): Unit = { - val expectedAuthorizedActions = Seq( - new Action(operation, - new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), - 1, logIfAllowed, logIfDenied), - new Action(operation, - new ResourcePattern(ResourceType.TOPIC, unauthorizedTopic, PatternType.LITERAL), - 1, logIfAllowed, logIfDenied)) - - when(authorizer.authorize( - any[RequestContext], argThat((t: java.util.List[Action]) => t != null && t.containsAll(expectedAuthorizedActions.asJava)) - )).thenAnswer { invocation => - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]] - actions.asScala.map { action => - if (action.resourcePattern().name().equals(authorizedTopic)) - AuthorizationResult.ALLOWED - else - AuthorizationResult.DENIED - }.asJava - } - } - - private def verifyCreateTopicsResult(response: CreateTopicsResponse, - expectedErrorCodes: Map[String, Errors], - expectedTopicConfigErrorCodes: Map[String, Errors]): Unit = { - val actualErrorCodes = response.data.topics().asScala.map { topicResponse => - topicResponse.name() -> Errors.forCode(topicResponse.errorCode) - }.toMap - - assertEquals(expectedErrorCodes, actualErrorCodes) - - val actualTopicConfigErrorCodes = response.data.topics().asScala.map { topicResponse => - topicResponse.name() -> Errors.forCode(topicResponse.topicConfigErrorCode()) - }.toMap - - assertEquals(expectedTopicConfigErrorCodes, actualTopicConfigErrorCodes) - } - - @Test - def testCreateAclWithForwarding(): Unit = { - val requestBuilder = new CreateAclsRequest.Builder(new CreateAclsRequestData()) - testForwardableApi(ApiKeys.CREATE_ACLS, requestBuilder) - } - - @Test - def testDeleteAclWithForwarding(): Unit = { - val requestBuilder = new DeleteAclsRequest.Builder(new DeleteAclsRequestData()) - testForwardableApi(ApiKeys.DELETE_ACLS, requestBuilder) - } - - @Test - def testCreateDelegationTokenWithForwarding(): Unit = { - val requestBuilder = new CreateDelegationTokenRequest.Builder(new CreateDelegationTokenRequestData()) - testForwardableApi(ApiKeys.CREATE_DELEGATION_TOKEN, requestBuilder) - } - - @Test - def testRenewDelegationTokenWithForwarding(): Unit = { - val requestBuilder = new 
RenewDelegationTokenRequest.Builder(new RenewDelegationTokenRequestData()) - testForwardableApi(ApiKeys.RENEW_DELEGATION_TOKEN, requestBuilder) - } - - @Test - def testExpireDelegationTokenWithForwarding(): Unit = { - val requestBuilder = new ExpireDelegationTokenRequest.Builder(new ExpireDelegationTokenRequestData()) - testForwardableApi(ApiKeys.EXPIRE_DELEGATION_TOKEN, requestBuilder) - } - - @Test - def testAlterPartitionReassignmentsWithForwarding(): Unit = { - val requestBuilder = new AlterPartitionReassignmentsRequest.Builder(new AlterPartitionReassignmentsRequestData()) - testForwardableApi(ApiKeys.ALTER_PARTITION_REASSIGNMENTS, requestBuilder) - } - - @Test - def testCreatePartitionsWithForwarding(): Unit = { - val requestBuilder = new CreatePartitionsRequest.Builder(new CreatePartitionsRequestData()) - testForwardableApi(ApiKeys.CREATE_PARTITIONS, requestBuilder) - } - - @Test - def testUpdateFeaturesWithForwarding(): Unit = { - val requestBuilder = new UpdateFeaturesRequest.Builder(new UpdateFeaturesRequestData()) - testForwardableApi(ApiKeys.UPDATE_FEATURES, requestBuilder) - } - - @Test - def testDeleteTopicsWithForwarding(): Unit = { - val requestBuilder = new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData()) - testForwardableApi(ApiKeys.DELETE_TOPICS, requestBuilder) - } - - @Test - def testAlterScramWithForwarding(): Unit = { - val requestBuilder = new AlterUserScramCredentialsRequest.Builder(new AlterUserScramCredentialsRequestData()) - testForwardableApi(ApiKeys.ALTER_USER_SCRAM_CREDENTIALS, requestBuilder) - } - @Test def testFindCoordinatorAutoTopicCreationForOffsetTopic(): Unit = { testFindCoordinatorWithTopicCreation(CoordinatorType.GROUP) @@ -1494,8 +855,7 @@ class KafkaApisTest extends Logging { any[Long])).thenReturn(0) val capturedRequest = verifyTopicCreation(topicName, enableAutoTopicCreation, isInternal, request) - kafkaApis = createKafkaApis(authorizer = Some(authorizer), enableForwarding = enableAutoTopicCreation, - overrideProperties = topicConfigOverride) + kafkaApis = createKafkaApis(authorizer = Some(authorizer), overrideProperties = topicConfigOverride) kafkaApis.handleTopicMetadataRequest(request) val response = verifyNoThrottling[MetadataResponse](request) @@ -1871,6 +1231,7 @@ class KafkaApisTest extends Logging { 15L, 0.toShort, Map(invalidTopicPartition -> partitionOffsetCommitData).asJava, + true ).build() val request = buildRequest(offsetCommitRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -2152,6 +1513,7 @@ class KafkaApisTest extends Logging { producerId, epoch, Map(topicPartition -> partitionOffsetCommitData).asJava, + version >= TxnOffsetCommitRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 ).build(version) val request = buildRequest(offsetCommitRequest) @@ -2299,6 +1661,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(epoch), ArgumentMatchers.eq(Set(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition))), responseCallback.capture(), + ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) )).thenAnswer(_ => responseCallback.getValue.apply(Errors.PRODUCER_FENCED)) val kafkaApis = createKafkaApis() @@ -2357,6 +1720,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(epoch), ArgumentMatchers.eq(Set(topicPartition)), responseCallback.capture(), + ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) )).thenAnswer(_ => responseCallback.getValue.apply(Errors.PRODUCER_FENCED)) val kafkaApis = 
createKafkaApis() @@ -2432,6 +1796,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(epoch), ArgumentMatchers.eq(Set(tp0)), responseCallback.capture(), + any[TransactionVersion], ArgumentMatchers.eq(requestLocal) )).thenAnswer(_ => responseCallback.getValue.apply(Errors.NONE)) @@ -2646,7 +2011,7 @@ class KafkaApisTest extends Logging { val tp = new TopicPartition("topic", 0) - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -2708,7 +2073,7 @@ class KafkaApisTest extends Logging { val newLeaderId = 2 val newLeaderEpoch = 5 - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -2773,7 +2138,7 @@ class KafkaApisTest extends Logging { val tp = new TopicPartition(topic, 0) - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -2827,7 +2192,7 @@ class KafkaApisTest extends Logging { @Test def testProduceResponseMetadataLookupErrorOnNotLeaderOrFollower(): Unit = { val topic = "topic" - metadataCache = mock(classOf[ZkMetadataCache]) + metadataCache = mock(classOf[KRaftMetadataCache]) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { @@ -2837,7 +2202,7 @@ class KafkaApisTest extends Logging { val tp = new TopicPartition(topic, 0) - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -2894,13 +2259,13 @@ class KafkaApisTest extends Logging { addTopicToMetadataCache(topic, numPartitions = 2) - for (version <- 3 to ApiKeys.PRODUCE.latestVersion) { + for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) val tp = new TopicPartition("topic", 0) - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic).setPartitionData(Collections.singletonList( @@ -2964,52 +2329,7 @@ class KafkaApisTest extends Logging { checkInvalidPartition(-1) checkInvalidPartition(1) // topic has only one partition } - - @Test - def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_0_10_2_IV0) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, 
controller, None) - kafkaApis = createKafkaApis(IBP_0_10_2_IV0) - assertThrows(classOf[UnsupportedVersionException], - () => kafkaApis.handleAddOffsetsToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_0_10_2_IV0) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_0_10_2_IV0) - assertThrows(classOf[UnsupportedVersionException], - () => kafkaApis.handleAddPartitionsToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = { - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_0_10_2_IV0) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_0_10_2_IV0) - assertThrows(classOf[UnsupportedVersionException], - () => kafkaApis.handleAddPartitionsToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_0_10_2_IV0) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_0_10_2_IV0) - assertThrows(classOf[UnsupportedVersionException], - () => kafkaApis.handleEndTxnRequest(null, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = { - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_0_10_2_IV0) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_0_10_2_IV0) - assertThrows(classOf[UnsupportedVersionException], - () => kafkaApis.handleWriteTxnMarkersRequest(null, RequestLocal.withThreadConfinedCaching)) - } - + @Test def requiredAclsNotPresentWriteTxnMarkersThrowsAuthorizationException(): Unit = { val topicPartition = new TopicPartition("t", 0) @@ -3034,27 +2354,6 @@ class KafkaApisTest extends Logging { () => kafkaApis.handleWriteTxnMarkersRequest(request, RequestLocal.withThreadConfinedCaching)) } - @Test - def shouldRespondWithUnsupportedForMessageFormatOnHandleWriteTxnMarkersWhenMagicLowerThanRequired(): Unit = { - val topicPartition = new TopicPartition("t", 0) - val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) - val expectedErrors = Map(topicPartition -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT).asJava - val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - - when(replicaManager.getMagic(topicPartition)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) - kafkaApis = createKafkaApis() - kafkaApis.handleWriteTxnMarkersRequest(request, RequestLocal.withThreadConfinedCaching) - - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - val markersResponse = capturedResponse.getValue - assertEquals(expectedErrors, markersResponse.errorsByProducerId.get(1L)) - } - @Test def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = { val topicPartition = new TopicPartition("t", 0) @@ -3062,7 +2361,7 @@ class KafkaApisTest 
extends Logging { val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - when(replicaManager.getMagic(topicPartition)) + when(replicaManager.onlinePartition(topicPartition)) .thenReturn(None) kafkaApis = createKafkaApis() kafkaApis.handleWriteTxnMarkersRequest(request, RequestLocal.withThreadConfinedCaching) @@ -3081,7 +2380,7 @@ class KafkaApisTest extends Logging { // This test verifies the response will not be sent prematurely because of calling replicaManager append // with no records. val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) - val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( asList( new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), @@ -3089,8 +2388,8 @@ class KafkaApisTest extends Logging { val request = buildRequest(writeTxnMarkersRequest) val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - when(replicaManager.getMagic(any())) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(any())) + .thenReturn(Some(mock(classOf[Partition]))) when(groupCoordinator.isNewGroupCoordinator) .thenReturn(true) when(groupCoordinator.completeTransaction( @@ -3113,139 +2412,7 @@ class KafkaApisTest extends Logging { val markersResponse = capturedResponse.getValue assertEquals(2, markersResponse.errorsByProducerId.size()) } - - @Test - def shouldRespondWithUnsupportedMessageFormatForBadPartitionAndNoErrorsForGoodPartition(): Unit = { - val tp1 = new TopicPartition("t", 0) - val tp2 = new TopicPartition("t1", 0) - val (_, request) = createWriteTxnMarkersRequest(asList(tp1, tp2)) - val expectedErrors = Map(tp1 -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, tp2 -> Errors.NONE).asJava - - val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - - when(replicaManager.getMagic(tp1)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V1)) - when(replicaManager.getMagic(tp2)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) - - val requestLocal = RequestLocal.withThreadConfinedCaching - when(replicaManager.appendRecords(anyLong, - anyShort, - ArgumentMatchers.eq(true), - ArgumentMatchers.eq(AppendOrigin.COORDINATOR), - any(), - responseCallback.capture(), - any(), - any(), - ArgumentMatchers.eq(requestLocal), - any(), - any() - )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))) - kafkaApis = createKafkaApis() - kafkaApis.handleWriteTxnMarkersRequest(request, requestLocal) - - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - val markersResponse = capturedResponse.getValue - assertEquals(expectedErrors, markersResponse.errorsByProducerId.get(1L)) - } - - @Test - def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndLeaderEpoch(): Unit = { - shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag( - 
LeaderAndIsr.INITIAL_LEADER_EPOCH + 2, deletePartition = true) - } - - @Test - def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndDeleteSentinel(): Unit = { - shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag( - LeaderAndIsr.EPOCH_DURING_DELETE, deletePartition = true) - } - - @Test - def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndNoEpochSentinel(): Unit = { - shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag( - LeaderAndIsr.NO_EPOCH, deletePartition = true) - } - - @Test - def shouldNotResignCoordinatorsIfStopReplicaReceivedWithoutDeleteFlag(): Unit = { - shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag( - LeaderAndIsr.INITIAL_LEADER_EPOCH + 2, deletePartition = false) - } - - def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(leaderEpoch: Int, - deletePartition: Boolean): Unit = { - val controllerId = 0 - val controllerEpoch = 5 - val brokerEpoch = 230498320L - - val fooPartition = new TopicPartition("foo", 0) - val groupMetadataPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) - val txnStatePartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, 0) - - val topicStates = Seq( - new StopReplicaTopicState() - .setTopicName(groupMetadataPartition.topic) - .setPartitionStates(Seq(new StopReplicaPartitionState() - .setPartitionIndex(groupMetadataPartition.partition) - .setLeaderEpoch(leaderEpoch) - .setDeletePartition(deletePartition)).asJava), - new StopReplicaTopicState() - .setTopicName(txnStatePartition.topic) - .setPartitionStates(Seq(new StopReplicaPartitionState() - .setPartitionIndex(txnStatePartition.partition) - .setLeaderEpoch(leaderEpoch) - .setDeletePartition(deletePartition)).asJava), - new StopReplicaTopicState() - .setTopicName(fooPartition.topic) - .setPartitionStates(Seq(new StopReplicaPartitionState() - .setPartitionIndex(fooPartition.partition) - .setLeaderEpoch(leaderEpoch) - .setDeletePartition(deletePartition)).asJava) - ).asJava - - val stopReplicaRequest = new StopReplicaRequest.Builder( - ApiKeys.STOP_REPLICA.latestVersion, - controllerId, - controllerEpoch, - brokerEpoch, - false, - topicStates - ).build() - val request = buildRequest(stopReplicaRequest) - - when(replicaManager.stopReplicas( - ArgumentMatchers.eq(request.context.correlationId), - ArgumentMatchers.eq(controllerId), - ArgumentMatchers.eq(controllerEpoch), - ArgumentMatchers.eq(stopReplicaRequest.partitionStates().asScala) - )).thenReturn( - (mutable.Map( - groupMetadataPartition -> Errors.NONE, - txnStatePartition -> Errors.NONE, - fooPartition -> Errors.NONE - ), Errors.NONE) - ) - when(controller.brokerEpoch).thenReturn(brokerEpoch) - kafkaApis = createKafkaApis() - kafkaApis.handleStopReplicaRequest(request) - - if (deletePartition) { - if (leaderEpoch >= 0) { - verify(txnCoordinator).onResignation(txnStatePartition.partition, Some(leaderEpoch)) - verify(groupCoordinator).onResignation(groupMetadataPartition.partition, OptionalInt.of(leaderEpoch)) - } else { - verify(txnCoordinator).onResignation(txnStatePartition.partition, None) - verify(groupCoordinator).onResignation(groupMetadataPartition.partition, OptionalInt.empty) - } - } - } - + @Test def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = { val tp1 = new TopicPartition("t", 0) @@ -3256,10 +2423,10 @@ class KafkaApisTest extends Logging { val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) val responseCallback: 
ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - when(replicaManager.getMagic(tp1)) + when(replicaManager.onlinePartition(tp1)) .thenReturn(None) - when(replicaManager.getMagic(tp2)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(tp2)) + .thenReturn(Some(mock(classOf[Partition]))) val requestLocal = RequestLocal.withThreadConfinedCaching when(replicaManager.appendRecords(anyLong, @@ -3291,8 +2458,8 @@ class KafkaApisTest extends Logging { def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(allowedAclOperation: String): Unit = { val topicPartition = new TopicPartition("t", 0) val request = createWriteTxnMarkersRequest(asList(topicPartition))._2 - when(replicaManager.getMagic(topicPartition)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(topicPartition)) + .thenReturn(Some(mock(classOf[Partition]))) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -3318,18 +2485,143 @@ class KafkaApisTest extends Logging { )).thenReturn(allowedList) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handleWriteTxnMarkersRequest(request, requestLocal) - verify(replicaManager).appendRecords(anyLong, - anyShort, - ArgumentMatchers.eq(true), - ArgumentMatchers.eq(AppendOrigin.COORDINATOR), - any(), - any(), - any(), - any(), - ArgumentMatchers.eq(requestLocal), - any(), - any()) + kafkaApis.handleWriteTxnMarkersRequest(request, requestLocal) + verify(replicaManager).appendRecords(anyLong, + anyShort, + ArgumentMatchers.eq(true), + ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + any(), + any(), + any(), + any(), + ArgumentMatchers.eq(requestLocal), + any(), + any()) + } + + @Test + def testHandleWriteTxnMarkersRequestWithOldGroupCoordinator(): Unit = { + val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) + val offset1 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 1) + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + + val allPartitions = List( + offset0, + offset1, + foo0, + foo1 + ) + + val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( + List( + new TxnMarkerEntry( + 1L, + 1.toShort, + 0, + TransactionResult.COMMIT, + List(offset0, foo0).asJava + ), + new TxnMarkerEntry( + 2L, + 1.toShort, + 0, + TransactionResult.ABORT, + List(offset1, foo1).asJava + ) + ).asJava + ).build() + + val requestChannelRequest = buildRequest(writeTxnMarkersRequest) + + allPartitions.foreach { tp => + when(replicaManager.onlinePartition(tp)) + .thenReturn(Some(mock(classOf[Partition]))) + } + + when(groupCoordinator.onTransactionCompleted( + ArgumentMatchers.eq(1L), + ArgumentMatchers.any(), + ArgumentMatchers.eq(TransactionResult.COMMIT) + )).thenReturn(CompletableFuture.completedFuture[Void](null)) + + when(groupCoordinator.onTransactionCompleted( + ArgumentMatchers.eq(2L), + ArgumentMatchers.any(), + ArgumentMatchers.eq(TransactionResult.ABORT) + )).thenReturn(FutureUtils.failedFuture[Void](Errors.NOT_CONTROLLER.exception)) + + val entriesPerPartition: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + + when(replicaManager.appendRecords( + 
ArgumentMatchers.eq(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT.toLong), + ArgumentMatchers.eq(-1), + ArgumentMatchers.eq(true), + ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + entriesPerPartition.capture(), + responseCallback.capture(), + any(), + any(), + ArgumentMatchers.eq(RequestLocal.noCaching()), + any(), + any() + )).thenAnswer { _ => + responseCallback.getValue.apply( + entriesPerPartition.getValue.keySet.map { tp => + tp -> new PartitionResponse(Errors.NONE) + }.toMap + ) + } + kafkaApis = createKafkaApis(overrideProperties = Map( + GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false" + )) + kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching()) + + val expectedResponse = new WriteTxnMarkersResponseData() + .setMarkers(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerResult() + .setProducerId(1L) + .setTopics(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName("foo") + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerResult() + .setProducerId(2L) + .setTopics(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(1) + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName("foo") + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava) + ).asJava) + + val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) + assertEquals(normalize(expectedResponse), normalize(response.data)) } @Test @@ -3347,7 +2639,6 @@ class KafkaApisTest extends Logging { ) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - ApiKeys.WRITE_TXN_MARKERS.latestVersion(), List( new TxnMarkerEntry( 1L, @@ -3369,8 +2660,8 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(writeTxnMarkersRequest) allPartitions.foreach { tp => - when(replicaManager.getMagic(tp)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(tp)) + .thenReturn(Some(mock(classOf[Partition]))) } when(groupCoordinator.completeTransaction( @@ -3473,7 +2764,6 @@ class KafkaApisTest extends Logging { val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - ApiKeys.WRITE_TXN_MARKERS.latestVersion(), List( new TxnMarkerEntry( 1L, @@ -3487,8 +2777,8 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(writeTxnMarkersRequest) - when(replicaManager.getMagic(offset0)) - .thenReturn(Some(RecordBatch.MAGIC_VALUE_V2)) + when(replicaManager.onlinePartition(offset0)) + .thenReturn(Some(mock(classOf[Partition]))) when(groupCoordinator.completeTransaction( ArgumentMatchers.eq(offset0), @@ -3719,7 +3009,8 @@ class KafkaApisTest extends Logging { val 
describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( "group-1", "group-2", - "group-3" + "group-3", + "group-4" ).asJava) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3746,7 +3037,12 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NOT_COORDINATOR.code), new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-3") - .setErrorCode(Errors.REQUEST_TIMED_OUT.code) + .setErrorCode(Errors.REQUEST_TIMED_OUT.code), + new DescribeGroupsResponseData.DescribedGroup() + .setGroupId("group-4") + .setGroupState("Dead") + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code) + .setErrorMessage("Group group-4 is not a classic group.") ).asJava future.complete(groupResults) @@ -4239,11 +3535,6 @@ class KafkaApisTest extends Logging { testConsumerListOffsetWithUnsupportedVersion(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, 8) } - @Test - def testListOffsetNegativeTimestampWithZeroVersion(): Unit = { - testConsumerListOffsetWithUnsupportedVersion(-3, 0) - } - @Test def testListOffsetNegativeTimestampWithOneOrAboveVersion(): Unit = { testConsumerListOffsetWithUnsupportedVersion(-6, 1) @@ -4271,80 +3562,21 @@ class KafkaApisTest extends Logging { assertEquals(Set(0), response.brokers.asScala.map(_.id).toSet) } - - /** - * Metadata request to fetch all topics should not result in the followings: - * 1) Auto topic creation - * 2) UNKNOWN_TOPIC_OR_PARTITION - * - * This case is testing the case that a topic is being deleted from MetadataCache right after - * authorization but before checking in MetadataCache. - */ - @Test - def testGetAllTopicMetadataShouldNotCreateTopicOrReturnUnknownTopicPartition(): Unit = { - // Setup: authorizer authorizes 2 topics, but one got deleted in metadata cache - metadataCache = mock(classOf[ZkMetadataCache]) - when(metadataCache.getAliveBrokerNodes(any())).thenReturn(List(new Node(brokerId,"localhost", 0))) - when(metadataCache.getControllerId).thenReturn(None) - - // 2 topics returned for authorization in during handle - val topicsReturnedFromMetadataCacheForAuthorization = Set("remaining-topic", "later-deleted-topic") - when(metadataCache.getAllTopics()).thenReturn(topicsReturnedFromMetadataCacheForAuthorization) - // 1 topic is deleted from metadata right at the time between authorization and the next getTopicMetadata() call - when(metadataCache.getTopicMetadata( - ArgumentMatchers.eq(topicsReturnedFromMetadataCacheForAuthorization), - any[ListenerName], - anyBoolean, - anyBoolean - )).thenReturn(Seq( - new MetadataResponseTopic() - .setErrorCode(Errors.NONE.code) - .setName("remaining-topic") - .setIsInternal(false) - )) - - - var createTopicIsCalled: Boolean = false - // Specific mock on zkClient for this use case - // Expect it's never called to do auto topic creation - when(zkClient.setOrCreateEntityConfigs( - ArgumentMatchers.eq(ConfigType.TOPIC), - anyString, - any[Properties] - )).thenAnswer(_ => { - createTopicIsCalled = true - }) - // No need to use - when(zkClient.getAllBrokersInCluster) - .thenReturn(Seq(new Broker( - brokerId, "localhost", 9902, - ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), SecurityProtocol.PLAINTEXT - ))) - - - val (requestListener, _) = updateMetadataCacheWithInconsistentListeners() - val response = sendMetadataRequestWithInconsistentListeners(requestListener) - - assertFalse(createTopicIsCalled) - val responseTopics = response.topicMetadata().asScala.map { metadata => metadata.topic() } - assertEquals(List("remaining-topic"), 
responseTopics) - assertTrue(response.topicsByError(Errors.UNKNOWN_TOPIC_OR_PARTITION).isEmpty) - } - @Test def testUnauthorizedTopicMetadataRequest(): Unit = { // 1. Set up broker information val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT) - val broker = new UpdateMetadataBroker() - .setId(0) - .setRack("rack") - .setEndpoints(Seq( - new UpdateMetadataEndpoint() - .setHost("broker0") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListener.value) - ).asJava) + val endpoints = new BrokerEndpointCollection() + endpoints.add( + new BrokerEndpoint() + .setHost("broker0") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListener.value) + ) + MetadataCacheTest.updateCache(metadataCache, + Seq(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints)) + ) // 2. Set up authorizer val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -4358,41 +3590,33 @@ class KafkaApisTest extends Logging { when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions.asJava)))) .thenAnswer { invocation => - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]].asScala - actions.map { action => - if (action.resourcePattern().name().equals(authorizedTopic)) - AuthorizationResult.ALLOWED - else - AuthorizationResult.DENIED - }.asJava - } + val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]].asScala + actions.map { action => + if (action.resourcePattern().name().equals(authorizedTopic)) + AuthorizationResult.ALLOWED + else + AuthorizationResult.DENIED + }.asJava + } // 3. Set up MetadataCache val authorizedTopicId = Uuid.randomUuid() val unauthorizedTopicId = Uuid.randomUuid() + addTopicToMetadataCache(authorizedTopic, 1, topicId = authorizedTopicId) + addTopicToMetadataCache(unauthorizedTopic, 1, topicId = unauthorizedTopicId) - val topicIds = new util.HashMap[String, Uuid]() - topicIds.put(authorizedTopic, authorizedTopicId) - topicIds.put(unauthorizedTopic, unauthorizedTopicId) - - def createDummyPartitionStates(topic: String) = { - new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(0) + def createDummyPartitionRecord(topicId: Uuid) = { + new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) .setLeader(0) .setLeaderEpoch(0) .setReplicas(Collections.singletonList(0)) - .setZkVersion(0) .setIsr(Collections.singletonList(0)) } - // Send UpdateMetadataReq to update MetadataCache - val partitionStates = Seq(unauthorizedTopic, authorizedTopic).map(createDummyPartitionStates) - - val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0, - 0, 0, partitionStates.asJava, Seq(broker).asJava, topicIds).build() - metadataCache.asInstanceOf[ZkMetadataCache].updateMetadata(correlationId = 0, updateMetadataRequest) + val partitionRecords = Seq(authorizedTopicId, unauthorizedTopicId).map(createDummyPartitionRecord) + MetadataCacheTest.updateCache(metadataCache, partitionRecords) // 4. 
Send TopicMetadataReq using topicId val metadataReqByTopicId = new MetadataRequest.Builder(util.Arrays.asList(authorizedTopicId, unauthorizedTopicId)).build() @@ -4621,9 +3845,9 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, partitionData.errorCode) assertEquals(newLeaderId, partitionData.currentLeader.leaderId()) assertEquals(newLeaderEpoch, partitionData.currentLeader.leaderEpoch()) - val node = response.data.nodeEndpoints.asScala.head - assertEquals(2, node.nodeId) - assertEquals("broker2", node.host) + val node = response.data.nodeEndpoints.asScala + assertEquals(Seq(2), node.map(_.nodeId)) + assertEquals(Seq("broker2"), node.map(_.host)) } @Test @@ -4639,7 +3863,7 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -4682,7 +3906,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4711,7 +3935,7 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -4765,7 +3989,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4816,7 +4040,7 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -4868,7 +4092,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4920,7 +4144,7 @@ class KafkaApisTest extends Logging { addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + 
when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) ) @@ -4951,7 +4175,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4972,7 +4196,7 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -5028,7 +4252,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -5047,7 +4271,7 @@ class KafkaApisTest extends Logging { val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) ) @@ -5092,7 +4316,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -5111,7 +4335,7 @@ class KafkaApisTest extends Logging { val records = MemoryRecords.EMPTY - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -5148,7 +4372,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -5176,7 +4400,7 @@ class KafkaApisTest extends Logging { val groupId = "group" val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new 
ShareFetchResponseData.PartitionData() @@ -5219,7 +4443,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5269,7 +4493,7 @@ class KafkaApisTest extends Logging { val groupId = "group" val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -5312,7 +4536,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5362,7 +4586,7 @@ class KafkaApisTest extends Logging { val records2 = memoryRecords(10, 10) val records3 = memoryRecords(10, 20) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() @@ -5459,7 +4683,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5590,7 +4814,7 @@ class KafkaApisTest extends Logging { val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> new ShareFetchResponseData.PartitionData() @@ -5792,7 +5016,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -6065,7 +5289,7 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( tp1 -> new 
ShareFetchResponseData.PartitionData() @@ -6157,7 +5381,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -6229,7 +5453,7 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( tp1 -> new ShareFetchResponseData.PartitionData() @@ -6303,7 +5527,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -6369,7 +5593,7 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( tp1 -> new ShareFetchResponseData.PartitionData() @@ -6446,7 +5670,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -6523,7 +5747,7 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) val tp4 = new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( tp2 -> new ShareFetchResponseData.PartitionData() @@ -6615,7 +5839,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, @@ -6715,7 +5939,7 @@ class KafkaApisTest extends Logging { val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any())).thenReturn( CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new 
ShareFetchResponseData.PartitionData() @@ -6788,7 +6012,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -6875,7 +6099,7 @@ class KafkaApisTest extends Logging { GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false", ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) @@ -6918,7 +6142,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) @@ -6971,7 +6195,7 @@ class KafkaApisTest extends Logging { ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) @@ -7034,7 +6258,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -7101,7 +6325,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7149,7 +6373,7 @@ class KafkaApisTest extends Logging { GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false", ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7191,7 +6415,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7243,7 +6467,7 @@ class KafkaApisTest extends Logging { ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7294,7 +6518,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) 
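// A minimal sketch of the stubbing pattern the share-fetch/share-acknowledge tests in this
// file converge on after this change: SharePartitionManager.fetchMessages takes an extra Int
// argument (matched with anyInt()), and createKafkaApis is called without the removed
// raftSupport flag. The sketch assumes it sits inside this test class and reuses its mocked
// sharePartitionManager field; the helper name is illustrative, while the other names and
// signatures are taken from the hunks above.
import java.util.Collections
import java.util.concurrent.CompletableFuture
import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid}
import org.apache.kafka.common.message.ShareFetchResponseData
import org.apache.kafka.common.protocol.Errors
import org.mockito.ArgumentMatchers.{any, anyInt}
import org.mockito.Mockito.when

def stubSuccessfulShareFetch(topicId: Uuid, topicName: String, partition: Int): Unit = {
  val tip = new TopicIdPartition(topicId, new TopicPartition(topicName, partition))
  val partitionData = new ShareFetchResponseData.PartitionData()
    .setPartitionIndex(partition)
    .setErrorCode(Errors.NONE.code)
  // The fourth matcher corresponds to the Int parameter added by this patch.
  when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), any()))
    .thenReturn(CompletableFuture.completedFuture(Collections.singletonMap(tip, partitionData)))
}
// Usage inside a test, mirroring the hunks above:
//   stubSuccessfulShareFetch(topicId, topicName, partition = 0)
//   kafkaApis = createKafkaApis(overrideProperties = Map(
//     ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true",
//     ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"))
//   kafkaApis.handleShareFetchRequest(request)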
kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7345,7 +6569,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7394,7 +6618,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7469,7 +6693,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7532,7 +6756,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7599,7 +6823,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7667,7 +6891,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7753,7 +6977,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicNames, erroneous) assertEquals(4, acknowledgeBatches.size) @@ -7822,7 +7046,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -7895,7 +7119,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicNames, erroneous) assertEquals(3, acknowledgeBatches.size) @@ -7962,7 
+7186,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -8034,7 +7258,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -8113,7 +7337,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -8193,7 +7417,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -8267,7 +7491,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -8364,7 +7588,7 @@ class KafkaApisTest extends Logging { overrideProperties = Map( ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true) + ) val response = kafkaApis.processShareAcknowledgeResponse(responseAcknowledgeData, request) val responseData = response.data() val topicResponses = responseData.responses() @@ -8828,107 +8052,6 @@ class KafkaApisTest extends Logging { assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, response.error) } - @Test - def rejectJoinGroupRequestWhenStaticMembershipNotSupported(): Unit = { - val joinGroupRequest = new JoinGroupRequest.Builder( - new JoinGroupRequestData() - .setGroupId("test") - .setMemberId("test") - .setGroupInstanceId("instanceId") - .setProtocolType("consumer") - .setProtocols(new JoinGroupRequestData.JoinGroupRequestProtocolCollection) - ).build() - - val requestChannelRequest = buildRequest(joinGroupRequest) - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_2_2_IV1) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleJoinGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[JoinGroupResponse](requestChannelRequest) - assertEquals(Errors.UNSUPPORTED_VERSION, response.error()) - } - - @Test - def rejectSyncGroupRequestWhenStaticMembershipNotSupported(): Unit = { - val syncGroupRequest = new SyncGroupRequest.Builder( - new SyncGroupRequestData() - .setGroupId("test") - .setMemberId("test") - .setGroupInstanceId("instanceId") - .setGenerationId(1) - ).build() - - val requestChannelRequest = buildRequest(syncGroupRequest) - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_2_2_IV1) - brokerEpochManager = new 
ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleSyncGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[SyncGroupResponse](requestChannelRequest) - assertEquals(Errors.UNSUPPORTED_VERSION, response.error) - } - - @Test - def rejectHeartbeatRequestWhenStaticMembershipNotSupported(): Unit = { - val heartbeatRequest = new HeartbeatRequest.Builder( - new HeartbeatRequestData() - .setGroupId("test") - .setMemberId("test") - .setGroupInstanceId("instanceId") - .setGenerationId(1) - ).build() - val requestChannelRequest = buildRequest(heartbeatRequest) - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_2_2_IV1) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleHeartbeatRequest(requestChannelRequest) - - val response = verifyNoThrottling[HeartbeatResponse](requestChannelRequest) - assertEquals(Errors.UNSUPPORTED_VERSION, response.error()) - } - - @Test - def rejectOffsetCommitRequestWhenStaticMembershipNotSupported(): Unit = { - val offsetCommitRequest = new OffsetCommitRequest.Builder( - new OffsetCommitRequestData() - .setGroupId("test") - .setMemberId("test") - .setGroupInstanceId("instanceId") - .setGenerationIdOrMemberEpoch(100) - .setTopics(Collections.singletonList( - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setName("test") - .setPartitions(Collections.singletonList( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setCommittedMetadata("") - )) - )) - ).build() - - val requestChannelRequest = buildRequest(offsetCommitRequest) - - metadataCache = MetadataCache.zkMetadataCache(brokerId, IBP_2_2_IV1) - brokerEpochManager = new ZkBrokerEpochManager(metadataCache, controller, None) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleOffsetCommitRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) - - val expectedTopicErrors = Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setName("test") - .setPartitions(Collections.singletonList( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.UNSUPPORTED_VERSION.code) - )) - ) - val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) - assertEquals(expectedTopicErrors, response.data.topics()) - } - @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.LEAVE_GROUP) def testHandleLeaveGroupWithMultipleMembers(version: Short): Unit = { @@ -9112,10 +8235,6 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) def testHandleOffsetFetchWithMultipleGroups(version: Short): Unit = { - // Version 0 gets offsets from Zookeeper. We are not interested - // in testing this here. - if (version == 0) return - def makeRequest(version: Short): RequestChannel.Request = { val groups = Map( "group-1" -> List( @@ -9237,10 +8356,6 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) def testHandleOffsetFetchWithSingleGroup(version: Short): Unit = { - // Version 0 gets offsets from Zookeeper. We are not interested - // in testing this here. 
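// A small sketch of the version coverage these parameterized OffsetFetch tests now get:
// with the old "version 0 reads offsets from ZooKeeper" short-circuit removed below, the
// ApiKeyVersionsSource annotation drives the test body for every OFFSET_FETCH version the
// broker still supports. The range mirrors the ApiKeys helpers already used elsewhere in
// this file; the val name is illustrative.
import org.apache.kafka.common.protocol.ApiKeys

val offsetFetchVersions: Seq[Short] =
  (ApiKeys.OFFSET_FETCH.oldestVersion to ApiKeys.OFFSET_FETCH.latestVersion).map(_.toShort)
// Each element becomes the `version` parameter of testHandleOffsetFetchWithSingleGroup
// and testHandleOffsetFetchWithMultipleGroups.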
- if (version == 0) return - def makeRequest(version: Short): RequestChannel.Request = { buildRequest(new OffsetFetchRequest.Builder( "group-1", @@ -9683,227 +8798,6 @@ class KafkaApisTest extends Logging { assertEquals(records.sizeInBytes(), brokerTopicStats.allTopicsStats.replicationBytesOutRate.get.count()) } - @Test - def rejectInitProducerIdWhenIdButNotEpochProvided(): Unit = { - val initProducerIdRequest = new InitProducerIdRequest.Builder( - new InitProducerIdRequestData() - .setTransactionalId("known") - .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt) - .setProducerId(10) - .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - ).build() - - val requestChannelRequest = buildRequest(initProducerIdRequest) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[InitProducerIdResponse](requestChannelRequest) - assertEquals(Errors.INVALID_REQUEST, response.error) - } - - @Test - def rejectInitProducerIdWhenEpochButNotIdProvided(): Unit = { - val initProducerIdRequest = new InitProducerIdRequest.Builder( - new InitProducerIdRequestData() - .setTransactionalId("known") - .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt) - .setProducerId(RecordBatch.NO_PRODUCER_ID) - .setProducerEpoch(2) - ).build() - val requestChannelRequest = buildRequest(initProducerIdRequest) - kafkaApis = createKafkaApis(IBP_2_2_IV1) - kafkaApis.handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) - - val response = verifyNoThrottling[InitProducerIdResponse](requestChannelRequest) - assertEquals(Errors.INVALID_REQUEST, response.error) - } - - @Test - def testUpdateMetadataRequestWithCurrentBrokerEpoch(): Unit = { - val currentBrokerEpoch = 1239875L - testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch, Errors.NONE) - } - - @Test - def testUpdateMetadataRequestWithNewerBrokerEpochIsValid(): Unit = { - val currentBrokerEpoch = 1239875L - testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch + 1, Errors.NONE) - } - - @Test - def testUpdateMetadataRequestWithStaleBrokerEpochIsRejected(): Unit = { - val currentBrokerEpoch = 1239875L - testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch - 1, Errors.STALE_BROKER_EPOCH) - } - - def testUpdateMetadataRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = { - val updateMetadataRequest = createBasicMetadataRequest("topicA", 1, brokerEpochInRequest, 1) - val request = buildRequest(updateMetadataRequest) - - val capturedResponse: ArgumentCaptor[UpdateMetadataResponse] = ArgumentCaptor.forClass(classOf[UpdateMetadataResponse]) - - when(controller.brokerEpoch).thenReturn(currentBrokerEpoch) - when(replicaManager.maybeUpdateMetadataCache( - ArgumentMatchers.eq(request.context.correlationId), - any() - )).thenReturn( - Seq() - ) - kafkaApis = createKafkaApis() - kafkaApis.handleUpdateMetadataRequest(request, RequestLocal.withThreadConfinedCaching) - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - val updateMetadataResponse = capturedResponse.getValue - assertEquals(expectedError, updateMetadataResponse.error()) - if (expectedError == Errors.NONE) { - verify(replicaManager).maybeUpdateMetadataCache( - ArgumentMatchers.eq(request.context.correlationId), - any() - ) - } - } - - @Test - def testLeaderAndIsrRequestWithCurrentBrokerEpoch(): Unit = 
{ - val currentBrokerEpoch = 1239875L - testLeaderAndIsrRequest(currentBrokerEpoch, currentBrokerEpoch, Errors.NONE) - } - - @Test - def testLeaderAndIsrRequestWithNewerBrokerEpochIsValid(): Unit = { - val currentBrokerEpoch = 1239875L - testLeaderAndIsrRequest(currentBrokerEpoch, currentBrokerEpoch + 1, Errors.NONE) - } - - @Test - def testLeaderAndIsrRequestWithStaleBrokerEpochIsRejected(): Unit = { - val currentBrokerEpoch = 1239875L - testLeaderAndIsrRequest(currentBrokerEpoch, currentBrokerEpoch - 1, Errors.STALE_BROKER_EPOCH) - } - - def testLeaderAndIsrRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = { - val controllerId = 2 - val controllerEpoch = 6 - val capturedResponse: ArgumentCaptor[LeaderAndIsrResponse] = ArgumentCaptor.forClass(classOf[LeaderAndIsrResponse]) - val partitionStates = Seq( - new LeaderAndIsrRequestData.LeaderAndIsrPartitionState() - .setTopicName("topicW") - .setPartitionIndex(1) - .setControllerEpoch(1) - .setLeader(0) - .setLeaderEpoch(1) - .setIsr(asList(0, 1)) - .setPartitionEpoch(2) - .setReplicas(asList(0, 1, 2)) - .setIsNew(false) - ).asJava - val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder( - ApiKeys.LEADER_AND_ISR.latestVersion, - controllerId, - controllerEpoch, - brokerEpochInRequest, - partitionStates, - Collections.singletonMap("topicW", Uuid.randomUuid()), - asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091)) - ).build() - val request = buildRequest(leaderAndIsrRequest) - val response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData() - .setErrorCode(Errors.NONE.code) - .setPartitionErrors(asList()), leaderAndIsrRequest.version()) - - when(controller.brokerEpoch).thenReturn(currentBrokerEpoch) - when(replicaManager.becomeLeaderOrFollower( - ArgumentMatchers.eq(request.context.correlationId), - any(), - any() - )).thenReturn( - response - ) - kafkaApis = createKafkaApis() - kafkaApis.handleLeaderAndIsrRequest(request) - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - val leaderAndIsrResponse = capturedResponse.getValue - assertEquals(expectedError, leaderAndIsrResponse.error()) - } - - @Test - def testStopReplicaRequestWithCurrentBrokerEpoch(): Unit = { - val currentBrokerEpoch = 1239875L - testStopReplicaRequest(currentBrokerEpoch, currentBrokerEpoch, Errors.NONE) - } - - @Test - def testStopReplicaRequestWithNewerBrokerEpochIsValid(): Unit = { - val currentBrokerEpoch = 1239875L - testStopReplicaRequest(currentBrokerEpoch, currentBrokerEpoch + 1, Errors.NONE) - } - - @Test - def testStopReplicaRequestWithStaleBrokerEpochIsRejected(): Unit = { - val currentBrokerEpoch = 1239875L - testStopReplicaRequest(currentBrokerEpoch, currentBrokerEpoch - 1, Errors.STALE_BROKER_EPOCH) - } - - def testStopReplicaRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = { - val controllerId = 0 - val controllerEpoch = 5 - val capturedResponse: ArgumentCaptor[StopReplicaResponse] = ArgumentCaptor.forClass(classOf[StopReplicaResponse]) - val fooPartition = new TopicPartition("foo", 0) - val topicStates = Seq( - new StopReplicaTopicState() - .setTopicName(fooPartition.topic) - .setPartitionStates(Seq(new StopReplicaPartitionState() - .setPartitionIndex(fooPartition.partition) - .setLeaderEpoch(1) - .setDeletePartition(false)).asJava) - ).asJava - val stopReplicaRequest = new StopReplicaRequest.Builder( - ApiKeys.STOP_REPLICA.latestVersion, - controllerId, - controllerEpoch, - 
brokerEpochInRequest, - false, - topicStates - ).build() - val request = buildRequest(stopReplicaRequest) - - when(controller.brokerEpoch).thenReturn(currentBrokerEpoch) - when(replicaManager.stopReplicas( - ArgumentMatchers.eq(request.context.correlationId), - ArgumentMatchers.eq(controllerId), - ArgumentMatchers.eq(controllerEpoch), - ArgumentMatchers.eq(stopReplicaRequest.partitionStates().asScala) - )).thenReturn( - (mutable.Map( - fooPartition -> Errors.NONE - ), Errors.NONE) - ) - kafkaApis = createKafkaApis() - kafkaApis.handleStopReplicaRequest(request) - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - val stopReplicaResponse = capturedResponse.getValue - assertEquals(expectedError, stopReplicaResponse.error()) - if (expectedError != Errors.STALE_BROKER_EPOCH) { - verify(replicaManager).stopReplicas( - ArgumentMatchers.eq(request.context.correlationId), - ArgumentMatchers.eq(controllerId), - ArgumentMatchers.eq(controllerEpoch), - ArgumentMatchers.eq(stopReplicaRequest.partitionStates().asScala) - ) - } - } - @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.LIST_GROUPS) def testListGroupsRequest(version: Short): Unit = { @@ -10087,30 +8981,28 @@ class KafkaApisTest extends Logging { @Test def testDescribeClusterRequest(): Unit = { val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT) - val brokers = Seq( - new UpdateMetadataBroker() - .setId(0) - .setRack("rack") - .setEndpoints(Seq( - new UpdateMetadataEndpoint() - .setHost("broker0") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListener.value) - ).asJava), - new UpdateMetadataBroker() - .setId(1) - .setRack("rack") - .setEndpoints(Seq( - new UpdateMetadataEndpoint() - .setHost("broker1") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListener.value)).asJava) + val endpoints = new BrokerEndpointCollection() + endpoints.add( + new BrokerEndpoint() + .setHost("broker0") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListener.value) ) - val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0, - 0, 0, Seq.empty[UpdateMetadataPartitionState].asJava, brokers.asJava, Collections.emptyMap()).build() - MetadataCacheTest.updateCache(metadataCache, updateMetadataRequest) + endpoints.add( + new BrokerEndpoint() + .setHost("broker1") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListener.value) + ) + + MetadataCacheTest.updateCache(metadataCache, + Seq(new RegisterBrokerRecord() + .setBrokerId(brokerId) + .setRack("rack") + .setFenced(false) + .setEndPoints(endpoints))) val describeClusterRequest = new DescribeClusterRequest.Builder(new DescribeClusterRequestData() .setIncludeClusterAuthorizedOperations(true)).build() @@ -10121,7 +9013,6 @@ class KafkaApisTest extends Logging { val describeClusterResponse = verifyNoThrottling[DescribeClusterResponse](request) - assertEquals(metadataCache.getControllerId.get.id, describeClusterResponse.data.controllerId) assertEquals(clusterId, describeClusterResponse.data.clusterId) assertEquals(8096, describeClusterResponse.data.clusterAuthorizedOperations) assertEquals(metadataCache.getAliveBrokerNodes(plaintextListener).toSet, @@ -10134,35 +9025,37 @@ class KafkaApisTest extends Logging { private def updateMetadataCacheWithInconsistentListeners(): 
(ListenerName, ListenerName) = { val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT) val anotherListener = new ListenerName("LISTENER2") - val brokers = Seq( - new UpdateMetadataBroker() - .setId(0) - .setRack("rack") - .setEndpoints(Seq( - new UpdateMetadataEndpoint() - .setHost("broker0") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListener.value), - new UpdateMetadataEndpoint() - .setHost("broker0") - .setPort(9093) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(anotherListener.value) - ).asJava), - new UpdateMetadataBroker() - .setId(1) - .setRack("rack") - .setEndpoints(Seq( - new UpdateMetadataEndpoint() - .setHost("broker1") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListener.value)).asJava) - ) - val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0, - 0, 0, Seq.empty[UpdateMetadataPartitionState].asJava, brokers.asJava, Collections.emptyMap()).build() - MetadataCacheTest.updateCache(metadataCache, updateMetadataRequest) + + val endpoints0 = new BrokerEndpointCollection() + endpoints0.add( + new BrokerEndpoint() + .setHost("broker0") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListener.value) + ) + endpoints0.add( + new BrokerEndpoint() + .setHost("broker0") + .setPort(9093) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(anotherListener.value) + ) + + val endpoints1 = new BrokerEndpointCollection() + endpoints1.add( + new BrokerEndpoint() + .setHost("broker1") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListener.value) + ) + + MetadataCacheTest.updateCache(metadataCache, + Seq(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints0), + new RegisterBrokerRecord().setBrokerId(1).setRack("rack").setFenced(false).setEndPoints(endpoints1)) + ) + (plaintextListener, anotherListener) } @@ -10274,7 +9167,7 @@ class KafkaApisTest extends Logging { } private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = { - val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), + val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( asList(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build() (writeTxnMarkersRequest, buildRequest(writeTxnMarkersRequest)) } @@ -10352,51 +9245,64 @@ class KafkaApisTest extends Logging { ).asInstanceOf[T] } - private def createBasicMetadataRequest(topic: String, - numPartitions: Int, - brokerEpoch: Long, - numBrokers: Int, - topicId: Uuid = Uuid.ZERO_UUID): UpdateMetadataRequest = { + private def createBasicMetadata(topic: String, + numPartitions: Int, + brokerEpoch: Long, + numBrokers: Int, + topicId: Uuid): Seq[ApiMessage] = { + + val results = new mutable.ArrayBuffer[ApiMessage]() + val topicRecord = new TopicRecord().setName(topic).setTopicId(topicId) + results += topicRecord + val replicas = List(0.asInstanceOf[Integer]).asJava - def createPartitionState(partition: Int) = new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(partition) - .setControllerEpoch(1) + def createPartitionRecord(partition: Int) = new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(partition) .setLeader(0) .setLeaderEpoch(1) .setReplicas(replicas) - .setZkVersion(0) .setIsr(replicas) val 
plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT) - val partitionStates = (0 until numPartitions).map(createPartitionState) + val partitionRecords = (0 until numPartitions).map(createPartitionRecord) val liveBrokers = (0 until numBrokers).map( - brokerId => createMetadataBroker(brokerId, plaintextListener)) - new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0, - 0, brokerEpoch, partitionStates.asJava, liveBrokers.asJava, Collections.singletonMap(topic, topicId)).build() + brokerId => createMetadataBroker(brokerId, plaintextListener, brokerEpoch)) + partitionRecords.foreach(record => results += record) + liveBrokers.foreach(record => results +=record) + + results.toSeq } private def setupBasicMetadataCache(topic: String, numPartitions: Int, numBrokers: Int, topicId: Uuid): Unit = { - val updateMetadataRequest = createBasicMetadataRequest(topic, numPartitions, 0, numBrokers, topicId) - MetadataCacheTest.updateCache(metadataCache, updateMetadataRequest) + val updateMetadata = createBasicMetadata(topic, numPartitions, 0, numBrokers, topicId) + MetadataCacheTest.updateCache(metadataCache, updateMetadata) } private def addTopicToMetadataCache(topic: String, numPartitions: Int, numBrokers: Int = 1, topicId: Uuid = Uuid.ZERO_UUID): Unit = { - val updateMetadataRequest = createBasicMetadataRequest(topic, numPartitions, 0, numBrokers, topicId) - MetadataCacheTest.updateCache(metadataCache, updateMetadataRequest) + val updateMetadata = createBasicMetadata(topic, numPartitions, 0, numBrokers, topicId) + MetadataCacheTest.updateCache(metadataCache, updateMetadata) } private def createMetadataBroker(brokerId: Int, - listener: ListenerName): UpdateMetadataBroker = { - new UpdateMetadataBroker() - .setId(brokerId) - .setRack("rack") - .setEndpoints(Seq(new UpdateMetadataEndpoint() + listener: ListenerName, + brokerEpoch: Long): RegisterBrokerRecord = { + val endpoints = new BrokerEndpointCollection() + endpoints.add( + new BrokerEndpoint() .setHost("broker" + brokerId) .setPort(9092) .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(listener.value)).asJava) + .setName(listener.value) + ) + + new RegisterBrokerRecord() + .setBrokerId(brokerId) + .setRack("rack") + .setFenced(false) + .setEndPoints(endpoints) + .setBrokerEpoch(brokerEpoch) } @Test @@ -10754,260 +9660,13 @@ class KafkaApisTest extends Logging { assertEquals("Ongoing", transactionState.transactionState) } - @Test - def testDeleteTopicsByIdAuthorization(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val controllerContext: ControllerContext = mock(classOf[ControllerContext]) - - when(clientControllerQuotaManager.newQuotaFor( - any[RequestChannel.Request], - anyShort - )).thenReturn(UnboundedControllerMutationQuota) - when(controller.isActive).thenReturn(true) - when(controller.controllerContext).thenReturn(controllerContext) - - val topicResults = Map( - AclOperation.DESCRIBE -> Map( - "foo" -> AuthorizationResult.DENIED, - "bar" -> AuthorizationResult.ALLOWED - ), - AclOperation.DELETE -> Map( - "foo" -> AuthorizationResult.DENIED, - "bar" -> AuthorizationResult.DENIED - ) - ) - when(authorizer.authorize(any[RequestContext], isNotNull[util.List[Action]])).thenAnswer(invocation => { - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]] - actions.asScala.map { action => - val topic = action.resourcePattern.name - val ops = action.operation() - topicResults(ops)(topic) - }.asJava - }) - - // Try to delete three topics: - // 1. 
One without describe permission - // 2. One without delete permission - // 3. One which is authorized, but doesn't exist - val topicIdsMap = Map( - Uuid.randomUuid() -> Some("foo"), - Uuid.randomUuid() -> Some("bar"), - Uuid.randomUuid() -> None - ) - - topicIdsMap.foreach { case (topicId, topicNameOpt) => - when(controllerContext.topicName(topicId)).thenReturn(topicNameOpt) - } - - val topicDatas = topicIdsMap.keys.map { topicId => - new DeleteTopicsRequestData.DeleteTopicState().setTopicId(topicId) - }.toList - val deleteRequest = new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData() - .setTopics(topicDatas.asJava)) - .build(ApiKeys.DELETE_TOPICS.latestVersion) - - val request = buildRequest(deleteRequest) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handleDeleteTopicsRequest(request) - verify(authorizer, times(2)).authorize(any(), any()) - - val deleteResponse = verifyNoThrottling[DeleteTopicsResponse](request) - - topicIdsMap.foreach { case (topicId, nameOpt) => - val response = deleteResponse.data.responses.asScala.find(_.topicId == topicId).get - nameOpt match { - case Some("foo") => - assertNull(response.name) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, Errors.forCode(response.errorCode)) - case Some("bar") => - assertEquals("bar", response.name) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, Errors.forCode(response.errorCode)) - case None => - assertNull(response.name) - assertEquals(Errors.UNKNOWN_TOPIC_ID, Errors.forCode(response.errorCode)) - case _ => - fail("Unexpected topic id/name mapping") - } - } - } - - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testDeleteTopicsByNameAuthorization(usePrimitiveTopicNameArray: Boolean): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - - when(clientControllerQuotaManager.newQuotaFor( - any[RequestChannel.Request], - anyShort - )).thenReturn(UnboundedControllerMutationQuota) - when(controller.isActive).thenReturn(true) - - // Try to delete three topics: - // 1. One without describe permission - // 2. One without delete permission - // 3. 
One which is authorized, but doesn't exist - - val topicResults = Map( - AclOperation.DESCRIBE -> Map( - "foo" -> AuthorizationResult.DENIED, - "bar" -> AuthorizationResult.ALLOWED, - "baz" -> AuthorizationResult.ALLOWED - ), - AclOperation.DELETE -> Map( - "foo" -> AuthorizationResult.DENIED, - "bar" -> AuthorizationResult.DENIED, - "baz" -> AuthorizationResult.ALLOWED - ) - ) - when(authorizer.authorize(any[RequestContext], isNotNull[util.List[Action]])).thenAnswer(invocation => { - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]] - actions.asScala.map { action => - val topic = action.resourcePattern.name - val ops = action.operation() - topicResults(ops)(topic) - }.asJava - }) - - val deleteRequest = if (usePrimitiveTopicNameArray) { - new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData() - .setTopicNames(List("foo", "bar", "baz").asJava)) - .build(5.toShort) - } else { - val topicDatas = List( - new DeleteTopicsRequestData.DeleteTopicState().setName("foo"), - new DeleteTopicsRequestData.DeleteTopicState().setName("bar"), - new DeleteTopicsRequestData.DeleteTopicState().setName("baz") - ) - new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData() - .setTopics(topicDatas.asJava)) - .build(ApiKeys.DELETE_TOPICS.latestVersion) - } - - val request = buildRequest(deleteRequest) - when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), - any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handleDeleteTopicsRequest(request) - verify(authorizer, times(2)).authorize(any(), any()) - - val deleteResponse = verifyNoThrottling[DeleteTopicsResponse](request) - - def lookupErrorCode(topic: String): Option[Errors] = { - Option(deleteResponse.data.responses().find(topic)) - .map(result => Errors.forCode(result.errorCode)) - } - - assertEquals(Some(Errors.TOPIC_AUTHORIZATION_FAILED), lookupErrorCode("foo")) - assertEquals(Some(Errors.TOPIC_AUTHORIZATION_FAILED), lookupErrorCode("bar")) - assertEquals(Some(Errors.UNKNOWN_TOPIC_OR_PARTITION), lookupErrorCode("baz")) - } - - private def createMockRequest(): RequestChannel.Request = { - val request: RequestChannel.Request = mock(classOf[RequestChannel.Request]) - val requestHeader: RequestHeader = mock(classOf[RequestHeader]) - when(request.header).thenReturn(requestHeader) - when(requestHeader.apiKey()).thenReturn(ApiKeys.values().head) - request - } - - private def verifyShouldNeverHandleErrorMessage(handler: RequestChannel.Request => Unit): Unit = { - val request = createMockRequest() - val e = assertThrows(classOf[UnsupportedVersionException], () => handler(request)) - assertEquals(KafkaApis.shouldNeverReceive(request).getMessage, e.getMessage) - } - - private def verifyShouldAlwaysForwardErrorMessage(handler: RequestChannel.Request => Unit): Unit = { - val request = createMockRequest() - val e = assertThrows(classOf[UnsupportedVersionException], () => handler(request)) - assertEquals(KafkaApis.shouldAlwaysForward(request).getMessage, e.getMessage) - } - - @Test - def testRaftShouldNeverHandleLeaderAndIsrRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleLeaderAndIsrRequest) - } - - @Test - def testRaftShouldNeverHandleStopReplicaRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = 
createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleStopReplicaRequest) - } - - @Test - def testRaftShouldNeverHandleUpdateMetadataRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleUpdateMetadataRequest(_, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def testRaftShouldNeverHandleControlledShutdownRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleControlledShutdownRequest) - } - - @Test - def testRaftShouldNeverHandleAlterPartitionRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleAlterPartitionRequest) - } - - @Test - def testRaftShouldNeverHandleEnvelope(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldNeverHandleErrorMessage(kafkaApis.handleEnvelope(_, RequestLocal.withThreadConfinedCaching)) - } - - @Test - def testRaftShouldAlwaysForwardCreateTopicsRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleCreateTopicsRequest) - } - - @Test - def testRaftShouldAlwaysForwardCreatePartitionsRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleCreatePartitionsRequest) - } - - @Test - def testRaftShouldAlwaysForwardDeleteTopicsRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleDeleteTopicsRequest) - } - - @Test - def testRaftShouldAlwaysForwardCreateAcls(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleCreateAcls) - } - - @Test - def testRaftShouldAlwaysForwardDeleteAcls(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleDeleteAcls) - } - @Test def testEmptyLegacyAlterConfigsRequestWithKRaft(): Unit = { val request = buildRequest(new AlterConfigsRequest(new AlterConfigsRequestData(), 1.toShort)) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) val response = verifyNoThrottling[AlterConfigsResponse](request) assertEquals(new AlterConfigsResponseData(), response.data()) @@ -11027,7 +9686,7 @@ class KafkaApisTest 
extends Logging { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) val response = verifyNoThrottling[AlterConfigsResponse](request) assertEquals(new AlterConfigsResponseData().setResponses(asList( @@ -11039,20 +9698,13 @@ class KafkaApisTest extends Logging { response.data()) } - @Test - def testRaftShouldAlwaysForwardAlterPartitionReassignmentsRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleAlterPartitionReassignmentsRequest) - } - @Test def testEmptyIncrementalAlterConfigsRequestWithKRaft(): Unit = { val request = buildRequest(new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(), 1.toShort)) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) assertEquals(new IncrementalAlterConfigsResponseData(), response.data()) @@ -11072,7 +9724,7 @@ class KafkaApisTest extends Logging { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) assertEquals(new IncrementalAlterConfigsResponseData().setResponses(asList( @@ -11084,75 +9736,13 @@ class KafkaApisTest extends Logging { response.data()) } - @Test - // Test that in KRaft mode, a request that isn't forwarded gets the correct error message. - // We skip the pre-forward checks in handleCreateTokenRequest - def testRaftShouldAlwaysForwardCreateTokenRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleCreateTokenRequestZk) - } - - @Test - // Test that in KRaft mode, a request that isn't forwarded gets the correct error message. - // We skip the pre-forward checks in handleRenewTokenRequest - def testRaftShouldAlwaysForwardRenewTokenRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleRenewTokenRequestZk) - } - - @Test - // Test that in KRaft mode, a request that isn't forwarded gets the correct error message. 
- // We skip the pre-forward checks in handleExpireTokenRequest - def testRaftShouldAlwaysForwardExpireTokenRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleExpireTokenRequestZk) - } - - @Test - def testRaftShouldAlwaysForwardAlterClientQuotasRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleAlterClientQuotasRequest) - } - - @Test - def testRaftShouldAlwaysForwardAlterUserScramCredentialsRequest(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleAlterUserScramCredentialsRequest) - } - - @Test - def testRaftShouldAlwaysForwardUpdateFeatures(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleUpdateFeatures) - } - - @Test - def testRaftShouldAlwaysForwardElectLeaders(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleElectLeaders) - } - - @Test - def testRaftShouldAlwaysForwardListPartitionReassignments(): Unit = { - metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) - verifyShouldAlwaysForwardErrorMessage(kafkaApis.handleListPartitionReassignmentsRequest) - } - @Test def testConsumerGroupHeartbeatReturnsUnsupportedVersion(): Unit = { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val expectedHeartbeatResponse = new ConsumerGroupHeartbeatResponseData() @@ -11167,7 +9757,7 @@ class KafkaApisTest extends Logging { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) val future = new CompletableFuture[ConsumerGroupHeartbeatResponseData]() when(groupCoordinator.consumerGroupHeartbeat( @@ -11175,8 +9765,7 @@ class KafkaApisTest extends Logging { consumerGroupHeartbeatRequest )).thenReturn(future) kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11194,7 +9783,7 @@ class KafkaApisTest extends Logging 
{ val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) val future = new CompletableFuture[ConsumerGroupHeartbeatResponseData]() when(groupCoordinator.consumerGroupHeartbeat( @@ -11202,8 +9791,7 @@ class KafkaApisTest extends Logging { consumerGroupHeartbeatRequest )).thenReturn(future) kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11218,15 +9806,14 @@ class KafkaApisTest extends Logging { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis( authorizer = Some(authorizer), - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11251,8 +9838,7 @@ class KafkaApisTest extends Logging { any[util.List[String]] )).thenReturn(future) kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11295,7 +9881,7 @@ class KafkaApisTest extends Logging { val expectedResponse = new ConsumerGroupDescribeResponseData() expectedResponse.groups.add(expectedDescribedGroup) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) @@ -11322,8 +9908,7 @@ class KafkaApisTest extends Logging { future.complete(List().asJava) kafkaApis = createKafkaApis( authorizer = Some(authorizer), - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11345,8 +9930,7 @@ class KafkaApisTest extends Logging { any[util.List[String]] )).thenReturn(future) kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1), - raftSupport = true + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11355,18 +9939,6 @@ class KafkaApisTest extends Logging { assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.groups.get(0).errorCode) } - @Test - def testGetTelemetrySubscriptionsNotAllowedForZkClusters(): Unit = { - val data = new GetTelemetrySubscriptionsRequestData() - - val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder(data, true).build()) - kafkaApis = createKafkaApis(enableForwarding = true) - kafkaApis.handle(request, RequestLocal.noCaching) 
- - val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) - assertEquals(Errors.UNKNOWN_SERVER_ERROR, Errors.forCode(response.data.errorCode)) - } - @Test def testGetTelemetrySubscriptions(): Unit = { val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder( @@ -11378,7 +9950,7 @@ class KafkaApisTest extends Logging { new GetTelemetrySubscriptionsResponseData())) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) @@ -11397,7 +9969,7 @@ class KafkaApisTest extends Logging { any[RequestContext]())).thenThrow(new RuntimeException("test")) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) @@ -11406,18 +9978,6 @@ class KafkaApisTest extends Logging { assertEquals(expectedResponse, response.data) } - @Test - def testPushTelemetryNotAllowedForZkClusters(): Unit = { - val data = new PushTelemetryRequestData() - - val request = buildRequest(new PushTelemetryRequest.Builder(data, true).build()) - kafkaApis = createKafkaApis(enableForwarding = true) - kafkaApis.handle(request, RequestLocal.noCaching) - - val response = verifyNoThrottling[PushTelemetryResponse](request) - assertEquals(Errors.UNKNOWN_SERVER_ERROR, Errors.forCode(response.data.errorCode)) - } - @Test def testPushTelemetry(): Unit = { val request = buildRequest(new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true).build()) @@ -11427,7 +9987,7 @@ class KafkaApisTest extends Logging { .thenReturn(new PushTelemetryResponse(new PushTelemetryResponseData())) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[PushTelemetryResponse](request) @@ -11444,7 +10004,7 @@ class KafkaApisTest extends Logging { .thenThrow(new RuntimeException("test")) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[PushTelemetryResponse](request) @@ -11452,16 +10012,6 @@ class KafkaApisTest extends Logging { assertEquals(expectedResponse, response.data) } - @Test - def testListClientMetricsResourcesNotAllowedForZkClusters(): Unit = { - val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) - kafkaApis = createKafkaApis(enableForwarding = true) - kafkaApis.handle(request, RequestLocal.noCaching) - - val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) - assertEquals(Errors.UNKNOWN_SERVER_ERROR, Errors.forCode(response.data.errorCode)) - } - @Test def testListClientMetricsResources(): Unit = { val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) @@ -11471,7 +10021,7 @@ class KafkaApisTest extends Logging { resources.add("test1") 
resources.add("test2") when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) val expectedResponse = new ListClientMetricsResourcesResponseData().setClientMetricsResources( @@ -11486,7 +10036,7 @@ class KafkaApisTest extends Logging { val resources = new mutable.HashSet[String] when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) val expectedResponse = new ListClientMetricsResourcesResponseData() @@ -11499,7 +10049,7 @@ class KafkaApisTest extends Logging { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientMetricsManager.listClientMetricsResources).thenThrow(new RuntimeException("test")) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(request, RequestLocal.noCaching) val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) @@ -11513,7 +10063,7 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis(raftSupport = true) + kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) val expectedHeartbeatResponse = new ShareGroupHeartbeatResponseData() @@ -11536,7 +10086,6 @@ class KafkaApisTest extends Logging { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11561,7 +10110,6 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis( overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Some(authorizer), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11583,7 +10131,6 @@ class KafkaApisTest extends Logging { metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11691,7 +10238,7 @@ class KafkaApisTest extends Logging { val response = getReadShareGroupResponse( readRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, readStateResultData @@ -11746,7 +10293,7 @@ class KafkaApisTest extends Logging { val response = getReadShareGroupResponse( readRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, readStateResultData @@ -11760,6 +10307,102 @@ class KafkaApisTest extends Logging { }) } + @Test + def testReadShareGroupStateSummarySuccess(): 
Unit = { + val topicId = Uuid.randomUuid(); + val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() + .setGroupId("group1") + .setTopics(List( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId) + .setPartitions(List( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(1) + .setLeaderEpoch(1) + ).asJava) + ).asJava) + + val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = List( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(1) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage(null) + .setStateEpoch(1) + .setStartOffset(10) + ).asJava) + ).asJava + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + ) + + val response = getReadShareGroupSummaryResponse( + readSummaryRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + verifyNoErr = true, + null, + readStateSummaryResultData + ) + + assertNotNull(response.data) + assertEquals(1, response.data.results.size) + } + + @Test + def testReadShareGroupStateSummaryAuthorizationFailed(): Unit = { + val topicId = Uuid.randomUuid(); + val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() + .setGroupId("group1") + .setTopics(List( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId) + .setPartitions(List( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(1) + .setLeaderEpoch(1) + ).asJava) + ).asJava) + + val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = List( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(1) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage(null) + .setStateEpoch(1) + .setStartOffset(10) + ).asJava) + ).asJava + + val authorizer: Authorizer = mock(classOf[Authorizer]) + when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + ) + + val response = getReadShareGroupSummaryResponse( + readSummaryRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, + verifyNoErr = false, + authorizer, + readStateSummaryResultData + ) + + assertNotNull(response.data) + assertEquals(1, response.data.results.size) + response.data.results.forEach(readResult => { + assertEquals(1, readResult.partitions.size) + assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), readResult.partitions.get(0).errorCode()) + }) + } + @Test def testWriteShareGroupStateSuccess(): Unit = { val topicId = Uuid.randomUuid(); @@ -11801,7 +10444,7 @@ class KafkaApisTest extends Logging { val response = getWriteShareGroupResponse( writeRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, writeStateResultData @@ -11856,7 +10499,7 @@ class KafkaApisTest extends Logging { val response = getWriteShareGroupResponse( writeRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ 
ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, writeStateResultData @@ -11886,7 +10529,6 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -11915,7 +10557,6 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) @@ -11931,6 +10572,35 @@ class KafkaApisTest extends Logging { response } + def getReadShareGroupSummaryResponse(requestData: ReadShareGroupStateSummaryRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + readStateSummaryResult: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult]): ReadShareGroupStateSummaryResponse = { + val requestChannelRequest = buildRequest(new ReadShareGroupStateSummaryRequest.Builder(requestData, true).build()) + + val future = new CompletableFuture[ReadShareGroupStateSummaryResponseData]() + when(shareCoordinator.readStateSummary( + any[RequestContext], + any[ReadShareGroupStateSummaryRequestData] + )).thenReturn(future) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + kafkaApis = createKafkaApis( + overrideProperties = configOverrides, + authorizer = Option(authorizer), + ) + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) + + future.complete(new ReadShareGroupStateSummaryResponseData() + .setResults(readStateSummaryResult)) + + val response = verifyNoThrottling[ReadShareGroupStateSummaryResponse](requestChannelRequest) + if (verifyNoErr) { + val expectedReadShareGroupStateSummaryResponseData = new ReadShareGroupStateSummaryResponseData() + .setResults(readStateSummaryResult) + assertEquals(expectedReadShareGroupStateSummaryResponseData, response.data) + } + response + } + def getWriteShareGroupResponse(requestData: WriteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, verifyNoErr: Boolean = true, authorizer: Authorizer = null, writeStateResult: util.List[WriteShareGroupStateResponseData.WriteStateResult]): WriteShareGroupStateResponse = { @@ -11945,7 +10615,6 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), - raftSupport = true ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 1c5c2d3b0cdd4..31c192ff9ef30 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -37,9 +37,7 @@ import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.security.PasswordEncoderConfigs import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.common.MetadataVersion.{IBP_0_8_2, IBP_3_0_IV1} import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, 
ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms, ZkConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs @@ -48,14 +46,13 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ class KafkaConfigTest { @Test def testLogRetentionTimeHoursProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, "1") val cfg = KafkaConfig.fromProps(props) @@ -64,7 +61,7 @@ class KafkaConfigTest { @Test def testLogRetentionTimeMinutesProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MINUTES_CONFIG, "30") val cfg = KafkaConfig.fromProps(props) @@ -73,7 +70,7 @@ class KafkaConfigTest { @Test def testLogRetentionTimeMsProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "1800000") val cfg = KafkaConfig.fromProps(props) @@ -82,7 +79,7 @@ class KafkaConfigTest { @Test def testLogRetentionTimeNoConfigProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val cfg = KafkaConfig.fromProps(props) assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis) @@ -90,7 +87,7 @@ class KafkaConfigTest { @Test def testLogRetentionTimeBothMinutesAndHoursProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MINUTES_CONFIG, "30") props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, "1") @@ -100,7 +97,7 @@ class KafkaConfigTest { @Test def testLogRetentionTimeBothMinutesAndMsProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "1800000") props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MINUTES_CONFIG, "10") @@ -110,11 +107,11 @@ class KafkaConfigTest { @Test def testLogRetentionUnlimited(): Unit = { - val props1 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181) - val props2 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181) - val props3 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181) - val props4 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181) - val props5 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181) + val props1 = TestUtils.createBrokerConfig(0, port = 8181) + val props2 = TestUtils.createBrokerConfig(0, port = 8181) + val props3 = TestUtils.createBrokerConfig(0, port = 8181) + val props4 = TestUtils.createBrokerConfig(0, port = 8181) + val props5 = TestUtils.createBrokerConfig(0, port = 8181) props1.setProperty("log.retention.ms", "-1") props2.setProperty("log.retention.minutes", "-1") @@ -140,9 +137,9 
@@ class KafkaConfigTest { @Test def testLogRetentionValid(): Unit = { - val props1 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) - val props2 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) - val props3 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props1 = TestUtils.createBrokerConfig(0, port = 8181) + val props2 = TestUtils.createBrokerConfig(0, port = 8181) + val props3 = TestUtils.createBrokerConfig(0, port = 8181) props1.setProperty("log.retention.ms", "0") props2.setProperty("log.retention.minutes", "0") @@ -156,19 +153,22 @@ class KafkaConfigTest { @Test def testAdvertiseDefaults(): Unit = { - val port = 9999 + val brokerPort = 9999 + val controllerPort = 10000 val hostName = "fake-host" val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"PLAINTEXT://$hostName:$port") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, s"$hostName:$controllerPort") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"PLAINTEXT://$hostName:$brokerPort") val serverConfig = KafkaConfig.fromProps(props) val endpoints = serverConfig.effectiveAdvertisedBrokerListeners assertEquals(1, endpoints.size) val endpoint = endpoints.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).get assertEquals(endpoint.host, hostName) - assertEquals(endpoint.port, port) + assertEquals(endpoint.port, brokerPort) } @Test @@ -176,7 +176,7 @@ class KafkaConfigTest { val advertisedHostName = "routable-host" val advertisedPort = 1234 - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"PLAINTEXT://$advertisedHostName:$advertisedPort") val serverConfig = KafkaConfig.fromProps(props) @@ -190,8 +190,10 @@ class KafkaConfigTest { @Test def testDuplicateListeners(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9095") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") // listeners with duplicate port props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://localhost:9091,SSL://localhost:9091") @@ -202,7 +204,7 @@ class KafkaConfigTest { assertBadConfigContainingMessage(props, "Each listener must have a different name") // advertised listeners can have duplicate ports - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "HOST:SASL_SSL,LB:SASL_SSL") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "HOST:SASL_SSL,LB:SASL_SSL,CONTROLLER:SASL_SSL") props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "HOST") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "HOST://localhost:9091,LB://localhost:9092") props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "HOST://localhost:9091,LB://localhost:9091") @@ -216,8 +218,11 @@ class KafkaConfigTest { @Test def testIPv4AndIPv6SamePortListeners(): Unit = { val props = new
Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.put(ServerConfigs.BROKER_ID_CONFIG, "1") - props.put(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9091") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_DEFAULT + ",CONTROLLER:PLAINTEXT") props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://[::1]:9092,SSL://[::1]:9092") var caught = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) @@ -258,31 +263,6 @@ class KafkaConfigTest { assertTrue(isValidKafkaConfig(props)) } - @Test - def testControlPlaneListenerName(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) - props.setProperty("listeners", "PLAINTEXT://localhost:0,CONTROLLER://localhost:5000") - props.setProperty("listener.security.protocol.map", "PLAINTEXT:PLAINTEXT,CONTROLLER:SSL") - props.setProperty("control.plane.listener.name", "CONTROLLER") - KafkaConfig.fromProps(props) - - val serverConfig = KafkaConfig.fromProps(props) - val controlEndpoint = serverConfig.controlPlaneListener.get - assertEquals("localhost", controlEndpoint.host) - assertEquals(5000, controlEndpoint.port) - assertEquals(SecurityProtocol.SSL, controlEndpoint.securityProtocol) - - //advertised listener should contain control-plane listener - val advertisedEndpoints = serverConfig.effectiveAdvertisedBrokerListeners - assertTrue(advertisedEndpoints.exists { endpoint => - endpoint.securityProtocol == controlEndpoint.securityProtocol && endpoint.listenerName.value().equals(controlEndpoint.listenerName.value()) - }) - - // interBrokerListener name should be different from control-plane listener name - val interBrokerListenerName = serverConfig.interBrokerListenerName - assertFalse(interBrokerListenerName.value().equals(controlEndpoint.listenerName.value())) - } - @Test def testControllerListenerNames(): Unit = { val props = new Properties() @@ -302,23 +282,6 @@ class KafkaConfigTest { assertEquals(SecurityProtocol.SASL_SSL, controllerEndpoint.securityProtocol) } - @Test - def testControlPlaneListenerNameNotAllowedWithKRaft(): Unit = { - val props = new Properties() - props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker,controller") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://localhost:9092,SSL://localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") - props.setProperty(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG, "SSL") - - assertFalse(isValidKafkaConfig(props)) - assertBadConfigContainingMessage(props, "control.plane.listener.name is not supported in KRaft mode.") - - props.remove(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG) - KafkaConfig.fromProps(props) - } - @Test def testControllerListenerDefinedForKRaftController(): Unit = { val props = new Properties() @@ -509,24 +472,13 @@ class KafkaConfigTest { KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER2"))) } - @Test - def testControllerListenerNameDoesNotMapToPlaintextByDefaultForNonKRaft(): Unit = { - val props = new Properties() - props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - 
props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://localhost:9092") - assertBadConfigContainingMessage(props, - "Error creating broker listeners from 'CONTROLLER://localhost:9092': No security protocol defined for listener CONTROLLER") - // Valid now - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://localhost:9092") - assertEquals(None, KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER"))) - } - @Test def testBadListenerProtocol(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "BAD://localhost:9091") assertFalse(isValidKafkaConfig(props)) @@ -535,11 +487,13 @@ class KafkaConfigTest { @Test def testListenerNamesWithAdvertisedListenerUnset(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "CLIENT://localhost:9091,REPLICATION://localhost:9092,INTERNAL://localhost:9093") - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CLIENT:SSL,REPLICATION:SSL,INTERNAL:PLAINTEXT") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CLIENT:SSL,REPLICATION:SSL,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT") props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "REPLICATION") val config = KafkaConfig.fromProps(props) val expectedListeners = Seq( @@ -551,7 +505,8 @@ class KafkaConfigTest { val expectedSecurityProtocolMap = Map( new ListenerName("CLIENT") -> SecurityProtocol.SSL, new ListenerName("REPLICATION") -> SecurityProtocol.SSL, - new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT + new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -559,12 +514,14 @@ class KafkaConfigTest { @Test def testListenerAndAdvertisedListenerNames(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "EXTERNAL://localhost:9091,INTERNAL://localhost:9093") props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://lb1.example.com:9000,INTERNAL://host1:9093") - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "EXTERNAL:SSL,INTERNAL:PLAINTEXT") + 
props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "EXTERNAL:SSL,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT") props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "INTERNAL") val config = KafkaConfig.fromProps(props) @@ -582,7 +539,8 @@ class KafkaConfigTest { val expectedSecurityProtocolMap = Map( new ListenerName("EXTERNAL") -> SecurityProtocol.SSL, - new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT + new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -624,9 +582,12 @@ class KafkaConfigTest { @Test def testCaseInsensitiveListenerProtocol(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9093") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "plaintext://localhost:9091,SsL://localhost:9092") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,SSL:SSL,CONTROLLER:PLAINTEXT") val config = KafkaConfig.fromProps(props) assertEquals(Some("SSL://localhost:9092"), config.listeners.find(_.listenerName.value == "SSL").map(_.connectionString)) assertEquals(Some("PLAINTEXT://localhost:9091"), config.listeners.find(_.listenerName.value == "PLAINTEXT").map(_.connectionString)) @@ -639,8 +600,10 @@ class KafkaConfigTest { @Test def testListenerDefaults(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9093") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") // configuration with no listeners val conf = KafkaConfig.fromProps(props) @@ -649,29 +612,26 @@ class KafkaConfigTest { assertEquals(conf.effectiveAdvertisedBrokerListeners, listenerListToEndPoints("PLAINTEXT://:9092")) } - @nowarn("cat=deprecation") @Test def testVersionConfiguration(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") val conf = KafkaConfig.fromProps(props) - assertEquals(MetadataVersion.latestProduction, conf.interBrokerProtocolVersion) + assertEquals(MetadataVersion.MINIMUM_KRAFT_VERSION, conf.interBrokerProtocolVersion) - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.8.2.0") - // We need to set the message format version to make the configuration valid. 
- props.setProperty(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, "0.8.2.0") + props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "3.0.0-IV1") val conf2 = KafkaConfig.fromProps(props) - assertEquals(IBP_0_8_2, conf2.interBrokerProtocolVersion) + assertEquals(MetadataVersion.IBP_3_0_IV1, conf2.interBrokerProtocolVersion) - // check that 0.8.2.0 is the same as 0.8.2.1 - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.8.2.1") - // We need to set the message format version to make the configuration valid - props.setProperty(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, "0.8.2.1") + // check that patch version doesn't affect equality + props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "3.0.1-IV1") val conf3 = KafkaConfig.fromProps(props) - assertEquals(IBP_0_8_2, conf3.interBrokerProtocolVersion) + assertEquals(MetadataVersion.IBP_3_0_IV1, conf3.interBrokerProtocolVersion) - //check that latest is newer than 0.8.2 + //check that latest is newer than 3.0.1-IV0 assertTrue(MetadataVersion.latestTesting.isAtLeast(conf3.interBrokerProtocolVersion)) } @@ -686,7 +646,7 @@ class KafkaConfigTest { @Test def testUncleanLeaderElectionDefault(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val serverConfig = KafkaConfig.fromProps(props) assertEquals(serverConfig.uncleanLeaderElectionEnable, false) @@ -694,7 +654,7 @@ class KafkaConfigTest { @Test def testUncleanElectionDisabled(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, String.valueOf(false)) val serverConfig = KafkaConfig.fromProps(props) @@ -703,7 +663,7 @@ class KafkaConfigTest { @Test def testUncleanElectionEnabled(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, String.valueOf(true)) val serverConfig = KafkaConfig.fromProps(props) @@ -712,7 +672,7 @@ class KafkaConfigTest { @Test def testUncleanElectionInvalid(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "invalid") assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) @@ -720,7 +680,7 @@ class KafkaConfigTest { @Test def testLogRollTimeMsProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG, "1800000") val cfg = KafkaConfig.fromProps(props) @@ -729,7 +689,7 @@ class KafkaConfigTest { @Test def testLogRollTimeBothMsAndHoursProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG, "1800000") props.setProperty(ServerLogConfigs.LOG_ROLL_TIME_HOURS_CONFIG, "1") @@ -739,7 +699,7 @@ class KafkaConfigTest { @Test def testLogRollTimeNoConfigProvided(): Unit = { - val props = TestUtils.createBrokerConfig(0, 
TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val cfg = KafkaConfig.fromProps(props) assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRollTimeMillis ) @@ -747,14 +707,14 @@ class KafkaConfigTest { @Test def testDefaultCompressionType(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val serverConfig = KafkaConfig.fromProps(props) assertEquals(serverConfig.compressionType, "producer") } @Test def testValidCompressionType(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty("compression.type", "gzip") val serverConfig = KafkaConfig.fromProps(props) assertEquals(serverConfig.compressionType, "gzip") @@ -762,14 +722,14 @@ class KafkaConfigTest { @Test def testInvalidCompressionType(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerConfigs.COMPRESSION_TYPE_CONFIG, "abc") assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) } @Test def testInvalidGzipCompressionLevel(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerConfigs.COMPRESSION_TYPE_CONFIG, "gzip") props.setProperty(ServerConfigs.COMPRESSION_GZIP_LEVEL_CONFIG, (CompressionType.GZIP.maxLevel() + 1).toString) assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) @@ -777,7 +737,7 @@ class KafkaConfigTest { @Test def testInvalidLz4CompressionLevel(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerConfigs.COMPRESSION_TYPE_CONFIG, "lz4") props.setProperty(ServerConfigs.COMPRESSION_LZ4_LEVEL_CONFIG, (CompressionType.LZ4.maxLevel() + 1).toString) assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) @@ -785,7 +745,7 @@ class KafkaConfigTest { @Test def testInvalidZstdCompressionLevel(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ServerConfigs.COMPRESSION_TYPE_CONFIG, "zstd") props.setProperty(ServerConfigs.COMPRESSION_ZSTD_LEVEL_CONFIG, (CompressionType.ZSTD.maxLevel() + 1).toString) assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) @@ -793,7 +753,7 @@ class KafkaConfigTest { @Test def testInvalidInterBrokerSecurityProtocol(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "SSL://localhost:0") props.setProperty(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG, SecurityProtocol.PLAINTEXT.toString) assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) @@ -801,55 +761,32 @@ class KafkaConfigTest { @Test def testEqualAdvertisedListenersProtocol(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, 
"PLAINTEXT://localhost:9092,SSL://localhost:9093") props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "PLAINTEXT://localhost:9092,SSL://localhost:9093") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL") KafkaConfig.fromProps(props) } @Test def testInvalidAdvertisedListenersProtocol(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "TRACE://localhost:9091,SSL://localhost:9093") props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "PLAINTEXT://localhost:9092") assertBadConfigContainingMessage(props, "No security protocol defined for listener TRACE") - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,TRACE:PLAINTEXT,SSL:SSL") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,TRACE:PLAINTEXT,SSL:SSL") assertBadConfigContainingMessage(props, "advertised.listeners listener names must be equal to or a subset of the ones defined in listeners") } - @nowarn("cat=deprecation") - @Test - def testInterBrokerVersionMessageFormatCompatibility(): Unit = { - def buildConfig(interBrokerProtocol: MetadataVersion, messageFormat: MetadataVersion): KafkaConfig = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, interBrokerProtocol.version) - props.setProperty(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, messageFormat.version) - KafkaConfig.fromProps(props) - } - - MetadataVersion.VERSIONS.foreach { interBrokerVersion => - MetadataVersion.VERSIONS.foreach { messageFormatVersion => - if (interBrokerVersion.highestSupportedRecordVersion.value >= messageFormatVersion.highestSupportedRecordVersion.value) { - val config = buildConfig(interBrokerVersion, messageFormatVersion) - assertEquals(interBrokerVersion, config.interBrokerProtocolVersion) - if (interBrokerVersion.isAtLeast(IBP_3_0_IV1)) - assertEquals(IBP_3_0_IV1, config.logMessageFormatVersion) - else - assertEquals(messageFormatVersion, config.logMessageFormatVersion) - } else { - assertThrows(classOf[IllegalArgumentException], () => buildConfig(interBrokerVersion, messageFormatVersion)) - } - } - } - } - @Test - @nowarn("cat=deprecation") // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details def testFromPropsInvalid(): Unit = { def baseProperties: Properties = { val validRequiredProperties = new Properties() - validRequiredProperties.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "127.0.0.1:2181") + validRequiredProperties.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + validRequiredProperties.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") + validRequiredProperties.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + validRequiredProperties.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") validRequiredProperties } // to ensure a basis is valid - bootstraps all needed validation @@ -940,7 +877,6 @@ class KafkaConfigTest { case ServerLogConfigs.LOG_FLUSH_INTERVAL_MESSAGES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") case ServerLogConfigs.LOG_FLUSH_SCHEDULER_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case 
ServerLogConfigs.LOG_FLUSH_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerLogConfigs.LOG_FLUSH_START_OFFSET_CHECKPOINT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -966,8 +902,6 @@ class KafkaConfigTest { case ReplicationConfigs.LEADER_IMBALANCE_PER_BROKER_PERCENTAGE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.LEADER_IMBALANCE_CHECK_INTERVAL_SECONDS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_boolean", "0") - case ServerConfigs.CONTROLLED_SHUTDOWN_MAX_RETRIES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case ServerConfigs.CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_boolean", "0") case GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -1063,14 +997,6 @@ class KafkaConfigTest { // Security config case SecurityConfig.SECURITY_PROVIDERS_CONFIG => - // Password encoder configs - case PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG => - case PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_CONFIG => - case PasswordEncoderConfigs.PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_CONFIG => - case PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_CONFIG => - case PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "-1", "0") - case PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "-1", "0") - //delegation token configs case DelegationTokenManagerConfigs.DELEGATION_TOKEN_SECRET_KEY_CONFIG => // ignore case DelegationTokenManagerConfigs.DELEGATION_TOKEN_MAX_LIFETIME_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") @@ -1104,8 +1030,8 @@ class KafkaConfigTest { case RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP => // ignore string case RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -2) - case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -2) + case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) + case 
RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) @@ -1153,12 +1079,14 @@ class KafkaConfigTest { } } - @nowarn("cat=deprecation") @Test def testDynamicLogConfigs(): Unit = { def baseProperties: Properties = { val validRequiredProperties = new Properties() - validRequiredProperties.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "127.0.0.1:2181") + validRequiredProperties.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + validRequiredProperties.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") + validRequiredProperties.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9093") + validRequiredProperties.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") validRequiredProperties } @@ -1190,7 +1118,7 @@ class KafkaConfigTest { case TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG => assertDynamic(kafkaConfigProp, "5", () => config.zstdCompressionLevel) case TopicConfig.SEGMENT_BYTES_CONFIG => - assertDynamic(kafkaConfigProp, 10000, () => config.logSegmentBytes) + assertDynamic(kafkaConfigProp, 1048576, () => config.logSegmentBytes) case TopicConfig.SEGMENT_MS_CONFIG => assertDynamic(kafkaConfigProp, 10001L, () => config.logRollTimeMillis) case TopicConfig.DELETE_RETENTION_MS_CONFIG => @@ -1207,10 +1135,6 @@ class KafkaConfigTest { assertDynamic(kafkaConfigProp, 10007, () => config.logIndexIntervalBytes) case TopicConfig.MAX_MESSAGE_BYTES_CONFIG => assertDynamic(kafkaConfigProp, 10008, () => config.messageMaxBytes) - case TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG => - assertDynamic(kafkaConfigProp, false, () => config.logMessageDownConversionEnable) - case TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG => - assertDynamic(kafkaConfigProp, 10009, () => config.logMessageTimestampDifferenceMaxMs) case TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG => assertDynamic(kafkaConfigProp, 10015L, () => config.logMessageTimestampBeforeMaxMs) case TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG => @@ -1239,7 +1163,6 @@ class KafkaConfigTest { assertDynamic(kafkaConfigProp, 10015L, () => config.remoteLogManagerConfig.logLocalRetentionMs) case TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG => assertDynamic(kafkaConfigProp, 10016L, () => config.remoteLogManagerConfig.logLocalRetentionBytes) - case TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG => // not dynamically updatable case QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG => // topic only config @@ -1254,7 +1177,9 @@ class KafkaConfigTest { @Test def testSpecificProperties(): Unit = { val defaults = new Properties() - defaults.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "127.0.0.1:2181") + defaults.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + defaults.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + defaults.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") // For ZkConnectionTimeoutMs defaults.setProperty(ZkConfigs.ZK_SESSION_TIMEOUT_MS_CONFIG, "1234") 
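// Editor's note: illustrative sketch only, not part of this patch. The migrated tests above all
// follow the same pattern: zookeeper.connect is dropped and a minimal set of KRaft broker
// settings is supplied before calling KafkaConfig.fromProps. A standalone version of that
// pattern, assuming the imports already present in KafkaConfigTest; the kraftBrokerProps /
// kraftBrokerConfig names and the port value are placeholders.
val kraftBrokerProps = new java.util.Properties()
kraftBrokerProps.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker")                     // run this node as a KRaft broker
kraftBrokerProps.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1")                             // broker/node id
kraftBrokerProps.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9093")  // controller quorum bootstrap servers
kraftBrokerProps.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER")     // listener name(s) used by the controller
val kraftBrokerConfig = KafkaConfig.fromProps(kraftBrokerProps)                               // parses and validates the KRaft broker config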
defaults.setProperty(ServerConfigs.BROKER_ID_GENERATION_ENABLE_CONFIG, "false") @@ -1273,7 +1198,6 @@ class KafkaConfigTest { defaults.setProperty(MetricConfigs.METRIC_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.DEBUG.toString) val config = KafkaConfig.fromProps(defaults) - assertEquals("127.0.0.1:2181", config.zkConnect) assertEquals(1234, config.zkConnectionTimeoutMs) assertEquals(false, config.brokerIdGenerationEnable) assertEquals(1, config.maxReservedBrokerId) @@ -1300,14 +1224,17 @@ class KafkaConfigTest { @Test def testNonroutableAdvertisedListeners(): Unit = { val props = new Properties() - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "127.0.0.1:2181") + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://0.0.0.0:9092") - assertFalse(isValidKafkaConfig(props)) + assertBadConfigContainingMessage(props, "advertised.listeners cannot use the nonroutable meta-address 0.0.0.0. Use a routable IP address.") } @Test def testMaxConnectionsPerIpProp(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG, "0") assertFalse(isValidKafkaConfig(props)) props.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, "127.0.0.1:100") @@ -1410,6 +1337,23 @@ class KafkaConfigTest { KafkaConfig.fromProps(props) } + @Test + def testControllerListenerNamesValidForKRaftControllerOnly(): Unit = { + val props = new Properties() + props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "1@localhost:9092") + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "SASL_SSL://:9092,CONTROLLER://:9093") + props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "SASL_SSL:SASL_SSL,CONTROLLER:SASL_SSL") + props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "SASL_SSL") + props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER,SASL_SSL") + + val expectedExceptionContainsText = + """controller.listener.names must not contain an explicitly set inter.broker.listener.name configuration value + |when process.roles=controller""".stripMargin.replaceAll("\n", " ") + assertBadConfigContainingMessage(props, expectedExceptionContainsText) + } + @Test def testControllerQuorumVoterStringsToNodes(): Unit = { assertThrows(classOf[ConfigException], () => QuorumConfig.quorumVoterStringsToNodes(Collections.singletonList(""))) @@ -1422,6 +1366,7 @@ class KafkaConfigTest { @Test def testInvalidQuorumVoterConfig(): Unit = { + assertInvalidQuorumVoters("") assertInvalidQuorumVoters("1") assertInvalidQuorumVoters("1@") assertInvalidQuorumVoters("1:") @@ -1437,16 +1382,11 @@ class KafkaConfigTest { } private def assertInvalidQuorumVoters(value: String): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, value) assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) } - @Test - def testValidEmptyQuorumVotersParsing(): Unit = { - 
assertValidQuorumVoters(new util.HashMap[Integer, InetSocketAddress](), "") - } - @Test def testValidQuorumVotersParsingWithIpAddress(): Unit = { val expected = new util.HashMap[Integer, InetSocketAddress]() @@ -1464,7 +1404,7 @@ class KafkaConfigTest { } private def assertValidQuorumVoters(expectedVoters: util.Map[Integer, InetSocketAddress], value: String): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, value) val addresses = QuorumConfig.parseVoterConnections(KafkaConfig.fromProps(props).quorumConfig.voters) assertEquals(expectedVoters, addresses) @@ -1477,7 +1417,7 @@ class KafkaConfigTest { InetSocketAddress.createUnresolved("kafka2", 9092) ) - val props = TestUtils.createBrokerConfig(0, null) + val props = TestUtils.createBrokerConfig(0) props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092") val addresses = QuorumConfig.parseBootstrapServers( @@ -1528,84 +1468,6 @@ class KafkaConfigTest { assertFalse(isValidKafkaConfig(props)) } - @Test - def testRejectsLargeNodeIdForZkBasedCaseWithAutoGenEnabled(): Unit = { - // Generation of Broker IDs is supported when using ZooKeeper-based controllers, - // so pick a broker ID greater than reserved.broker.max.id, which defaults to 1000, - // and make sure it is not allowed with broker.id.generation.enable=true (true is the default) - val largeBrokerId = 2000 - val props = TestUtils.createBrokerConfig(largeBrokerId, TestUtils.MockZkConnect, port = TestUtils.MockZkPort) - val listeners = "PLAINTEXT://A:9092,SSL://B:9093,SASL_SSL://C:9094" - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) - assertFalse(isValidKafkaConfig(props)) - } - - @Test - def testAcceptsNegativeOneNodeIdForZkBasedCaseWithAutoGenEnabled(): Unit = { - // -1 is the default for both node.id and broker.id; it implies "auto-generate" and should succeed - val props = TestUtils.createBrokerConfig(-1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort) - val listeners = "PLAINTEXT://A:9092,SSL://B:9093,SASL_SSL://C:9094" - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) - KafkaConfig.fromProps(props) - } - - @Test - def testRejectsNegativeTwoNodeIdForZkBasedCaseWithAutoGenEnabled(): Unit = { - // -1 implies "auto-generate" and should succeed, but -2 does not and should fail - val negativeTwoNodeId = -2 - val props = TestUtils.createBrokerConfig(negativeTwoNodeId, TestUtils.MockZkConnect, port = TestUtils.MockZkPort) - val listeners = "PLAINTEXT://A:9092,SSL://B:9093,SASL_SSL://C:9094" - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) - props.setProperty(KRaftConfigs.NODE_ID_CONFIG, negativeTwoNodeId.toString) - props.setProperty(ServerConfigs.BROKER_ID_CONFIG, negativeTwoNodeId.toString) - assertFalse(isValidKafkaConfig(props)) - } - - @Test - def testAcceptsLargeNodeIdForZkBasedCaseWithAutoGenDisabled(): Unit = { - // Ensure a broker ID greater than reserved.broker.max.id, which defaults to 1000, - // is allowed with broker.id.generation.enable=false - val largeBrokerId = 2000 - val props = TestUtils.createBrokerConfig(largeBrokerId, TestUtils.MockZkConnect, port = TestUtils.MockZkPort) - val listeners = 
"PLAINTEXT://A:9092,SSL://B:9093,SASL_SSL://C:9094" - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) - props.setProperty(ServerConfigs.BROKER_ID_GENERATION_ENABLE_CONFIG, "false") - KafkaConfig.fromProps(props) - } - - @Test - def testRejectsNegativeNodeIdForZkBasedCaseWithAutoGenDisabled(): Unit = { - // -1 is the default for both node.id and broker.id - val props = TestUtils.createBrokerConfig(-1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort) - val listeners = "PLAINTEXT://A:9092,SSL://B:9093,SASL_SSL://C:9094" - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.setProperty(ServerConfigs.BROKER_ID_GENERATION_ENABLE_CONFIG, "false") - assertFalse(isValidKafkaConfig(props)) - } - - @Test - def testZookeeperConnectRequiredIfEmptyProcessRoles(): Unit = { - val props = new Properties() - props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092") - assertFalse(isValidKafkaConfig(props)) - } - - @Test - def testZookeeperConnectNotRequiredIfNonEmptyProcessRoles(): Unit = { - val props = new Properties() - props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") - KafkaConfig.fromProps(props) - } - @Test def testCustomMetadataLogDir(): Unit = { val metadataDir = "/path/to/metadata/dir" @@ -1671,6 +1533,7 @@ class KafkaConfigTest { @Test def testNodeIdMustNotBeDifferentThanBrokerId(): Unit = { val props = new Properties() + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") assertEquals("You must set `node.id` to the same value as `broker.id`.", @@ -1682,8 +1545,7 @@ class KafkaConfigTest { val props = new Properties() props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") - assertEquals("Missing configuration `node.id` which is required when `process.roles` " + - "is defined (i.e. 
when running in KRaft mode).", + assertEquals("Missing required configuration \"node.id\" which has no default value.", assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage()) } @@ -1726,7 +1588,10 @@ class KafkaConfigTest { @Test def testSaslJwksEndpointRetryDefaults(): Unit = { val props = new Properties() - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") + props.setProperty(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "CONTROLLER://localhost:9092") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") val config = KafkaConfig.fromProps(props) assertNotNull(config.getLong(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS)) assertNotNull(config.getLong(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS)) @@ -1734,7 +1599,7 @@ class KafkaConfigTest { @Test def testInvalidAuthorizerClassName(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) val configs = new util.HashMap[Object, Object](props) configs.put(ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, null) val ce = assertThrows(classOf[ConfigException], () => KafkaConfig.apply(configs)) @@ -1743,7 +1608,7 @@ class KafkaConfigTest { @Test def testInvalidSecurityInterBrokerProtocol(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.setProperty(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG, "abc") val ce = assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) assertTrue(ce.getMessage.contains(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG)) @@ -1797,15 +1662,6 @@ class KafkaConfigTest { } } - @Test - def testInvalidInterBrokerProtocolVersionKRaft(): Unit = { - val props = new Properties() - props.putAll(kraftProps()) - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "2.8") - assertEquals("A non-KRaft version 2.8 given for inter.broker.protocol.version. 
The minimum version is 3.0-IV1", - assertThrows(classOf[ConfigException], () => new KafkaConfig(props)).getMessage) - } - @Test def testDefaultInterBrokerProtocolVersionKRaft(): Unit = { val props = new Properties() @@ -1833,45 +1689,6 @@ class KafkaConfigTest { ) } - @Test - def testMigrationCannotBeEnabledWithJBOD(): Unit = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort, logDirCount = 2) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "3000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, MetadataVersion.IBP_3_7_IV1.version()) - - assertEquals( - "requirement failed: Cannot enable ZooKeeper migration with multiple log directories " + - "(aka JBOD) without setting 'inter.broker.protocol.version' to 3.7-IV2 or higher", - assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage) - } - - @Test - def testMigrationCannotBeEnabledWithBrokerIdGeneration(): Unit = { - val props = TestUtils.createBrokerConfig(-1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort, logDirCount = 2) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "3000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - assertEquals( - "requirement failed: broker.id generation is incompatible with ZooKeeper migration. Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).", - assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage) - } - - @Test - def testMigrationEnabledKRaftMode(): Unit = { - val props = new Properties() - props.putAll(kraftProps()) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - - assertEquals( - "If using `zookeeper.metadata.migration.enable` in KRaft mode, `zookeeper.connect` must also be set.", - assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage) - - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - KafkaConfig.fromProps(props) - } - @Test def testConsumerGroupSessionTimeoutValidation(): Unit = { val props = new Properties() @@ -1965,7 +1782,7 @@ class KafkaConfigTest { @Test def testSingleLogDirectoryWithRemoteLogStorage(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) + val props = TestUtils.createBrokerConfig(0, port = 8181) props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, String.valueOf(true)) props.put(ServerLogConfigs.LOG_DIRS_CONFIG, "/tmp/a") assertDoesNotThrow(() => KafkaConfig.fromProps(props)) diff --git a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala index 68820f847621f..e07ae3032ca6b 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala @@ -71,7 +71,7 @@ class KafkaMetricsReporterTest extends QuorumTestHarness { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - val props = TestUtils.createBrokerConfig(1, zkConnectOrNull) + val props = TestUtils.createBrokerConfig(1) props.setProperty(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, 
"kafka.server.KafkaMetricsReporterTest$MockMetricsReporter") props.setProperty(ServerConfigs.BROKER_ID_GENERATION_ENABLE_CONFIG, "true") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") @@ -84,13 +84,8 @@ class KafkaMetricsReporterTest extends QuorumTestHarness { @ValueSource(strings = Array("kraft")) def testMetricsContextNamespacePresent(quorum: String): Unit = { assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.CLUSTERID.get()) - if (isKRaftTest()) { - assertNull(KafkaMetricsReporterTest.MockMetricsReporter.BROKERID.get()) - assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.NODEID.get()) - } else { - assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.BROKERID.get()) - assertNull(KafkaMetricsReporterTest.MockMetricsReporter.NODEID.get()) - } + assertNull(KafkaMetricsReporterTest.MockMetricsReporter.BROKERID.get()) + assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.NODEID.get()) assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.JMXPREFIX.get()) broker.shutdown() diff --git a/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala index 54164c064e871..4cc3f968d2769 100644 --- a/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala @@ -16,6 +16,8 @@ */ package kafka.server +import org.apache.kafka.common.Uuid +import org.apache.kafka.common.message.LeaveGroupResponseData import org.apache.kafka.common.test.api.ClusterInstance import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.test.api.ClusterTestExtensions @@ -23,26 +25,91 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.JoinGroupRequest import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.classic.ClassicGroupState +import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.extension.ExtendWith +import scala.jdk.CollectionConverters._ + @ExtendWith(value = Array(classOf[ClusterTestExtensions])) -@ClusterTestDefaults(types = Array(Type.KRAFT)) +@ClusterTestDefaults(types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") +)) class LeaveGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest(serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - )) + @ClusterTest + def testLeaveGroupWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + + // Create the topic. 
+ createTopic( + topic = "foo", + numPartitions = 3 + ) + + def instanceId(memberId: String): String = "instance_" + memberId + val memberIds = Range(0, 3).map { __ => + Uuid.randomUuid().toString + } + + for (version <- 3 to ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled)) { + // Join with all the members. + memberIds.foreach { memberId => + assertEquals(Errors.NONE.code, consumerGroupHeartbeat( + groupId = "group", + memberId = memberId, + memberEpoch = 0, + instanceId = instanceId(memberId), + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + ).errorCode) + } + + assertEquals( + new LeaveGroupResponseData() + .setMembers(List( + new LeaveGroupResponseData.MemberResponse() + .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) + .setGroupInstanceId(instanceId(memberIds(0))), + new LeaveGroupResponseData.MemberResponse() + .setMemberId(memberIds(1)) + .setGroupInstanceId(instanceId(memberIds(1))), + new LeaveGroupResponseData.MemberResponse() + .setMemberId(memberIds(2)) + .setGroupInstanceId(null) + ).asJava), + classicLeaveGroup( + groupId = "group", + memberIds = List( + JoinGroupRequest.UNKNOWN_MEMBER_ID, + memberIds(1), + memberIds(2) + ), + groupInstanceIds = List( + instanceId(memberIds(0)), + instanceId(memberIds(1)), + null + ), + version = version.toShort + ) + ) + + assertEquals( + ConsumerGroupState.EMPTY.toString, + consumerGroupDescribe(List("group")).head.groupState + ) + } + } + + @ClusterTest def testLeaveGroupWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testLeaveGroup() } - @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - )) + @ClusterTest def testLeaveGroupWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { testLeaveGroup() } diff --git a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala index 9acf1a85a9294..5ba6ef34603a3 100644 --- a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala @@ -165,14 +165,7 @@ class ListOffsetsRequestTest extends BaseRequestTest { private[this] def fetchOffsetAndEpochWithError(serverId: Int, timestamp: Long, version: Short): (Long, Int, Short) = { val partitionData = sendRequest(serverId, timestamp, version) - - if (version == 0) { - if (partitionData.oldStyleOffsets().isEmpty) - (-1, partitionData.leaderEpoch, partitionData.errorCode()) - else - (partitionData.oldStyleOffsets().asScala.head, partitionData.leaderEpoch, partitionData.errorCode()) - } else - (partitionData.offset, partitionData.leaderEpoch, partitionData.errorCode()) + (partitionData.offset, partitionData.leaderEpoch, partitionData.errorCode()) } @ParameterizedTest diff --git a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala index 7882696e22ebd..aa2e634e9bfaf 100644 --- a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala +++ 
b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala @@ -20,23 +20,21 @@ import java.io.File import java.util.Collections import java.util.concurrent.{ExecutionException, TimeUnit} import kafka.api.IntegrationTestHarness -import kafka.controller.{OfflineReplica, PartitionAndReplica} import kafka.utils.TestUtils.{Checkpoint, LogDirFailureType, Roll, waitUntilTrue} import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.{KafkaStorageException, NotLeaderOrFollowerException} -import org.apache.kafka.common.utils.{Exit, Utils} +import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.BrokerState import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.provider.MethodSource import org.junit.jupiter.params.ParameterizedTest import java.nio.file.Files -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ /** @@ -100,37 +98,6 @@ class LogDirFailureTest extends IntegrationTestHarness { testProduceAfterLogDirFailureOnLeader(Roll, quorum) } - // Broker should halt on any log directory failure if inter-broker protocol < 1.0 - @nowarn("cat=deprecation") - @Test - def testZkBrokerWithOldInterBrokerProtocolShouldHaltOnLogDirFailure(): Unit = { - @volatile var statusCodeOption: Option[Int] = None - Exit.setHaltProcedure { (statusCode, _) => - statusCodeOption = Some(statusCode) - throw new IllegalArgumentException - } - - var server: KafkaServer = null - try { - val props = TestUtils.createBrokerConfig(brokerCount, zkConnect, logDirCount = 3) - props.put(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.11.0") - props.put(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, "0.11.0") - val kafkaConfig = KafkaConfig.fromProps(props) - val logDir = new File(kafkaConfig.logDirs.head) - // Make log directory of the partition on the leader broker inaccessible by replacing it with a file - CoreUtils.swallow(Utils.delete(logDir), this) - Files.createFile(logDir.toPath) - assertTrue(logDir.isFile) - - server = TestUtils.createServer(kafkaConfig) - TestUtils.waitUntilTrue(() => statusCodeOption.contains(1), "timed out waiting for broker to halt") - } finally { - Exit.resetHaltProcedure() - if (server != null) - TestUtils.shutdownServers(List(server)) - } - } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceErrorFromFailureOnCheckpoint(quorum: String, groupProtocol: String): Unit = { @@ -227,26 +194,17 @@ class LogDirFailureTest extends IntegrationTestHarness { // Consumer should receive some messages TestUtils.pollUntilAtLeastNumRecords(consumer, 1) - if (quorum == "kraft") { - waitUntilTrue(() => { - // get the broker with broker.nodeId == originalLeaderServerId - val brokerWithDirFail = brokers.find(_.config.nodeId == originalLeaderServerId).map(_.asInstanceOf[BrokerServer]) - // check if the broker has the offline log dir - val hasOfflineDir = brokerWithDirFail.exists(_.logDirFailureChannel.hasOfflineLogDir(failedLogDir.toPath.toString)) - // check if the broker has the offline replica - hasOfflineDir && 
brokerWithDirFail.exists(broker => - broker.replicaManager.metadataCache - .getClusterMetadata(broker.clusterId, broker.config.interBrokerListenerName) - .partition(new TopicPartition(topic, 0)).offlineReplicas().map(_.id()).contains(originalLeaderServerId)) - }, "Expected to find an offline log dir") - } else { - // There should be no remaining LogDirEventNotification znode - assertTrue(zkClient.getAllLogDirEventNotifications.isEmpty) - // The controller should have marked the replica on the original leader as offline - val controllerServer = servers.find(_.kafkaController.isActive).get - val offlineReplicas = controllerServer.kafkaController.controllerContext.replicasInState(topic, OfflineReplica) - assertTrue(offlineReplicas.contains(PartitionAndReplica(new TopicPartition(topic, 0), originalLeaderServerId))) - } + waitUntilTrue(() => { + // get the broker with broker.nodeId == originalLeaderServerId + val brokerWithDirFail = brokers.find(_.config.nodeId == originalLeaderServerId).map(_.asInstanceOf[BrokerServer]) + // check if the broker has the offline log dir + val hasOfflineDir = brokerWithDirFail.exists(_.logDirFailureChannel.hasOfflineLogDir(failedLogDir.toPath.toString)) + // check if the broker has the offline replica + hasOfflineDir && brokerWithDirFail.exists(broker => + broker.replicaManager.metadataCache + .getClusterMetadata(broker.clusterId, broker.config.interBrokerListenerName) + .partition(new TopicPartition(topic, 0)).offlineReplicas().map(_.id()).contains(originalLeaderServerId)) + }, "Expected to find an offline log dir") } diff --git a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala index 8d1241c134dcb..efb057bd1cb3b 100755 --- a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala @@ -17,15 +17,15 @@ package kafka.server -import kafka.log.{OffsetResultHolder, UnifiedLog} +import kafka.log.UnifiedLog import kafka.utils.TestUtils import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.record.FileRecords import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse} -import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{IsolationLevel, TopicPartition} -import org.apache.kafka.storage.internals.log.{LogSegment, LogStartOffsetIncrementReason} +import org.apache.kafka.storage.internals.log.{LogSegment, LogStartOffsetIncrementReason, OffsetResultHolder} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest @@ -52,16 +52,14 @@ class LogOffsetTest extends BaseRequestTest { props.put("num.partitions", "20") props.put("log.retention.hours", "10") props.put("log.retention.check.interval.ms", (5 * 1000 * 60).toString) - props.put("log.segment.bytes", "140") } - @deprecated("ListOffsetsRequest V0", since = "") @ParameterizedTest @ValueSource(strings = Array("kraft")) def testGetOffsetsForUnknownTopic(quorum: String): Unit = { val topicPartition = new TopicPartition("foo", 0) val request = ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(buildTargetTimes(topicPartition, 
ListOffsetsRequest.LATEST_TIMESTAMP, 10).asJava).build(0) + .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP).asJava).build(1) val response = sendListOffsetsRequest(request) assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code, findPartition(response.topics.asScala, topicPartition).errorCode) } @@ -82,15 +80,15 @@ class LogOffsetTest extends BaseRequestTest { log.maybeIncrementLogStartOffset(3, LogStartOffsetIncrementReason.ClientRecordDeletion) log.deleteOldSegments() - val offsets = log.legacyFetchOffsetsBefore(ListOffsetsRequest.LATEST_TIMESTAMP, 15) - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 3L), offsets) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) + assertEquals(Optional.of(20L), offset) TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, topicPartition.partition, broker), "Leader should be elected") - val request = ListOffsetsRequest.Builder.forReplica(0, 0) - .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP, 15).asJava).build() - val consumerOffsets = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).oldStyleOffsets.asScala - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 3L), consumerOffsets) + val request = ListOffsetsRequest.Builder.forReplica(1, 1) + .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP).asJava).build() + val consumerOffset = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).offset + assertEquals(20L, consumerOffset) } @ParameterizedTest @@ -112,7 +110,7 @@ class LogOffsetTest extends BaseRequestTest { log.truncateTo(0) - assertEquals(Option.empty, log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP).timestampAndOffsetOpt) + assertEquals(Optional.empty, log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP).timestampAndOffsetOpt) } @ParameterizedTest @@ -149,19 +147,19 @@ class LogOffsetTest extends BaseRequestTest { log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) log.flush(false) - val offsets = log.legacyFetchOffsetsBefore(ListOffsetsRequest.LATEST_TIMESTAMP, 15) - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), offsets) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) + assertEquals(Optional.of(20L), offset) TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, 0, broker), "Leader should be elected") - val request = ListOffsetsRequest.Builder.forReplica(0, 0) - .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP, 15).asJava).build() - val consumerOffsets = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).oldStyleOffsets.asScala - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), consumerOffsets) + val request = ListOffsetsRequest.Builder.forReplica(1, 1) + .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP).asJava).build() + val consumerOffset = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).offset + assertEquals(20L, consumerOffset) // try to fetch using latest offset val fetchRequest = FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, 0, 1, - Map(topicPartition -> new FetchRequest.PartitionData(topicId, consumerOffsets.head, FetchRequest.INVALID_LOG_START_OFFSET, + Map(topicPartition 
-> new FetchRequest.PartitionData(topicId, consumerOffset, FetchRequest.INVALID_LOG_START_OFFSET, 300 * 1024, Optional.empty())).asJava).build() val fetchResponse = sendFetchRequest(fetchRequest) assertFalse(FetchResponse.recordsOrFail(fetchResponse.responseData(topicNames, ApiKeys.FETCH.latestVersion).get(topicPartition)).batches.iterator.hasNext) @@ -182,10 +180,10 @@ class LogOffsetTest extends BaseRequestTest { var offsetChanged = false for (_ <- 1 to 14) { val topicPartition = new TopicPartition(topic, 0) - val request = ListOffsetsRequest.Builder.forReplica(0, 0) - .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.EARLIEST_TIMESTAMP, 1).asJava).build() - val consumerOffsets = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).oldStyleOffsets.asScala - if (consumerOffsets.head == 1) + val request = ListOffsetsRequest.Builder.forReplica(1, 1) + .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.EARLIEST_TIMESTAMP).asJava).build() + val consumerOffset = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).offset + if (consumerOffset == 1) offsetChanged = true } assertFalse(offsetChanged) @@ -201,40 +199,9 @@ class LogOffsetTest extends BaseRequestTest { log.updateHighWatermark(log.logEndOffset) assertEquals(0L, log.logEndOffset) - assertEquals(OffsetResultHolder(None), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP)) - } - - @deprecated("legacyFetchOffsetsBefore", since = "") - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testGetOffsetsBeforeNow(quorum: String): Unit = { - val random = new Random - val topic = "kafka-" - val topicPartition = new TopicPartition(topic, random.nextInt(3)) - - createTopic(topic, 3) - - val logManager = broker.logManager - val log = logManager.getOrCreateLog(topicPartition, topicId = None) - - for (_ <- 0 until 20) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) - log.flush(false) - - val now = Time.SYSTEM.milliseconds + 30000 // pretend it is the future to avoid race conditions with the fs - - val offsets = log.legacyFetchOffsetsBefore(now, 15) - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), offsets) - - TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, topicPartition.partition, broker), - "Leader should be elected") - val request = ListOffsetsRequest.Builder.forReplica(0, 0) - .setTargetTimes(buildTargetTimes(topicPartition, now, 15).asJava).build() - val consumerOffsets = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).oldStyleOffsets.asScala - assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), consumerOffsets) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP)) } - @deprecated("legacyFetchOffsetsBefore", since = "") @ParameterizedTest @ValueSource(strings = Array("kraft")) def testGetOffsetsBeforeEarliestTime(quorum: String): Unit = { @@ -250,15 +217,15 @@ class LogOffsetTest extends BaseRequestTest { log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) log.flush(false) - val offsets = log.legacyFetchOffsetsBefore(ListOffsetsRequest.EARLIEST_TIMESTAMP, 10) - assertEquals(Seq(0L), offsets) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) + assertEquals(Optional.of(0L), offset) 
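// Editor's note: illustrative sketch only, not part of this patch. In this file the removal of
// the ListOffsets v0 helpers (legacyFetchOffsetsBefore, oldStyleOffsets, maxNumOffsets) goes
// together with OffsetResultHolder moving to org.apache.kafka.storage.internals.log, where it
// wraps a java.util.Optional instead of a Scala Option; that is why the assertions changed from
// Option/Seq comparisons to Optional comparisons. Types are as imported at the top of this file;
// the emptyResult name is a placeholder.
val emptyResult = new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]())  // an "offset not found" result
assertEquals(Optional.empty, emptyResult.timestampAndOffsetOpt)                             // no timestamp/offset pair is present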
TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, topicPartition.partition, broker), "Leader should be elected") - val request = ListOffsetsRequest.Builder.forReplica(0, 0) - .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.EARLIEST_TIMESTAMP, 10).asJava).build() - val consumerOffsets = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).oldStyleOffsets.asScala - assertEquals(Seq(0L), consumerOffsets) + val request = ListOffsetsRequest.Builder.forReplica(1, 1) + .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.EARLIEST_TIMESTAMP).asJava).build() + val offsetFromResponse = findPartition(sendListOffsetsRequest(request).topics.asScala, topicPartition).offset + assertEquals(0L, offsetFromResponse) } /* We test that `fetchOffsetsBefore` works correctly if `LogSegment.size` changes after each invocation (simulating @@ -303,13 +270,12 @@ class LogOffsetTest extends BaseRequestTest { connectAndReceive[FetchResponse](request) } - private def buildTargetTimes(tp: TopicPartition, timestamp: Long, maxNumOffsets: Int): List[ListOffsetsTopic] = { + private def buildTargetTimes(tp: TopicPartition, timestamp: Long): List[ListOffsetsTopic] = { List(new ListOffsetsTopic() .setName(tp.topic) .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) - .setTimestamp(timestamp) - .setMaxNumOffsets(maxNumOffsets)).asJava) + .setTimestamp(timestamp)).asJava) ) } diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala index 156fa60a1d53d..f9970d2967afa 100755 --- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala @@ -82,7 +82,7 @@ class LogRecoveryTest extends QuorumTestHarness { override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - configs = TestUtils.createBrokerConfigs(2, zkConnectOrNull, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) + configs = TestUtils.createBrokerConfigs(2, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) // start both servers server1 = createBroker(configProps1) diff --git a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala index 4e70652494c3b..669210f8fcc8b 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala @@ -16,22 +16,18 @@ */ package kafka.server -import kafka.cluster.Broker -import kafka.server.metadata.{KRaftMetadataCache, MetadataSnapshot, ZkMetadataCache} +import kafka.server.metadata.KRaftMetadataCache import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition -import org.apache.kafka.common.message.UpdateMetadataRequestData -import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataBroker, UpdateMetadataEndpoint, UpdateMetadataPartitionState, UpdateMetadataTopicState} import org.apache.kafka.common.metadata.RegisterBrokerRecord.{BrokerEndpoint, BrokerEndpointCollection} import org.apache.kafka.common.metadata._ import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors} +import org.apache.kafka.common.protocol.{ApiMessage, Errors} import org.apache.kafka.common.record.RecordBatch -import org.apache.kafka.common.requests.{AbstractControlRequest, 
UpdateMetadataRequest} import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.{DirectoryId, Node, TopicPartition, Uuid} -import org.apache.kafka.image.{ClusterImage, MetadataDelta, MetadataImage, MetadataProvenance} -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion} +import org.apache.kafka.common.{DirectoryId, Uuid} +import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} +import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.KRaftVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest @@ -44,29 +40,19 @@ import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ object MetadataCacheTest { - def zkCacheProvider(): util.stream.Stream[MetadataCache] = - util.stream.Stream.of[MetadataCache]( - MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting()) - ) - def cacheProvider(): util.stream.Stream[MetadataCache] = util.stream.Stream.of[MetadataCache]( - MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting()), MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.KRAFT_VERSION_0) ) - def updateCache(cache: MetadataCache, request: UpdateMetadataRequest, records: Seq[ApiMessage] = List()): Unit = { + def updateCache(cache: MetadataCache, records: Seq[ApiMessage]): Unit = { cache match { - case c: ZkMetadataCache => c.updateMetadata(0, request) case c: KRaftMetadataCache => { - // UpdateMetadataRequest always contains a full list of brokers, but may contain - // a partial list of partitions. Therefore, base our delta off a partial image that - // contains no brokers, but which contains the previous partitions. val image = c.currentImage() val partialImage = new MetadataImage( new MetadataProvenance(100L, 10, 1000L, true), image.features(), - ClusterImage.EMPTY, + image.cluster(), image.topics(), image.configs(), image.clientQuotas(), @@ -75,60 +61,7 @@ object MetadataCacheTest { image.scram(), image.delegationTokens()) val delta = new MetadataDelta.Builder().setImage(partialImage).build() - - def toRecord(broker: UpdateMetadataBroker): RegisterBrokerRecord = { - val endpoints = new BrokerEndpointCollection() - broker.endpoints().forEach { e => - endpoints.add(new BrokerEndpoint(). - setName(e.listener()). - setHost(e.host()). - setPort(e.port()). - setSecurityProtocol(e.securityProtocol())) - } - val prevBroker = Option(image.cluster().broker(broker.id())) - // UpdateMetadataRequest doesn't contain all the broker registration fields, so get - // them from the previous registration if available. - val (epoch, incarnationId, fenced) = prevBroker match { - case None => (0L, Uuid.ZERO_UUID, false) - case Some(b) => (b.epoch(), b.incarnationId(), b.fenced()) - } - new RegisterBrokerRecord(). - setBrokerId(broker.id()). - setBrokerEpoch(epoch). - setIncarnationId(incarnationId). - setEndPoints(endpoints). - setRack(broker.rack()). 
- setFenced(fenced) - } - request.liveBrokers().iterator().asScala.foreach { brokerInfo => - delta.replay(toRecord(brokerInfo)) - } - - def toRecords(topic: UpdateMetadataTopicState): Seq[ApiMessage] = { - val results = new mutable.ArrayBuffer[ApiMessage]() - results += new TopicRecord().setName(topic.topicName()).setTopicId(topic.topicId()) - topic.partitionStates().forEach { partition => - if (partition.leader() == LeaderAndIsr.LEADER_DURING_DELETE) { - results += new RemoveTopicRecord().setTopicId(topic.topicId()) - } else { - results += new PartitionRecord(). - setPartitionId(partition.partitionIndex()). - setTopicId(topic.topicId()). - setReplicas(partition.replicas()). - setIsr(partition.isr()). - setRemovingReplicas(Collections.emptyList()). - setAddingReplicas(Collections.emptyList()). - setLeader(partition.leader()). - setLeaderEpoch(partition.leaderEpoch()). - setPartitionEpoch(partition.zkVersion()) - } - } - results - } - request.topicStates().forEach { topic => - toRecords(topic).foreach(delta.replay) - } - records.foreach(delta.replay) + records.foreach(record => delta.replay(record)) c.setImage(delta.apply(new MetadataProvenance(100L, 10, 1000L, true))) } case _ => throw new RuntimeException("Unsupported cache type") @@ -153,70 +86,59 @@ class MetadataCacheTest { val topic0 = "topic-0" val topic1 = "topic-1" - val zkVersion = 3 - val controllerId = 2 - val controllerEpoch = 1 + val topicIds = new util.HashMap[String, Uuid]() + topicIds.put(topic0, Uuid.randomUuid()) + topicIds.put(topic1, Uuid.randomUuid()) - def endpoints(brokerId: Int): Seq[UpdateMetadataEndpoint] = { + def endpoints(brokerId: Int): BrokerEndpointCollection = { val host = s"foo-$brokerId" - Seq( - new UpdateMetadataEndpoint() + new BrokerEndpointCollection(Seq( + new BrokerEndpoint() .setHost(host) .setPort(9092) .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT).value), - new UpdateMetadataEndpoint() + .setName(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT).value), + new BrokerEndpoint() .setHost(host) .setPort(9093) .setSecurityProtocol(SecurityProtocol.SSL.id) - .setListener(ListenerName.forSecurityProtocol(SecurityProtocol.SSL).value) - ) + .setName(ListenerName.forSecurityProtocol(SecurityProtocol.SSL).value) + ).iterator.asJava) } val brokers = (0 to 4).map { brokerId => - new UpdateMetadataBroker() - .setId(brokerId) - .setEndpoints(endpoints(brokerId).asJava) + new RegisterBrokerRecord() + .setBrokerId(brokerId) + .setEndPoints(endpoints(brokerId)) .setRack("rack1") } + val topic0Record = new TopicRecord().setName(topic0).setTopicId(topicIds.get(topic0)) + val topic1Record = new TopicRecord().setName(topic1).setTopicId(topicIds.get(topic1)) + val partitionStates = Seq( - new UpdateMetadataPartitionState() - .setTopicName(topic0) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) + new PartitionRecord() + .setTopicId(topicIds.get(topic0)) + .setPartitionId(0) .setLeader(0) .setLeaderEpoch(0) .setIsr(asList(0, 1, 3)) - .setZkVersion(zkVersion) .setReplicas(asList(0, 1, 3)), - new UpdateMetadataPartitionState() - .setTopicName(topic0) - .setPartitionIndex(1) - .setControllerEpoch(controllerEpoch) + new PartitionRecord() + .setTopicId(topicIds.get(topic0)) + .setPartitionId(1) .setLeader(1) .setLeaderEpoch(1) .setIsr(asList(1, 0)) - .setZkVersion(zkVersion) .setReplicas(asList(1, 2, 0, 4)), - new UpdateMetadataPartitionState() - .setTopicName(topic1) - .setPartitionIndex(0) - 
.setControllerEpoch(controllerEpoch) + new PartitionRecord() + .setTopicId(topicIds.get(topic1)) + .setPartitionId(0) .setLeader(2) .setLeaderEpoch(2) .setIsr(asList(2, 1)) - .setZkVersion(zkVersion) .setReplicas(asList(2, 1, 3))) - - val topicIds = new util.HashMap[String, Uuid]() - topicIds.put(topic0, Uuid.randomUuid()) - topicIds.put(topic1, Uuid.randomUuid()) - - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - partitionStates.asJava, brokers.asJava, topicIds).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + MetadataCacheTest.updateCache(cache, brokers ++ Seq(topic0Record, topic1Record) ++ partitionStates) for (securityProtocol <- Seq(SecurityProtocol.PLAINTEXT, SecurityProtocol.SSL)) { val listenerName = ListenerName.forSecurityProtocol(securityProtocol) @@ -230,14 +152,14 @@ class MetadataCacheTest { assertEquals(topic, topicMetadata.name) assertEquals(topicIds.get(topic), topicMetadata.topicId()) - val topicPartitionStates = partitionStates.filter { ps => ps.topicName == topic } + val topicPartitionStates = partitionStates.filter { ps => ps.topicId == topicIds.get(topic) } val partitionMetadatas = topicMetadata.partitions.asScala.sortBy(_.partitionIndex) assertEquals(topicPartitionStates.size, partitionMetadatas.size, s"Unexpected partition count for topic $topic") partitionMetadatas.zipWithIndex.foreach { case (partitionMetadata, partitionId) => assertEquals(Errors.NONE.code, partitionMetadata.errorCode) assertEquals(partitionId, partitionMetadata.partitionIndex) - val partitionState = topicPartitionStates.find(_.partitionIndex == partitionId).getOrElse( + val partitionState = topicPartitionStates.find(_.partitionId == partitionId).getOrElse( fail(s"Unable to find partition state for partition $partitionId")) assertEquals(partitionState.leader, partitionMetadata.leaderId) assertEquals(partitionState.leaderEpoch, partitionMetadata.leaderEpoch) @@ -257,18 +179,20 @@ class MetadataCacheTest { def getTopicMetadataPartitionLeaderNotAvailable(cache: MetadataCache): Unit = { val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - val brokers = Seq(new UpdateMetadataBroker() - .setId(0) - .setEndpoints(Seq(new UpdateMetadataEndpoint() + val brokers = Seq(new RegisterBrokerRecord() + .setBrokerId(0) + .setFenced(false) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint() .setHost("foo") .setPort(9092) .setSecurityProtocol(securityProtocol.id) - .setListener(listenerName.value)).asJava)) - val metadataCacheBrokerId = 0 + .setName(listenerName.value) + ).iterator.asJava))) + // leader is not available. expect LEADER_NOT_AVAILABLE for any metadata version. - verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, metadataCacheBrokerId, brokers, listenerName, + verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, brokers, listenerName, leader = 1, Errors.LEADER_NOT_AVAILABLE, errorUnavailableListeners = false) - verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, metadataCacheBrokerId, brokers, listenerName, + verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, brokers, listenerName, leader = 1, Errors.LEADER_NOT_AVAILABLE, errorUnavailableListeners = true) } @@ -279,66 +203,66 @@ class MetadataCacheTest { // return LEADER_NOT_AVAILABLE or LISTENER_NOT_FOUND errors for old and new versions respectively. 
val plaintextListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT) val sslListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.SSL) - val broker0Endpoints = Seq( - new UpdateMetadataEndpoint() + val broker0Endpoints = new BrokerEndpointCollection(Seq( + new BrokerEndpoint() .setHost("host0") .setPort(9092) .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListenerName.value), - new UpdateMetadataEndpoint() + .setName(plaintextListenerName.value), + new BrokerEndpoint() .setHost("host0") .setPort(9093) .setSecurityProtocol(SecurityProtocol.SSL.id) - .setListener(sslListenerName.value)) - val broker1Endpoints = Seq(new UpdateMetadataEndpoint() - .setHost("host1") - .setPort(9092) - .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) - .setListener(plaintextListenerName.value)) + .setName(sslListenerName.value) + ).iterator.asJava) + + val broker1Endpoints = new BrokerEndpointCollection(Seq( + new BrokerEndpoint() + .setHost("host1") + .setPort(9092) + .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + .setName(plaintextListenerName.value) + ).iterator.asJava) + val brokers = Seq( - new UpdateMetadataBroker() - .setId(0) - .setEndpoints(broker0Endpoints.asJava), - new UpdateMetadataBroker() - .setId(1) - .setEndpoints(broker1Endpoints.asJava)) - val metadataCacheBrokerId = 0 + new RegisterBrokerRecord() + .setBrokerId(0) + .setFenced(false) + .setEndPoints(broker0Endpoints), + new RegisterBrokerRecord() + .setBrokerId(1) + .setFenced(false) + .setEndPoints(broker1Endpoints)) + // leader available in cache but listener name not present. expect LISTENER_NOT_FOUND error for new metadata version - verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, metadataCacheBrokerId, brokers, sslListenerName, + verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, brokers, sslListenerName, leader = 1, Errors.LISTENER_NOT_FOUND, errorUnavailableListeners = true) // leader available in cache but listener name not present. 
expect LEADER_NOT_AVAILABLE error for old metadata version - verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, metadataCacheBrokerId, brokers, sslListenerName, + verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache, brokers, sslListenerName, leader = 1, Errors.LEADER_NOT_AVAILABLE, errorUnavailableListeners = false) } private def verifyTopicMetadataPartitionLeaderOrEndpointNotAvailable(cache: MetadataCache, - metadataCacheBrokerId: Int, - brokers: Seq[UpdateMetadataBroker], + brokers: Seq[RegisterBrokerRecord], listenerName: ListenerName, leader: Int, expectedError: Errors, errorUnavailableListeners: Boolean): Unit = { val topic = "topic" - - val zkVersion = 3 - val controllerId = 2 - val controllerEpoch = 1 + val topicId = Uuid.randomUuid() + val topicRecords = Seq(new TopicRecord().setName(topic).setTopicId(topicId)) val leaderEpoch = 1 - val partitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) - .setLeader(leader) - .setLeaderEpoch(leaderEpoch) - .setIsr(asList(0)) - .setZkVersion(zkVersion) - .setReplicas(asList(0))) - - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - partitionStates.asJava, brokers.asJava, util.Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + val partitionEpoch = 3 + val partitionStates = Seq(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) + .setPartitionEpoch(partitionEpoch) + .setLeader(leader) + .setLeaderEpoch(leaderEpoch) + .setIsr(asList(0)) + .setReplicas(asList(0))) + MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableListeners = errorUnavailableListeners) assertEquals(1, topicMetadatas.size) @@ -360,20 +284,26 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def getTopicMetadataReplicaNotAvailable(cache: MetadataCache): Unit = { val topic = "topic" + val topicId = Uuid.randomUuid() - val zkVersion = 3 - val controllerId = 2 - val controllerEpoch = 1 + val partitionEpoch = 3 val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - val brokers = Seq(new UpdateMetadataBroker() - .setId(0) - .setEndpoints(Seq(new UpdateMetadataEndpoint() + val endPoints = new BrokerEndpointCollection(Seq(new BrokerEndpoint() .setHost("foo") .setPort(9092) .setSecurityProtocol(securityProtocol.id) - .setListener(listenerName.value)).asJava)) + .setName(listenerName.value) + ).iterator.asJava) + val brokers = Seq(new RegisterBrokerRecord() + .setBrokerId(0) + .setFenced(false) + .setEndPoints(endPoints)) + + val topicRecords = Seq(new TopicRecord() + .setName(topic) + .setTopicId(topicId)) // replica 1 is not available val leader = 0 val leaderEpoch = 0 @@ -381,20 +311,15 @@ class MetadataCacheTest { val isr = asList[Integer](0) val partitionStates = Seq( - new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) + new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) .setLeader(leader) .setLeaderEpoch(leaderEpoch) .setIsr(isr) - .setZkVersion(zkVersion) + .setPartitionEpoch(partitionEpoch) .setReplicas(replicas)) - - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new 
UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - partitionStates.asJava, brokers.asJava, util.Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) // Validate errorUnavailableEndpoints = false val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false) @@ -433,20 +358,27 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def getTopicMetadataIsrNotAvailable(cache: MetadataCache): Unit = { val topic = "topic" + val topicId = Uuid.randomUuid() - val zkVersion = 3 - val controllerId = 2 - val controllerEpoch = 1 val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - val brokers = Seq(new UpdateMetadataBroker() - .setId(0) - .setRack("rack1") - .setEndpoints(Seq(new UpdateMetadataEndpoint() + + val endpoints = new BrokerEndpointCollection(Seq(new BrokerEndpoint() .setHost("foo") .setPort(9092) .setSecurityProtocol(securityProtocol.id) - .setListener(listenerName.value)).asJava)) + .setName(listenerName.value) + ).iterator.asJava) + + val brokers = Seq(new RegisterBrokerRecord() + .setBrokerId(0) + .setRack("rack1") + .setFenced(false) + .setEndPoints(endpoints)) + + val topicRecords = Seq(new TopicRecord() + .setName(topic) + .setTopicId(topicId)) // replica 1 is not available val leader = 0 @@ -454,20 +386,14 @@ class MetadataCacheTest { val replicas = asList[Integer](0) val isr = asList[Integer](0, 1) - val partitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) + val partitionStates = Seq(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) .setLeader(leader) .setLeaderEpoch(leaderEpoch) .setIsr(isr) - .setZkVersion(zkVersion) .setReplicas(replicas)) - - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - partitionStates.asJava, brokers.asJava, util.Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) // Validate errorUnavailableEndpoints = false val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false) @@ -506,33 +432,33 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def getTopicMetadataWithNonSupportedSecurityProtocol(cache: MetadataCache): Unit = { val topic = "topic" + val topicId = Uuid.randomUuid() val securityProtocol = SecurityProtocol.PLAINTEXT - val brokers = Seq(new UpdateMetadataBroker() - .setId(0) + + val brokers = new RegisterBrokerRecord() + .setBrokerId(0) .setRack("") - .setEndpoints(Seq(new UpdateMetadataEndpoint() + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint() .setHost("foo") .setPort(9092) .setSecurityProtocol(securityProtocol.id) - .setListener(ListenerName.forSecurityProtocol(securityProtocol).value)).asJava)) - val controllerEpoch = 1 + .setName(ListenerName.forSecurityProtocol(securityProtocol).value) + ).iterator.asJava)) + + val topicRecord = new TopicRecord().setName(topic).setTopicId(topicId) + val leader = 0 val leaderEpoch = 0 val replicas = asList[Integer](0) val isr = asList[Integer](0, 1) - val partitionStates = Seq(new UpdateMetadataPartitionState() 
- .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) + val partitionStates = Seq(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) .setLeader(leader) .setLeaderEpoch(leaderEpoch) .setIsr(isr) - .setZkVersion(3) .setReplicas(replicas)) - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, brokerEpoch, partitionStates.asJava, - brokers.asJava, util.Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + MetadataCacheTest.updateCache(cache, Seq(brokers, topicRecord) ++ partitionStates) val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL)) assertEquals(1, topicMetadata.size) @@ -544,37 +470,37 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def getAliveBrokersShouldNotBeMutatedByUpdateCache(cache: MetadataCache): Unit = { val topic = "topic" + val topicId = Uuid.randomUuid() + val topicRecords = Seq(new TopicRecord().setName(topic).setTopicId(topicId)) def updateCache(brokerIds: Seq[Int]): Unit = { val brokers = brokerIds.map { brokerId => val securityProtocol = SecurityProtocol.PLAINTEXT - new UpdateMetadataBroker() - .setId(brokerId) + new RegisterBrokerRecord() + .setBrokerId(brokerId) .setRack("") - .setEndpoints(Seq(new UpdateMetadataEndpoint() + .setFenced(false) + .setBrokerEpoch(brokerEpoch) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint() .setHost("foo") .setPort(9092) .setSecurityProtocol(securityProtocol.id) - .setListener(ListenerName.forSecurityProtocol(securityProtocol).value)).asJava) + .setName(ListenerName.forSecurityProtocol(securityProtocol).value) + ).iterator.asJava)) } - val controllerEpoch = 1 val leader = 0 val leaderEpoch = 0 val replicas = asList[Integer](0) val isr = asList[Integer](0, 1) - val partitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(0) - .setControllerEpoch(controllerEpoch) + val partitionStates = Seq(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(0) .setLeader(leader) .setLeaderEpoch(leaderEpoch) .setIsr(isr) - .setZkVersion(3) .setReplicas(replicas)) - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, brokerEpoch, partitionStates.asJava, - brokers.asJava, util.Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + + MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) } val initialBrokerIds = (0 to 2) @@ -585,65 +511,6 @@ class MetadataCacheTest { assertEquals(initialBrokerIds.toSet, aliveBrokersFromCache.map(_.id).toSet) } - // This test runs only for the ZK cache, because KRaft mode doesn't support offline - // replicas yet. TODO: implement KAFKA-13005. 
- @ParameterizedTest - @MethodSource(Array("zkCacheProvider")) - def testGetClusterMetadataWithOfflineReplicas(cache: MetadataCache): Unit = { - val topic = "topic" - val topicPartition = new TopicPartition(topic, 0) - val securityProtocol = SecurityProtocol.PLAINTEXT - val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - - val brokers = Seq( - new UpdateMetadataBroker() - .setId(0) - .setRack("r") - .setEndpoints(Seq(new UpdateMetadataEndpoint() - .setHost("foo") - .setPort(9092) - .setSecurityProtocol(securityProtocol.id) - .setListener(listenerName.value)).asJava), - new UpdateMetadataBroker() - .setId(1) - .setEndpoints(Seq.empty.asJava) - ) - val controllerEpoch = 1 - val leader = 1 - val leaderEpoch = 0 - val replicas = asList[Integer](0, 1) - val isr = asList[Integer](0, 1) - val offline = asList[Integer](1) - val partitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(topicPartition.partition) - .setControllerEpoch(controllerEpoch) - .setLeader(leader) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setZkVersion(3) - .setReplicas(replicas) - .setOfflineReplicas(offline)) - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, brokerEpoch, partitionStates.asJava, - brokers.asJava, Collections.emptyMap()).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) - - val expectedNode0 = new Node(0, "foo", 9092, "r") - val expectedNode1 = new Node(1, "", -1) - - val cluster = cache.getClusterMetadata("clusterId", listenerName) - assertEquals(expectedNode0, cluster.nodeById(0)) - assertNull(cluster.nodeById(1)) - assertEquals(expectedNode1, cluster.leaderFor(topicPartition)) - - val partitionInfo = cluster.partition(topicPartition) - assertEquals(expectedNode1, partitionInfo.leader) - assertEquals(Seq(expectedNode0, expectedNode1), partitionInfo.replicas.toSeq) - assertEquals(Seq(expectedNode0, expectedNode1), partitionInfo.inSyncReplicas.toSeq) - assertEquals(Seq(expectedNode1), partitionInfo.offlineReplicas.toSeq) - } - @Test def testIsBrokerFenced(): Unit = { val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) @@ -749,8 +616,6 @@ class MetadataCacheTest { def testGetTopicMetadataForDescribeTopicPartitionsResponse(): Unit = { val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) - val controllerId = 2 - val controllerEpoch = 1 val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) val topic0 = "test0" @@ -806,23 +671,32 @@ class MetadataCacheTest { .setPartitionEpoch(11) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), ) - + new BrokerEndpointCollection() val brokers = Seq( - new UpdateMetadataBroker().setId(0).setEndpoints(Seq(new UpdateMetadataEndpoint().setHost("foo0").setPort(9092).setSecurityProtocol(securityProtocol.id).setListener(listenerName.value)).asJava), - new UpdateMetadataBroker().setId(1).setEndpoints(Seq(new UpdateMetadataEndpoint().setHost("foo1").setPort(9093).setSecurityProtocol(securityProtocol.id).setListener(listenerName.value)).asJava), - new UpdateMetadataBroker().setId(2).setEndpoints(Seq(new UpdateMetadataEndpoint().setHost("foo2").setPort(9094).setSecurityProtocol(securityProtocol.id).setListener(listenerName.value)).asJava), - new UpdateMetadataBroker().setId(3).setEndpoints(Seq(new 
UpdateMetadataEndpoint().setHost("foo3").setPort(9095).setSecurityProtocol(securityProtocol.id).setListener(listenerName.value)).asJava), + new RegisterBrokerRecord().setBrokerEpoch(brokerEpoch).setFenced(false).setBrokerId(0) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint().setHost("foo0").setPort(9092) + .setSecurityProtocol(securityProtocol.id).setName(listenerName.value) + ).iterator.asJava)), + new RegisterBrokerRecord().setBrokerEpoch(brokerEpoch).setFenced(false).setBrokerId(1) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint().setHost("foo1").setPort(9093) + .setSecurityProtocol(securityProtocol.id).setName(listenerName.value) + ).iterator.asJava)), + new RegisterBrokerRecord().setBrokerEpoch(brokerEpoch).setFenced(false).setBrokerId(2) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint().setHost("foo2").setPort(9094) + .setSecurityProtocol(securityProtocol.id).setName(listenerName.value) + ).iterator.asJava)), + new RegisterBrokerRecord().setBrokerEpoch(brokerEpoch).setFenced(false).setBrokerId(3) + .setEndPoints(new BrokerEndpointCollection(Seq(new BrokerEndpoint().setHost("foo3").setPort(9095) + .setSecurityProtocol(securityProtocol.id).setName(listenerName.value) + ).iterator.asJava)), ) - val version = ApiKeys.UPDATE_METADATA.latestVersion - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - List[UpdateMetadataPartitionState]().asJava, brokers.asJava, topicIds).build() var recordSeq = Seq[ApiMessage]( new TopicRecord().setName(topic0).setTopicId(topicIds.get(topic0)), new TopicRecord().setName(topic1).setTopicId(topicIds.get(topic1)) ) recordSeq = recordSeq ++ partitionMap.values.toSeq - MetadataCacheTest.updateCache(metadataCache, updateMetadataRequest, recordSeq) + MetadataCacheTest.updateCache(metadataCache, brokers ++ recordSeq) def checkTopicMetadata(topic: String, partitionIds: Set[Int], partitions: mutable.Buffer[DescribeTopicPartitionsResponsePartition]): Unit = { partitions.foreach(partition => { @@ -912,178 +786,47 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def testGetPartitionInfo(cache: MetadataCache): Unit = { val topic = "topic" + val topicId = Uuid.randomUuid() val partitionIndex = 0 - val controllerEpoch = 1 val leader = 0 val leaderEpoch = 0 val isr = asList[Integer](2, 3, 0) - val zkVersion = 3 val replicas = asList[Integer](2, 3, 0, 1, 4) - val offlineReplicas = asList[Integer](0) - val partitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName(topic) - .setPartitionIndex(partitionIndex) - .setControllerEpoch(controllerEpoch) + val topicRecords = Seq(new TopicRecord().setName(topic).setTopicId(topicId)) + + val partitionStates = Seq(new PartitionRecord() + .setTopicId(topicId) + .setPartitionId(partitionIndex) .setLeader(leader) .setLeaderEpoch(leaderEpoch) .setIsr(isr) - .setZkVersion(zkVersion) - .setReplicas(replicas) - .setOfflineReplicas(offlineReplicas)) - - val version = ApiKeys.UPDATE_METADATA.latestVersion + .setReplicas(replicas)) - val controllerId = 2 val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - val brokers = Seq(new UpdateMetadataBroker() - .setId(0) + val brokers = Seq(new RegisterBrokerRecord() + .setBrokerId(0) + .setBrokerEpoch(brokerEpoch) .setRack("rack1") - .setEndpoints(Seq(new UpdateMetadataEndpoint() - .setHost("foo") - .setPort(9092) - .setSecurityProtocol(securityProtocol.id) - 
.setListener(listenerName.value)).asJava)) - val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch, brokerEpoch, - partitionStates.asJava, brokers.asJava, util.Collections.emptyMap(), false, AbstractControlRequest.Type.UNKNOWN).build() - MetadataCacheTest.updateCache(cache, updateMetadataRequest) + .setEndPoints(new BrokerEndpointCollection( + Seq(new BrokerEndpoint() + .setHost("foo") + .setPort(9092) + .setSecurityProtocol(securityProtocol.id) + .setName(listenerName.value) + ).iterator.asJava))) + + MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) val partitionState = cache.getPartitionInfo(topic, partitionIndex).get assertEquals(topic, partitionState.topicName()) assertEquals(partitionIndex, partitionState.partitionIndex()) - if (cache.isInstanceOf[ZkMetadataCache]) { - assertEquals(controllerEpoch, partitionState.controllerEpoch()) - } else { - assertEquals(-1, partitionState.controllerEpoch()) - } + assertEquals(-1, partitionState.controllerEpoch()) assertEquals(leader, partitionState.leader()) assertEquals(leaderEpoch, partitionState.leaderEpoch()) assertEquals(isr, partitionState.isr()) - assertEquals(zkVersion, partitionState.zkVersion()) assertEquals(replicas, partitionState.replicas()) - if (cache.isInstanceOf[ZkMetadataCache]) { - assertEquals(offlineReplicas, partitionState.offlineReplicas()) - } - } - - def setupInitialAndFullMetadata(): ( - Map[String, Uuid], mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]], - Map[String, Uuid], Seq[UpdateMetadataPartitionState] - ) = { - def addTopic( - name: String, - partitions: Int, - topicStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]] - ): Unit = { - val partitionMap = mutable.LongMap.empty[UpdateMetadataPartitionState] - for (i <- 0 until partitions) { - partitionMap.put(i, new UpdateMetadataPartitionState() - .setTopicName(name) - .setPartitionIndex(i) - .setControllerEpoch(2) - .setLeader(0) - .setLeaderEpoch(10) - .setIsr(asList(0, 1)) - .setZkVersion(10) - .setReplicas(asList(0, 1, 2))) - } - topicStates.put(name, partitionMap) - } - - val initialTopicStates = mutable.AnyRefMap.empty[String, mutable.LongMap[UpdateMetadataPartitionState]] - addTopic("test-topic-1", 3, initialTopicStates) - addTopic("test-topic-2", 3, initialTopicStates) - - val initialTopicIds = Map( - "test-topic-1" -> Uuid.fromString("IQ2F1tpCRoSbjfq4zBJwpg"), - "test-topic-2" -> Uuid.fromString("4N8_J-q7SdWHPFkos275pQ") - ) - - val newTopicIds = Map( - "different-topic" -> Uuid.fromString("DraFMNOJQOC5maTb1vtZ8Q") - ) - - val newPartitionStates = Seq(new UpdateMetadataPartitionState() - .setTopicName("different-topic") - .setPartitionIndex(0) - .setControllerEpoch(42) - .setLeader(0) - .setLeaderEpoch(10) - .setIsr(asList[Integer](0, 1, 2)) - .setZkVersion(1) - .setReplicas(asList[Integer](0, 1, 2))) - - (initialTopicIds, initialTopicStates, newTopicIds, newPartitionStates) - } - - /** - * Verify the behavior of ZkMetadataCache when handling "Full" UpdateMetadataRequest - */ - @Test - def testHandleFullUpdateMetadataRequestInZkMigration(): Unit = { - val (initialTopicIds, initialTopicStates, newTopicIds, newPartitionStates) = setupInitialAndFullMetadata() - - val updateMetadataRequestBuilder = () => new UpdateMetadataRequest.Builder(8, 1, 42, brokerEpoch, - newPartitionStates.asJava, Seq.empty.asJava, newTopicIds.asJava, true, AbstractControlRequest.Type.FULL).build() - - def verifyMetadataCache( - updateMetadataRequest: 
UpdateMetadataRequest, - zkMigrationEnabled: Boolean = true - )( - verifier: ZkMetadataCache => Unit - ): Unit = { - val cache = MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting(), zkMigrationEnabled = zkMigrationEnabled) - cache.updateMetadata(1, new UpdateMetadataRequest.Builder(8, 1, 42, brokerEpoch, - initialTopicStates.flatMap(_._2.values).toList.asJava, Seq.empty.asJava, initialTopicIds.asJava).build()) - cache.updateMetadata(1, updateMetadataRequest) - verifier.apply(cache) - } - - // KRaft=false Type=FULL, migration disabled - var updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setIsKRaftController(true) - updateMetadataRequest.data().setType(AbstractControlRequest.Type.FULL.toByte) - verifyMetadataCache(updateMetadataRequest, zkMigrationEnabled = false) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=FULL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(1, cache.getAllTopics().size) - assertFalse(cache.contains("test-topic-1")) - assertFalse(cache.contains("test-topic-1")) - } - - // KRaft=false Type=FULL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setIsKRaftController(false) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=INCREMENTAL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setType(AbstractControlRequest.Type.INCREMENTAL.toByte) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=UNKNOWN - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setType(AbstractControlRequest.Type.UNKNOWN.toByte) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } } @Test @@ -1133,406 +876,88 @@ class MetadataCacheTest { val fooTopicName: String = "foo" val fooTopicId: Uuid = Uuid.fromString("HDceyWK0Ry-j3XLR8DvvGA") - val oldFooPart0 = new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(0). - setControllerEpoch(oldRequestControllerEpoch). + val oldFooPart0 = new PartitionRecord(). + setTopicId(fooTopicId). + setPartitionId(0). setLeader(4). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val newFooPart0 = new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(0). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + val newFooPart0 = new PartitionRecord(). + setTopicId(fooTopicId). + setPartitionId(0). setLeader(5). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val oldFooPart1 = new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(1). 
- setControllerEpoch(oldRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + val oldFooPart1 = new PartitionRecord(). + setTopicId(fooTopicId). + setPartitionId(1). setLeader(5). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val newFooPart1 = new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(1). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + val newFooPart1 = new PartitionRecord(). + setTopicId(fooTopicId). + setPartitionId(1). setLeader(5). setIsr(java.util.Arrays.asList(4, 5)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - + setReplicas(java.util.Arrays.asList(4, 5, 6)) val barTopicName: String = "bar" val barTopicId: Uuid = Uuid.fromString("97FBD1g4QyyNNZNY94bkRA") val recreatedBarTopicId: Uuid = Uuid.fromString("lZokxuaPRty7c5P4dNdTYA") - val oldBarPart0 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(0). - setControllerEpoch(oldRequestControllerEpoch). + val oldBarPart0 = new PartitionRecord(). + setTopicId(fooTopicId). + setPartitionId(0). setLeader(7). setIsr(java.util.Arrays.asList(7, 8)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - val newBarPart0 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(0). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(7, 8, 9)) + val newBarPart0 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(0). setLeader(7). setIsr(java.util.Arrays.asList(7, 8)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - val deletedBarPart0 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(0). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(7, 8, 9)) + val deletedBarPart0 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(0). setLeader(-2). setIsr(java.util.Arrays.asList(7, 8)). - setZkVersion(0). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - val oldBarPart1 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(1). - setControllerEpoch(oldRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(7, 8, 9)) + val oldBarPart1 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(1). setLeader(5). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val newBarPart1 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(1). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + val newBarPart1 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(1). setLeader(5). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val deletedBarPart1 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(1). 
- setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + val deletedBarPart1 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(1). setLeader(-2). setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(0). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()) - val oldBarPart2 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(2). - setControllerEpoch(oldRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(4, 5, 6)) + + val oldBarPart2 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(2). setLeader(9). setIsr(java.util.Arrays.asList(7, 8, 9)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - val newBarPart2 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(2). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(7, 8, 9)) + + val newBarPart2 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(2). setLeader(8). setIsr(java.util.Arrays.asList(7, 8)). - setZkVersion(789). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - val deletedBarPart2 = new UpdateMetadataPartitionState(). - setTopicName(barTopicName). - setPartitionIndex(2). - setControllerEpoch(newRequestControllerEpoch). + setReplicas(java.util.Arrays.asList(7, 8, 9)) + + val deletedBarPart2 = new PartitionRecord(). + setTopicId(barTopicId). + setPartitionId(2). setLeader(-2). setIsr(java.util.Arrays.asList(7, 8, 9)). - setZkVersion(0). - setReplicas(java.util.Arrays.asList(7, 8, 9)). - setOfflineReplicas(java.util.Collections.emptyList()) - - @Test - def testCreateDeletionEntries(): Unit = { - assertEquals(new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq( - new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(0). - setControllerEpoch(newRequestControllerEpoch). - setLeader(-2). - setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(0). - setReplicas(java.util.Arrays.asList(4, 5, 6)). - setOfflineReplicas(java.util.Collections.emptyList()), - new UpdateMetadataPartitionState(). - setTopicName(fooTopicName). - setPartitionIndex(1). - setControllerEpoch(newRequestControllerEpoch). - setLeader(-2). - setIsr(java.util.Arrays.asList(4, 5, 6)). - setZkVersion(0). - setReplicas(java.util.Arrays.asList(4, 5, 6)). 
- setOfflineReplicas(java.util.Collections.emptyList()) - ).asJava), - ZkMetadataCache.createDeletionEntries(fooTopicName, - fooTopicId, - Seq(oldFooPart0, oldFooPart1), - newRequestControllerEpoch)) - } - - val prevSnapshot: MetadataSnapshot = { - val parts = new mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]] - val fooParts = new mutable.LongMap[UpdateMetadataPartitionState] - fooParts.put(0L, oldFooPart0) - fooParts.put(1L, oldFooPart1) - parts.put(fooTopicName, fooParts) - val barParts = new mutable.LongMap[UpdateMetadataPartitionState] - barParts.put(0L, oldBarPart0) - barParts.put(1L, oldBarPart1) - barParts.put(2L, oldBarPart2) - parts.put(barTopicName, barParts) - MetadataSnapshot(parts, - Map[String, Uuid]( - fooTopicName -> fooTopicId, - barTopicName -> barTopicId - ), - Some(KRaftCachedControllerId(1)), - mutable.LongMap[Broker](), - mutable.LongMap[collection.Map[ListenerName, Node]]() - ) - } - - def transformKRaftControllerFullMetadataRequest( - currentMetadata: MetadataSnapshot, - requestControllerEpoch: Int, - requestTopicStates: util.List[UpdateMetadataTopicState], - ): (util.List[UpdateMetadataTopicState], util.List[String]) = { - - val logs = new util.ArrayList[String] - val results = ZkMetadataCache.transformKRaftControllerFullMetadataRequest( - currentMetadata, requestControllerEpoch, requestTopicStates, log => logs.add(log)) - (results, logs) - } - - @Test - def transformUMRWithNoChanges(): Unit = { - assertEquals((Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava) - ).asJava, - List[String]().asJava), - transformKRaftControllerFullMetadataRequest(prevSnapshot, - newRequestControllerEpoch, - Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava) - ).asJava - ) - ) - } - - @Test - def transformUMRWithMissingBar(): Unit = { - assertEquals((Seq( - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(deletedBarPart0, deletedBarPart1, deletedBarPart2).asJava), - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - ).asJava, - List[String]( - "Removing topic bar with ID 97FBD1g4QyyNNZNY94bkRA from the metadata cache since the full UMR did not include it.", - ).asJava), - transformKRaftControllerFullMetadataRequest(prevSnapshot, - newRequestControllerEpoch, - Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - ).asJava - ) - ) - } - - @Test - def transformUMRWithRecreatedBar(): Unit = { - assertEquals((Seq( - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(deletedBarPart0, deletedBarPart1, deletedBarPart2).asJava), - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). 
- setTopicName(barTopicName). - setTopicId(recreatedBarTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava), - ).asJava, - List[String]( - "Removing topic bar with ID 97FBD1g4QyyNNZNY94bkRA from the metadata cache since the full UMR did not include it.", - ).asJava), - transformKRaftControllerFullMetadataRequest(prevSnapshot, - newRequestControllerEpoch, - Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(recreatedBarTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava) - ).asJava - ) - ) - } - - val buggySnapshot: MetadataSnapshot = new MetadataSnapshot( - new mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]], - prevSnapshot.topicIds, - prevSnapshot.controllerId, - prevSnapshot.aliveBrokers, - prevSnapshot.aliveNodes) - - @Test - def transformUMRWithBuggySnapshot(): Unit = { - assertEquals((Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava), - ).asJava, - List[String]( - "Error: topic foo appeared in currentMetadata.topicNames, but not in currentMetadata.partitionStates.", - "Error: topic bar appeared in currentMetadata.topicNames, but not in currentMetadata.partitionStates.", - ).asJava), - transformKRaftControllerFullMetadataRequest(buggySnapshot, - newRequestControllerEpoch, - Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(newBarPart0, newBarPart1, newBarPart2).asJava) - ).asJava - ) - ) - } - - @Test - def testUpdateZkMetadataCacheViaHybridUMR(): Unit = { - val cache = MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting()) - cache.updateMetadata(123, createFullUMR(Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(oldFooPart0, oldFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(oldBarPart0, oldBarPart1).asJava), - ))) - checkCacheContents(cache, Map( - fooTopicId -> Seq(oldFooPart0, oldFooPart1), - barTopicId -> Seq(oldBarPart0, oldBarPart1), - )) - } - - @Test - def testUpdateZkMetadataCacheWithRecreatedTopic(): Unit = { - val cache = MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting()) - cache.updateMetadata(123, createFullUMR(Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(oldFooPart0, oldFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). - setPartitionStates(Seq(oldBarPart0, oldBarPart1).asJava), - ))) - cache.updateMetadata(124, createFullUMR(Seq( - new UpdateMetadataTopicState(). - setTopicName(fooTopicName). - setTopicId(fooTopicId). - setPartitionStates(Seq(newFooPart0, newFooPart1).asJava), - new UpdateMetadataTopicState(). - setTopicName(barTopicName). - setTopicId(barTopicId). 
- setPartitionStates(Seq(oldBarPart0, oldBarPart1).asJava), - ))) - checkCacheContents(cache, Map( - fooTopicId -> Seq(newFooPart0, newFooPart1), - barTopicId -> Seq(oldBarPart0, oldBarPart1), - )) - } - - def createFullUMR( - topicStates: Seq[UpdateMetadataTopicState] - ): UpdateMetadataRequest = { - val data = new UpdateMetadataRequestData(). - setControllerId(0). - setIsKRaftController(true). - setControllerEpoch(123). - setBrokerEpoch(456). - setTopicStates(topicStates.asJava) - new UpdateMetadataRequest(data, 8.toShort) - } - - def checkCacheContents( - cache: ZkMetadataCache, - expected: Map[Uuid, Iterable[UpdateMetadataPartitionState]], - ): Unit = { - val expectedTopics = new util.HashMap[String, Uuid] - val expectedIds = new util.HashMap[Uuid, String] - val expectedParts = new util.HashMap[String, util.Set[TopicPartition]] - expected.foreach { - case (id, states) => - states.foreach { - case state => - expectedTopics.put(state.topicName(), id) - expectedIds.put(id, state.topicName()) - expectedParts.computeIfAbsent(state.topicName(), - _ => new util.HashSet[TopicPartition]()). - add(new TopicPartition(state.topicName(), state.partitionIndex())) - } - } - assertEquals(expectedTopics, cache.topicNamesToIds()) - assertEquals(expectedIds, cache.topicIdsToNames()) - cache.getAllTopics().foreach(topic => - assertEquals(expectedParts.getOrDefault(topic, Collections.emptySet()), - cache.getTopicPartitions(topic).asJava) - ) - } + setReplicas(java.util.Arrays.asList(7, 8, 9)) } diff --git a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala index 6e05b165a87a9..ddfa9b42d4c0e 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala @@ -20,7 +20,6 @@ package kafka.server import java.util.Optional import kafka.utils.TestUtils import org.apache.kafka.common.Uuid -import org.apache.kafka.common.errors.UnsupportedVersionException import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} @@ -41,52 +40,17 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { doSetup(testInfo, createOffsetsTopic = false) } - @ParameterizedTest - @ValueSource(strings = Array("kraft")) - def testClusterIdWithRequestVersion1(quorum: String): Unit = { - val v1MetadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) - val v1ClusterId = v1MetadataResponse.clusterId - assertNull(v1ClusterId, s"v1 clusterId should be null") - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testClusterIdIsValid(quorum: String): Unit = { - val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(2.toShort)) + val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) isValidClusterId(metadataResponse.clusterId) } - /** - * This test only runs in ZK mode because in KRaft mode, the controller ID visible to - * the client is randomized. 
- */ - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testControllerId(quorum: String): Unit = { - val controllerServer = servers.find(_.kafkaController.isActive).get - val controllerId = controllerServer.config.brokerId - val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) - - assertEquals(controllerId, - metadataResponse.controller.id, "Controller id should match the active controller") - - // Fail over the controller - controllerServer.shutdown() - controllerServer.startup() - - val controllerServer2 = servers.find(_.kafkaController.isActive).get - val controllerId2 = controllerServer2.config.brokerId - assertNotEquals(controllerId, controllerId2, "Controller id should switch to a new broker") - TestUtils.waitUntilTrue(() => { - val metadataResponse2 = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) - metadataResponse2.controller != null && controllerServer2.dataPlaneRequestProcessor.brokerId == metadataResponse2.controller.id - }, "Controller id should match the active controller after failover", 5000) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testRack(quorum: String): Unit = { - val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) + val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) // Validate rack matches what's set in generateConfigs() above metadataResponse.brokers.forEach { broker => assertEquals(s"rack/${broker.id}", broker.rack, "Rack information should match config") @@ -102,7 +66,7 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { createTopic(internalTopic, 3, 2) createTopic(notInternalTopic, 3, 2) - val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) + val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) assertTrue(metadataResponse.errors.isEmpty, "Response should have no errors") val topicMetadata = metadataResponse.topicMetadata.asScala @@ -122,9 +86,7 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { createTopic("t1", 3, 2) createTopic("t2", 3, 2) - // v0, Doesn't support a "no topics" request - // v1, Empty list represents "no topics" - val metadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List[String]().asJava, true, 1.toShort).build) + val metadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List[String]().asJava, true, 4.toShort).build) assertTrue(metadataResponse.errors.isEmpty, "Response should have no errors") assertTrue(metadataResponse.topicMetadata.isEmpty, "Response should have no topics") } @@ -134,30 +96,17 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { def testAutoTopicCreation(quorum: String): Unit = { val topic1 = "t1" val topic2 = "t2" - val topic3 = "t3" - val topic4 = "t4" - val topic5 = "t5" + val topic3 = "t4" + val topic4 = "t5" createTopic(topic1) val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build()) assertNull(response1.errors.get(topic1)) checkAutoCreatedTopic(topic2, response1) - // The default behavior in old versions of the metadata API is to allow topic creation, so - // protocol downgrades should happen gracefully when auto-creation is explicitly requested. 
- val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic3).asJava, true).build(1)) - checkAutoCreatedTopic(topic3, response2) - - // V3 doesn't support a configurable allowAutoTopicCreation, so disabling auto-creation is not supported - assertThrows(classOf[UnsupportedVersionException], () => sendMetadataRequest(new MetadataRequest(requestData(List(topic4), allowAutoTopicCreation = false), 3.toShort))) - - // V4 and higher support a configurable allowAutoTopicCreation - val response3 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic4, topic5).asJava, false, 4.toShort).build) - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic4)) - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic5)) - if (!isKRaftTest()) { - assertEquals(None, zkClient.getTopicPartitionCount(topic5)) - } + val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic3, topic4).asJava, false, 4.toShort).build) + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response2.errors.get(topic3)) + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response2.errors.get(topic4)) } @ParameterizedTest @@ -171,46 +120,11 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1).asJava, true).build) assertEquals(1, response1.topicMetadata.size) val topicMetadata = response1.topicMetadata.asScala.head - if (isKRaftTest()) { - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicMetadata.error) - } else { - assertEquals(Errors.INVALID_REPLICATION_FACTOR, topicMetadata.error) - } + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicMetadata.error) assertEquals(topic1, topicMetadata.topic) assertEquals(0, topicMetadata.partitionMetadata.size) } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testAutoCreateOfCollidingTopics(quorum: String): Unit = { - val topic1 = "testAutoCreate.Topic" - val topic2 = "testAutoCreate_Topic" - val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build) - assertEquals(2, response1.topicMetadata.size) - - val responseMap = response1.topicMetadata.asScala.map(metadata => (metadata.topic(), metadata.error)).toMap - - assertEquals(Set(topic1, topic2), responseMap.keySet) - // The topic creation will be delayed, and the name collision error will be swallowed. 
- assertEquals(Set(Errors.LEADER_NOT_AVAILABLE, Errors.INVALID_TOPIC_EXCEPTION), responseMap.values.toSet) - - val topicCreated = responseMap.head._1 - TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topicCreated, 0) - TestUtils.waitForPartitionMetadata(brokers, topicCreated, 0) - - // retry the metadata for the first auto created topic - val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topicCreated).asJava, true).build) - val topicMetadata1 = response2.topicMetadata.asScala.head - assertEquals(Errors.NONE, topicMetadata1.error) - assertEquals(Seq(Errors.NONE), topicMetadata1.partitionMetadata.asScala.map(_.error)) - assertEquals(1, topicMetadata1.partitionMetadata.size) - val partitionMetadata = topicMetadata1.partitionMetadata.asScala.head - assertEquals(0, partitionMetadata.partition) - assertEquals(2, partitionMetadata.replicaIds.size) - assertTrue(partitionMetadata.leaderId.isPresent) - assertTrue(partitionMetadata.leaderId.get >= 0) - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def testAllTopicsRequest(quorum: String): Unit = { @@ -218,15 +132,10 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { createTopic("t1", 3, 2) createTopic("t2", 3, 2) - // v0, Empty list represents all topics - val metadataResponseV0 = sendMetadataRequest(new MetadataRequest(requestData(List(), allowAutoTopicCreation = true), 0.toShort)) - assertTrue(metadataResponseV0.errors.isEmpty, "V0 Response should have no errors") - assertEquals(2, metadataResponseV0.topicMetadata.size(), "V0 Response should have 2 (all) topics") - - // v1, Null represents all topics - val metadataResponseV1 = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) - assertTrue(metadataResponseV1.errors.isEmpty, "V1 Response should have no errors") - assertEquals(2, metadataResponseV1.topicMetadata.size(), "V1 Response should have 2 (all) topics") + // v4, Null represents all topics + val metadataResponseV4 = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) + assertTrue(metadataResponseV4.errors.isEmpty, "V4 Response should have no errors") + assertEquals(2, metadataResponseV4.topicMetadata.size(), "V4 Response should have 2 (all) topics") } @ParameterizedTest @@ -308,25 +217,15 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { !response.brokers.asScala.exists(_.id == downNode.dataPlaneRequestProcessor.brokerId) }, "Replica was not found down", 50000) - // Validate version 0 still filters unavailable replicas and contains error - val v0MetadataResponse = sendMetadataRequest(new MetadataRequest(requestData(List(replicaDownTopic), allowAutoTopicCreation = true), 0.toShort)) - val v0BrokerIds = v0MetadataResponse.brokers().asScala.map(_.id).toSeq - assertTrue(v0MetadataResponse.errors.isEmpty, "Response should have no errors") - assertFalse(v0BrokerIds.contains(downNode.config.brokerId), s"The downed broker should not be in the brokers list") - assertTrue(v0MetadataResponse.topicMetadata.size == 1, "Response should have one topic") - val v0PartitionMetadata = v0MetadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head - assertTrue(v0PartitionMetadata.error == Errors.REPLICA_NOT_AVAILABLE, "PartitionMetadata should have an error") - assertTrue(v0PartitionMetadata.replicaIds.size == replicaCount - 1, s"Response should have ${replicaCount - 1} replicas") - - // Validate version 1 returns unavailable replicas with no error - val v1MetadataResponse = sendMetadataRequest(new 
MetadataRequest.Builder(List(replicaDownTopic).asJava, true).build(1)) - val v1BrokerIds = v1MetadataResponse.brokers().asScala.map(_.id).toSeq - assertTrue(v1MetadataResponse.errors.isEmpty, "Response should have no errors") - assertFalse(v1BrokerIds.contains(downNode.config.brokerId), s"The downed broker should not be in the brokers list") - assertEquals(1, v1MetadataResponse.topicMetadata.size, "Response should have one topic") - val v1PartitionMetadata = v1MetadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head - assertEquals(Errors.NONE, v1PartitionMetadata.error, "PartitionMetadata should have no errors") - assertEquals(replicaCount, v1PartitionMetadata.replicaIds.size, s"Response should have $replicaCount replicas") + // Validate version 4 returns unavailable replicas with no error + val v4MetadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List(replicaDownTopic).asJava, true).build(4)) + val v4BrokerIds = v4MetadataResponse.brokers().asScala.map(_.id).toSeq + assertTrue(v4MetadataResponse.errors.isEmpty, "Response should have no errors") + assertFalse(v4BrokerIds.contains(downNode.config.brokerId), s"The downed broker should not be in the brokers list") + assertEquals(1, v4MetadataResponse.topicMetadata.size, "Response should have one topic") + val v4PartitionMetadata = v4MetadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head + assertEquals(Errors.NONE, v4PartitionMetadata.error, "PartitionMetadata should have no errors") + assertEquals(replicaCount, v4PartitionMetadata.replicaIds.size, s"Response should have $replicaCount replicas") } @ParameterizedTest @@ -396,11 +295,7 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { } } - val brokerToShutdown = if (isKRaftTest()) { - brokers.last - } else { - servers.filterNot(_.kafkaController.isActive).last - } + val brokerToShutdown = brokers.last brokerToShutdown.shutdown() brokerToShutdown.awaitShutdown() checkMetadata(brokers, brokers.size - 1) diff --git a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala index 304fd0de97071..5d50de0409535 100644 --- a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala +++ b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala @@ -107,7 +107,6 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, lastOffset, lastEpoch, maxTimestamp, - shallowOffsetOfMaxTimestamp, Time.SYSTEM.milliseconds(), state.logStartOffset, RecordValidationStats.EMPTY, @@ -163,6 +162,4 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, assertEquals(expectedEpoch, fetchState(partition).flatMap(_.lastFetchedEpoch)) } } - - override protected val isOffsetForLeaderEpochSupported: Boolean = true } diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala index bb237032c9c73..6b0c5c6d0a5bb 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala @@ -78,7 +78,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator val (memberId, memberEpoch) = joinConsumerGroup("grp", useNewProtocol) // Start from version 1 because version 0 goes to ZK. 
- for (version <- 1 to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + for (version <- ApiKeys.OFFSET_COMMIT.oldestVersion to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { // Commit offset. commitOffset( groupId = "grp", diff --git a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala index f4718e32db064..fc06a9eeeb759 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala @@ -22,7 +22,7 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection -import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.junit.jupiter.api.Assertions._ @@ -40,8 +40,7 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { val partition = new TopicPartition(topic, 0) val epochs = offsetForLeaderTopicCollectionFor(partition, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH) - val request = OffsetsForLeaderEpochRequest.Builder.forFollower( - ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, epochs, 1).build() + val request = OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, 1).build() // Unknown topic val randomBrokerId = brokers.head.config.brokerId @@ -69,8 +68,7 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { def assertResponseErrorForEpoch(error: Errors, brokerId: Int, currentLeaderEpoch: Optional[Integer]): Unit = { val epochs = offsetForLeaderTopicCollectionFor(topicPartition, 0, currentLeaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)) - val request = OffsetsForLeaderEpochRequest.Builder.forFollower( - ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, epochs, 1).build() + val request = OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, 1).build() assertResponseError(error, brokerId, request) } diff --git a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala index cb781589d35c0..64111f1487513 100644 --- a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala @@ -20,11 +20,12 @@ package kafka.server import java.nio.ByteBuffer import java.util.{Collections, Properties} import kafka.utils.TestUtils +import org.apache.kafka.clients.admin.{Admin, TopicDescription} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.message.ProduceRequestData -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.{ProduceRequest, ProduceResponse} import org.apache.kafka.server.metrics.KafkaYammerMetrics @@ -35,7 +36,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} import 
org.junit.jupiter.params.provider.ValueSource -import scala.annotation.nowarn +import java.util.concurrent.TimeUnit import scala.jdk.CollectionConverters._ /** @@ -53,17 +54,18 @@ class ProduceRequestTest extends BaseRequestTest { def sendAndCheck(memoryRecords: MemoryRecords, expectedOffset: Long): Unit = { val topicPartition = new TopicPartition("topic", partition) - val produceResponse = sendProduceRequest(leader, - ProduceRequest.forCurrentMagic(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName(topicPartition.topic()) - .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(topicPartition.partition()) - .setRecords(memoryRecords)))).iterator)) - .setAcks((-1).toShort) - .setTimeoutMs(3000) - .setTransactionalId(null)).build()) + val produceRequest = ProduceRequest.builder(new ProduceRequestData() + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( + new ProduceRequestData.TopicProduceData() + .setName(topicPartition.topic()) + .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() + .setIndex(topicPartition.partition()) + .setRecords(memoryRecords)))).iterator)) + .setAcks((-1).toShort) + .setTimeoutMs(3000) + .setTransactionalId(null)).build() + assertEquals(ApiKeys.PRODUCE.latestVersion(), produceRequest.version()) + val produceResponse = sendProduceRequest(leader, produceRequest) assertEquals(1, produceResponse.data.responses.size) val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) @@ -84,6 +86,23 @@ class ProduceRequestTest extends BaseRequestTest { new SimpleRecord(System.currentTimeMillis(), "key2".getBytes, "value2".getBytes)), 1) } + private def getPartitionToLeader( + admin: Admin, + topic: String + ): Map[Int, Int] = { + var topicDescription: TopicDescription = null + TestUtils.waitUntilTrue(() => { + val topicMap = admin. + describeTopics(java.util.Arrays.asList(topic)). 
+ allTopicNames().get(10, TimeUnit.MINUTES) + topicDescription = topicMap.get(topic) + topicDescription != null + }, "Timed out waiting to describe topic " + topic) + topicDescription.partitions().asScala.map(p => { + p.partition() -> p.leader().id() + }).toMap + } + @ParameterizedTest @MethodSource(Array("timestampConfigProvider")) def testProduceWithInvalidTimestamp(messageTimeStampConfig: String, recordTimestamp: Long): Unit = { @@ -91,7 +110,17 @@ class ProduceRequestTest extends BaseRequestTest { val partition = 0 val topicConfig = new Properties topicConfig.setProperty(messageTimeStampConfig, "1000") - val partitionToLeader = TestUtils.createTopic(zkClient, topic, 1, 1, servers, topicConfig) + val admin = createAdminClient() + TestUtils.createTopicWithAdmin( + admin = admin, + topic = topic, + brokers = brokers, + controllers = controllerServers, + numPartitions = 1, + replicationFactor = 1, + topicConfig = topicConfig + ) + val partitionToLeader = getPartitionToLeader(admin, topic) val leader = partitionToLeader(partition) def createRecords(magicValue: Byte, timestamp: Long, codec: Compression): MemoryRecords = { @@ -105,7 +134,7 @@ class ProduceRequestTest extends BaseRequestTest { val records = createRecords(RecordBatch.MAGIC_VALUE_V2, recordTimestamp, Compression.gzip().build()) val topicPartition = new TopicPartition("topic", partition) - val produceResponse = sendProduceRequest(leader, ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() .setName(topicPartition.topic()) @@ -138,7 +167,14 @@ class ProduceRequestTest extends BaseRequestTest { val partition = 0 // Create a single-partition topic and find a broker which is not the leader - val partitionToLeader = createTopic(topic) + val admin = createAdminClient() + TestUtils.createTopicWithAdmin( + admin = admin, + topic = topic, + brokers = brokers, + controllers = controllerServers + ) + val partitionToLeader = getPartitionToLeader(admin, topic) val leader = partitionToLeader(partition) val nonReplicaOpt = brokers.find(_.config.brokerId != leader) assertTrue(nonReplicaOpt.isDefined) @@ -147,7 +183,7 @@ class ProduceRequestTest extends BaseRequestTest { // Send the produce request to the non-replica val records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("key".getBytes, "value".getBytes)) val topicPartition = new TopicPartition("topic", partition) - val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() .setName(topicPartition.topic()) @@ -185,7 +221,7 @@ class ProduceRequestTest extends BaseRequestTest { val lz4ChecksumOffset = 6 memoryRecords.buffer.array.update(DefaultRecordBatch.RECORD_BATCH_OVERHEAD + lz4ChecksumOffset, 0) val topicPartition = new TopicPartition("topic", partition) - val produceResponse = sendProduceRequest(leader, ProduceRequest.forCurrentMagic(new ProduceRequestData() + val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() .setName(topicPartition.topic()) @@ 
-245,14 +281,6 @@ class ProduceRequestTest extends BaseRequestTest { assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse1.errorCode)) assertEquals(0, partitionProduceResponse1.baseOffset) assertEquals(-1, partitionProduceResponse1.logAppendTimeMs) - - // produce request with v3: returns Errors.UNSUPPORTED_COMPRESSION_TYPE. - val produceResponse2 = sendProduceRequest(leader, new ProduceRequest.Builder(3, 3, partitionRecords).buildUnsafe(3)) - val topicProduceResponse2 = produceResponse2.data.responses.asScala.head - val partitionProduceResponse2 = topicProduceResponse2.partitionResponses.asScala.head - val tp2 = new TopicPartition(topicProduceResponse2.name, partitionProduceResponse2.index) - assertEquals(topicPartition, tp2) - assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE, Errors.forCode(partitionProduceResponse2.errorCode)) } private def sendProduceRequest(leaderId: Int, request: ProduceRequest): ProduceResponse = { @@ -263,11 +291,9 @@ class ProduceRequestTest extends BaseRequestTest { object ProduceRequestTest { - @nowarn("cat=deprecation") // See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for deprecation details def timestampConfigProvider: java.util.stream.Stream[Arguments] = { val fiveMinutesInMs: Long = 5 * 60 * 60 * 1000L java.util.stream.Stream.of[Arguments]( - Arguments.of(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, Long.box(System.currentTimeMillis() - fiveMinutesInMs)), Arguments.of(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, Long.box(System.currentTimeMillis() - fiveMinutesInMs)), Arguments.of(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.box(System.currentTimeMillis() + fiveMinutesInMs)) ) diff --git a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala index 8f00f0aa1c1ba..9bf4d4d7e001f 100644 --- a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala +++ b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala @@ -43,7 +43,7 @@ class SimpleControllerNodeProvider extends ControllerNodeProvider { def saslMechanism: String = SaslConfigs.DEFAULT_SASL_MECHANISM override def getControllerInfo(): ControllerInformation = ControllerInformation(Option(node.get()), - listenerName, securityProtocol, saslMechanism, isZkController = false) + listenerName, securityProtocol, saslMechanism) } class RegistrationTestContext( diff --git a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala index 98436915dbfab..f60d0f0e3fd11 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala @@ -21,18 +21,16 @@ import kafka.log.{LogManager, UnifiedLog} import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.ReplicaAlterLogDirsThread.ReassignmentState -import kafka.server.metadata.ZkMetadataCache -import kafka.utils.{DelayedItem, TestUtils} +import kafka.utils.TestUtils import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset -import org.apache.kafka.common.message.UpdateMetadataRequestData import org.apache.kafka.common.protocol.{ApiKeys, Errors} import 
org.apache.kafka.common.record.MemoryRecords -import org.apache.kafka.common.requests.{FetchRequest, UpdateMetadataRequest} +import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.server.{BrokerFeatures, common} -import org.apache.kafka.server.common.{DirectoryEventHandler, MetadataVersion, OffsetAndEpoch} +import org.apache.kafka.server.common +import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -42,8 +40,8 @@ import org.mockito.ArgumentMatchers.{any, anyBoolean} import org.mockito.Mockito.{doNothing, mock, never, times, verify, verifyNoInteractions, verifyNoMoreInteractions, when} import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} -import java.util.{Collections, Optional, OptionalInt, OptionalLong} -import scala.collection.{Map, Seq} +import java.util.{Optional, OptionalInt, OptionalLong} +import scala.collection.Seq import scala.jdk.CollectionConverters._ class ReplicaAlterLogDirsThreadTest { @@ -51,23 +49,10 @@ class ReplicaAlterLogDirsThreadTest { private val t1p0 = new TopicPartition("topic1", 0) private val t1p1 = new TopicPartition("topic1", 1) private val topicId = Uuid.randomUuid() - private val topicIds = collection.immutable.Map("topic1" -> topicId) private val topicNames = collection.immutable.Map(topicId -> "topic1") private val tid1p0 = new TopicIdPartition(topicId, t1p0) private val failedPartitions = new FailedPartitions - - private val partitionStates = List(new UpdateMetadataRequestData.UpdateMetadataPartitionState() - .setTopicName("topic1") - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0)).asJava - - private val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion(), - 0, 0, 0, partitionStates, Collections.emptyList(), topicIds.asJava).build() - // TODO: support raft code? 
- private val metadataCache = new ZkMetadataCache(0, MetadataVersion.latestTesting(), BrokerFeatures.createEmpty()) - metadataCache.updateMetadata(0, updateMetadataRequest) + private val metadataCache = MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.LATEST_PRODUCTION) private def initialFetchState(fetchOffset: Long, leaderEpoch: Int = 1): InitialFetchState = { InitialFetchState(topicId = Some(topicId), leader = new BrokerEndPoint(0, "localhost", 9092), @@ -77,7 +62,7 @@ class ReplicaAlterLogDirsThreadTest { @Test def shouldNotAddPartitionIfFutureLogIsNotDefined(): Unit = { val brokerId = 1 - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId)) val replicaManager = Mockito.mock(classOf[ReplicaManager]) val quotaManager = Mockito.mock(classOf[ReplicationQuotaManager]) @@ -105,7 +90,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldUpdateLeaderEpochAfterFencedEpochError(): Unit = { val brokerId = 1 val partitionId = 0 - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId)) val partition = Mockito.mock(classOf[Partition]) val replicaManager = Mockito.mock(classOf[ReplicaManager]) @@ -205,7 +190,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldReplaceCurrentLogDirWhenCaughtUp(): Unit = { val brokerId = 1 val partitionId = 0 - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId)) val partition = Mockito.mock(classOf[Partition]) val replicaManager = Mockito.mock(classOf[ReplicaManager]) @@ -284,7 +269,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldReplaceCurrentLogDirWhenCaughtUpWithAfterAssignmentRequestHasBeenCompleted(): Unit = { val brokerId = 1 val partitionId = 0 - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId)) val partition = Mockito.mock(classOf[Partition]) val replicaManager = Mockito.mock(classOf[ReplicaManager]) @@ -381,7 +366,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldRevertAnyScheduledAssignmentRequestIfAssignmentIsCancelled(): Unit = { val brokerId = 1 val partitionId = 0 - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId)) val partition = Mockito.mock(classOf[Partition]) val replicaManager = Mockito.mock(classOf[ReplicaManager]) @@ -478,7 +463,7 @@ class ReplicaAlterLogDirsThreadTest { val replicaManager = Mockito.mock(classOf[ReplicaManager]) val directoryEventHandler = mock(classOf[DirectoryEventHandler]) val quotaManager = Mockito.mock(classOf[ReplicationQuotaManager]) - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val endPoint = new BrokerEndPoint(0, "localhost", 1000) val leader = new LocalLeaderEndPoint(endPoint, config, replicaManager, quotaManager) val thread = new ReplicaAlterLogDirsThread( @@ -540,7 +525,7 @@ class ReplicaAlterLogDirsThreadTest { @Test def issuesEpochRequestFromLocalReplica(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = 
KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) //Setup all dependencies @@ -614,7 +599,7 @@ class ReplicaAlterLogDirsThreadTest { @Test def fetchEpochsFromLeaderShouldHandleExceptionFromGetLocalReplica(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) //Setup all dependencies val partitionT1p0: Partition = mock(classOf[Partition]) @@ -680,7 +665,7 @@ class ReplicaAlterLogDirsThreadTest { val truncateCaptureT1p1: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) // Setup all the dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val logT1p0: UnifiedLog = mock(classOf[UnifiedLog]) @@ -772,7 +757,7 @@ class ReplicaAlterLogDirsThreadTest { val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) // Setup all the dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -858,7 +843,7 @@ class ReplicaAlterLogDirsThreadTest { val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) // Setup all the dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -913,7 +898,7 @@ class ReplicaAlterLogDirsThreadTest { val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) // Setup all the dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -1002,7 +987,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldFetchLeaderEpochOnFirstFetchOnly(): Unit = { //Setup all dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -1065,7 +1050,7 @@ class ReplicaAlterLogDirsThreadTest { def shouldFetchOneReplicaAtATime(): Unit = { //Setup all dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -1117,7 +1102,7 @@ class ReplicaAlterLogDirsThreadTest { def 
shouldFetchNonDelayedAndNonTruncatingReplicas(): Unit = { //Setup all dependencies - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -1168,7 +1153,7 @@ class ReplicaAlterLogDirsThreadTest { // one partition is ready and one is delayed val ResultWithPartitions(fetchRequest2Opt, partitionsWithError2) = thread.leader.buildFetch(Map( t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, state = Fetching, lastFetchedEpoch = None), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(new DelayedItem(5000)), state = Fetching, lastFetchedEpoch = None))) + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None))) assertTrue(fetchRequest2Opt.isDefined) val fetchRequest2 = fetchRequest2Opt.get @@ -1181,8 +1166,8 @@ class ReplicaAlterLogDirsThreadTest { // both partitions are delayed val ResultWithPartitions(fetchRequest3Opt, partitionsWithError3) = thread.leader.buildFetch(Map( - t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, delay = Some(new DelayedItem(5000)), state = Fetching, lastFetchedEpoch = None), - t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(new DelayedItem(5000)), state = Fetching, lastFetchedEpoch = None))) + t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None))) assertTrue(fetchRequest3Opt.isEmpty, "Expected no fetch requests since all partitions are delayed") assertFalse(partitionsWithError3.nonEmpty) } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala index a398fc68cf029..c7c43e85965bc 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala @@ -21,24 +21,20 @@ import kafka.log.{LogManager, UnifiedLog} import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.epoch.util.MockBlockingSender -import kafka.server.metadata.ZkMetadataCache import kafka.utils.TestUtils import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.common.message.{FetchResponseData, UpdateMetadataRequestData} +import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset -import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, RecordValidationStats, SimpleRecord} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} -import org.apache.kafka.common.requests.{FetchRequest, 
FetchResponse, UpdateMetadataRequest} +import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.apache.kafka.common.utils.{LogContext, Time} -import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.config.ReplicationConfigs -import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} -import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 +import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -48,12 +44,12 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.{any, anyBoolean, anyLong} -import org.mockito.Mockito.{mock, never, times, verify, when} +import org.mockito.Mockito.{mock, times, verify, when} import java.nio.charset.StandardCharsets import java.util import java.util.{Collections, Optional, OptionalInt} -import scala.collection.{Map, mutable} +import scala.collection.mutable import scala.jdk.CollectionConverters._ class ReplicaFetcherThreadTest { @@ -70,26 +66,7 @@ class ReplicaFetcherThreadTest { private val brokerEndPoint = new BrokerEndPoint(0, "localhost", 1000) private val failedPartitions = new FailedPartitions - private val partitionStates = List( - new UpdateMetadataRequestData.UpdateMetadataPartitionState() - .setTopicName("topic1") - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0), - new UpdateMetadataRequestData.UpdateMetadataPartitionState() - .setTopicName("topic2") - .setPartitionIndex(0) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0), - ).asJava - - private val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion(), - 0, 0, 0, partitionStates, Collections.emptyList(), topicIds.asJava).build() - // TODO: support raft code? 
- private var metadataCache = new ZkMetadataCache(0, MetadataVersion.latestTesting(), BrokerFeatures.createEmpty()) - metadataCache.updateMetadata(0, updateMetadataRequest) + private val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION) private def initialFetchState(topicId: Option[Uuid], fetchOffset: Long, leaderEpoch: Int = 1): InitialFetchState = { InitialFetchState(topicId = topicId, leader = new BrokerEndPoint(0, "localhost", 9092), @@ -130,95 +107,12 @@ class ReplicaFetcherThreadTest { ApiKeys.FETCH.latestVersion(true), testingVersion.fetchRequestVersion ) - assertEquals( - ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(true), - testingVersion.offsetForLeaderEpochRequestVersion - ) assertEquals( ApiKeys.LIST_OFFSETS.latestVersion(true), testingVersion.listOffsetRequestVersion ) } - @Test - def testFetchLeaderEpochRequestIfLastEpochDefinedForSomePartitions(): Unit = { - val config = kafkaConfigNoTruncateOnFetch - - //Setup all dependencies - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val leaderEpoch = 5 - - //Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.logEndOffset).thenReturn(0) - when(log.highWatermark).thenReturn(0) - when(log.latestEpoch) - .thenReturn(Some(leaderEpoch)) - .thenReturn(Some(leaderEpoch)) - .thenReturn(None) // t2p1 doesn't support epochs - when(log.endOffsetForEpoch(leaderEpoch)).thenReturn( - Some(new OffsetAndEpoch(0, leaderEpoch))) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse - val offsets = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)).asJava - - //Create the fetcher thread - val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM) - - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork) - - // topic 1 supports epoch, t2 doesn't. 
- thread.addPartitions(Map( - t1p0 -> initialFetchState(Some(topicId1), 0L), - t1p1 -> initialFetchState(Some(topicId2), 0L), - t2p1 -> initialFetchState(Some(topicId2), 0L))) - - assertPartitionStates(thread, shouldBeReadyForFetch = false, shouldBeTruncatingLog = true, shouldBeDelayed = false) - //Loop 1 - thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(1, mockNetwork.fetchCount) - - assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false) - - //Loop 2 we should not fetch epochs - thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(2, mockNetwork.fetchCount) - - assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false) - - //Loop 3 we should not fetch epochs - thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(3, mockNetwork.fetchCount) - - assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false) - - //Assert that truncate to is called exactly once (despite two loops) - verify(partition, times(3)).truncateTo(anyLong(), anyBoolean()) - } - /** * Assert that all partitions' states are as expected * @@ -244,7 +138,7 @@ class ReplicaFetcherThreadTest { @Test def shouldHandleExceptionFromBlockingSend(): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val mockBlockingSend: BlockingSend = mock(classOf[BlockingSend]) when(mockBlockingSend.brokerEndPoint()).thenReturn(brokerEndPoint) @@ -280,24 +174,16 @@ class ReplicaFetcherThreadTest { verify(mockBlockingSend).sendRequest(any()) } - @Test - def shouldFetchLeaderEpochOnFirstFetchOnlyIfLeaderEpochKnownToBothIbp26(): Unit = { - verifyFetchLeaderEpochOnFirstFetch(IBP_2_6_IV0) - } - @Test def shouldNotFetchLeaderEpochOnFirstFetchWithTruncateOnFetch(): Unit = { verifyFetchLeaderEpochOnFirstFetch(MetadataVersion.latestTesting, epochFetchCount = 0) } - private def verifyFetchLeaderEpochOnFirstFetch(ibp: MetadataVersion, epochFetchCount: Int = 1): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + private def verifyFetchLeaderEpochOnFirstFetch(ibp: MetadataVersion, epochFetchCount: Int): Unit = { + val props = TestUtils.createBrokerConfig(1) props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, ibp.version) val config = KafkaConfig.fromProps(props) - metadataCache = new ZkMetadataCache(0, ibp, BrokerFeatures.createEmpty()) - metadataCache.updateMetadata(0, updateMetadataRequest) - //Setup all dependencies val logManager: LogManager = mock(classOf[LogManager]) val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) @@ -353,217 +239,13 @@ class ReplicaFetcherThreadTest { assertEquals(3, mockNetwork.fetchCount) } - @Test - def shouldTruncateToOffsetSpecifiedInEpochOffsetResponse(): Unit = { - - //Create a capture to track what partitions/offsets are truncated - val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - // Setup all the dependencies - val config = kafkaConfigNoTruncateOnFetch - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = 
mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val leaderEpoch = 5 - val initialLEO = 200 - - //Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialLEO - 1) - when(log.latestEpoch).thenReturn(Some(leaderEpoch)) - when(log.endOffsetForEpoch(leaderEpoch)).thenReturn( - Some(new OffsetAndEpoch(initialLEO, leaderEpoch))) - when(log.logEndOffset).thenReturn(initialLEO) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation - val offsetsReply = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 156), - t2p1 -> newOffsetForLeaderPartitionResult(t2p1, leaderEpoch, 172)).asJava - - //Create the thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t2p1 -> initialFetchState(Some(topicId2), 0L))) - - //Run it - thread.doWork() - - //We should have truncated to the offsets in the response - verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean()) - assertTrue(truncateToCapture.getAllValues.asScala.contains(156), - "Expected " + t1p0 + " to truncate to offset 156 (truncation offsets: " + truncateToCapture.getAllValues + ")") - assertTrue(truncateToCapture.getAllValues.asScala.contains(172), - "Expected " + t2p1 + " to truncate to offset 172 (truncation offsets: " + truncateToCapture.getAllValues + ")") - } - - @Test - def shouldTruncateToOffsetSpecifiedInEpochOffsetResponseIfFollowerHasNoMoreEpochs(): Unit = { - // Create a capture to track what partitions/offsets are truncated - val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - // Setup all the dependencies - val config = kafkaConfigNoTruncateOnFetch - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val leaderEpochAtFollower = 5 - val leaderEpochAtLeader = 4 - val initialLEO = 200 - - //Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialLEO - 3) - when(log.latestEpoch).thenReturn(Some(leaderEpochAtFollower)) - when(log.endOffsetForEpoch(leaderEpochAtLeader)).thenReturn(None) - when(log.logEndOffset).thenReturn(initialLEO) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - 
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation - val offsetsReply = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpochAtLeader, 156), - t2p1 -> newOffsetForLeaderPartitionResult(t2p1, leaderEpochAtLeader, 202)).asJava - - //Create the thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t2p1 -> initialFetchState(Some(topicId2), 0L))) - - //Run it - thread.doWork() - - //We should have truncated to the offsets in the response - verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean()) - assertTrue(truncateToCapture.getAllValues.asScala.contains(156), - "Expected " + t1p0 + " to truncate to offset 156 (truncation offsets: " + truncateToCapture.getAllValues + ")") - assertTrue(truncateToCapture.getAllValues.asScala.contains(initialLEO), - "Expected " + t2p1 + " to truncate to offset " + initialLEO + - " (truncation offsets: " + truncateToCapture.getAllValues + ")") - } - - @Test - def shouldFetchLeaderEpochSecondTimeIfLeaderRepliesWithEpochNotKnownToFollower(): Unit = { - // Create a capture to track what partitions/offsets are truncated - val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - val config = kafkaConfigNoTruncateOnFetch - - // Setup all dependencies - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val initialLEO = 200 - - // Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialLEO - 2) - when(log.latestEpoch).thenReturn(Some(5)) - when(log.endOffsetForEpoch(4)).thenReturn( - Some(new OffsetAndEpoch(120, 3))) - when(log.endOffsetForEpoch(3)).thenReturn( - Some(new OffsetAndEpoch(120, 3))) - when(log.logEndOffset).thenReturn(initialLEO) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - // Define the offsets for the OffsetsForLeaderEpochResponse - val offsets = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 4, 155), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 4, 143)).asJava - - // Create the fetcher thread - val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L))) - - // Loop 1 -- both topic partitions will need to fetch another leader epoch - 
thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(0, mockNetwork.fetchCount) - - // Loop 2 should do the second fetch for both topic partitions because the leader replied with - // epoch 4 while follower knows only about epoch 3 - val nextOffsets = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 3, 101), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 3, 102)).asJava - mockNetwork.setOffsetsForNextResponse(nextOffsets) - - thread.doWork() - assertEquals(2, mockNetwork.epochFetchCount) - assertEquals(1, mockNetwork.fetchCount) - assertTrue(mockNetwork.lastUsedOffsetForLeaderEpochVersion >= 3, - "OffsetsForLeaderEpochRequest version.") - - //Loop 3 we should not fetch epochs - thread.doWork() - assertEquals(2, mockNetwork.epochFetchCount) - assertEquals(2, mockNetwork.fetchCount) - - verify(partition, times(4)).truncateTo(truncateToCapture.capture(), anyBoolean()) - //We should have truncated to the offsets in the second response - assertTrue(truncateToCapture.getAllValues.asScala.contains(102), - "Expected " + t1p1 + " to truncate to offset 102 (truncation offsets: " + truncateToCapture.getAllValues + ")") - assertTrue(truncateToCapture.getAllValues.asScala.contains(101), - "Expected " + t1p0 + " to truncate to offset 101 (truncation offsets: " + truncateToCapture.getAllValues + ")") - } - @Test def shouldTruncateIfLeaderRepliesWithDivergingEpochNotKnownToFollower(): Unit = { // Create a capture to track what partitions/offsets are truncated val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) // Setup all dependencies val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) @@ -671,7 +353,7 @@ class ReplicaFetcherThreadTest { @Test def testTruncateOnFetchDoesNotUpdateHighWatermark(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -755,7 +437,7 @@ class ReplicaFetcherThreadTest { @Test def testLagIsUpdatedWhenNoRecords(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:1234")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) val logManager: LogManager = mock(classOf[LogManager]) val log: UnifiedLog = mock(classOf[UnifiedLog]) @@ -781,7 +463,6 @@ class ReplicaFetcherThreadTest { 0, OptionalInt.empty, RecordBatch.NO_TIMESTAMP, - -1L, RecordBatch.NO_TIMESTAMP, -1L, RecordValidationStats.EMPTY, @@ -848,327 +529,9 @@ class ReplicaFetcherThreadTest { assertEquals(Some(lastFetchedEpoch), thread.fetchState(t1p0).flatMap(_.lastFetchedEpoch)) } - @Test - def shouldUseLeaderEndOffsetIfInterBrokerVersionBelow20(): Unit = { - - // Create a capture to track what partitions/offsets are truncated - val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - val props = TestUtils.createBrokerConfig(1, "localhost:1234") - props.put(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.11.0") - val config = KafkaConfig.fromProps(props) - - // Setup all dependencies - val quota: ReplicationQuotaManager 
= mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val initialLEO = 200 - - // Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialLEO - 2) - when(log.latestEpoch).thenReturn(Some(5)) - when(log.endOffsetForEpoch(4)).thenReturn( - Some(new OffsetAndEpoch(120, 3))) - when(log.endOffsetForEpoch(3)).thenReturn( - Some(new OffsetAndEpoch(120, 3))) - when(log.logEndOffset).thenReturn(initialLEO) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - // Define the offsets for the OffsetsForLeaderEpochResponse with undefined epoch to simulate - // older protocol version - val offsets = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, UNDEFINED_EPOCH, 155), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, UNDEFINED_EPOCH, 143)).asJava - - // Create the fetcher thread - val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L))) - - // Loop 1 -- both topic partitions will truncate to leader offset even though they don't know - // about leader epoch - thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(1, mockNetwork.fetchCount) - assertEquals(0, mockNetwork.lastUsedOffsetForLeaderEpochVersion, "OffsetsForLeaderEpochRequest version.") - - //Loop 2 we should not fetch epochs - thread.doWork() - assertEquals(1, mockNetwork.epochFetchCount) - assertEquals(2, mockNetwork.fetchCount) - - //We should have truncated to the offsets in the first response - verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean()) - assertTrue(truncateToCapture.getAllValues.asScala.contains(155), - "Expected " + t1p0 + " to truncate to offset 155 (truncation offsets: " + truncateToCapture.getAllValues + ")") - assertTrue(truncateToCapture.getAllValues.asScala.contains(143), - "Expected " + t1p1 + " to truncate to offset 143 (truncation offsets: " + truncateToCapture.getAllValues + ")") - } - - @Test - def shouldTruncateToInitialFetchOffsetIfLeaderReturnsUndefinedOffset(): Unit = { - - //Create a capture to track what partitions/offsets are truncated - val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - // Setup all the dependencies - val config = kafkaConfigNoTruncateOnFetch - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: 
ReplicaManager = mock(classOf[ReplicaManager]) - - val initialFetchOffset = 100 - - //Stubs - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialFetchOffset) - when(log.latestEpoch).thenReturn(Some(5)) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation - val offsetsReply = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET)).asJava - - //Create the thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), initialFetchOffset))) - - //Run it - thread.doWork() - - //We should have truncated to initial fetch offset - verify(partition).truncateTo(truncated.capture(), anyBoolean()) - assertEquals(initialFetchOffset, truncated.getValue) - } - - @Test - def shouldPollIndefinitelyIfLeaderReturnsAnyException(): Unit = { - - //Create a capture to track what partitions/offsets are truncated - val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - - // Setup all the dependencies - val config = kafkaConfigNoTruncateOnFetch - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val leaderEpoch = 5 - val highWaterMark = 100 - val initialLeo = 300 - - //Stubs - when(log.highWatermark).thenReturn(highWaterMark) - when(partition.localLogOrException).thenReturn(log) - when(log.latestEpoch).thenReturn(Some(leaderEpoch)) - // this is for the last reply with EpochEndOffset(5, 156) - when(log.endOffsetForEpoch(leaderEpoch)).thenReturn( - Some(new OffsetAndEpoch(initialLeo, leaderEpoch))) - when(log.logEndOffset).thenReturn(initialLeo) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation - val offsetsReply = mutable.Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, NOT_LEADER_OR_FOLLOWER, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, UNKNOWN_SERVER_ERROR, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET) - ).asJava - - //Create the thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - 
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L))) - - //Run thread 3 times - (0 to 3).foreach { _ => - thread.doWork() - } - - //Then should loop continuously while there is no leader - verify(partition, never()).truncateTo(anyLong(), anyBoolean()) - - //New leader elected and replies - offsetsReply.put(t1p0, newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 156)) - - thread.doWork() - - //Now the final call should have actually done a truncation (to offset 156) - verify(partition).truncateTo(truncated.capture(), anyBoolean()) - assertEquals(156, truncated.getValue) - } - - @Test - def shouldMovePartitionsOutOfTruncatingLogState(): Unit = { - val config = kafkaConfigNoTruncateOnFetch - - //Setup all stubs - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - val leaderEpoch = 4 - - //Stub return values - when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(0) - when(log.latestEpoch).thenReturn(Some(leaderEpoch)) - when(log.endOffsetForEpoch(leaderEpoch)).thenReturn( - Some(new OffsetAndEpoch(0, leaderEpoch))) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse - val offsetsReply = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1) - ).asJava - - //Create the fetcher thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - - //When - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L))) - - //Then all partitions should start in an TruncatingLog state - assertEquals(Option(Truncating), thread.fetchState(t1p0).map(_.state)) - assertEquals(Option(Truncating), thread.fetchState(t1p1).map(_.state)) - - //When - thread.doWork() - - //Then none should be TruncatingLog anymore - assertEquals(Option(Fetching), thread.fetchState(t1p0).map(_.state)) - assertEquals(Option(Fetching), thread.fetchState(t1p1).map(_.state)) - verify(partition, times(2)).truncateTo(0L, false) - } - - @Test - def shouldFilterPartitionsMadeLeaderDuringLeaderEpochRequest(): Unit ={ - val config = kafkaConfigNoTruncateOnFetch - val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long]) - val initialLEO = 100 - - //Setup all stubs - val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) - val logManager: LogManager = mock(classOf[LogManager]) - val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) - val log: UnifiedLog = mock(classOf[UnifiedLog]) - val partition: Partition = mock(classOf[Partition]) - val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) - - //Stub return values - 
when(partition.localLogOrException).thenReturn(log) - when(log.highWatermark).thenReturn(initialLEO - 2) - when(log.latestEpoch).thenReturn(Some(5)) - when(log.endOffsetForEpoch(5)).thenReturn(Some(new OffsetAndEpoch(initialLEO, 5))) - when(log.logEndOffset).thenReturn(initialLEO) - when(replicaManager.metadataCache).thenReturn(metadataCache) - when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) - when(replicaManager.logManager).thenReturn(logManager) - when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) - stub(partition, replicaManager, log) - - //Define the offsets for the OffsetsForLeaderEpochResponse - val offsetsReply = Map( - t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 5, 52), - t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 5, 49) - ).asJava - - //Create the fetcher thread - val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM) - val thread = createReplicaFetcherThread( - "bob", - 0, - config, - failedPartitions, - replicaManager, - quota, - mockNetwork - ) - - //When - thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L))) - - //When the epoch request is outstanding, remove one of the partitions to simulate a leader change. We do this via a callback passed to the mock thread - val partitionThatBecameLeader = t1p0 - mockNetwork.setEpochRequestCallback(() => { - thread.removePartitions(Set(partitionThatBecameLeader)) - }) - - //When - thread.doWork() - - //Then we should not have truncated the partition that became leader. Exactly one partition should be truncated. - verify(partition).truncateTo(truncateToCapture.capture(), anyBoolean()) - assertEquals(49, truncateToCapture.getValue) - } - @Test def shouldCatchExceptionFromBlockingSendWhenShuttingDownReplicaFetcherThread(): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val mockBlockingSend: BlockingSend = mock(classOf[BlockingSend]) @@ -1216,7 +579,7 @@ class ReplicaFetcherThreadTest { val tid1p1 = new TopicIdPartition(topicId1, t1p1) val tid2p1 = new TopicIdPartition(topicId2, t2p1) - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) val mockBlockingSend: BlockingSend = mock(classOf[BlockingSend]) @@ -1294,7 +657,7 @@ class ReplicaFetcherThreadTest { @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testLocalFetchCompletionIfHighWatermarkUpdated(highWatermarkUpdated: Boolean): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val highWatermarkReceivedFromLeader = 100L @@ -1381,7 +744,7 @@ class ReplicaFetcherThreadTest { } private def assertProcessPartitionDataWhen(isReassigning: Boolean): Unit = { - val props = TestUtils.createBrokerConfig(1, "localhost:1234") + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val mockBlockingSend: BlockingSend = mock(classOf[BlockingSend]) @@ -1438,10 +801,4 @@ class ReplicaFetcherThreadTest { when(replicaManager.localLogOrException(t2p1)).thenReturn(log) when(replicaManager.getPartitionOrException(t2p1)).thenReturn(partition) } - - private def kafkaConfigNoTruncateOnFetch: KafkaConfig = { - val props 
= TestUtils.createBrokerConfig(1, "localhost:1234") - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, IBP_2_6_IV0.version) - KafkaConfig.fromProps(props) - } } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala index c4eb5e30f5bbf..09c9a82446adb 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala @@ -471,7 +471,6 @@ class ReplicaManagerConcurrencyTest extends Logging { override def submit( topicPartition: TopicIdPartition, leaderAndIsr: LeaderAndIsr, - controllerEpoch: Int ): CompletableFuture[LeaderAndIsr] = { channel.alterIsr(topicPartition, leaderAndIsr) } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala index fd39525759816..f0a4be811bb3f 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala @@ -30,6 +30,7 @@ import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogOffsetSnapshot} @@ -42,7 +43,7 @@ import org.mockito.{AdditionalMatchers, ArgumentMatchers} import scala.jdk.CollectionConverters._ class ReplicaManagerQuotasTest { - val configs = TestUtils.createBrokerConfigs(2, TestUtils.MockZkConnect).map(KafkaConfig.fromProps(_, new Properties())) + val configs = TestUtils.createBrokerConfigs(2).map(KafkaConfig.fromProps(_, new Properties())) val time = new MockTime val metrics = new Metrics val record = new SimpleRecord("some-data-in-a-message".getBytes()) @@ -307,7 +308,7 @@ class ReplicaManagerQuotasTest { scheduler = scheduler, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(leaderBrokerId, configs.head.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(leaderBrokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(configs.head.logDirs.size), alterPartitionManager = alterIsrManager) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 397dd6bff1e0e..5365de394e52a 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -29,13 +29,13 @@ import kafka.server.epoch.util.MockBlockingSender import kafka.server.share.DelayedShareFetch import kafka.utils.TestUtils.waitUntilTrue import kafka.utils.{Pool, TestUtils} -import kafka.zk.KafkaZkClient import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.{DirectoryId, IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import 
org.apache.kafka.common.errors.{InvalidPidMappingException, KafkaStorageException} -import org.apache.kafka.common.message.LeaderAndIsrRequestData +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.{DeleteRecordsResponseData, LeaderAndIsrRequestData} import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState @@ -51,17 +51,13 @@ import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.KafkaPrincipal -import org.apache.kafka.common.utils.{Exit, LogContext, Time, Utils} +import org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image._ import org.apache.kafka.metadata.LeaderConstants.NO_LEADER import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} -import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 -import org.apache.kafka.server.common.{DirectoryEventHandler, MetadataVersion, OffsetAndEpoch, RequestLocal, StopPartition} +import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, MetadataVersion, OffsetAndEpoch, RequestLocal, StopPartition} import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.log.remote.storage._ import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} @@ -71,6 +67,7 @@ import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPa import org.apache.kafka.server.util.timer.MockTimer import org.apache.kafka.server.util.{MockScheduler, MockTime} import org.apache.kafka.storage.internals.checkpoint.{LazyOffsetCheckpoints, OffsetCheckpointFile, PartitionMetadataFile} +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LocalLog, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, RemoteStorageFetchInfo, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ @@ -121,7 +118,7 @@ class ReplicaManagerTest { private var mockRemoteLogManager: RemoteLogManager = _ private var addPartitionsToTxnManager: AddPartitionsToTxnManager = _ private var brokerTopicStats: BrokerTopicStats = _ - private val transactionSupportedOperation = genericError + private val transactionSupportedOperation = genericErrorSupported private val quotaExceededThrottleTime = 1000 private val quotaAvailableThrottleTime = 0 @@ -136,7 +133,7 @@ class ReplicaManagerTest { @BeforeEach def setUp(): Unit = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(1) config = KafkaConfig.fromProps(props) alterPartitionManager = 
mock(classOf[AlterPartitionManager]) quotaManager = QuotaFactory.instantiate(config, metrics, time, "") @@ -151,7 +148,7 @@ class ReplicaManagerTest { addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) // Anytime we try to verify, just automatically run the callback as though the transaction was verified. - when(addPartitionsToTxnManager.verifyTransaction(any(), any(), any(), any(), any(), any())).thenAnswer { invocationOnMock => + when(addPartitionsToTxnManager.addOrVerifyTransaction(any(), any(), any(), any(), any(), any())).thenAnswer { invocationOnMock => val callback = invocationOnMock.getArgument(4, classOf[AddPartitionsToTxnManager.AppendCallback]) callback(Map.empty[TopicPartition, Errors].toMap) } @@ -177,7 +174,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) try { @@ -195,7 +192,7 @@ class ReplicaManagerTest { @Test def testHighwaterMarkRelativeDirectoryMapping(): Unit = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(1) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) @@ -206,7 +203,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) try { @@ -232,7 +229,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, threadNamePrefix = Option(this.getClass.getName)) @@ -269,10 +266,10 @@ class ReplicaManagerTest { } @Test - def testMaybeAddLogDirFetchersWithoutEpochCache(): Unit = { + def testMaybeAddLogDirFetchers(): Unit = { val dir1 = TestUtils.tempDir() val dir2 = TestUtils.tempDir() - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) @@ -314,8 +311,6 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = true, isFutureReplica = true, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) - // remove cache to disable OffsetsForLeaderEpoch API - partition.futureLog.get.leaderEpochCache = None // this method should use hw of future log to create log dir fetcher. 
Otherwise, it causes offset mismatch error rm.maybeAddLogDirFetchers(Set(partition), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), _ => None) @@ -335,7 +330,7 @@ class ReplicaManagerTest { def testMaybeAddLogDirFetchersPausingCleaning(futureLogCreated: Boolean): Unit = { val dir1 = TestUtils.tempDir() val dir2 = TestUtils.tempDir() - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) @@ -407,7 +402,7 @@ class ReplicaManagerTest { @Test def testClearPurgatoryOnBecomingFollower(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) val logProps = new Properties() @@ -493,7 +488,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, threadNamePrefix = Option(this.getClass.getName)) @@ -1390,13 +1385,6 @@ class ReplicaManagerTest { verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(new Properties, expectTruncation = false) } - @Test - def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdateIbp26(): Unit = { - val extraProps = new Properties - extraProps.put(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, IBP_2_6_IV0.version) - verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps, expectTruncation = true) - } - /** * If a partition becomes a follower and the leader is unchanged it should check for truncation * if the epoch has increased by more than one (which suggests it has missed an update). For @@ -2276,7 +2264,7 @@ class ReplicaManagerTest { val idempotentRecords = MemoryRecords.withIdempotentRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) handleProduceAppend(replicaManager, tp0, idempotentRecords, transactionalId = null) - verify(addPartitionsToTxnManager, times(0)).verifyTransaction(any(), any(), any(), any(), any[AddPartitionsToTxnManager.AppendCallback](), any()) + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction(any(), any(), any(), any(), any[AddPartitionsToTxnManager.AppendCallback](), any()) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) // If we supply a transactional ID and some transactional and some idempotent records, we should only verify the topic partition with transactional records. 
@@ -2286,7 +2274,7 @@ class ReplicaManagerTest { val idempotentRecords2 = MemoryRecords.withIdempotentRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) handleProduceAppendToMultipleTopics(replicaManager, Map(tp0 -> transactionalRecords, tp1 -> idempotentRecords2), transactionalId) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction( + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2323,7 +2311,7 @@ class ReplicaManagerTest { // We should add these partitions to the manager to verify. val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, origin = appendOrigin, transactionalId = transactionalId) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction( + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2343,7 +2331,7 @@ class ReplicaManagerTest { // This time verification is successful. handleProduceAppend(replicaManager, tp0, transactionalRecords, origin = appendOrigin, transactionalId = transactionalId) val appendCallback2 = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(2)).verifyTransaction( + verify(addPartitionsToTxnManager, times(2)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2356,7 +2344,7 @@ class ReplicaManagerTest { val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue() callback2(Map.empty[TopicPartition, Errors].toMap) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) - assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId)) + assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -2383,7 +2371,7 @@ class ReplicaManagerTest { // We should add these partitions to the manager to verify. 
val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction( + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2406,7 +2394,7 @@ class ReplicaManagerTest { val result2 = handleProduceAppend(replicaManager, tp0, transactionalRecords2, transactionalId = transactionalId) val appendCallback2 = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(2)).verifyTransaction( + verify(addPartitionsToTxnManager, times(2)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2490,7 +2478,7 @@ class ReplicaManagerTest { assertThrows(classOf[InvalidPidMappingException], () => handleProduceAppendToMultipleTopics(replicaManager, transactionalRecords, transactionalId = transactionalId)) // We should not add these partitions to the manager to verify. - verify(addPartitionsToTxnManager, times(0)).verifyTransaction(any(), any(), any(), any(), any(), any()) + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction(any(), any(), any(), any(), any(), any()) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -2514,7 +2502,7 @@ class ReplicaManagerTest { handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId).onFire { response => assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, response.error) } - verify(addPartitionsToTxnManager, times(0)).verifyTransaction(any(), any(), any(), any(), any(), any()) + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction(any(), any(), any(), any(), any(), any()) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -2522,7 +2510,7 @@ class ReplicaManagerTest { @Test def testDisabledTransactionVerification(): Unit = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.put("transaction.partition.verification.enable", "false") val config = KafkaConfig.fromProps(props) @@ -2548,10 +2536,10 @@ class ReplicaManagerTest { assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp, producerId)) // We should not add these partitions to the manager to verify. - verify(addPartitionsToTxnManager, times(0)).verifyTransaction(any(), any(), any(), any(), any(), any()) + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction(any(), any(), any(), any(), any(), any()) // Dynamically enable verification. 
- config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val props = new Properties() props.put(TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG, "true") config.dynamicConfig.updateBrokerConfig(config.brokerId, props) @@ -2562,9 +2550,9 @@ class ReplicaManagerTest { new SimpleRecord("message".getBytes)) handleProduceAppend(replicaManager, tp, moreTransactionalRecords, transactionalId = transactionalId) - verify(addPartitionsToTxnManager, times(0)).verifyTransaction(any(), any(), any(), any(), any(), any()) + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction(any(), any(), any(), any(), any(), any()) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp, producerId)) - assertTrue(replicaManager.localLog(tp).get.hasOngoingTransaction(producerId)) + assertTrue(replicaManager.localLog(tp).get.hasOngoingTransaction(producerId, producerEpoch)) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -2591,7 +2579,7 @@ class ReplicaManagerTest { // We should add these partitions to the manager to verify. val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction( + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2603,7 +2591,7 @@ class ReplicaManagerTest { assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) // Disable verification - config.dynamicConfig.initialize(None, None) + config.dynamicConfig.initialize(None) val props = new Properties() props.put(TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG, "false") config.dynamicConfig.updateBrokerConfig(config.brokerId, props) @@ -2617,9 +2605,9 @@ class ReplicaManagerTest { // This time we do not verify handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction(any(), any(), any(), any(), any(), any()) + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction(any(), any(), any(), any(), any(), any()) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) - assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId)) + assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -2656,7 +2644,7 @@ class ReplicaManagerTest { val expectedMessage = s"Unable to verify the partition has been added to the transaction. 
Underlying error: ${error.toString}" val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(1)).verifyTransaction( + verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2687,7 +2675,7 @@ class ReplicaManagerTest { try { val result = maybeStartTransactionVerificationForPartition(replicaManager, tp0, transactionalId, producerId, producerEpoch) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(0)).verifyTransaction( + verify(addPartitionsToTxnManager, times(0)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), @@ -2701,18 +2689,8 @@ class ReplicaManagerTest { } } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testFullLeaderAndIsrStrayPartitions(zkMigrationEnabled: Boolean): Unit = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect) - if (zkMigrationEnabled) { - props.put(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "" + zkMigrationEnabled) - props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, "3000@localhost:9071") - props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT") - config = KafkaConfig.fromProps(props) - } - + @Test + def testFullLeaderAndIsrStrayPartitions(): Unit = { val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), defaultConfig = new LogConfig(new Properties()), time = time) val quotaManager = QuotaFactory.instantiate(config, metrics, time, "") val replicaManager = new ReplicaManager( @@ -2722,7 +2700,7 @@ class ReplicaManagerTest { scheduler = time.scheduler, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, threadNamePrefix = Option(this.getClass.getName)) @@ -2772,11 +2750,7 @@ class ReplicaManagerTest { val stray0 = replicaManager.getPartition(new TopicPartition("hosted-stray", 0)) - if (zkMigrationEnabled) { - assertEquals(HostedPartition.None, stray0) - } else { - assertTrue(stray0.isInstanceOf[HostedPartition.Online]) - } + assertTrue(stray0.isInstanceOf[HostedPartition.Online]) } finally { Utils.tryAll(util.Arrays.asList[Callable[Void]] ( () => { @@ -2806,7 +2780,7 @@ class ReplicaManagerTest { scheduler = time.scheduler, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, threadNamePrefix = Option(this.getClass.getName)) @@ -2912,7 +2886,7 @@ class ReplicaManagerTest { leaderEpochFromLeader: Int = 3, extraProps: Properties = new Properties(), 
topicId: Option[Uuid] = None): (ReplicaManager, LogManager) = { - val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(0) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) props.asScala ++= extraProps.asScala val config = KafkaConfig.fromProps(props) @@ -2926,8 +2900,8 @@ class ReplicaManagerTest { val maxTransactionTimeoutMs = 30000 val maxProducerIdExpirationMs = 30000 val segments = new LogSegments(tp) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, tp, mockLogDirFailureChannel, logConfig.recordVersion, "", None, time.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, tp, mockLogDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(tp, logDir, maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, true), time) val offsets = new LogLoader( @@ -2941,7 +2915,7 @@ class ReplicaManagerTest { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -2955,8 +2929,7 @@ class ReplicaManagerTest { producerIdExpirationCheckIntervalMs = 30000, leaderEpochCache = leaderEpochCache, producerStateManager = producerStateManager, - _topicId = topicId, - keepPartitionMetadataFile = true) { + _topicId = topicId) { override def endOffsetForEpoch(leaderEpoch: Int): Option[OffsetAndEpoch] = { assertEquals(leaderEpoch, leaderEpochFromLeader) @@ -3383,7 +3356,7 @@ class ReplicaManagerTest { buildRemoteLogAuxState: Boolean = false, remoteFetchQuotaExceeded: Option[Boolean] = None ): ReplicaManager = { - val props = TestUtils.createBrokerConfig(brokerId, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(brokerId) val path1 = TestUtils.tempRelativeDir("data").getAbsolutePath val path2 = TestUtils.tempRelativeDir("data2").getAbsolutePath props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, enableRemoteStorage.toString) @@ -3744,8 +3717,8 @@ class ReplicaManagerTest { private def prepareDifferentReplicaManagers(brokerTopicStats1: BrokerTopicStats, brokerTopicStats2: BrokerTopicStats): (ReplicaManager, ReplicaManager) = { - val props0 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) - val props1 = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect) + val props0 = TestUtils.createBrokerConfig(0) + val props1 = TestUtils.createBrokerConfig(1) props0.put("log0.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) props1.put("log1.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) @@ -4126,7 +4099,12 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val props = new Properties() - props.put("zookeeper.connect", "test") + props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") + props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "0") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.setProperty("controller.quorum.bootstrap.servers", "localhost:9093") + props.setProperty("listeners", "CONTROLLER://:9093") + props.setProperty("advertised.listeners", "CONTROLLER://127.0.0.1:9093") props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, true.toString) props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteStorageManager].getName) props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) @@ -4234,7 
+4212,12 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val props = new Properties() - props.put("zookeeper.connect", "test") + props.setProperty("process.roles", "controller") + props.setProperty("node.id", "0") + props.setProperty("controller.listener.names", "CONTROLLER") + props.setProperty("controller.quorum.bootstrap.servers", "localhost:9093") + props.setProperty("listeners", "CONTROLLER://:9093") + props.setProperty("advertised.listeners", "CONTROLLER://127.0.0.1:9093") props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, true.toString) props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteStorageManager].getName) props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) @@ -4388,10 +4371,12 @@ class ReplicaManagerTest { replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing - assertTrue(brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0) + waitUntilTrue(() => brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0, + "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).failedBuildRemoteLogAuxStateRate.count) // Verify aggregate metrics - assertTrue(brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count > 0) + waitUntilTrue(() => brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count > 0, + "Should have all topic buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) } finally { replicaManager.shutdown(checkpointHW = false) @@ -4530,7 +4515,7 @@ class ReplicaManagerTest { when(mockLog.logStartOffset).thenReturn(endOffset).thenReturn(startOffset) when(mockLog.logEndOffset).thenReturn(endOffset) when(mockLog.localLogStartOffset()).thenReturn(endOffset - 10) - when(mockLog.leaderEpochCache).thenReturn(None) + when(mockLog.leaderEpochCache).thenReturn(mock(classOf[LeaderEpochFileCache])) when(mockLog.latestEpoch).thenReturn(Some(0)) val producerStateManager = mock(classOf[ProducerStateManager]) when(mockLog.producerStateManager).thenReturn(producerStateManager) @@ -4630,7 +4615,7 @@ class ReplicaManagerTest { def testReplicaNotAvailable(): Unit = { def createReplicaManager(): ReplicaManager = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect) + val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) new ReplicaManager( @@ -4640,7 +4625,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) { override def getPartitionOrException(topicPartition: TopicPartition): Partition = { @@ -5853,115 +5838,6 
@@ class ReplicaManagerTest { } } - @Test - def testFetcherAreNotRestartedIfLeaderEpochIsNotBumpedWithZkPath(): Unit = { - val localId = 0 - val topicPartition = new TopicPartition("foo", 0) - - val mockReplicaFetcherManager = mock(classOf[ReplicaFetcherManager]) - val replicaManager = setupReplicaManagerWithMockedPurgatories( - timer = new MockTimer(time), - brokerId = localId, - aliveBrokerIds = Seq(localId, localId + 1, localId + 2), - mockReplicaFetcherManager = Some(mockReplicaFetcherManager) - ) - - try { - when(mockReplicaFetcherManager.removeFetcherForPartitions( - Set(topicPartition)) - ).thenReturn(Map.empty[TopicPartition, PartitionFetchState]) - - // Make the local replica the follower. - var request = makeLeaderAndIsrRequest( - topicId = FOO_UUID, - topicPartition = topicPartition, - replicas = Seq(localId, localId + 1), - leaderAndIsr = new LeaderAndIsr( - localId + 1, - 0, - List(localId, localId + 1).map(Int.box).asJava, - LeaderRecoveryState.RECOVERED, - 0 - ) - ) - - replicaManager.becomeLeaderOrFollower(0, request, (_, _) => ()) - - // Check the state of that partition. - val HostedPartition.Online(followerPartition) = replicaManager.getPartition(topicPartition) - assertFalse(followerPartition.isLeader) - assertEquals(0, followerPartition.getLeaderEpoch) - assertEquals(0, followerPartition.getPartitionEpoch) - - // Verify that the partition was removed and added back. - verify(mockReplicaFetcherManager).removeFetcherForPartitions(Set(topicPartition)) - verify(mockReplicaFetcherManager).addFetcherForPartitions(Map(topicPartition -> InitialFetchState( - topicId = Some(FOO_UUID), - leader = new BrokerEndPoint(localId + 1, s"host${localId + 1}", localId + 1), - currentLeaderEpoch = 0, - initOffset = 0 - ))) - - reset(mockReplicaFetcherManager) - - // Apply changes that bumps the partition epoch. - request = makeLeaderAndIsrRequest( - topicId = FOO_UUID, - topicPartition = topicPartition, - replicas = Seq(localId, localId + 1, localId + 2), - leaderAndIsr = new LeaderAndIsr( - localId + 1, - 0, - List(localId, localId + 1).map(Int.box).asJava, - LeaderRecoveryState.RECOVERED, - 1 - ) - ) - - replicaManager.becomeLeaderOrFollower(0, request, (_, _) => ()) - - assertFalse(followerPartition.isLeader) - assertEquals(0, followerPartition.getLeaderEpoch) - // Partition updates is fenced based on the leader epoch on the ZK path. - assertEquals(0, followerPartition.getPartitionEpoch) - - // As the update is fenced based on the leader epoch, removeFetcherForPartitions and - // addFetcherForPartitions are not called at all. - reset(mockReplicaFetcherManager) - - // Apply changes that bumps the leader epoch. - request = makeLeaderAndIsrRequest( - topicId = FOO_UUID, - topicPartition = topicPartition, - replicas = Seq(localId, localId + 1, localId + 2), - leaderAndIsr = new LeaderAndIsr( - localId + 2, - 1, - List(localId, localId + 1, localId + 2).map(Int.box).asJava, - LeaderRecoveryState.RECOVERED, - 2 - ) - ) - - replicaManager.becomeLeaderOrFollower(0, request, (_, _) => ()) - - assertFalse(followerPartition.isLeader) - assertEquals(1, followerPartition.getLeaderEpoch) - assertEquals(2, followerPartition.getPartitionEpoch) - - // Verify that the partition was removed and added back. 
- verify(mockReplicaFetcherManager).removeFetcherForPartitions(Set(topicPartition)) - verify(mockReplicaFetcherManager).addFetcherForPartitions(Map(topicPartition -> InitialFetchState( - topicId = Some(FOO_UUID), - leader = new BrokerEndPoint(localId + 2, s"host${localId + 2}", localId + 2), - currentLeaderEpoch = 1, - initOffset = 0 - ))) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testFetcherAreNotRestartedIfLeaderEpochIsNotBumpedWithKRaftPath(enableRemoteStorage: Boolean): Unit = { @@ -6366,8 +6242,7 @@ class ReplicaManagerTest { private def imageFromTopics(topicsImage: TopicsImage): MetadataImage = { val featuresImageLatest = new FeaturesImage( Collections.emptyMap(), - MetadataVersion.latestProduction(), - ZkMigrationState.NONE) + MetadataVersion.latestProduction()) new MetadataImage( new MetadataProvenance(100L, 10, 1000L, true), featuresImageLatest, @@ -6547,7 +6422,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager)) @@ -6600,20 +6475,6 @@ class ReplicaManagerTest { val newFoo0 = new TopicIdPartition(Uuid.fromString("JRCmVxWxQamFs4S8NXYufg"), new TopicPartition("foo", 0)) val bar0 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 0)) - def setupReplicaManagerForKRaftMigrationTest(): ReplicaManager = { - setupReplicaManagerWithMockedPurgatories( - brokerId = 3, - timer = new MockTimer(time), - aliveBrokerIds = Seq(0, 1, 2), - propsModifier = props => { - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "1000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT") - }, - defaultTopicRemoteLogStorageEnable = false) - } - def verifyPartitionIsOnlineAndHasId( replicaManager: ReplicaManager, topicIdPartition: TopicIdPartition @@ -6638,116 +6499,6 @@ class ReplicaManagerTest { assertEquals(HostedPartition.None, partition, s"Expected ${topicIdPartition} to be offline, but it was: ${partition}") } - @Test - def testFullLairDuringKRaftMigration(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) - verifyPartitionIsOnlineAndHasId(replicaManager, foo0) - verifyPartitionIsOnlineAndHasId(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testFullLairDuringKRaftMigrationRemovesOld(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest1 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest1, (_, _) => ()) - val becomeLeaderRequest2 = 
LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(2, becomeLeaderRequest2, (_, _) => ()) - - verifyPartitionIsOffline(replicaManager, foo0) - verifyPartitionIsOffline(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testFullLairDuringKRaftMigrationWithTopicRecreations(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest1 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest1, (_, _) => ()) - val becomeLeaderRequest2 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(newFoo0, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(2, becomeLeaderRequest2, (_, _) => ()) - - verifyPartitionIsOnlineAndHasId(replicaManager, newFoo0) - verifyPartitionIsOffline(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testMetadataLogDirFailureInZkShouldNotHaltBroker(): Unit = { - // Given - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect, logDirCount = 2) - val config = KafkaConfig.fromProps(props) - val logDirFiles = config.logDirs.map(new File(_)) - val logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size) - val logManager = TestUtils.createLogManager(logDirFiles, defaultConfig = new LogConfig(new Properties()), time = time) - val mockZkClient = mock(classOf[KafkaZkClient]) - val replicaManager = new ReplicaManager( - metrics = metrics, - config = config, - time = time, - scheduler = time.scheduler, - logManager = logManager, - quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), - logDirFailureChannel = logDirFailureChannel, - alterPartitionManager = alterPartitionManager, - threadNamePrefix = Option(this.getClass.getName), - zkClient = Option(mockZkClient), - ) - try { - logManager.startup(Set.empty[String]) - replicaManager.startup() - - Exit.setHaltProcedure((_, _) => fail("Test failure, broker should not have halted")) - - // When - logDirFailureChannel.maybeAddOfflineLogDir(logDirFiles.head.getAbsolutePath, "test failure", null) - - // Then - TestUtils.retry(60000) { - verify(mockZkClient).propagateLogDirEvent(config.brokerId) - } - } finally { - Utils.tryAll(util.Arrays.asList[Callable[Void]]( - () => { - replicaManager.shutdown(checkpointHW = false) - null - }, - () => { - try { - logManager.shutdown() - } catch { - case _: Exception => - } - null - }, - () => { - quotaManager.shutdown() - null - } - )) - } - } - @Test def testRemoteReadQuotaExceeded(): Unit = { when(mockRemoteLogManager.getFetchThrottleTimeMs).thenReturn(quotaExceededThrottleTime) @@ -6796,6 +6547,104 @@ class ReplicaManagerTest { assertEquals(Double.NaN, maxMetric.metricValue) } + @Test + def testBecomeFollowerInvokeOnBecomingFollowerListener(): Unit = { + val localId = 1 + val topicPartition = new TopicPartition("foo", 0) + val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId) + // Attach listener to partition. 
+ val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) + replicaManager.createPartition(topicPartition).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val listener = new MockPartitionListener + assertTrue(replicaManager.maybeAddListener(topicPartition, listener)) + listener.verify() + + try { + // Make the local replica the leader + val leaderTopicsDelta = topicsCreateDelta(localId, true) + val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) + + replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) + + // Check the state of that partition and fetcher + val HostedPartition.Online(leaderPartition) = replicaManager.getPartition(topicPartition) + assertTrue(leaderPartition.isLeader) + assertEquals(0, leaderPartition.getLeaderEpoch) + // On becoming follower listener should not be invoked yet. + listener.verify() + + // Change the local replica to follower + val followerTopicsDelta = topicsChangeDelta(leaderMetadataImage.topics(), localId, false) + val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) + replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) + + // On becoming follower listener should be invoked. + listener.verify(expectedFollower = true) + + // Check the state of that partition. + val HostedPartition.Online(followerPartition) = replicaManager.getPartition(topicPartition) + assertFalse(followerPartition.isLeader) + assertEquals(1, followerPartition.getLeaderEpoch) + } finally { + replicaManager.shutdown(checkpointHW = false) + } + } + + @Test + def testDeleteRecordsInternalTopicDeleteDisallowed(): Unit = { + val localId = 1 + val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) + val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + + val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) + val directoryIds = rm.logManager.directoryIdsSet.toList + assertEquals(directoryIds.size, 2) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) + val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) + + def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { + assert(responseStatus.values.head.errorCode == Errors.INVALID_TOPIC_EXCEPTION.code) + } + + // default internal topics delete disabled + rm.deleteRecords( + timeout = 0L, + Map[TopicPartition, Long](topicPartition0.topicPartition() -> 10L), + responseCallback = callback + ) + } + + @Test + def testDeleteRecordsInternalTopicDeleteAllowed(): Unit = { + val localId = 1 + val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) + val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + + val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) + val directoryIds = rm.logManager.directoryIdsSet.toList + assertEquals(directoryIds.size, 2) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, 
directoryIds = directoryIds) + val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) + + def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { + assert(responseStatus.values.head.errorCode == Errors.NONE.code) + } + + // internal topics delete allowed + rm.deleteRecords( + timeout = 0L, + Map[TopicPartition, Long](topicPartition0.topicPartition() -> 0L), + responseCallback = callback, + allowInternalTopicDeletion = true + ) + } + private def readFromLogWithOffsetOutOfRange(tp: TopicPartition): Seq[(TopicIdPartition, LogReadResult)] = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true) try { diff --git a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala index b61225757037c..ae1e0a1f5e871 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala @@ -22,7 +22,6 @@ import java.util.{Collections, Properties} import java.util.Map.Entry import kafka.server.KafkaConfig.fromProps import kafka.utils.TestUtils._ -import kafka.utils.CoreUtils._ import org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry, NewTopic} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} @@ -34,7 +33,7 @@ import org.apache.kafka.common.message.BrokerRegistrationRequestData.{Listener, import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol.PLAINTEXT import org.apache.kafka.controller.ControllerRequestContextUtil -import org.apache.kafka.server.common.{Features, MetadataVersion} +import org.apache.kafka.server.common.{Feature, MetadataVersion} import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ @@ -89,7 +88,7 @@ class ReplicationQuotasTest extends QuorumTestHarness { * regular replication works as expected. */ - brokers = (100 to 105).map { id => createBroker(fromProps(createBrokerConfig(id, zkConnectOrNull))) } + brokers = (100 to 105).map { id => createBroker(fromProps(createBrokerConfig(id))) } //Given six partitions, led on nodes 0,1,2,3,4,5 but with followers on node 6,7 (not started yet) //And two extra partitions 6,7, which we don't intend on throttling. @@ -112,31 +111,21 @@ class ReplicationQuotasTest extends QuorumTestHarness { //replicate for each of the two follower brokers. 
if (!leaderThrottle) throttle = throttle * 3 - Using(createAdminClient(brokers, listenerName)) { admin => - if (isKRaftTest()) { - (106 to 107).foreach(registerBroker) - } + Using.resource(createAdminClient(brokers, listenerName)) { admin => + (106 to 107).foreach(registerBroker) admin.createTopics(List(new NewTopic(topic, assignment.map(a => a._1.asInstanceOf[Integer] -> a._2.map(_.asInstanceOf[Integer]).toList.asJava).asJava)).asJava).all().get() //Set the throttle limit on all 8 brokers, but only assign throttled replicas to the six leaders, or two followers (100 to 107).foreach { brokerId => - if (isKRaftTest()) { - val entry = new SimpleImmutableEntry[AlterConfigOp.OpType, String](SET, throttle.toString) - .asInstanceOf[Entry[AlterConfigOp.OpType, String]] - controllerServer.controller.incrementalAlterConfigs( - ControllerRequestContextUtil.ANONYMOUS_CONTEXT, - Map(new ConfigResource(BROKER, String.valueOf(brokerId)) -> Map( - QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG -> entry, - QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG -> entry).asJava).asJava, - false - ).get() - } else { - adminZkClient.changeBrokerConfig(Seq(brokerId), - propsWith( - (QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, throttle.toString), - (QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, throttle.toString) - )) - } + val entry = new SimpleImmutableEntry[AlterConfigOp.OpType, String](SET, throttle.toString) + .asInstanceOf[Entry[AlterConfigOp.OpType, String]] + controllerServer.controller.incrementalAlterConfigs( + ControllerRequestContextUtil.ANONYMOUS_CONTEXT, + Map(new ConfigResource(BROKER, String.valueOf(brokerId)) -> Map( + QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG -> entry, + QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG -> entry).asJava).asJava, + false + ).get() } //Either throttle the six leaders or the two followers val configEntry = if (leaderThrottle) @@ -213,7 +202,7 @@ class ReplicationQuotasTest extends QuorumTestHarness { */ //2 brokers with 1MB Segment Size & 1 partition - val config: Properties = createBrokerConfig(100, zkConnectOrNull) + val config: Properties = createBrokerConfig(100) config.put("log.segment.bytes", (1024 * 1024).toString) brokers = Seq(createBroker(fromProps(config))) @@ -223,10 +212,8 @@ class ReplicationQuotasTest extends QuorumTestHarness { val expectedDuration = 4 val throttle: Long = msg.length * msgCount / expectedDuration - Using(createAdminClient(brokers, listenerName)) { admin => - if (isKRaftTest()) { - registerBroker(101) - } + Using.resource(createAdminClient(brokers, listenerName)) { admin => + registerBroker(101) admin.createTopics( List(new NewTopic(topic, Collections.singletonMap(0, List(100, 101).map(_.asInstanceOf[Integer]).asJava))).asJava ).all().get() @@ -244,7 +231,7 @@ class ReplicationQuotasTest extends QuorumTestHarness { //Start the new broker (and hence start replicating) debug("Starting new broker") - brokers = brokers :+ createBroker(fromProps(createBrokerConfig(101, zkConnectOrNull))) + brokers = brokers :+ createBroker(fromProps(createBrokerConfig(101))) val start = System.currentTimeMillis() waitForOffsetsToMatch(msgCount, 0, 101) @@ -274,7 +261,7 @@ class ReplicationQuotasTest extends QuorumTestHarness { def createBrokers(brokerIds: Seq[Int]): Unit = { brokerIds.foreach { id => - brokers = brokers :+ createBroker(fromProps(createBrokerConfig(id, zkConnectOrNull))) + brokers = brokers :+ createBroker(fromProps(createBrokerConfig(id))) } } @@ -295,7 +282,7 @@ class ReplicationQuotasTest 
extends QuorumTestHarness { .setName(MetadataVersion.FEATURE_NAME) .setMinSupportedVersion(MetadataVersion.latestProduction().featureLevel()) .setMaxSupportedVersion(MetadataVersion.latestTesting().featureLevel())) - Features.PRODUCTION_FEATURES.forEach { feature => + Feature.PRODUCTION_FEATURES.forEach { feature => features.add(new BrokerRegistrationRequestData.Feature() .setName(feature.featureName()) .setMinSupportedVersion(feature.minimumProduction()) diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 1c3161f886053..85c4f8ba2a2f2 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -246,7 +246,7 @@ class RequestQuotaTest extends BaseRequestTest { private def requestBuilder(apiKey: ApiKeys): AbstractRequest.Builder[_ <: AbstractRequest] = { apiKey match { case ApiKeys.PRODUCE => - requests.ProduceRequest.forCurrentMagic(new ProduceRequestData() + requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() .setName(tp.topic()).setPartitionData(Collections.singletonList( @@ -479,7 +479,7 @@ class RequestQuotaTest extends BaseRequestTest { ) case ApiKeys.WRITE_TXN_MARKERS => - new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), List.empty.asJava) + new WriteTxnMarkersRequest.Builder(java.util.List.of[WriteTxnMarkersRequest.TxnMarkerEntry]) case ApiKeys.TXN_OFFSET_COMMIT => new TxnOffsetCommitRequest.Builder( @@ -487,7 +487,8 @@ class RequestQuotaTest extends BaseRequestTest { "test-txn-group", 2, 0, - Map.empty[TopicPartition, TxnOffsetCommitRequest.CommittedOffset].asJava + Map.empty[TopicPartition, TxnOffsetCommitRequest.CommittedOffset].asJava, + true ) case ApiKeys.DESCRIBE_ACLS => @@ -623,7 +624,7 @@ class RequestQuotaTest extends BaseRequestTest { new AlterUserScramCredentialsRequest.Builder(new AlterUserScramCredentialsRequestData()) case ApiKeys.VOTE => - new VoteRequest.Builder(VoteRequest.singletonRequest(tp, null, 1, 2, 0, 10)) + new VoteRequest.Builder(VoteRequest.singletonRequest(tp, null, 1, 2, 0, 10, true)) case ApiKeys.BEGIN_QUORUM_EPOCH => new BeginQuorumEpochRequest.Builder(BeginQuorumEpochRequest.singletonRequest(tp, null, 2, 5)) @@ -637,7 +638,7 @@ class RequestQuotaTest extends BaseRequestTest { Topic.CLUSTER_METADATA_TOPIC_PARTITION)) case ApiKeys.ALTER_PARTITION => - new AlterPartitionRequest.Builder(new AlterPartitionRequestData(), true) + new AlterPartitionRequest.Builder(new AlterPartitionRequestData()) case ApiKeys.UPDATE_FEATURES => new UpdateFeaturesRequest.Builder(new UpdateFeaturesRequestData()) @@ -683,10 +684,10 @@ class RequestQuotaTest extends BaseRequestTest { new AllocateProducerIdsRequest.Builder(new AllocateProducerIdsRequestData()) case ApiKeys.CONSUMER_GROUP_HEARTBEAT => - new ConsumerGroupHeartbeatRequest.Builder(new ConsumerGroupHeartbeatRequestData(), true) + new ConsumerGroupHeartbeatRequest.Builder(new ConsumerGroupHeartbeatRequestData()) case ApiKeys.CONSUMER_GROUP_DESCRIBE => - new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) + new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData()) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => new GetTelemetrySubscriptionsRequest.Builder(new GetTelemetrySubscriptionsRequestData()) @@ -738,6 +739,15 @@ class 
RequestQuotaTest extends BaseRequestTest { case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData(), true) + + case ApiKeys.STREAMS_GROUP_HEARTBEAT => + new StreamsGroupHeartbeatRequest.Builder(new StreamsGroupHeartbeatRequestData(), true) + + case ApiKeys.STREAMS_GROUP_DESCRIBE => + new StreamsGroupDescribeRequest.Builder(new StreamsGroupDescribeRequestData(), true) + + case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => + new DescribeShareGroupOffsetsRequest.Builder(new DescribeShareGroupOffsetsRequestData(), true) case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) diff --git a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala index 1e336abdc1839..70791a4cef05f 100644 --- a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala @@ -16,7 +16,7 @@ */ package kafka.server -import kafka.api.{KafkaSasl, SaslSetup} +import kafka.api.SaslSetup import kafka.security.JaasTestUtils import kafka.server.SaslApiVersionsRequestTest.{kafkaClientSaslMechanism, kafkaServerSaslMechanisms} import org.apache.kafka.common.test.api.{ClusterTemplate, Type, ClusterTestExtensions, ClusterConfig, ClusterInstance} @@ -51,7 +51,6 @@ object SaslApiVersionsRequestTest { // Configure control plane listener to make sure we have separate listeners for testing. val serverProperties = new java.util.HashMap[String, String]() - serverProperties.put(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG, controlPlaneListenerName) serverProperties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"$controlPlaneListenerName:$securityProtocol,$securityProtocol:$securityProtocol") serverProperties.put("listeners", s"$securityProtocol://localhost:0,$controlPlaneListenerName://localhost:0") serverProperties.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$securityProtocol://localhost:0,$controlPlaneListenerName://localhost:0") @@ -74,7 +73,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe @BeforeEach def setupSasl(): Unit = { sasl = new SaslSetup() {} - sasl.startSasl(sasl.jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), KafkaSasl, JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) + sasl.startSasl(sasl.jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME)) } @ClusterTemplate("saslApiVersionsRequestClusterConfig") diff --git a/core/src/test/scala/unit/kafka/server/ServerMetricsTest.scala b/core/src/test/scala/unit/kafka/server/ServerMetricsTest.scala index 9f788ae31231f..e15ec5814f8b3 100755 --- a/core/src/test/scala/unit/kafka/server/ServerMetricsTest.scala +++ b/core/src/test/scala/unit/kafka/server/ServerMetricsTest.scala @@ -29,7 +29,7 @@ class ServerMetricsTest { def testMetricsConfig(): Unit = { val recordingLevels = List(Sensor.RecordingLevel.DEBUG, Sensor.RecordingLevel.INFO) val illegalNames = List("IllegalName", "") - val props = TestUtils.createBrokerConfig(0, "localhost:2818") + val props = TestUtils.createBrokerConfig(0) for (recordingLevel <- recordingLevels) { props.put(MetricConfigs.METRIC_RECORDING_LEVEL_CONFIG, recordingLevel.name) diff --git a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala index ad0c004517b08..cb3ebc93785b7 
100644 --- a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala +++ b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala @@ -18,27 +18,17 @@ package kafka.server import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} -import java.io.{DataInputStream, File} -import java.net.ServerSocket -import java.util.Collections -import java.util.concurrent.{CancellationException, Executors, TimeUnit} -import kafka.cluster.Broker -import kafka.controller.{ControllerChannelManager, ControllerContext, StateChangeLogger} +import java.io.File +import java.util.concurrent.CancellationException import kafka.integration.KafkaServerTestHarness import kafka.log.LogManager -import kafka.zookeeper.ZooKeeperClientTimeoutException import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} -import org.apache.kafka.common.Uuid -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.ApiKeys -import org.apache.kafka.common.requests.LeaderAndIsrRequest import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.serialization.{IntegerDeserializer, IntegerSerializer, StringDeserializer, StringSerializer} -import org.apache.kafka.common.utils.{Exit, Time} +import org.apache.kafka.common.utils.Exit import org.apache.kafka.metadata.BrokerState -import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs, ZkConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.function.Executable @@ -71,7 +61,7 @@ class ServerShutdownTest extends KafkaServerTestHarness { propsToChangeUponRestart.put(ServerLogConfigs.LOG_DIR_CONFIG, originals.get(ServerLogConfigs.LOG_DIR_CONFIG)) } } - priorConfig = Some(KafkaConfig.fromProps(TestUtils.createBrokerConfigs(1, zkConnectOrNull).head, propsToChangeUponRestart)) + priorConfig = Some(KafkaConfig.fromProps(TestUtils.createBrokerConfigs(1).head, propsToChangeUponRestart)) Seq(priorConfig.get) } @@ -147,16 +137,10 @@ class ServerShutdownTest extends KafkaServerTestHarness { @ParameterizedTest @ValueSource(strings = Array("kraft")) def testCleanShutdownAfterFailedStartup(quorum: String): Unit = { - if (isKRaftTest()) { - propsToChangeUponRestart.setProperty(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG, "1000") - shutdownBroker() - shutdownKRaftController() - verifyCleanShutdownAfterFailedStartup[CancellationException] - } else { - propsToChangeUponRestart.setProperty(ZkConfigs.ZK_CONNECTION_TIMEOUT_MS_CONFIG, "50") - propsToChangeUponRestart.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "some.invalid.hostname.foo.bar.local:65535") - verifyCleanShutdownAfterFailedStartup[ZooKeeperClientTimeoutException] - } + propsToChangeUponRestart.setProperty(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG, "1000") + shutdownBroker() + shutdownKRaftController() + verifyCleanShutdownAfterFailedStartup[CancellationException] } @ParameterizedTest @@ -190,15 +174,6 @@ class ServerShutdownTest extends KafkaServerTestHarness { } } - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testCleanShutdownWithZkUnavailable(quorum: String): Unit = { - shutdownZooKeeper() - shutdownBroker() - CoreUtils.delete(broker.config.logDirs) - verifyNonDaemonThreadsStatus() - } - @ParameterizedTest @ValueSource(strings = Array("kraft")) def 
testShutdownWithKRaftControllerUnavailable(quorum: String): Unit = { @@ -218,7 +193,7 @@ class ServerShutdownTest extends KafkaServerTestHarness { // goes wrong so that awaitShutdown doesn't hang case e: Exception => assertCause(exceptionClassTag.runtimeClass, e) - assertEquals(if (isKRaftTest()) BrokerState.SHUTTING_DOWN else BrokerState.NOT_RUNNING, brokers.head.brokerState) + assertEquals(BrokerState.SHUTTING_DOWN, brokers.head.brokerState) } finally { shutdownBroker() } @@ -252,69 +227,6 @@ class ServerShutdownTest extends KafkaServerTestHarness { brokers.head.shutdown() } - // Verify that if controller is in the midst of processing a request, shutdown completes - // without waiting for request timeout. Since this involves LeaderAndIsr request, it is - // ZK-only for now. - @ParameterizedTest - @ValueSource(strings = Array("zk")) - def testControllerShutdownDuringSend(quorum: String): Unit = { - val securityProtocol = SecurityProtocol.PLAINTEXT - val listenerName = ListenerName.forSecurityProtocol(securityProtocol) - - val controllerId = 2 - val metrics = new Metrics - val executor = Executors.newSingleThreadExecutor - var serverSocket: ServerSocket = null - var controllerChannelManager: ControllerChannelManager = null - - try { - // Set up a server to accept a connection and receive one byte from the first request. No response is sent. - serverSocket = new ServerSocket(0) - val receiveFuture = executor.submit(new Runnable { - override def run(): Unit = { - val socket = serverSocket.accept() - val inputStream = new DataInputStream(socket.getInputStream) - inputStream.readByte() - inputStream.close() - } - }) - - // Start a ControllerChannelManager - val brokerAndEpochs = Map((new Broker(1, "localhost", serverSocket.getLocalPort, listenerName, securityProtocol), 0L)) - val controllerConfig = KafkaConfig.fromProps(TestUtils.createBrokerConfig(controllerId, zkConnect)) - val controllerContext = new ControllerContext - controllerContext.setLiveBrokers(brokerAndEpochs) - controllerChannelManager = new ControllerChannelManager( - () => controllerContext.epoch, - controllerConfig, - Time.SYSTEM, - metrics, - new StateChangeLogger(controllerId, inControllerContext = true, None)) - controllerChannelManager.startup(controllerContext.liveOrShuttingDownBrokers) - - // Initiate a sendRequest and wait until connection is established and one byte is received by the peer - val requestBuilder = new LeaderAndIsrRequest.Builder(ApiKeys.LEADER_AND_ISR.latestVersion, - controllerId, 1, 0L, Seq.empty.asJava, Collections.singletonMap(topic, Uuid.randomUuid()), - brokerAndEpochs.keys.map(_.node(listenerName)).toSet.asJava) - controllerChannelManager.sendRequest(1, requestBuilder) - receiveFuture.get(10, TimeUnit.SECONDS) - - // Shutdown controller. 
Request timeout is 30s, verify that shutdown completed well before that - val shutdownFuture = executor.submit(new Runnable { - override def run(): Unit = controllerChannelManager.shutdown() - }) - shutdownFuture.get(10, TimeUnit.SECONDS) - - } finally { - if (serverSocket != null) - serverSocket.close() - if (controllerChannelManager != null) - controllerChannelManager.shutdown() - executor.shutdownNow() - metrics.close() - } - } - private def config: KafkaConfig = configs.head private def broker: KafkaBroker = brokers.head private def shutdownBroker(): Unit = killBroker(0) // idempotent diff --git a/core/src/test/scala/unit/kafka/server/ServerTest.scala b/core/src/test/scala/unit/kafka/server/ServerTest.scala index 62345c446e27d..4b2b900b3757d 100644 --- a/core/src/test/scala/unit/kafka/server/ServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ServerTest.scala @@ -20,7 +20,7 @@ import java.util.Properties import org.apache.kafka.common.Uuid import org.apache.kafka.common.metrics.MetricsContext import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ZkConfigs} +import org.apache.kafka.server.config.KRaftConfigs import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -47,23 +47,4 @@ class ServerTest { Server.NodeIdLabel -> nodeId.toString ), context.contextLabels.asScala) } - - @Test - def testCreateZkKafkaMetricsContext(): Unit = { - val brokerId = 0 - val clusterId = Uuid.randomUuid().toString - - val props = new Properties() - props.put(ServerConfigs.BROKER_ID_CONFIG, brokerId.toString) - props.put(ZkConfigs.ZK_CONNECT_CONFIG, "127.0.0.1:0") - val config = KafkaConfig.fromProps(props) - - val context = Server.createKafkaMetricsContext(config, clusterId) - assertEquals(Map( - MetricsContext.NAMESPACE -> Server.MetricsPrefix, - Server.ClusterIdLabel -> clusterId, - Server.BrokerIdLabel -> brokerId.toString - ), context.contextLabels.asScala) - } - } diff --git a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala index 8097021e4cb52..f60fc12f47b4b 100644 --- a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala @@ -16,6 +16,7 @@ */ package kafka.server +import kafka.utils.TestUtils import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterInstance, ClusterTest, ClusterTestDefaults, ClusterTestExtensions, ClusterTests, Type} import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords import org.apache.kafka.common.message.{ShareAcknowledgeRequestData, ShareAcknowledgeResponseData, ShareFetchRequestData, ShareFetchResponseData} @@ -28,7 +29,6 @@ import org.junit.jupiter.api.extension.ExtendWith import java.util import java.util.Collections -import scala.collection.convert.ImplicitConversions.`list asScalaBuffer` import scala.jdk.CollectionConverters._ @Timeout(1200) @@ -253,13 +253,26 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) - val shareFetchResponse = 
connectAndReceive[ShareFetchResponse](shareFetchRequest) - val shareFetchResponseData = shareFetchResponse.data() - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(3, shareFetchResponseData.responses().get(0).partitions().size()) + // For the multi partition fetch request, the response may not be available in the first attempt + // as the share partitions might not be initialized yet. So, we retry until we get the response. + var responses = Seq[ShareFetchResponseData.PartitionData]() + TestUtils.waitUntilTrue(() => { + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponseData = shareFetchResponse.data() + assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) + assertEquals(1, shareFetchResponseData.responses().size()) + val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() + if (partitionsCount > 0) { + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + shareFetchResponseData.responses().get(0).partitions().asScala.foreach(partitionData => { + if (!partitionData.acquiredRecords().isEmpty) { + responses = responses :+ partitionData + } + }) + } + responses.size == 3 + }, "Share fetch request failed", 5000) val expectedPartitionData1 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) @@ -279,7 +292,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setAcknowledgeErrorCode(Errors.NONE.code()) .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - shareFetchResponseData.responses().get(0).partitions().foreach(partitionData => { + responses.foreach(partitionData => { partitionData.partitionIndex() match { case 0 => compareFetchResponsePartitions(expectedPartitionData1, partitionData) case 1 => compareFetchResponsePartitions(expectedPartitionData2, partitionData) @@ -821,38 +834,57 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setAcknowledgeErrorCode(Errors.NONE.code()) .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + val fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above produceData(topicIdPartition, 10) - // Send a third Share Fetch request with piggybacked acknowledgements - shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) - acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records - shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) - - shareFetchResponseData = shareFetchResponse.data() - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(1, 
shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) - expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) .setAcquiredRecords(expectedAcquiredRecords(List(0L, 10L).asJava, List(9L, 19L).asJava, List(2, 1).asJava)) + + val acquiredRecords : util.List[AcquiredRecords] = new util.ArrayList[AcquiredRecords]() + var releaseAcknowledgementSent = false + + TestUtils.waitUntilTrue(() => { + shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + if (releaseAcknowledgementSent) { + // For fourth share fetch request onwards + acknowledgementsMapForFetch = Map.empty + } else { + // Send a third Share Fetch request with piggybacked acknowledgements + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records + releaseAcknowledgementSent = true + } + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + + shareFetchResponseData = shareFetchResponse.data() + assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) + assertEquals(1, shareFetchResponseData.responses().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + val responseSize = shareFetchResponseData.responses().get(0).partitions().size() + if (responseSize > 0) { + acquiredRecords.addAll(shareFetchResponseData.responses().get(0).partitions().get(0).acquiredRecords()) + } + // There should be 2 acquired record batches finally - + // 1. batch containing 0-9 offsets which were initially acknowledged as RELEASED. + // 2. batch containing 10-19 offsets which were produced in the second produceData function call. + acquiredRecords.size() == 2 + + }, "Share fetch request failed", 5000) + // All the records from offsets 0 to 19 will be fetched. 
Records from 0 to 9 will have delivery count as 2 because // they are re delivered, and records from 10 to 19 will have delivery count as 1 because they are newly acquired - - fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) - compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) + assertTrue(expectedFetchPartitionData.acquiredRecords().containsAll(acquiredRecords) && + acquiredRecords.containsAll(expectedFetchPartitionData.acquiredRecords())) } @ClusterTests( @@ -1277,7 +1309,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), ) ) - def testShareFetchBrokerRespectsPartitionsSizeLimit(): Unit = { + def testShareFetchBrokerDoesNotRespectPartitionsSizeLimit(): Unit = { val groupId: String = "group" val memberId = Uuid.randomUuid() @@ -1317,10 +1349,10 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(11), Collections.singletonList(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(12), Collections.singletonList(1))) // The first 10 records will be consumed as it is. For the last 3 records, each of size MAX_PARTITION_BYTES/3, - only 2 of then will be consumed (offsets 10 and 11) because the inclusion of the third last record will exceed - the max partition bytes limit + all 3 of them will be consumed (offsets 10, 11 and 12) even though the inclusion of the third record will exceed + the max partition bytes limit, because only the request-level maxBytes is treated as the hard limit.
val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) @@ -1379,15 +1411,15 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // mocking the behaviour of multiple share consumers from the same share group val metadata1: ShareRequestMetadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1) + val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2) + val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty - val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3) + val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) @@ -2230,13 +2262,26 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) - var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) - var shareFetchResponseData = shareFetchResponse.data() - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) - assertEquals(2, shareFetchResponseData.responses().get(0).partitions().size()) + // For the multi partition fetch request, the response may not be available in the first attempt + // as the share partitions might not be initialized yet. So, we retry until we get the response. 
+ var responses = Seq[ShareFetchResponseData.PartitionData]() + TestUtils.waitUntilTrue(() => { + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponseData = shareFetchResponse.data() + assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) + assertEquals(1, shareFetchResponseData.responses().size()) + val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() + if (partitionsCount > 0) { + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + shareFetchResponseData.responses().get(0).partitions().asScala.foreach(partitionData => { + if (!partitionData.acquiredRecords().isEmpty) { + responses = responses :+ partitionData + } + }) + } + responses.size == 2 + }, "Share fetch request failed", 5000) // Producing 10 more records to the topic partitions created above produceData(topicIdPartition1, 10) @@ -2247,9 +2292,9 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) val forget: Seq[TopicIdPartition] = Seq(topicIdPartition1) shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, Seq.empty, forget, acknowledgementsMap) - shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) - shareFetchResponseData = shareFetchResponse.data() + val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) assertEquals(1, shareFetchResponseData.responses().size()) assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) @@ -2265,15 +2310,30 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo compareFetchResponsePartitions(expectedPartitionData, partitionData) } + // For initial fetch request, the response may not be available in the first attempt when the share + // partition is not initialized yet. Hence, wait for response from all partitions before proceeding. 
private def sendFirstShareFetchRequest(memberId: Uuid, groupId: String, topicIdPartitions: Seq[TopicIdPartition]): Unit = { - val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, topicIdPartitions, Seq.empty, Map.empty) - connectAndReceive[ShareFetchResponse](shareFetchRequest) + val partitions: util.Set[Integer] = new util.HashSet() + TestUtils.waitUntilTrue(() => { + val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, topicIdPartitions, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + val shareFetchResponseData = shareFetchResponse.data() + + assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) + shareFetchResponseData.responses().asScala.foreach(response => { + if (!response.partitions().isEmpty) { + response.partitions().forEach(partitionData => partitions.add(partitionData.partitionIndex)) + } + }) + + partitions.size() == topicIdPartitions.size + }, "Share fetch request failed", 5000) } private def expectedAcquiredRecords(firstOffsets: util.List[Long], lastOffsets: util.List[Long], deliveryCounts: util.List[Int]): util.List[AcquiredRecords] = { val acquiredRecordsList: util.List[AcquiredRecords] = new util.ArrayList() - for (i <- firstOffsets.indices) { + for (i <- firstOffsets.asScala.indices) { acquiredRecordsList.add(new AcquiredRecords() .setFirstOffset(firstOffsets.get(i)) .setLastOffset(lastOffsets.get(i)) @@ -2305,8 +2365,9 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], maxWaitMs: Int = MAX_WAIT_MS, minBytes: Int = 0, - maxBytes: Int = Int.MaxValue): ShareFetchRequest = { - ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxPartitionBytes, send.asJava, forget.asJava, acknowledgementsMap.asJava) + maxBytes: Int = Int.MaxValue, + batchSize: Int = 500): ShareFetchRequest = { + ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxPartitionBytes, batchSize, send.asJava, forget.asJava, acknowledgementsMap.asJava) .build() } diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala index 507f83b0de8cb..a6a1129d084bf 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala @@ -20,7 +20,7 @@ import org.apache.kafka.common.test.api.ClusterInstance import org.apache.kafka.common.test.api._ import org.apache.kafka.common.test.api.ClusterTestExtensions import kafka.utils.TestUtils -import org.apache.kafka.common.ShareGroupState +import org.apache.kafka.common.GroupState import org.apache.kafka.common.message.ShareGroupDescribeResponseData.DescribedGroup import org.apache.kafka.common.message.{ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} @@ -86,55 +86,59 @@ class ShareGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoord // in this test because it does not use FindCoordinator API. 
createOffsetsTopic() - val admin = cluster.createAdminClient() - TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) + val admin = cluster.admin() + try { + TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) - val clientId = "client-id" - val clientHost = "/127.0.0.1" - val authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) + val clientId = "client-id" + val clientHost = "/127.0.0.1" + val authorizedOperationsInt = Utils.to32BitField( + AclEntry.supportedOperations(ResourceType.GROUP).asScala + .map(_.code.asInstanceOf[JByte]).asJava) - // Add first group with one member. - var grp1Member1Response: ShareGroupHeartbeatResponseData = null - TestUtils.waitUntilTrue(() => { - grp1Member1Response = shareGroupHeartbeat( - groupId = "grp-1", - subscribedTopicNames = List("bar"), - ) - grp1Member1Response.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $grp1Member1Response.") + // Add first group with one member. + var grp1Member1Response: ShareGroupHeartbeatResponseData = null + TestUtils.waitUntilTrue(() => { + grp1Member1Response = shareGroupHeartbeat( + groupId = "grp-1", + subscribedTopicNames = List("bar"), + ) + grp1Member1Response.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $grp1Member1Response.") - for (version <- ApiKeys.SHARE_GROUP_DESCRIBE.oldestVersion() to ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { - val expected = List( - new DescribedGroup() - .setGroupId("grp-1") - .setGroupState(ShareGroupState.STABLE.toString) - .setGroupEpoch(1) - .setAssignmentEpoch(1) - .setAssignorName("simple") - .setAuthorizedOperations(authorizedOperationsInt) - .setMembers(List( - new ShareGroupDescribeResponseData.Member() - .setMemberId(grp1Member1Response.memberId) - .setMemberEpoch(grp1Member1Response.memberEpoch) - .setClientId(clientId) - .setClientHost(clientHost) - .setSubscribedTopicNames(List("bar").asJava) - ).asJava), - ) + for (version <- ApiKeys.SHARE_GROUP_DESCRIBE.oldestVersion() to ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) { + val expected = List( + new DescribedGroup() + .setGroupId("grp-1") + .setGroupState(GroupState.STABLE.toString) + .setGroupEpoch(1) + .setAssignmentEpoch(1) + .setAssignorName("simple") + .setAuthorizedOperations(authorizedOperationsInt) + .setMembers(List( + new ShareGroupDescribeResponseData.Member() + .setMemberId(grp1Member1Response.memberId) + .setMemberEpoch(grp1Member1Response.memberEpoch) + .setClientId(clientId) + .setClientHost(clientHost) + .setSubscribedTopicNames(List("bar").asJava) + ).asJava), + ) - val actual = shareGroupDescribe( - groupIds = List("grp-1"), - includeAuthorizedOperations = true, - version = version.toShort, - ) + val actual = shareGroupDescribe( + groupIds = List("grp-1"), + includeAuthorizedOperations = true, + version = version.toShort, + ) - assertEquals(expected, actual) + assertEquals(expected, actual) + } + } finally { + admin.close() } } } diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala index 139209661eba2..71addf733b04a 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala 
@@ -17,7 +17,6 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterInstance, ClusterTest, ClusterTestDefaults, ClusterTestExtensions, Type} -import org.apache.kafka.common.test.api.RaftClusterInvocationContext.RaftClusterInstance import kafka.utils.TestUtils import kafka.utils.TestUtils.waitForAllPartitionsMetadata import org.apache.kafka.clients.admin.{Admin, NewPartitions} @@ -61,88 +60,91 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) def testShareGroupHeartbeatIsAccessibleWhenShareGroupIsEnabled(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. - assertNotNull(shareGroupHeartbeatResponse.data.memberId) - assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - // Prepare the next heartbeat. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), - true - ).build() + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. 
+ var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertNotNull(shareGroupHeartbeatResponse.data.memberId) + assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // Prepare the next heartbeat. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(shareGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), + true + ).build() + + // This is the expected assignment. here + val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - // This is the expected assignment. here - val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + // Heartbeats until the partitions are assigned. + shareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) + + // Leave the group. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(shareGroupHeartbeatResponse.data.memberId) + .setMemberEpoch(-1), + true + ).build() - // Heartbeats until the partitions are assigned. - shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) - - // Leave the group. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(-1), - true - ).build() - - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - // Verify the response. - assertEquals(-1, shareGroupHeartbeatResponse.data.memberEpoch) + // Verify the response. 
+ assertEquals(-1, shareGroupHeartbeatResponse.data.memberEpoch) + } finally { + admin.close() + } } @ClusterTest( @@ -154,139 +156,142 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) def testShareGroupHeartbeatWithMultipleMembers(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response for member 1. - val memberId1 = shareGroupHeartbeatResponse.data.memberId - assertNotNull(memberId1) - assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - - // The second member request to join the group. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - // Send the second member request until receiving a successful response. - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response for member 2. - val memberId2 = shareGroupHeartbeatResponse.data.memberId - assertNotNull(memberId2) - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - // Verify the member id is different. - assertNotEquals(memberId1, memberId2) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - // This is the expected assignment. - val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - - // Prepare the next heartbeat for member 1. 
- shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId1) - .setMemberEpoch(1), - true - ).build() - - // Heartbeats until the partitions are assigned for member 1. - shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) - - // Prepare the next heartbeat for member 2. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId2) - .setMemberEpoch(2), - true - ).build() - - // Heartbeats until the partitions are assigned for member 2. - shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) - - // Verify the assignments are not changed for member 1. - // Prepare another heartbeat for member 1 with latest received epoch 3 for member 1. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId1) - .setMemberEpoch(3), - true - ).build() - - // Heartbeats until the response for no change of assignment occurs for member 1 with same epoch. - shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == null - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Verify the response for member 1. 
+ val memberId1 = shareGroupHeartbeatResponse.data.memberId + assertNotNull(memberId1) + assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + + // The second member request to join the group. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + + // Send the second member request until receiving a successful response. + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Verify the response for member 2. + val memberId2 = shareGroupHeartbeatResponse.data.memberId + assertNotNull(memberId2) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + // Verify the member id is different. + assertNotEquals(memberId1, memberId2) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 3 + ) + + // This is the expected assignment. + val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + // Prepare the next heartbeat for member 1. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId1) + .setMemberEpoch(1), + true + ).build() + + // Heartbeats until the partitions are assigned for member 1. + shareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + + // Prepare the next heartbeat for member 2. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId2) + .setMemberEpoch(2), + true + ).build() + + // Heartbeats until the partitions are assigned for member 2. + shareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + + // Verify the assignments are not changed for member 1. + // Prepare another heartbeat for member 1 with latest received epoch 3 for member 1. 
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId1) + .setMemberEpoch(3), + true + ).build() + + // Heartbeats until the response for no change of assignment occurs for member 1 with same epoch. + shareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == null + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + } finally { + admin.close() + } } @ClusterTest( @@ -298,108 +303,111 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) def testMemberLeavingAndRejoining(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response for member. - val memberId = shareGroupHeartbeatResponse.data.memberId - assertNotNull(memberId) - assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val topicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 2 - ) - - // This is the expected assignment. - val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(topicId) - .setPartitions(List[Integer](0, 1).asJava)).asJava) - - // Prepare the next heartbeat for member. 
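
Every test in this file gets the same treatment: the RaftClusterInstance cast and createAdminClient() call are replaced with cluster.admin(), and the test body is wrapped in try/finally so the Admin client is always closed. If that pattern were factored out instead of inlined, a hypothetical helper (not part of this patch) could look like:

import org.apache.kafka.clients.admin.Admin

// Hypothetical wrapper; the patch inlines try { ... } finally { admin.close() } in each test instead.
private def withAdmin[T](body: Admin => T): T = {
  val admin = cluster.admin()
  try body(admin)
  finally admin.close()
}
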
- shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(1), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - - // Member leaves the group. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberEpoch(-1) - .setMemberId(memberId), - true - ).build() + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Verify the response for member. + val memberId = shareGroupHeartbeatResponse.data.memberId + assertNotNull(memberId) + assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val topicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 2 + ) + + // This is the expected assignment. + val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1).asJava)).asJava) + + // Prepare the next heartbeat for member. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(1), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + + // Member leaves the group. 
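
The topic ids that the expected assignments are keyed on come from TestUtils.createTopicWithAdminRaw, whose body is not shown in this diff. In essence it creates the topic through the Admin client and returns the broker-assigned Uuid; a hedged sketch of the equivalent calls (replication factor 1, matching these single-broker tests, is an assumption here):

import java.util.Collections
import org.apache.kafka.clients.admin.{Admin, NewTopic}
import org.apache.kafka.common.Uuid

// Sketch only: create a topic and return the Uuid that ShareGroupHeartbeat assignments reference.
def createTopicReturningId(admin: Admin, topic: String, numPartitions: Int): Uuid = {
  val result = admin.createTopics(Collections.singleton(new NewTopic(topic, numPartitions, 1.toShort)))
  result.all().get()
  result.topicId(topic).get()
}
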
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberEpoch(-1) + .setMemberId(memberId), + true + ).build() + + // Send the member request until receiving a successful response. + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not leave the group successfully. Last response $shareGroupHeartbeatResponse.") + + // Verify the response for member. + assertEquals(-1, shareGroupHeartbeatResponse.data.memberEpoch) + + // Member sends request to rejoin the group. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberEpoch(0) + .setMemberId(memberId) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() - // Send the member request until receiving a successful response. - TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not leave the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response for member. - assertEquals(-1, shareGroupHeartbeatResponse.data.memberEpoch) - - // Member sends request to rejoin the group. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberEpoch(0) - .setMemberId(memberId) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - // Verify the response for member 1. - assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(memberId, shareGroupHeartbeatResponse.data.memberId) - // Partition assignment remains intact on rejoining. - assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) + // Verify the response for member 1. + assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(memberId, shareGroupHeartbeatResponse.data.memberId) + // Partition assignment remains intact on rejoining. + assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) + } finally { + admin.close() + } } @ClusterTest( @@ -411,181 +419,184 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) def testPartitionAssignmentWithChangingTopics(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - // Heartbeat request to join the group. Note that the member subscribes - // to a nonexistent topic. 
- var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar", "baz").asJava), - true - ).build() - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - // Verify the response for member. - val memberId = shareGroupHeartbeatResponse.data.memberId - assertNotNull(memberId) - assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - // Create the topic foo. - val fooTopicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 2 - ) - // Create the topic bar. - val barTopicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "bar", - numPartitions = 3 - ) - - var expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(fooTopicId) - .setPartitions(List[Integer](0, 1).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(barTopicId) - .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) - // Prepare the next heartbeat for member. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(1), - true - ).build() - - cluster.waitForTopic("foo", 2) - cluster.waitForTopic("bar", 3) - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not get partitions for topic foo and bar assigned. Last response $shareGroupHeartbeatResponse.") - // Verify the response. - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - // Create the topic baz. - val bazTopicId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "baz", - numPartitions = 4 - ) - - expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(fooTopicId) - .setPartitions(List[Integer](0, 1).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(barTopicId) - .setPartitions(List[Integer](0, 1, 2).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(bazTopicId) - .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) - // Prepare the next heartbeat for member. 
- shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(2), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not get partitions for topic baz assigned. Last response $shareGroupHeartbeatResponse.") - // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) - // Increasing the partitions of topic bar which is already being consumed in the share group. - increasePartitions(admin, "bar", 6, Seq.empty) - - expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(fooTopicId) - .setPartitions(List[Integer](0, 1).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(barTopicId) - .setPartitions(List[Integer](0, 1, 2, 3, 4, 5).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(bazTopicId) - .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) - // Prepare the next heartbeat for member. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(3), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not update partitions assignment for topic bar. Last response $shareGroupHeartbeatResponse.") - // Verify the response. - assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) - // Delete the topic foo. - TestUtils.deleteTopicWithAdmin( - admin = admin, - topic = "foo", - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(barTopicId) - .setPartitions(List[Integer](0, 1, 2, 3, 4, 5).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(bazTopicId) - .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) - // Prepare the next heartbeat for member. 
- shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(4), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not update partitions assignment for topic foo. Last response $shareGroupHeartbeatResponse.") - // Verify the response. - assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + // Heartbeat request to join the group. Note that the member subscribes + // to a nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo", "bar", "baz").asJava), + true + ).build() + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + // Verify the response for member. + val memberId = shareGroupHeartbeatResponse.data.memberId + assertNotNull(memberId) + assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + // Create the topic foo. + val fooTopicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 2 + ) + // Create the topic bar. + val barTopicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "bar", + numPartitions = 3 + ) + + var expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooTopicId) + .setPartitions(List[Integer](0, 1).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(barTopicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + // Prepare the next heartbeat for member. 
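
The waits in the rest of this test compare assignments with two containsAll checks rather than a single equals, because the order of TopicPartitions entries in the response is not fixed once several topics are involved. A hypothetical helper expressing the same order-insensitive check (not part of the patch):

// Set-style equality over the assignment's topic entries, ignoring order.
private def sameAssignment(
  expected: ShareGroupHeartbeatResponseData.Assignment,
  actual: ShareGroupHeartbeatResponseData.Assignment
): Boolean = {
  actual != null &&
    expected.topicPartitions.containsAll(actual.topicPartitions) &&
    actual.topicPartitions.containsAll(expected.topicPartitions)
}
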
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(1), + true + ).build() + + cluster.waitForTopic("foo", 2) + cluster.waitForTopic("bar", 3) + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not get partitions for topic foo and bar assigned. Last response $shareGroupHeartbeatResponse.") + // Verify the response. + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + // Create the topic baz. + val bazTopicId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "baz", + numPartitions = 4 + ) + + expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooTopicId) + .setPartitions(List[Integer](0, 1).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(barTopicId) + .setPartitions(List[Integer](0, 1, 2).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(bazTopicId) + .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) + // Prepare the next heartbeat for member. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(2), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not get partitions for topic baz assigned. Last response $shareGroupHeartbeatResponse.") + // Verify the response. + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + // Increasing the partitions of topic bar which is already being consumed in the share group. + increasePartitions(admin, "bar", 6, Seq.empty) + + expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooTopicId) + .setPartitions(List[Integer](0, 1).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(barTopicId) + .setPartitions(List[Integer](0, 1, 2, 3, 4, 5).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(bazTopicId) + .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) + // Prepare the next heartbeat for member. 
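
increasePartitions, called just above to grow bar from 3 to 6 partitions, is another helper whose body is outside this hunk. Assuming its empty replica-assignment argument can be ignored, it boils down to the Admin createPartitions call; a sketch under that assumption:

import org.apache.kafka.clients.admin.{Admin, NewPartitions}
import scala.jdk.CollectionConverters._

// Sketch: grow an existing topic to totalCount partitions and wait for the operation to complete.
def increaseTo(admin: Admin, topic: String, totalCount: Int): Unit = {
  admin.createPartitions(Map(topic -> NewPartitions.increaseTo(totalCount)).asJava).all().get()
}
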
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(3), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not update partitions assignment for topic bar. Last response $shareGroupHeartbeatResponse.") + // Verify the response. + assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) + // Delete the topic foo. + TestUtils.deleteTopicWithAdmin( + admin = admin, + topic = "foo", + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(barTopicId) + .setPartitions(List[Integer](0, 1, 2, 3, 4, 5).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(bazTopicId) + .setPartitions(List[Integer](0, 1, 2, 3).asJava)).asJava) + // Prepare the next heartbeat for member. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(4), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not update partitions assignment for topic foo. Last response $shareGroupHeartbeatResponse.") + // Verify the response. + assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) + } finally { + admin.close() + } } @ClusterTest( @@ -601,165 +612,168 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "group.share.min.session.timeout.ms", value = "501") )) def testMemberJoiningAndExpiring(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - - // Send the request until receiving a successful response. 
There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Verify the response for member. - val memberId = shareGroupHeartbeatResponse.data.memberId - var memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch - assertNotNull(memberId) - assertEquals(1, memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - - // Create the topic. - val fooId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 2 - ) - - // This is the expected assignment. - var expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(fooId) - .setPartitions(List[Integer](0, 1).asJava)).asJava) - - // Prepare the next heartbeat for member. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get foo partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response, the epoch should have been bumped. - assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) - memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch - - // Prepare the next heartbeat with a new subscribed topic. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setSubscribedTopicNames(List("foo", "bar").asJava), - true - ).build() - - val barId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "bar" - ) - - expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List( - new ShareGroupHeartbeatResponseData.TopicPartitions() + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. + var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. 
Last response $shareGroupHeartbeatResponse.") + + // Verify the response for member. + val memberId = shareGroupHeartbeatResponse.data.memberId + var memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch + assertNotNull(memberId) + assertEquals(1, memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + + // Create the topic. + val fooId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 2 + ) + + // This is the expected assignment. + var expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooId) - .setPartitions(List[Integer](0, 1).asJava), - new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(barId) - .setPartitions(List[Integer](0).asJava)).asJava) - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not get bar partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response, the epoch should have been bumped. - assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) - memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch - - // Prepare the next heartbeat which is empty to verify no assignment changes. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not get empty heartbeat response. Last response $shareGroupHeartbeatResponse.") - - // Verify the response, the epoch should be same. - assertEquals(memberEpoch, shareGroupHeartbeatResponse.data.memberEpoch) - - // Blocking the thread for 1 sec so that the session times out and the member needs to rejoin. - Thread.sleep(1000) - - // Prepare the next heartbeat which is empty to verify no assignment changes. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.UNKNOWN_MEMBER_ID.code - }, msg = s"Member should have been expired because of the timeout . 
Last response $shareGroupHeartbeatResponse.") - - // Member sends a request again to join the share group - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar").asJava), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment != null && - expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) - }, msg = s"Could not get bar partitions assigned upon rejoining. Last response $shareGroupHeartbeatResponse.") - - // Epoch should have been bumped when a member is removed and again when it joins back. - assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) + .setPartitions(List[Integer](0, 1).asJava)).asJava) + + // Prepare the next heartbeat for member. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get foo partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response, the epoch should have been bumped. + assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) + memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch + + // Prepare the next heartbeat with a new subscribed topic. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch) + .setSubscribedTopicNames(List("foo", "bar").asJava), + true + ).build() + + val barId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "bar" + ) + + expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List( + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooId) + .setPartitions(List[Integer](0, 1).asJava), + new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(barId) + .setPartitions(List[Integer](0).asJava)).asJava) + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not get bar partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response, the epoch should have been bumped. + assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) + memberEpoch = shareGroupHeartbeatResponse.data.memberEpoch + + // Prepare the next heartbeat which is empty to verify no assignment changes. 
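
The expiry step a little further down depends on the timing set up at the class level: this test lowers group.share.min.session.timeout.ms to 501 ms so that a sub-second session timeout can take effect, which lets a fixed Thread.sleep(1000) outlast the session. Polling with heartbeats instead of sleeping would not work here, since each accepted heartbeat resets the session timer; the sleep is what guarantees the member has been expired before the UNKNOWN_MEMBER_ID check. Condensed:

// From the class configuration and the expiry step below (condensed):
//   group.share.min.session.timeout.ms = 501   // permits a ~500 ms share session timeout
Thread.sleep(1000)                              // outlast the session; heartbeating here would keep it alive
// Any later heartbeat with the old member id is then answered with Errors.UNKNOWN_MEMBER_ID.
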
+ shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not get empty heartbeat response. Last response $shareGroupHeartbeatResponse.") + + // Verify the response, the epoch should be same. + assertEquals(memberEpoch, shareGroupHeartbeatResponse.data.memberEpoch) + + // Blocking the thread for 1 sec so that the session times out and the member needs to rejoin. + Thread.sleep(1000) + + // Prepare the next heartbeat which is empty to verify no assignment changes. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(memberEpoch), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.UNKNOWN_MEMBER_ID.code + }, msg = s"Member should have been expired because of the timeout . Last response $shareGroupHeartbeatResponse.") + + // Member sends a request again to join the share group + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo", "bar").asJava), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && + expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) + }, msg = s"Could not get bar partitions assigned upon rejoining. Last response $shareGroupHeartbeatResponse.") + + // Epoch should have been bumped when a member is removed and again when it joins back. + assertTrue(shareGroupHeartbeatResponse.data.memberEpoch > memberEpoch) + } finally { + admin.close() + } } @ClusterTest( @@ -771,89 +785,92 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) def testGroupCoordinatorChange(): Unit = { - val raftCluster = cluster.asInstanceOf[RaftClusterInstance] - val admin = cluster.createAdminClient() + val admin = cluster.admin() // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = raftCluster.brokers.values().asScala.toSeq, - controllers = raftCluster.controllers().values().asScala.toSeq - ) - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(Uuid.randomUuid.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava), - true - ).build() - // Send the request until receiving a successful response. 
There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - // Verify the response for member. - val memberId = shareGroupHeartbeatResponse.data.memberId - assertNotNull(memberId) - assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) - assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) - // Create the topic. - val fooId = TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 2 - ) - // This is the expected assignment. - val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(fooId) - .setPartitions(List[Integer](0, 1).asJava)).asJava) - // Prepare the next heartbeat for member. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(1), - true - ).build() - - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && - shareGroupHeartbeatResponse.data.assignment == expectedAssignment - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - // Verify the response. - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - - // Restart the only running broker. - val broker = raftCluster.brokers().values().iterator().next() - raftCluster.shutdownBroker(broker.config.brokerId) - raftCluster.startBroker(broker.config.brokerId) - - // Prepare the next heartbeat for member with no updates. - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(2), - true - ).build() - - // Should receive no error and no assignment changes. - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - - // Verify the response. Epoch should not have changed and null assignments determines that no - // change in old assignment. - assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) - assertNull(shareGroupHeartbeatResponse.data.assignment) + try { + TestUtils.createOffsetsTopicWithAdmin( + admin = admin, + brokers = cluster.brokers.values().asScala.toSeq, + controllers = cluster.controllers().values().asScala.toSeq + ) + // Heartbeat request to join the group. Note that the member subscribes + // to an nonexistent topic. + var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(Uuid.randomUuid.toString) + .setMemberEpoch(0) + .setSubscribedTopicNames(List("foo").asJava), + true + ).build() + // Send the request until receiving a successful response. There is a delay + // here because the group coordinator is loaded in the background. 
+ var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") + // Verify the response for member. + val memberId = shareGroupHeartbeatResponse.data.memberId + assertNotNull(memberId) + assertEquals(1, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(new ShareGroupHeartbeatResponseData.Assignment(), shareGroupHeartbeatResponse.data.assignment) + // Create the topic. + val fooId = TestUtils.createTopicWithAdminRaw( + admin = admin, + topic = "foo", + numPartitions = 2 + ) + // This is the expected assignment. + val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooId) + .setPartitions(List[Integer](0, 1).asJava)).asJava) + // Prepare the next heartbeat for member. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(1), + true + ).build() + + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + // Verify the response. + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + + // Restart the only running broker. + val broker = cluster.brokers().values().iterator().next() + cluster.shutdownBroker(broker.config.brokerId) + cluster.startBroker(broker.config.brokerId) + + // Prepare the next heartbeat for member with no updates. + shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( + new ShareGroupHeartbeatRequestData() + .setGroupId("grp") + .setMemberId(memberId) + .setMemberEpoch(2), + true + ).build() + + // Should receive no error and no assignment changes. + TestUtils.waitUntilTrue(() => { + shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code + }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") + + // Verify the response. Epoch should not have changed and null assignments determines that no + // change in old assignment. + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) + assertNull(shareGroupHeartbeatResponse.data.assignment) + } finally { + admin.close() + } } private def connectAndReceive(request: ShareGroupHeartbeatRequest): ShareGroupHeartbeatResponse = { diff --git a/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala new file mode 100644 index 0000000000000..57277f3dbed49 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala @@ -0,0 +1,245 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterInstance, ClusterTest, ClusterTestDefaults, ClusterTestExtensions, Type} +import kafka.utils.TestUtils +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.UnsupportedVersionException +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.JoinGroupRequest +import org.apache.kafka.common.utils.ProducerIdAndEpoch +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue, fail} +import org.junit.jupiter.api.extension.ExtendWith + +import scala.jdk.CollectionConverters.IterableHasAsScala + +@ExtendWith(value = Array(classOf[ClusterTestExtensions])) +@ClusterTestDefaults(types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + ) +) +class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + + @ClusterTest + def testTxnOffsetCommitWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testTxnOffsetCommit(true) + } + + @ClusterTest + def testTxnOffsetCommitWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testTxnOffsetCommit(false) + } + + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + ) + ) + def testTxnOffsetCommitWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testTxnOffsetCommit(false) + } + + private def testTxnOffsetCommit(useNewProtocol: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + + val topic = "topic" + val partition = 0 + val transactionalId = "txn" + val groupId = "group" + + // Creates the __consumer_offsets and __transaction_state topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + createTransactionStateTopic() + + // Join the consumer group. Note that we don't heartbeat here so we must use + // a session long enough for the duration of the test. 
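
The test methods below drive the transactional offset commit flow at the RPC level (InitProducerId, AddOffsetsToTxn, TxnOffsetCommit, EndTxn) through the verifyTxnCommitAndFetch helper defined further down. For orientation, the same flow expressed against the public producer API, which issues these RPCs under the hood, looks roughly like this sketch (topic, partition, and offset values are illustrative, and the producer is assumed to be configured with a transactional.id):

import java.util.Collections
import org.apache.kafka.clients.consumer.{ConsumerGroupMetadata, OffsetAndMetadata}
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.common.TopicPartition

def commitOffsetsTransactionally(producer: KafkaProducer[Array[Byte], Array[Byte]], groupId: String): Unit = {
  producer.initTransactions()                        // InitProducerId
  producer.beginTransaction()
  producer.sendOffsetsToTransaction(                 // AddOffsetsToTxn + TxnOffsetCommit
    Collections.singletonMap(new TopicPartition("topic", 0), new OffsetAndMetadata(100L)),
    new ConsumerGroupMetadata(groupId)
  )
  producer.commitTransaction()                       // EndTxn with committed = true
}
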
+ val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) + assertTrue(memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID) + assertTrue(memberEpoch != JoinGroupRequest.UNKNOWN_GENERATION_ID) + + createTopic(topic, 1) + + for (version <- 0 to ApiKeys.TXN_OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { + // Verify that the TXN_OFFSET_COMMIT request is processed correctly when member id is UNKNOWN_MEMBER_ID + // and generation id is UNKNOWN_GENERATION_ID under all api versions. + verifyTxnCommitAndFetch( + topic = topic, + partition = partition, + transactionalId = transactionalId, + groupId = groupId, + memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, + generationId = JoinGroupRequest.UNKNOWN_GENERATION_ID, + offset = 100 + version, + version = version.toShort, + expectedTxnCommitError = Errors.NONE + ) + + if (version >= 3) { + // Verify that the TXN_OFFSET_COMMIT request is processed correctly when the member ID + // and generation ID are known. This validation starts from version 3, as the member ID + // must not be empty from version 3 onwards. + verifyTxnCommitAndFetch( + topic = topic, + partition = partition, + transactionalId = transactionalId, + groupId = groupId, + memberId = memberId, + generationId = memberEpoch, + offset = 200 + version, + version = version.toShort, + expectedTxnCommitError = Errors.NONE + ) + + // Verify TXN_OFFSET_COMMIT request failed with incorrect memberId. + verifyTxnCommitAndFetch( + topic = topic, + partition = partition, + transactionalId = transactionalId, + groupId = groupId, + memberId = "non-exist", + generationId = memberEpoch, + offset = 200 + version, + version = version.toShort, + expectedTxnCommitError = Errors.UNKNOWN_MEMBER_ID + ) + + // Verify TXN_OFFSET_COMMIT request failed with incorrect generationId. + verifyTxnCommitAndFetch( + topic = topic, + partition = partition, + transactionalId = transactionalId, + groupId = groupId, + memberId = memberId, + generationId = 100, + offset = 200 + version, + version = version.toShort, + expectedTxnCommitError = Errors.ILLEGAL_GENERATION + ) + } else { + // Verify that the TXN_OFFSET_COMMIT request failed when group metadata is set under version 3. + assertThrows(classOf[UnsupportedVersionException], () => + verifyTxnCommitAndFetch( + topic = topic, + partition = partition, + transactionalId = transactionalId, + groupId = groupId, + memberId = memberId, + generationId = memberEpoch, + offset = 200 + version, + version = version.toShort, + expectedTxnCommitError = Errors.NONE + ) + ) + } + } + } + + private def verifyTxnCommitAndFetch( + topic: String, + partition: Int, + transactionalId: String, + groupId: String, + memberId: String, + generationId: Int, + offset: Long, + version: Short, + expectedTxnCommitError: Errors + ): Unit = { + var producerIdAndEpoch: ProducerIdAndEpoch = null + // Wait until the coordinator finishes loading. 
+ TestUtils.waitUntilTrue(() => + try { + producerIdAndEpoch = initProducerId( + transactionalId = transactionalId, + producerIdAndEpoch = ProducerIdAndEpoch.NONE, + expectedError = Errors.NONE + ) + true + } catch { + case _: Throwable => false + }, "initProducerId request failed" + ) + + addOffsetsToTxn( + groupId = groupId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId + ) + + val originalOffset = fetchOffset(topic, partition, groupId) + + commitTxnOffset( + groupId = groupId, + memberId = memberId, + generationId = generationId, + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId, + topic = topic, + partition = partition, + offset = offset, + expectedError = expectedTxnCommitError, + version = version + ) + + endTxn( + producerId = producerIdAndEpoch.producerId, + producerEpoch = producerIdAndEpoch.epoch, + transactionalId = transactionalId, + isTransactionV2Enabled = false, + committed = true, + expectedError = Errors.NONE + ) + + val expectedOffset = if (expectedTxnCommitError == Errors.NONE) offset else originalOffset + + TestUtils.waitUntilTrue(() => + try { + fetchOffset(topic, partition, groupId) == expectedOffset + } catch { + case _: Throwable => false + }, "txn commit offset validation failed" + ) + } + + private def fetchOffset( + topic: String, + partition: Int, + groupId: String + ): Long = { + val fetchOffsetsResp = fetchOffsets( + groups = Map(groupId -> List(new TopicPartition(topic, partition))), + requireStable = true, + version = ApiKeys.OFFSET_FETCH.latestVersion + ) + val groupIdRecord = fetchOffsetsResp.find(_.groupId == groupId).head + val topicRecord = groupIdRecord.topics.asScala.find(_.name == topic).head + val partitionRecord = topicRecord.partitions.asScala.find(_.partitionIndex == partition).head + partitionRecord.committedOffset + } +} diff --git a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala index 13518b0a2bb04..d3bed2905f6ba 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala @@ -26,7 +26,6 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetFo import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} @@ -68,7 +67,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { @ParameterizedTest @ValueSource(strings = Array("kraft")) def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader(quorum: String): Unit = { - brokers ++= (0 to 1).map { id => createBroker(fromProps(createBrokerConfig(id, zkConnectOrNull))) } + brokers ++= (0 to 1).map { id => createBroker(fromProps(createBrokerConfig(id))) } // Given two topics with replication of a single partition for (topic <- List(topic1, topic2)) { @@ -103,7 +102,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { def 
shouldSendLeaderEpochRequestAndGetAResponse(quorum: String): Unit = { //3 brokers, put partition on 100/101 and then pretend to be 102 - brokers ++= (100 to 102).map { id => createBroker(fromProps(createBrokerConfig(id, zkConnectOrNull))) } + brokers ++= (100 to 102).map { id => createBroker(fromProps(createBrokerConfig(id))) } val assignment1 = Map(0 -> Seq(100), 1 -> Seq(101)) createTopic(topic1, assignment1) @@ -150,14 +149,10 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { @ValueSource(strings = Array("kraft")) def shouldIncreaseLeaderEpochBetweenLeaderRestarts(quorum: String): Unit = { //Setup: we are only interested in the single partition on broker 101 - brokers += createBroker(fromProps(createBrokerConfig(100, zkConnectOrNull))) - if (isKRaftTest()) { - assertEquals(controllerServer.config.nodeId, waitUntilQuorumLeaderElected(controllerServer)) - } else { - assertEquals(100, TestUtils.waitUntilControllerElected(zkClient)) - } + brokers += createBroker(fromProps(createBrokerConfig(100))) + assertEquals(controllerServer.config.nodeId, waitUntilQuorumLeaderElected(controllerServer)) - brokers += createBroker(fromProps(createBrokerConfig(101, zkConnectOrNull))) + brokers += createBroker(fromProps(createBrokerConfig(101))) def leo() = brokers(1).replicaManager.localLog(tp).get.logEndOffset @@ -287,14 +282,18 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { } private def createTopic(topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]]): Unit = { - Using(createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))) { admin => - TestUtils.createTopicWithAdmin( - admin = admin, - topic = topic, - replicaAssignment = partitionReplicaAssignment, - brokers = brokers, - controllers = controllerServers - ) + Using.resource(createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))) { admin => + try { + TestUtils.createTopicWithAdmin( + admin = admin, + topic = topic, + replicaAssignment = partitionReplicaAssignment, + brokers = brokers, + controllers = controllerServers + ) + } finally { + admin.close() + } } } @@ -321,8 +320,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { .setLeaderEpoch(leaderEpoch)) } - val request = OffsetsForLeaderEpochRequest.Builder.forFollower( - ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, topics, 1) + val request = OffsetsForLeaderEpochRequest.Builder.forFollower(topics, 1) val response = sender.sendRequest(request) response.responseBody.asInstanceOf[OffsetsForLeaderEpochResponse].data.topics.asScala.flatMap { topic => topic.partitions.asScala.map { partition => diff --git a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala index 434ce24b92fb1..79f4be41b8f62 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala @@ -28,7 +28,7 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} -import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.common.{KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.util.MockTime import 
org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.junit.jupiter.api.Assertions._ @@ -38,7 +38,7 @@ import org.mockito.Mockito.{mock, when} import scala.jdk.CollectionConverters._ class OffsetsForLeaderEpochTest { - private val config = TestUtils.createBrokerConfigs(1, TestUtils.MockZkConnect).map(KafkaConfig.fromProps).head + private val config = TestUtils.createBrokerConfigs(1).map(KafkaConfig.fromProps).head private val time = new MockTime private val metrics = new Metrics private val alterIsrManager = TestUtils.createAlterIsrManager() @@ -72,7 +72,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) val partition = replicaManager.createPartition(tp) @@ -101,7 +101,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) replicaManager.createPartition(tp) @@ -132,7 +132,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = MetadataCache.zkMetadataCache(config.brokerId, config.interBrokerProtocolVersion), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index d0e93a56307ea..08b6bbe7f21d2 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -181,7 +181,7 @@ class BrokerMetadataPublisherTest { @Test def testNewImagePushedToGroupCoordinator(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, "")) + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1) val logManager = mock(classOf[LogManager]) val replicaManager = mock(classOf[ReplicaManager]) diff --git a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala index 984b3a8eb8c47..7aa8337f22b9b 100644 --- a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala +++ b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala @@ -27,7 +27,7 @@ import java.util.stream.IntStream import kafka.log.{LogTestUtils, UnifiedLog} import kafka.raft.{KafkaMetadataLog, MetadataLogConfig} import kafka.server.KafkaRaftServer -import kafka.tools.DumpLogSegments.{OffsetsMessageParser, ShareGroupStateMessageParser, TimeIndexDumpErrors} +import kafka.tools.DumpLogSegments.{OffsetsMessageParser, ShareGroupStateMessageParser, TimeIndexDumpErrors, 
TransactionLogMessageParser} import kafka.utils.TestUtils import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.{Assignment, Subscription} import org.apache.kafka.clients.consumer.internals.ConsumerProtocol @@ -36,14 +36,15 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.metadata.{PartitionChangeRecord, RegisterBrokerRecord, TopicRecord} import org.apache.kafka.common.protocol.{ByteBufferAccessor, ObjectSerializationCache} -import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordBatch, RecordVersion, SimpleRecord} +import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordVersion, SimpleRecord} import org.apache.kafka.common.utils.{Exit, Utils} import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord import org.apache.kafka.coordinator.group.GroupCoordinatorRecordSerde import org.apache.kafka.coordinator.group.generated.{ConsumerGroupMemberMetadataValue, ConsumerGroupMetadataKey, ConsumerGroupMetadataValue, GroupMetadataKey, GroupMetadataValue} -import org.apache.kafka.coordinator.share.generated.{ShareSnapshotKey, ShareSnapshotValue, ShareUpdateKey, ShareUpdateValue} -import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorRecordSerde} -import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.coordinator.share.generated.{CoordinatorRecordType, ShareSnapshotKey, ShareSnapshotValue, ShareUpdateKey, ShareUpdateValue} +import org.apache.kafka.coordinator.share.ShareCoordinatorRecordSerde +import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} +import org.apache.kafka.coordinator.transaction.{TransactionCoordinatorRecordSerde, TransactionLogConfig} import org.apache.kafka.metadata.MetadataRecordSerde import org.apache.kafka.raft.{KafkaRaftClient, OffsetAndEpoch, VoterSetTest} import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion} @@ -87,6 +88,8 @@ class DumpLogSegmentsTest { private def createTestLog = { val props = new Properties props.setProperty(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "128") + // This test uses future timestamps beyond the default of 1 hour. 
+ props.setProperty(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.MaxValue.toString) log = UnifiedLog( dir = logDir, config = new LogConfig(props), @@ -99,8 +102,7 @@ class DumpLogSegmentsTest { producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, logDirFailureChannel = new LogDirFailureChannel(10), - topicId = None, - keepPartitionMetadataFile = true + topicId = None ) log } @@ -400,7 +402,7 @@ class DumpLogSegmentsTest { log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0) val secondSegment = log.roll() - secondSegment.append(1L, RecordBatch.NO_TIMESTAMP, 1L, MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*)) + secondSegment.append(1L, MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*)) secondSegment.flush() log.flush(true) @@ -560,7 +562,7 @@ class DumpLogSegmentsTest { val lastContainedLogTimestamp = 10000 - Using( + Using.resource( new RecordsSnapshotWriter.Builder() .setTime(new MockTime) .setLastContainedLogTimestamp(lastContainedLogTimestamp) @@ -831,6 +833,126 @@ class DumpLogSegmentsTest { ) } + @Test + def testTransactionLogMessageParser(): Unit = { + val serde = new TransactionCoordinatorRecordSerde() + val parser = new TransactionLogMessageParser() + + def serializedRecord(key: ApiMessageAndVersion, value: ApiMessageAndVersion): Record = { + val record = new CoordinatorRecord(key, value) + TestUtils.singletonRecords( + key = serde.serializeKey(record), + value = serde.serializeValue(record) + ).records.iterator.next + } + + // The key is mandatory. + assertEquals( + "Failed to decode message at offset 0 using offset transaction-log decoder (message had a missing key)", + assertThrows( + classOf[RuntimeException], + () => parser.parse(TestUtils.singletonRecords(key = null, value = null).records.iterator.next) + ).getMessage + ) + + // A valid key and value should work. + assertEquals( + ( + Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), + Some("{\"version\":\"0\",\"data\":{\"producerId\":123,\"producerEpoch\":0,\"transactionTimeoutMs\":0," + + "\"transactionStatus\":0,\"transactionPartitions\":[],\"transactionLastUpdateTimestampMs\":0," + + "\"transactionStartTimestampMs\":0}}") + ), + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new TransactionLogKey() + .setTransactionalId("txnId"), + 0.toShort + ), + new ApiMessageAndVersion( + new TransactionLogValue() + .setProducerId(123L), + 0.toShort + ) + )) + ) + + // A valid key with a tombstone should work. + assertEquals( + ( + Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), + Some("") + ), + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new TransactionLogKey() + .setTransactionalId("txnId"), + 0.toShort + ), + null + )) + ) + + // An unknown record type should be handled and reported as such. + assertEquals( + ( + Some("Unknown record type 32767 at offset 0, skipping."), + None + ), + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new TransactionLogKey() + .setTransactionalId("txnId"), + Short.MaxValue // Invalid record id. + ), + new ApiMessageAndVersion( + new TransactionLogValue(), + 0.toShort + ) + )) + ) + + // A valid key and value with all fields set should work. 
+ assertEquals( + ( + Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), + Some("{\"version\":\"1\",\"data\":{\"producerId\":12,\"previousProducerId\":11,\"nextProducerId\":10," + + "\"producerEpoch\":2,\"transactionTimeoutMs\":14,\"transactionStatus\":0," + + "\"transactionPartitions\":[{\"topic\":\"topic1\",\"partitionIds\":[0,1,2]}," + + "{\"topic\":\"topic2\",\"partitionIds\":[3,4,5]}],\"transactionLastUpdateTimestampMs\":123," + + "\"transactionStartTimestampMs\":13}}") + ), + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new TransactionLogKey() + .setTransactionalId("txnId"), + 0.toShort + ), + new ApiMessageAndVersion( + new TransactionLogValue() + .setClientTransactionVersion(0.toShort) + .setNextProducerId(10L) + .setPreviousProducerId(11L) + .setProducerEpoch(2.toShort) + .setProducerId(12L) + .setTransactionLastUpdateTimestampMs(123L) + .setTransactionPartitions(List( + new TransactionLogValue.PartitionsSchema() + .setTopic("topic1") + .setPartitionIds(List(0, 1, 2).map(Integer.valueOf).asJava), + new TransactionLogValue.PartitionsSchema() + .setTopic("topic2") + .setPartitionIds(List(3, 4, 5).map(Integer.valueOf).asJava) + ).asJava) + .setTransactionStartTimestampMs(13L) + .setTransactionStatus(0) + .setTransactionTimeoutMs(14), + 1.toShort + ) + )) + ) + } + private def readBatchMetadata(lines: util.ListIterator[String]): Option[String] = { while (lines.hasNext) { val line = lines.next() @@ -991,14 +1113,14 @@ class DumpLogSegmentsTest { assertEquals( ( Some("{\"type\":\"0\",\"data\":{\"groupId\":\"gs1\",\"topicId\":\"Uj5wn_FqTXirEASvVZRY1w\",\"partition\":0}}"), - Some("{\"type\":\"0\",\"data\":{\"snapshotEpoch\":0,\"stateEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") + Some("{\"version\":\"0\",\"data\":{\"snapshotEpoch\":0,\"stateEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") ), parser.parse(serializedRecord( new ApiMessageAndVersion(new ShareSnapshotKey() .setGroupId("gs1") .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) .setPartition(0), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION), + CoordinatorRecordType.SHARE_SNAPSHOT.id()), new ApiMessageAndVersion(new ShareSnapshotValue() .setSnapshotEpoch(0) .setStateEpoch(0) @@ -1011,7 +1133,7 @@ class DumpLogSegmentsTest { .setDeliveryState(2) .setDeliveryCount(1) ).asJava), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION) + 0.toShort) )) ) @@ -1019,14 +1141,14 @@ class DumpLogSegmentsTest { assertEquals( ( Some("{\"type\":\"1\",\"data\":{\"groupId\":\"gs1\",\"topicId\":\"Uj5wn_FqTXirEASvVZRY1w\",\"partition\":0}}"), - Some("{\"type\":\"0\",\"data\":{\"snapshotEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") + Some("{\"version\":\"0\",\"data\":{\"snapshotEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") ), parser.parse(serializedRecord( new ApiMessageAndVersion(new ShareUpdateKey() .setGroupId("gs1") .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) .setPartition(0), - ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION), + CoordinatorRecordType.SHARE_UPDATE.id()), new ApiMessageAndVersion(new ShareUpdateValue() .setSnapshotEpoch(0) .setLeaderEpoch(0) @@ -1054,7 +1176,7 @@ class 
DumpLogSegmentsTest { .setGroupId("gs1") .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) .setPartition(0), - 0.toShort + CoordinatorRecordType.SHARE_SNAPSHOT.id() ), null )) diff --git a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala index beff77cf52377..9355a65ceb2cb 100644 --- a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala +++ b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala @@ -27,10 +27,11 @@ import kafka.utils.TestUtils import net.sourceforge.argparse4j.inf.ArgumentParserException import org.apache.kafka.common.metadata.UserScramCredentialRecord import org.apache.kafka.common.utils.Utils -import org.apache.kafka.server.common.{Features, MetadataVersion} +import org.apache.kafka.server.common.{Feature, MetadataVersion} import org.apache.kafka.metadata.bootstrap.BootstrapDirectory import org.apache.kafka.metadata.properties.{MetaPropertiesEnsemble, PropertiesUtils} import org.apache.kafka.metadata.storage.FormatterException +import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} @@ -50,11 +51,12 @@ class StorageToolTest { properties.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") properties.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, s"2@localhost:9092") - properties.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "PLAINTEXT") + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") properties } - val testingFeatures = Features.FEATURES.toList.asJava + val testingFeatures = Feature.FEATURES.toList.asJava @Test def testConfigToLogDirectories(): Unit = { @@ -292,19 +294,6 @@ Found problem: "Failed to find content in output: " + stream.toString()) } - @Test - def testFormatFailsInZkMode(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.setProperty("log.dirs", availableDirs.mkString(",")) - properties.setProperty("zookeeper.connect", "localhost:2181") - val stream = new ByteArrayOutputStream() - assertEquals("The kafka configuration file appears to be for a legacy cluster. " + - "Formatting is only supported for clusters in KRaft mode.", - assertThrows(classOf[TerseFailure], - () => runFormatCommand(stream, properties)).getMessage) - } - @Test def testFormatWithReleaseVersion(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) @@ -336,7 +325,7 @@ Found problem: properties.putAll(defaultStaticQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) assertEquals("Unsupported feature: non.existent.feature. 
Supported features are: " + - "group.version, kraft.version, transaction.version", + "eligible.leader.replicas.version, group.version, kraft.version, transaction.version", assertThrows(classOf[FormatterException], () => runFormatCommand(new ByteArrayOutputStream(), properties, Seq("--feature", "non.existent.feature=20"))).getMessage) @@ -483,20 +472,48 @@ Found problem: Seq("--release-version", "3.9-IV0"))).getMessage) } - @Test - def testFormatWithNoInitialControllersSucceedsOnController(): Unit = { + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testFormatWithNoInitialControllersSucceedsOnController(setKraftVersionFeature: Boolean): Unit = { val availableDirs = Seq(TestUtils.tempDir()) val properties = new Properties() properties.putAll(defaultDynamicQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream = new ByteArrayOutputStream() - assertEquals(0, runFormatCommand(stream, properties, - Seq("--no-initial-controllers", "--release-version", "3.9-IV0"))) + val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--no-initial-controllers") + if (setKraftVersionFeature) { + arguments += "--feature" + arguments += "kraft.version=1" + } + assertEquals(0, runFormatCommand(stream, properties, arguments.toSeq)) assertTrue(stream.toString(). contains("Formatting metadata directory %s".format(availableDirs.head)), "Failed to find content in output: " + stream.toString()) } + @Test + def testFormatWithNoInitialControllersFlagAndStandaloneFlagFails(): Unit = { + val arguments = ListBuffer[String]( + "format", "--cluster-id", "XcZZOzUqS4yHOjhMQB6JLQ", + "--release-version", "3.9-IV0", + "--no-initial-controllers", "--standalone") + val exception = assertThrows(classOf[ArgumentParserException], () => StorageTool.parseArguments(arguments.toArray)) + assertEquals("argument --standalone/-s: not allowed with argument --no-initial-controllers/-N", exception.getMessage) + } + + @Test + def testFormatWithNoInitialControllersFlagAndInitialControllersFlagFails(): Unit = { + val arguments = ListBuffer[String]( + "format", "--cluster-id", "XcZZOzUqS4yHOjhMQB6JLQ", + "--release-version", "3.9-IV0", + "--no-initial-controllers", "--initial-controllers", + "0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," + + "1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," + + "2@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang") + val exception = assertThrows(classOf[ArgumentParserException], () => StorageTool.parseArguments(arguments.toArray)) + assertEquals("argument --initial-controllers/-I: not allowed with argument --no-initial-controllers/-N", exception.getMessage) + } + @Test def testFormatWithoutStaticQuorumSucceedsWithoutInitialControllersOnBroker(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) @@ -543,8 +560,8 @@ Found problem: s"Output did not contain expected Metadata Version: $output" ) - for (feature <- Features.PRODUCTION_FEATURES.asScala) { - val featureLevel = feature.defaultValue(metadataVersion) + for (feature <- Feature.PRODUCTION_FEATURES.asScala) { + val featureLevel = feature.defaultLevel(metadataVersion) assertTrue(output.contains(s"${feature.featureName()}=$featureLevel"), s"Output did not contain expected feature mapping: $output" ) @@ -566,8 +583,8 @@ Found problem: s"Output did not contain expected Metadata Version: $output" ) - for (feature <- Features.PRODUCTION_FEATURES.asScala) { - val featureLevel = feature.defaultValue(metadataVersion) + for (feature <- Feature.PRODUCTION_FEATURES.asScala) { + val featureLevel = 
feature.defaultLevel(metadataVersion) assertTrue(output.contains(s"${feature.featureName()}=$featureLevel"), s"Output did not contain expected feature mapping: $output" ) diff --git a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala index 3aaa58641bc63..c5671926dbf15 100644 --- a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala +++ b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala @@ -28,8 +28,6 @@ import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, Timeout} -import scala.jdk.OptionConverters.RichOption - class SchedulerTest { @@ -140,8 +138,8 @@ class SchedulerTest { val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) val logDirFailureChannel = new LogDirFailureChannel(10) val segments = new LogSegments(topicPartition) - val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, logConfig.recordVersion, "", None, mockTime.scheduler) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), mockTime) val offsets = new LogLoader( @@ -155,7 +153,7 @@ class SchedulerTest { segments, 0L, 0L, - leaderEpochCache.toJava, + leaderEpochCache, producerStateManager, new ConcurrentHashMap[String, Integer], false @@ -166,7 +164,7 @@ class SchedulerTest { localLog = localLog, brokerTopicStats, producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - _topicId = None, keepPartitionMetadataFile = true) + _topicId = None) assertTrue(scheduler.taskRunning(log.producerExpireCheck)) log.close() assertFalse(scheduler.taskRunning(log.producerExpireCheck)) diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index ddef861db928a..202600e0f3c42 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -23,7 +23,6 @@ import kafka.security.JaasTestUtils import kafka.server._ import kafka.server.metadata.{ConfigRepository, MockConfigRepository} import kafka.utils.Implicits._ -import kafka.zk._ import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin._ import org.apache.kafka.clients.consumer._ @@ -47,7 +46,6 @@ import org.apache.kafka.common.resource.ResourcePattern import org.apache.kafka.common.security.auth.{KafkaPrincipal, KafkaPrincipalSerde, SecurityProtocol} import org.apache.kafka.common.serialization._ import org.apache.kafka.common.utils.Utils.formatAddress -import org.apache.kafka.common.utils.Time import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.metadata.LeaderAndIsr @@ -56,14 +54,13 @@ import org.apache.kafka.network.metrics.RequestChannelMetrics import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.authorizer.{AuthorizableRequestContext, Authorizer => JAuthorizer} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, MetadataVersion} -import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs, 
ZkConfigs} +import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.apache.kafka.test.{TestUtils => JTestUtils} -import org.apache.zookeeper.KeeperException.SessionExpiredException import org.junit.jupiter.api.Assertions._ import org.mockito.ArgumentMatchers.{any, anyBoolean} import org.mockito.Mockito @@ -78,7 +75,6 @@ import java.util import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean import java.util.{Collections, Optional, Properties} -import scala.annotation.nowarn import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, mutable} import scala.concurrent.duration.FiniteDuration @@ -102,10 +98,6 @@ object TestUtils extends Logging { by any other service and hence we use a reserved port. */ val IncorrectBrokerPort = 225 - /** Port to use for unit tests that mock/don't require a real ZK server. */ - val MockZkPort = 1 - /** ZooKeeper connection string to use for unit tests that mock/don't require a real ZK server. */ - val MockZkConnect = "127.0.0.1:" + MockZkPort // CN in SSL certificates - this is used for endpoint validation when enabled val SslCertificateCn = "localhost" @@ -131,14 +123,7 @@ object TestUtils extends Logging { * Create a random log directory in the format - used for Kafka partition logs. * It is the responsibility of the caller to set up a shutdown hook for deletion of the directory. */ - def randomPartitionLogDir(parentDir: File): File = { - val attempts = 1000 - val f = Iterator.continually(new File(parentDir, "kafka-" + random.nextInt(1000000))) - .take(attempts).find(_.mkdir()) - .getOrElse(sys.error(s"Failed to create directory after $attempts attempts")) - f.deleteOnExit() - f - } + def randomPartitionLogDir(parentDir: File): File = JTestUtils.randomPartitionLogDir(parentDir) /** * Create a temporary file @@ -155,23 +140,6 @@ object TestUtils extends Logging { JTestUtils.tempFile(content) } - /** - * Create a kafka server instance with appropriate test settings - * USING THIS IS A SIGN YOU ARE NOT WRITING A REAL UNIT TEST - * - * @param config The configuration of the server - */ - def createServer(config: KafkaConfig, time: Time = Time.SYSTEM): KafkaServer = { - createServer(config, time, None, startup = true, enableZkApiForwarding = false) - } - - def createServer(config: KafkaConfig, time: Time, threadNamePrefix: Option[String], - startup: Boolean, enableZkApiForwarding: Boolean) = { - val server = new KafkaServer(config, time, threadNamePrefix, enableForwarding = enableZkApiForwarding) - if (startup) server.startup() - server - } - /** * Create a test config for the provided parameters. 
* @@ -179,7 +147,6 @@ object TestUtils extends Logging { */ def createBrokerConfigs( numConfigs: Int, - zkConnect: String, enableControlledShutdown: Boolean = true, enableDeleteTopic: Boolean = true, interBrokerSecurityProtocol: Option[SecurityProtocol] = None, @@ -198,7 +165,7 @@ object TestUtils extends Logging { enableFetchFromFollower: Boolean = false): Seq[Properties] = { val endingIdNumber = startingIdNumber + numConfigs - 1 (startingIdNumber to endingIdNumber).map { node => - createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, RandomPort, + createBrokerConfig(node, enableControlledShutdown, enableDeleteTopic, RandomPort, interBrokerSecurityProtocol, trustStoreFile, saslProperties, enablePlaintext = enablePlaintext, enableSsl = enableSsl, enableSaslPlaintext = enableSaslPlaintext, enableSaslSsl = enableSaslSsl, rack = rackInfo.get(node), logDirCount = logDirCount, enableToken = enableToken, numPartitions = numPartitions, defaultReplicationFactor = defaultReplicationFactor, enableFetchFromFollower = enableFetchFromFollower) @@ -237,7 +204,7 @@ object TestUtils extends Logging { } def createDummyBrokerConfig(): Properties = { - createBrokerConfig(0, "") + createBrokerConfig(0) } /** @@ -246,7 +213,6 @@ object TestUtils extends Logging { * Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled. */ def createBrokerConfig(nodeId: Int, - zkConnect: String, enableControlledShutdown: Boolean = true, enableDeleteTopic: Boolean = true, port: Int = RandomPort, @@ -285,19 +251,15 @@ object TestUtils extends Logging { val props = new Properties props.put(ServerConfigs.UNSTABLE_FEATURE_VERSIONS_ENABLE_CONFIG, "true") props.put(ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, "true") - if (zkConnect == null) { - props.setProperty(KRaftConfigs.SERVER_MAX_STARTUP_TIME_MS_CONFIG, TimeUnit.MINUTES.toMillis(10).toString) - props.put(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) - props.put(ServerConfigs.BROKER_ID_CONFIG, nodeId.toString) - props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) - props.put(SocketServerConfigs.LISTENERS_CONFIG, listeners) - props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, protocolAndPorts. - map(p => "%s:%s".format(p._1, p._1)).mkString(",") + ",CONTROLLER:PLAINTEXT") - } else { - if (nodeId >= 0) props.put(ServerConfigs.BROKER_ID_CONFIG, nodeId.toString) - props.put(SocketServerConfigs.LISTENERS_CONFIG, listeners) - } + props.setProperty(KRaftConfigs.SERVER_MAX_STARTUP_TIME_MS_CONFIG, TimeUnit.MINUTES.toMillis(10).toString) + props.put(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) + props.put(ServerConfigs.BROKER_ID_CONFIG, nodeId.toString) + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, listeners) + props.put(SocketServerConfigs.LISTENERS_CONFIG, listeners) + props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, protocolAndPorts. 
+ map(p => "%s:%s".format(p._1, p._1)).mkString(",") + ",CONTROLLER:PLAINTEXT") + if (logDirCount > 1) { val logDirs = (1 to logDirCount).toList.map(i => // We would like to allow user to specify both relative path and absolute path as log directory for backward-compatibility reason @@ -308,23 +270,17 @@ object TestUtils extends Logging { } else { props.put(ServerLogConfigs.LOG_DIR_CONFIG, tempDir().getAbsolutePath) } - if (zkConnect == null) { - props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") - // Note: this is just a placeholder value for controller.quorum.voters. JUnit - // tests use random port assignment, so the controller ports are not known ahead of - // time. Therefore, we ignore controller.quorum.voters and use - // controllerQuorumVotersFuture instead. - props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, "1000@localhost:0") - } else { - props.put(ZkConfigs.ZK_CONNECT_CONFIG, zkConnect) - props.put(ZkConfigs.ZK_CONNECTION_TIMEOUT_MS_CONFIG, "10000") - } + props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") + // Note: this is just a placeholder value for controller.quorum.voters. JUnit + // tests use random port assignment, so the controller ports are not known ahead of + // time. Therefore, we ignore controller.quorum.voters and use + // controllerQuorumVotersFuture instead. + props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, "1000@localhost:0") props.put(ReplicationConfigs.REPLICA_SOCKET_TIMEOUT_MS_CONFIG, "1500") props.put(ReplicationConfigs.CONTROLLER_SOCKET_TIMEOUT_MS_CONFIG, "1500") props.put(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, enableControlledShutdown.toString) props.put(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, enableDeleteTopic.toString) props.put(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG, "1000") - props.put(ServerConfigs.CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_CONFIG, "100") props.put(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "2097152") props.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") props.put(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, "100") @@ -360,12 +316,8 @@ object TestUtils extends Logging { props } - @nowarn("cat=deprecation") - def setIbpAndMessageFormatVersions(config: Properties, version: MetadataVersion): Unit = { + def setIbpVersion(config: Properties, version: MetadataVersion): Unit = { config.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, version.version) - // for clarity, only set the log message format version if it's not ignored - if (!LogConfig.shouldIgnoreMessageFormatVersion(version)) - config.setProperty(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, version.version) } def createAdminClient[B <: KafkaBroker]( @@ -496,6 +448,23 @@ object TestUtils extends Logging { ) } + def createTransactionStateTopicWithAdmin[B <: KafkaBroker]( + admin: Admin, + brokers: Seq[B], + controllers: Seq[ControllerServer] + ): Map[Int, Int] = { + val broker = brokers.head + createTopicWithAdmin( + admin = admin, + topic = Topic.TRANSACTION_STATE_TOPIC_NAME, + numPartitions = broker.config.getInt(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG), + replicationFactor = broker.config.getShort(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG).toInt, + brokers = brokers, + controllers = controllers, + topicConfig = new Properties(), + ) + } + def deleteTopicWithAdmin[B <: KafkaBroker]( admin: Admin, topic: String, @@ -513,83 +482,6 @@ object TestUtils extends Logging { controllers.foreach(controller => ensureConsistentKRaftMetadata(brokers, controller)) } - /** - * 
Create a topic in ZooKeeper. - * Wait until the leader is elected and the metadata is propagated to all brokers. - * Return the leader for each partition. - */ - def createTopic(zkClient: KafkaZkClient, - topic: String, - numPartitions: Int = 1, - replicationFactor: Int = 1, - servers: Seq[KafkaBroker], - topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] = { - val adminZkClient = new AdminZkClient(zkClient) - // create topic - waitUntilTrue( () => { - var hasSessionExpirationException = false - try { - adminZkClient.createTopic(topic, numPartitions, replicationFactor, topicConfig) - } catch { - case _: SessionExpiredException => hasSessionExpirationException = true - case e: Throwable => throw e // let other exceptions propagate - } - !hasSessionExpirationException}, - s"Can't create topic $topic") - - // wait until we've propagated all partitions metadata to all servers - val allPartitionsMetadata = waitForAllPartitionsMetadata(servers, topic, numPartitions) - - (0 until numPartitions).map { i => - i -> allPartitionsMetadata.get(new TopicPartition(topic, i)).map(_.leader()).getOrElse( - throw new IllegalStateException(s"Cannot get the partition leader for topic: $topic, partition: $i in server metadata cache")) - }.toMap - } - - /** - * Create a topic in ZooKeeper using a customized replica assignment. - * Wait until the leader is elected and the metadata is propagated to all brokers. - * Return the leader for each partition. - */ - def createTopic[B <: KafkaBroker](zkClient: KafkaZkClient, - topic: String, - partitionReplicaAssignment: collection.Map[Int, Seq[Int]], - servers: Seq[B]): scala.collection.immutable.Map[Int, Int] = { - createTopic(zkClient, topic, partitionReplicaAssignment, servers, new Properties()) - } - - /** - * Create a topic in ZooKeeper using a customized replica assignment. - * Wait until the leader is elected and the metadata is propagated to all brokers. - * Return the leader for each partition. - */ - def createTopic(zkClient: KafkaZkClient, - topic: String, - partitionReplicaAssignment: collection.Map[Int, Seq[Int]], - servers: Seq[KafkaBroker], - topicConfig: Properties): scala.collection.immutable.Map[Int, Int] = { - val adminZkClient = new AdminZkClient(zkClient) - // create topic - waitUntilTrue( () => { - var hasSessionExpirationException = false - try { - adminZkClient.createTopicWithAssignment(topic, topicConfig, partitionReplicaAssignment) - } catch { - case _: SessionExpiredException => hasSessionExpirationException = true - case e: Throwable => throw e // let other exceptions propagate - } - !hasSessionExpirationException}, - s"Can't create topic $topic") - - // wait until we've propagated all partitions metadata to all servers - val allPartitionsMetadata = waitForAllPartitionsMetadata(servers, topic, partitionReplicaAssignment.size) - - partitionReplicaAssignment.keySet.map { i => - i -> allPartitionsMetadata.get(new TopicPartition(topic, i)).map(_.leader()).getOrElse( - throw new IllegalStateException(s"Cannot get the partition leader for topic: $topic, partition: $i in server metadata cache")) - }.toMap - } - /** * Wrap a single record log buffer. */ @@ -685,31 +577,6 @@ object TestUtils extends Logging { new KafkaConsumer[K, V](consumerProps, keyDeserializer, valueDeserializer) } - /** - * If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected. - * If oldLeaderOpt is defined, it waits until the new leader is different from the old leader. 
- * If newLeaderOpt is defined, it waits until the new leader becomes the expected new leader. - * - * @return The new leader (note that negative values are used to indicate conditions like NoLeader and - * LeaderDuringDelete). - * @throws AssertionError if the expected condition is not true within the timeout. - */ - def waitUntilLeaderIsElectedOrChanged( - zkClient: KafkaZkClient, - topic: String, - partition: Int, - timeoutMs: Long = 30000L, - oldLeaderOpt: Option[Int] = None, - newLeaderOpt: Option[Int] = None, - ignoreNoLeader: Boolean = false - ): Int = { - def getPartitionLeader(topic: String, partition: Int): Option[Int] = { - zkClient.getLeaderForPartition(new TopicPartition(topic, partition)) - .filter(p => !ignoreNoLeader || p != LeaderAndIsr.NO_LEADER) - } - doWaitUntilLeaderIsElectedOrChanged(getPartitionLeader, topic, partition, timeoutMs, oldLeaderOpt, newLeaderOpt) - } - /** * If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected. * If oldLeaderOpt is defined, it waits until the new leader is different from the old leader. @@ -1014,17 +881,12 @@ object TestUtils extends Logging { TestUtils.waitUntilTrue( () => { brokers.forall { broker => - val metadataOffset = broker.asInstanceOf[BrokerServer].sharedServer.loader.lastAppliedOffset() - metadataOffset >= controllerOffset + val loader = broker.asInstanceOf[BrokerServer].sharedServer.loader + loader == null || loader.lastAppliedOffset() >= controllerOffset } }, msg) } - def waitUntilControllerElected(zkClient: KafkaZkClient, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = { - val (controllerId, _) = computeUntilTrue(zkClient.getControllerId, waitTime = timeout)(_.isDefined) - controllerId.getOrElse(throw new AssertionError(s"Controller not elected after $timeout ms")) - } - def awaitLeaderChange[B <: KafkaBroker]( brokers: Seq[B], tp: TopicPartition, @@ -1126,7 +988,6 @@ object TestUtils extends Logging { time = time, brokerTopicStats = new BrokerTopicStats, logDirFailureChannel = new LogDirFailureChannel(logDirs.size), - keepPartitionMetadataFile = true, interBrokerProtocolVersion = interBrokerProtocolVersion, remoteStorageSystemEnable = remoteStorageSystemEnable, initialTaskDelayMs = initialTaskDelayMs) @@ -1147,15 +1008,13 @@ object TestUtils extends Logging { override def submit( topicPartition: TopicIdPartition, leaderAndIsr: LeaderAndIsr, - controllerEpoch: Int ): CompletableFuture[LeaderAndIsr]= { val future = new CompletableFuture[LeaderAndIsr]() if (inFlight.compareAndSet(false, true)) { isrUpdates += AlterPartitionItem( topicPartition, leaderAndIsr, - future, - controllerEpoch + future ) } else { future.completeExceptionally(new OperationNotAttemptedException( @@ -1227,18 +1086,10 @@ object TestUtils extends Logging { } def verifyTopicDeletion[B <: KafkaBroker]( - zkClient: KafkaZkClient, topic: String, numPartitions: Int, brokers: Seq[B]): Unit = { val topicPartitions = (0 until numPartitions).map(new TopicPartition(topic, _)) - if (zkClient != null) { - // wait until admin path for delete topic is deleted, signaling completion of topic deletion - waitUntilTrue(() => !zkClient.isTopicMarkedForDeletion(topic), - "Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted".format(topic)) - waitUntilTrue(() => !zkClient.topicExists(topic), - "Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted".format(topic, topic)) - } // ensure that the topic-partition has been deleted from all brokers' replica 
managers waitUntilTrue(() => brokers.forall(broker => topicPartitions.forall(tp => broker.replicaManager.onlinePartition(tp).isEmpty)), @@ -1537,15 +1388,6 @@ object TestUtils extends Logging { ) } - def assertFutureExceptionTypeEquals(future: KafkaFuture[_], clazz: Class[_ <: Throwable], - expectedErrorMessage: Option[String] = None): Unit = { - val cause = assertThrows(classOf[ExecutionException], () => future.get()).getCause - assertTrue(clazz.isInstance(cause), "Expected an exception of type " + clazz.getName + "; got type " + - cause.getClass.getName) - expectedErrorMessage.foreach(message => assertTrue(cause.getMessage.contains(message), s"Received error message : ${cause.getMessage}" + - s" does not contain expected error message : $message")) - } - def assertBadConfigContainingMessage(props: Properties, expectedExceptionContainsText: String): Unit = { try { KafkaConfig.fromProps(props) diff --git a/core/src/test/scala/unit/kafka/zk/EmbeddedZookeeper.scala b/core/src/test/scala/unit/kafka/zk/EmbeddedZookeeper.scala deleted file mode 100755 index 159b69f2f4190..0000000000000 --- a/core/src/test/scala/unit/kafka/zk/EmbeddedZookeeper.scala +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.zk - -import org.apache.zookeeper.server.{NIOServerCnxnFactory, SessionTrackerImpl, ZooKeeperServer} -import kafka.utils.{CoreUtils, Logging, TestUtils} - -import java.net.InetSocketAddress -import org.apache.kafka.common.utils.Utils - -import java.io.Closeable - -/** - * ZooKeeperServer wrapper that starts the server with temporary directories during construction and deletes - * the directories when `shutdown()` is called. - * - * This is an internal class and it's subject to change. We recommend that you implement your own simple wrapper - * if you need similar functionality. - */ -// This should be named EmbeddedZooKeeper for consistency with other classes, but since this is widely used by other -// projects (even though it's internal), we keep the name as it is until we have a publicly supported test library for -// others to use. 
-class EmbeddedZookeeper extends Closeable with Logging { - - val snapshotDir = TestUtils.tempDir() - val logDir = TestUtils.tempDir() - val tickTime = 800 // allow a maxSessionTimeout of 20 * 800ms = 16 secs - - System.setProperty("zookeeper.forceSync", "no") //disable fsync to ZK txn log in tests to avoid timeout - val zookeeper = new ZooKeeperServer(snapshotDir, logDir, tickTime) - val factory = new NIOServerCnxnFactory() - private val addr = new InetSocketAddress("127.0.0.1", TestUtils.RandomPort) - factory.configure(addr, 0) - factory.startup(zookeeper) - val port = zookeeper.getClientPort - - def shutdown(): Unit = { - // Also shuts down ZooKeeperServer - CoreUtils.swallow(factory.shutdown(), this) - - zookeeper.getSessionTracker match { - case tracker: SessionTrackerImpl => - while (tracker.isAlive) { - Thread.sleep(100) - } - case _ => - } - - def isDown(): Boolean = { - try { - ZkFourLetterWords.sendStat("127.0.0.1", port, 3000) - false - } catch { case _: Throwable => true } - } - - Iterator.continually(isDown()).exists(identity) - CoreUtils.swallow(zookeeper.getZKDatabase.close(), this) - - Utils.delete(logDir) - Utils.delete(snapshotDir) - } - - override def close(): Unit = shutdown() -} diff --git a/core/src/test/scala/unit/kafka/zk/ZkFourLetterWords.scala b/core/src/test/scala/unit/kafka/zk/ZkFourLetterWords.scala deleted file mode 100644 index 4a61c17287706..0000000000000 --- a/core/src/test/scala/unit/kafka/zk/ZkFourLetterWords.scala +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.zk - -import java.io.IOException -import java.net.{SocketTimeoutException, Socket, InetAddress, InetSocketAddress} - -/** - * ZooKeeper responds to a small set of commands. Each command is composed of four letters. You issue the commands to - * ZooKeeper via telnet or nc, at the client port. - * - * Three of the more interesting commands: "stat" gives some general information about the server and connected - * clients, while "srvr" and "cons" give extended details on server and connections respectively. 
- */ -object ZkFourLetterWords { - def sendStat(host: String, port: Int, timeout: Int): Unit = { - val hostAddress = - if (host != null) new InetSocketAddress(host, port) - else new InetSocketAddress(InetAddress.getByName(null), port) - val sock = new Socket() - try { - sock.connect(hostAddress, timeout) - val outStream = sock.getOutputStream - outStream.write("stat".getBytes) - outStream.flush() - } catch { - case e: SocketTimeoutException => throw new IOException("Exception while sending 4lw", e) - } finally { - sock.close() - } - } -} diff --git a/docker/docker_official_images/3.7.0/jvm/jsa_launch b/docker/docker_official_images/3.7.0/jvm/jsa_launch index 4ea3561280aaa..dd0299767e33d 100755 --- a/docker/docker_official_images/3.7.0/jvm/jsa_launch +++ b/docker/docker_official_images/3.7.0/jvm/jsa_launch @@ -17,7 +17,7 @@ KAFKA_CLUSTER_ID="$(opt/kafka/bin/kafka-storage.sh random-uuid)" TOPIC="test-topic" -KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=storage.jsa" opt/kafka/bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c opt/kafka/config/kraft/reconfig-server.properties +KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=storage.jsa" opt/kafka/bin/kafka-storage.sh format --standalone -t $KAFKA_CLUSTER_ID -c opt/kafka/config/kraft/reconfig-server.properties KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=kafka.jsa" opt/kafka/bin/kafka-server-start.sh opt/kafka/config/kraft/reconfig-server.properties & diff --git a/docker/examples/README.md b/docker/examples/README.md index 27c8c99fc0d41..159713ed31b63 100644 --- a/docker/examples/README.md +++ b/docker/examples/README.md @@ -4,9 +4,11 @@ Kafka Docker Image Usage Guide Introduction ------------ -This document contains usage guide as well as examples for docker image. Docker compose files are provided in this directory for the example use cases. +This document contains usage guide as well as examples for Docker image. +Docker Compose files are provided in this directory for the example use cases. + +Kafka server can be started using following ways: -Kafka server can be started using following ways:- - Default configs - File input - Environment variables @@ -14,48 +16,48 @@ Kafka server can be started using following ways:- Running on default configs -------------------------- -If no user provided configs are passed to docker container or configs provided are empty, default configs will be used (configs that are packaged in kafka tarball). If any user provided config is provided, default configs will not be used. +If no user provided configuration (file input or environment variables) is passed to the Docker container, the default KRaft configuration for single combined-mode node will be used. +This default configuration is packaged in the Kafka tarball. -Use file input for providing configs +Use input file for providing configs ------------------------------------ -- This method requires users to provide path to a local folder which contains kafka property files and mount it to docker container using docker volume. -- It replaces the default config file present in docker container. -- Mount the folder containing kafka property files to `/mnt/shared/config` in docker container. -- Command `docker run --volume path/to/property/folder:/mnt/shared/config -p 9092:9092 apache/kafka:latest` can be used to mount the folder containing property files. -- Property files will be only read by the docker container. 
+- This method requires users to provide the path to a local folder which contains the Kafka property files and mount it to the Docker container using a Docker volume. +- It replaces the default KRaft configuration file present in the Docker container. +- The command `docker run --volume /path/to/property/folder:/mnt/shared/config -p 9092:9092 apache/kafka:latest` can be used to mount the folder containing the property files. +- Property files will only be read by the Docker container. Using Environment Variables --------------------------- -- Kafka property defined via env variables will override the value of that property defined in file input and default config. -- If properties are provided via environment variables only, default configs will be replaced by user provided properties. -- To construct the environment key variable name for server.properties configs, following steps can be followed:- - Replace . with _ - Replace _ with __(double underscore) - Replace - with ___(triple underscore) - Prefix the result with KAFKA_ +When using the environment variables, you need to set all properties required to start the KRaft node. +Therefore, the recommended way to use environment variables is via Docker Compose, which allows users to set all the properties that are needed. +It is also possible to use the input file to have a common set of configurations, and then override specific node properties using the environment variables. + +- A Kafka property defined via environment variables will override the value of that property defined in the user provided property file. +- If properties are provided via environment variables only, all required properties must be specified. +- The following rules must be used to construct the environment variable key name: + - Replace `.` with `_` + - Replace `_` with `__` (double underscore) + - Replace `-` with `___` (triple underscore) + - Prefix the result with `KAFKA_` - Examples: - - For abc.def, use KAFKA_ABC_DEF - - For abc-def, use KAFKA_ABC___DEF - - For abc_def, use KAFKA_ABC__DEF + - For `abc.def`, use `KAFKA_ABC_DEF` + - For `abc-def`, use `KAFKA_ABC___DEF` + - For `abc_def`, use `KAFKA_ABC__DEF` -- To provide configs to log4j property files, following points should be considered:- -- log4j properties provided via environment variables will be appended to the default properties file (log4j properties files bundled with kafka) -- `KAFKA_LOG4J_ROOT_LOGLEVEL` can be provided to set the value of log4j.rootLogger in log4j.properties and tools-log4j.properties -- log4j loggers can be added to log4j.properties by setting them in KAFKA_LOG4J_LOGGERS environment variable in a single comma separated string - - Example: - - Assuming that KAFKA_LOG4J_LOGGERS='property1=value1,property2=value2' environment variable is provided to docker container. - - log4j.logger.property1=value1 and log4j.logger.property2=value2 will be added to the log4j.properties file inside docker container. - -- Environment variables commonly used in Kafka can be provided via environment variables, for example `CLUSTER_ID`. -- Command `docker run --env CONFIG_NAME=CONFIG_VALUE -p 9092:9092 apache/kafka:latest` can be used to provide environment variables to docker container -- Note that it is recommended to use docker compose files to provide configs using environment variables.
+- To provide configs to log4j property files, the following points should be considered: + - log4j properties provided via environment variables will be appended to the default properties file (log4j properties files bundled with Kafka). + - `KAFKA_LOG4J_ROOT_LOGLEVEL` can be provided to set the value of `log4j.rootLogger` in log4j.properties and `tools-log4j.properties`. + - log4j loggers can be added to log4j.properties by setting them in the `KAFKA_LOG4J_LOGGERS` environment variable as a single comma-separated string. + - Example: + - Assuming that the `KAFKA_LOG4J_LOGGERS='property1=value1,property2=value2'` environment variable is provided to the Docker container. + - `log4j.logger.property1=value1` and `log4j.logger.property2=value2` will be added to the `log4j.properties` file inside the Docker container. Running in SSL mode ------------------- -- Recommended way to run in ssl mode is by mounting secrets on `/etc/kafka/secrets` in docker container and providing configs following through environment variables (`KAFKA_SSL_KEYSTORE_FILENAME`, `KAFKA_SSL_KEYSTORE_CREDENTIALS`, `KAFKA_SSL_KEY_CREDENTIALS`, `KAFKA_SSL_TRUSTSTORE_FILENAME` and `KAFKA_SSL_TRUSTSTORE_CREDENTIALS`) to let the docker image scripts extract passwords and populate correct paths in server.properties. +- The recommended way to run in SSL mode is by mounting secrets on `/etc/kafka/secrets` in the Docker container and providing configs through the following environment variables (`KAFKA_SSL_KEYSTORE_FILENAME`, `KAFKA_SSL_KEYSTORE_CREDENTIALS`, `KAFKA_SSL_KEY_CREDENTIALS`, `KAFKA_SSL_TRUSTSTORE_FILENAME` and `KAFKA_SSL_TRUSTSTORE_CREDENTIALS`) to let the Docker image scripts extract passwords and populate the correct paths in `server.properties`. - Please ensure appropriate `KAFKA_ADVERTISED_LISTENERS` are provided through environment variables to enable SSL mode in Kafka server, i.e. it should contain an `SSL` listener. - Alternatively property file input can be used to provide ssl properties. - Make sure you set location of truststore and keystore correctly when using file input. See example for file input in `docker-compose-files/single-node/file-input` for better clarity. @@ -66,7 +68,7 @@ Running in SSL mode Examples -------- -- `docker-compose-files` directory contains docker compose files for some example configs to run `apache/kafka` OR `apache/kafka-native` docker image. +- The `docker-compose-files` directory contains Docker Compose files for some example configs to run the `apache/kafka` OR `apache/kafka-native` Docker image. - Pass the `IMAGE` variable with the Docker Compose file to specify which Docker image to use for bringing up the containers. ``` # to bring up containers using apache/kafka docker image @@ -78,22 +80,23 @@ IMAGE=apache/kafka-native:latest - Run the commands from root of the repository. - Checkout `single-node` examples for quick small examples to play around with. - `cluster` contains multi node examples, for `combined` mode as well as `isolated` mode. -- Kafka server running on docker container can be accessed using cli scripts or your own client code. +- A Kafka server running in a Docker container can be accessed using CLI scripts or your own client code. - Make sure jars are built, if you decide to use cli scripts of this repo. Single Node ----------- -- These examples are for understanding various ways inputs can be provided and kafka can be configured in docker container. +- These examples are for understanding the various ways inputs can be provided and Kafka can be configured in a Docker container. 
- Examples are present inside `docker-compose-files/single-node` directory. -- Plaintext:- +- Plaintext: - This is the simplest compose file. - We are using environment variables purely for providing configs. - `KAFKA_LISTENERS` is getting supplied. But if it was not provided, defaulting would have kicked in and we would have used `KAFKA_ADVERTISED_LISTENERS` to generate `KAFKA_LISTENERS`, by replacing the host with `0.0.0.0`. - Note that we have provided a `CLUSTER_ID`, but it's not mandatory as there is a default cluster id present in container. - We had to provide `KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR` and set it explicitly to 1, because if we don't provide it default value provided by kafka will be taken which is 3. - - We have also set hostname of the container. It can be kept same as the container name for clarity. - - To run the example:- + - We have also set hostname of the container. + It can be kept same as the container name for clarity. + - To run the example: ``` # Run from root of the repo @@ -103,16 +106,16 @@ Single Node # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml up ``` - - To produce messages using client scripts:- + - To produce messages using client scripts: ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9092 ``` -- SSL:- +- SSL: - Note that here we are using environment variables to pass configs. - Notice how secrets folder is mounted to docker container. - In case of environment variable it is mandatory to keep the files in `/etc/kafka/secrets` folder in docker container, given that the path of the files will be derived from that, as we are just providing file names in other SSL configs. - - To run the example:- + - To run the example: ``` # Run from root of the repo @@ -122,17 +125,17 @@ Single Node # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml up ``` - - To produce messages using client scripts (Ensure that java version >= 17):- + - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` -- File Input:- +- File Input: - Here ssl configs are provided via file input. - Notice that now full file path is needed for the configs. - Note that there is extra volume mount now. - Configs provided via environment variable will override the file input configs. 
- - To run the example:- + - To run the example: ``` # Run from root of the repo @@ -142,7 +145,7 @@ Single Node # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml up ``` - - To produce messages using client scripts (Ensure that java version >= 17):- + - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties @@ -151,19 +154,19 @@ Single Node Multi Node Cluster ------------------ -- These examples are for real world usecases where multiple nodes of kafka are required. -- Combined:- +- These examples are for real world use cases where multiple nodes of kafka are required. +- Combined: - Examples are present in `docker-compose-files/cluster/combined` directory. - - Plaintext:- + - Plaintext: - Each broker must expose a unique port to host machine. - For example broker-1, broker2 and broker3 are listening on port 9092, they're exposing it to the host via ports 29092, 39092 and 49092 respectively. - Here important thing to note is that to ensure that kafka brokers are accessible both to clients as well as to each other we have introduced an additional listener. - PLAINTEXT is supposed to be listener accessible to other brokers. - - The inter broker listener advertised by the brokers is exposed on container's hostname. This is done so that brokers can find each other in docker network. + - The inter broker listener advertised by the brokers is exposed on container's hostname. This is done so that brokers can find each other in Docker network. - PLAINTEXT_HOST is supposed to be listener accessible to the clients. - - The port advertised for host machine is done on localhost, as this is the hostname (in this example) that client will use to connect with kafka running inside docker container. + - The port advertised for host machine is done on localhost, as this is the hostname (in this example) that client will use to connect with kafka running inside Docker container. - Here we take advantage of hostname set for each broker and set the listener accordingly. - - To run the example:- + - To run the example: ``` # Run from root of the repo @@ -173,16 +176,18 @@ Multi Node Cluster # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml up ``` - - To access using client script:- + - To access using client script: ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:29092 ``` - - SSL:- + - SSL: - Similar to Plaintext example, for inter broker communication in SSL mode, SSL-INTERNAL is required and for client to broker communication, SSL is required. - - Note that `KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM` is set to empty as hostname was not set in credentials. This won't be needed in production usecases. - - Also note that for example we have used the same credentials for all brokers. Make sure each broker has it's own secrets. - - To run the example:- + - Note that `KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM` is set to empty as hostname was not set in credentials. + This won't be needed in production use cases. 
+ - Also note that for example we have used the same credentials for all brokers. + Make sure each broker has it's own secrets. + - To run the example: ``` # Run from root of the repo @@ -192,18 +197,18 @@ Multi Node Cluster # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml up ``` - - To produce messages using client scripts (Ensure that java version >= 17):- + - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` -- Isolated:- +- Isolated: - Examples are present in `docker-compose-files/cluster/isolated` directory. - - Plaintext:- + - Plaintext: - Here controllers and brokers are configured separately. - It's a good practice to define that brokers depend on controllers. - In this case also we have same listeners setup as mentioned in combined case. - - To run the example:- + - To run the example: ``` # Run from root of the repo @@ -213,15 +218,15 @@ Multi Node Cluster # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml up ``` - - To access using client script:- + - To access using client script: ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:29092 ``` - - SSL:- + - SSL: - Pretty much same as combined example, with controllers and brokers separated now. - Note that `SSL-INTERNAL` is only for inter broker communication and controllers are using `PLAINTEXT`. 
- - To run the example:- + - To run the example: ``` # Run from root of the repo @@ -231,7 +236,7 @@ Multi Node Cluster # GraalVM based Native Apache Kafka Docker Image $ IMAGE=apache/kafka-native:latest docker compose -f docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml up ``` - - To produce messages using client scripts (Ensure that java version >= 17):- + - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties diff --git a/docker/jvm/Dockerfile b/docker/jvm/Dockerfile index 79687d0e2252a..767b414ab7ab1 100644 --- a/docker/jvm/Dockerfile +++ b/docker/jvm/Dockerfile @@ -76,8 +76,8 @@ RUN set -eux ; \ chown appuser:appuser -R /usr/logs /opt/kafka /mnt/shared/config; \ chown appuser:root -R /var/lib/kafka /etc/kafka/secrets /etc/kafka; \ chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \ - cp /opt/kafka/config/log4j.properties /etc/kafka/docker/log4j.properties; \ - cp /opt/kafka/config/tools-log4j.properties /etc/kafka/docker/tools-log4j.properties; \ + cp /opt/kafka/config/log4j2.yaml /etc/kafka/docker/log4j2.yaml; \ + cp /opt/kafka/config/tools-log4j2.yaml /etc/kafka/docker/tools-log4j2.yaml; \ cp /opt/kafka/config/kraft/reconfig-server.properties /etc/kafka/docker/server.properties; \ rm kafka.tgz kafka.tgz.asc KEYS; \ apk del wget gpg gpg-agent; \ diff --git a/docker/jvm/jsa_launch b/docker/jvm/jsa_launch index 4ea3561280aaa..dd0299767e33d 100755 --- a/docker/jvm/jsa_launch +++ b/docker/jvm/jsa_launch @@ -17,7 +17,7 @@ KAFKA_CLUSTER_ID="$(opt/kafka/bin/kafka-storage.sh random-uuid)" TOPIC="test-topic" -KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=storage.jsa" opt/kafka/bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c opt/kafka/config/kraft/reconfig-server.properties +KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=storage.jsa" opt/kafka/bin/kafka-storage.sh format --standalone -t $KAFKA_CLUSTER_ID -c opt/kafka/config/kraft/reconfig-server.properties KAFKA_JVM_PERFORMANCE_OPTS="-XX:ArchiveClassesAtExit=kafka.jsa" opt/kafka/bin/kafka-server-start.sh opt/kafka/config/kraft/reconfig-server.properties & diff --git a/docker/native/Dockerfile b/docker/native/Dockerfile index ed3171137c368..57c11ba704865 100644 --- a/docker/native/Dockerfile +++ b/docker/native/Dockerfile @@ -64,8 +64,8 @@ RUN apk update ; \ COPY --chown=appuser:root --from=build-native-image /app/kafka/kafka.Kafka /opt/kafka/ COPY --chown=appuser:root --from=build-native-image /app/kafka/config/kraft/reconfig-server.properties /etc/kafka/docker/ -COPY --chown=appuser:root --from=build-native-image /app/kafka/config/log4j.properties /etc/kafka/docker/ -COPY --chown=appuser:root --from=build-native-image /app/kafka/config/tools-log4j.properties /etc/kafka/docker/ +COPY --chown=appuser:root --from=build-native-image /app/kafka/config/log4j2.yaml /etc/kafka/docker/ +COPY --chown=appuser:root --from=build-native-image /app/kafka/config/tools-log4j2.yaml /etc/kafka/docker/ COPY --chown=appuser:root resources/common-scripts /etc/kafka/docker/ COPY --chown=appuser:root launch /etc/kafka/docker/ diff --git a/docs/configuration.html b/docs/configuration.html index cd12dd3ea9ade..9d5454583d13b 100644 --- a/docs/configuration.html +++ b/docs/configuration.html @@ -22,9 +22,10 @@

              - broker.id
              + node.id
                log.dirs
              - zookeeper.connect
              + process.roles
              + controller.quorum.bootstrap.servers
              Topic-level configurations and defaults are discussed in more detail below. @@ -62,39 +63,12 @@

              All configs that are configurable at cluster level may also be configured at per-broker level (e.g. for testing). If a config value is defined at different levels, the following order of precedence is used:
              - • Dynamic per-broker config stored in ZooKeeper
              - • Dynamic cluster-wide default config stored in ZooKeeper
              + • Dynamic per-broker config stored in the metadata log
              + • Dynamic cluster-wide default config stored in the metadata log
              • Static broker config from server.properties
              • Kafka default, see broker configs
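
              As an illustration of the dynamic levels above, here is a minimal sketch that uses the Java Admin client to set a dynamic cluster-wide default (an empty resource name targets the default level; a broker id string such as "1" would target a single broker). The bootstrap address and the chosen config are placeholders, and the kafka-configs.sh tool can be used to the same effect.

```
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class DynamicBrokerConfigSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address

        try (Admin admin = Admin.create(props)) {
            // An empty resource name addresses the cluster-wide default level;
            // use a broker id such as "1" for a per-broker override instead.
            ConfigResource clusterDefault = new ConfigResource(ConfigResource.Type.BROKER, "");
            AlterConfigOp op = new AlterConfigOp(
                    new ConfigEntry("log.cleaner.threads", "2"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(clusterDefault, List.of(op))).all().get();
        }
    }
}
```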
              -

              Updating Password Configs Dynamically
              -

              Password config values that are dynamically updated are encrypted before storing in ZooKeeper. The broker config - password.encoder.secret must be configured in server.properties to enable dynamic update - of password configs. The secret may be different on different brokers.

              -

              The secret used for password encoding may be rotated with a rolling restart of brokers. The old secret used for encoding - passwords currently in ZooKeeper must be provided in the static broker config password.encoder.old.secret and - the new secret must be provided in password.encoder.secret. All dynamic password configs stored in ZooKeeper - will be re-encoded with the new secret when the broker starts up.

              -

              In Kafka 1.1.x, all dynamically updated password configs must be provided in every alter request when updating configs - using kafka-configs.sh even if the password config is not being altered. This constraint will be removed in - a future release.

              - -
              Updating Password Configs in ZooKeeper Before Starting Brokers
              - - From Kafka 2.0.0 onwards, kafka-configs.sh enables dynamic broker configs to be updated using ZooKeeper before - starting brokers for bootstrapping. This enables all password configs to be stored in encrypted form, avoiding the need for - clear passwords in server.properties. The broker config password.encoder.secret must also be specified - if any password configs are included in the alter command. Additional encryption parameters may also be specified. Password - encoder configs will not be persisted in ZooKeeper. For example, to store SSL key password for listener INTERNAL - on broker 0: - -
              $ bin/kafka-configs.sh --zookeeper localhost:2182 --zk-tls-config-file zk_tls_config.properties --entity-type brokers --entity-name 0 --alter --add-config
              -    'listener.name.internal.ssl.key.password=key-password,password.encoder.secret=secret,password.encoder.iterations=8192'
              - - The configuration listener.name.internal.ssl.key.password will be persisted in ZooKeeper in encrypted - form using the provided encoder configs. The encoder secret and iterations are not persisted in ZooKeeper. -
              Updating SSL Keystore of an Existing Listener
              Brokers may be configured with SSL keystores with short validity periods to reduce the risk of compromised certificates. Keystores may be updated dynamically without restarting the broker. The config name must be prefixed with the listener prefix @@ -154,17 +128,8 @@
              Updating Default Topic Configuration
            7. compression.type
            8. log.preallocate
            9. log.message.timestamp.type
            10. -
            11. log.message.timestamp.difference.max.ms
            12. - From Kafka version 2.0.0 onwards, unclean leader election is automatically enabled by the controller when the config - unclean.leader.election.enable is dynamically updated. - In Kafka version 1.1.x, changes to unclean.leader.election.enable take effect only when a new controller is elected. - Controller re-election may be forced by running: - -
              $ bin/zookeeper-shell.sh localhost
              -  rmr /controller
              -

              Updating Log Cleaner Configs
              Log cleaner configs may be updated dynamically at cluster-default level used by all brokers. The changes take effect on the next iteration of log cleaning. One or more of these configs may be updated: @@ -293,7 +258,7 @@

              org.apache.kafka.disallowed.login.modules

              This system property is used to disable the problematic login modules usage in SASL JAAS configuration. This property accepts comma-separated list of loginModule names. By default com.sun.security.auth.module.JndiLoginModule loginModule is disabled. -

              If users want to enable JndiLoginModule, users need to explicitly reset the system property like below. We advise the users to validate configurations and only allow trusted JNDI configurations. For more details CVE-2023-25194. +

              If users want to enable JndiLoginModule, they need to explicitly reset the system property as shown below. We advise users to validate their configurations and only allow trusted JNDI configurations. For more details, see CVE-2023-25194.

              -Dorg.apache.kafka.disallowed.login.modules=

              To disable more loginModules, update the system property with comma-separated loginModule names. Make sure to explicitly add JndiLoginModule module name to the comma-separated list like below.

              -Dorg.apache.kafka.disallowed.login.modules=com.sun.security.auth.module.JndiLoginModule,com.ibm.security.auth.module.LdapLoginModule,com.ibm.security.auth.module.Krb5LoginModule
              @@ -302,6 +267,22 @@

              org.apache.kafka.automatic.config.providers

              +

              This system property controls the automatic loading of ConfigProvider implementations in Apache Kafka. ConfigProviders are used to dynamically supply configuration values from sources such as files, directories, or environment variables. This property accepts a comma-separated list of ConfigProvider names. By default, all built-in ConfigProviders are enabled, including FileConfigProvider, DirectoryConfigProvider, and EnvVarConfigProvider.

              +

              If users want to disable all automatic ConfigProviders, they need to explicitly set the system property as shown below. Disabling automatic ConfigProviders is recommended in environments where configuration data comes from untrusted sources or where increased security is required. For more details, see CVE-2024-31141.

              +
              -Dorg.apache.kafka.automatic.config.providers=none
              +

              To allow specific ConfigProviders, update the system property with a comma-separated list of ConfigProvider names. For example, to enable only the EnvVarConfigProvider, set the property as follows:

              +
              -Dorg.apache.kafka.automatic.config.providers=env
              +

              To use multiple ConfigProviders, include their names in a comma-separated list as shown below:

              +
              -Dorg.apache.kafka.automatic.config.providers=file,env
              + + + + + +
              Since:3.8.0
              Default Value:All built-in ConfigProviders are enabled
              +
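
              For context, the sketch below shows how a client configuration typically declares and references one of these providers; the provider alias ("env"), the environment variable name and the property being resolved are illustrative placeholders rather than required names, and resolution behavior should be checked against the ConfigProvider documentation together with the system property above.

```
import java.util.Properties;

public class ConfigProviderSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Declare the built-in EnvVarConfigProvider under a chosen alias ("env").
        props.put("config.providers", "env");
        props.put("config.providers.env.class",
                "org.apache.kafka.common.config.provider.EnvVarConfigProvider");
        // Reference it with a ${alias:key} placeholder; SSL_TRUSTSTORE_PASSWORD is a
        // hypothetical environment variable supplying the secret.
        props.put("ssl.truststore.password", "${env:SSL_TRUSTSTORE_PASSWORD}");
        System.out.println(props);
    }
}
```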

              3.10 Tiered Storage Configs

              diff --git a/docs/connect.html b/docs/connect.html index 3724ad8f68dea..f77e21bdf5498 100644 --- a/docs/connect.html +++ b/docs/connect.html @@ -294,7 +294,6 @@

              REST API

            13. PATCH /connectors/{name}/config - patch the configuration parameters for a specific connector, where null values in the JSON body indicates removing of the key from the final configuration
            14. GET /connectors/{name}/status - get current status of the connector, including if it is running, failed, paused, etc., which worker it is assigned to, error information if it has failed, and the state of all its tasks
            15. GET /connectors/{name}/tasks - get a list of tasks currently running for a connector along with their configurations
            16. -
            17. GET /connectors/{name}/tasks-config - get the configuration of all tasks for a specific connector. This endpoint is deprecated and will be removed in the next major release. Please use the GET /connectors/{name}/tasks endpoint instead. Note that the response structures of the two endpoints differ slightly, please refer to the OpenAPI documentation for more details
            18. GET /connectors/{name}/tasks/{taskid}/status - get current status of the task, including if it is running, failed, paused, etc., which worker it is assigned to, and error information if it has failed
            19. PUT /connectors/{name}/pause - pause the connector and its tasks, which stops message processing until the connector is resumed. Any resources claimed by its tasks are left allocated, which allows the connector to begin processing data quickly once it is resumed.
            20. PUT /connectors/{name}/stop - stop the connector and shut down its tasks, deallocating any resources claimed by its tasks. This is more efficient from a resource usage standpoint than pausing the connector, but can cause it to take longer to begin processing data once resumed. Note that the offsets for a connector can be only modified via the offsets management endpoints if it is in the stopped state
            21. @@ -767,7 +766,7 @@
              Err

              When error reporting is enabled for a connector, the connector can use an ErrantRecordReporter to report problems with individual records sent to a sink connector. The following example shows how a connector's SinkTask subclass might obtain and use the ErrantRecordReporter, safely handling a null reporter when the DLQ is not enabled or when the connector is installed in an older Connect runtime that doesn't have this reporter feature:

              - private ErrantRecordReporter reporter;
              +     private ErrantRecordReporter reporter;
               
               @Override
               public void start(Map<String, String> props) {
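
              Because the hunk above only shows the first lines of that snippet, here is a self-contained sketch of the same pattern (the class name, topic handling and process method are hypothetical): the task obtains the reporter defensively in start() and falls back to failing the batch when no reporter is available.

```
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.ErrantRecordReporter;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class ExampleSinkTask extends SinkTask {  // hypothetical connector task
    private ErrantRecordReporter reporter;

    @Override
    public void start(Map<String, String> props) {
        try {
            reporter = context.errantRecordReporter(); // may be null if the DLQ is not enabled
        } catch (NoSuchMethodError | NoClassDefFoundError e) {
            reporter = null; // older Connect runtime without this feature
        }
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            try {
                process(record); // hypothetical delivery to the external system
            } catch (Exception e) {
                if (reporter != null) {
                    reporter.report(record, e); // hand the bad record to the error reporter
                } else {
                    throw new ConnectException("Failed to process record", e);
                }
            }
        }
    }

    private void process(SinkRecord record) {
        // write the record to the target system (omitted)
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "1.0";
    }
}
```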
              diff --git a/docs/design.html b/docs/design.html
              index f775552f4cf1e..466c4dfe7f4c1 100644
              --- a/docs/design.html
              +++ b/docs/design.html
              @@ -254,32 +254,31 @@ 

              - Kafka's semantics are straight-forward. When publishing a message we have a notion of the message being "committed" to the log. Once a published message is committed it will not be lost as long as one broker that - replicates the partition to which this message was written remains "alive". The definition of committed message, alive partition as well as a description of which types of failures we attempt to handle will be + Kafka's semantics are straightforward. When publishing a message we have a notion of the message being "committed" to the log. Once a published message is committed, it will not be lost as long as one broker that + replicates the partition to which this message was written remains "alive". The definition of committed message and alive partition as well as a description of which types of failures we attempt to handle will be described in more detail in the next section. For now let's assume a perfect, lossless broker and try to understand the guarantees to the producer and consumer. If a producer attempts to publish a message and - experiences a network error it cannot be sure if this error happened before or after the message was committed. This is similar to the semantics of inserting into a database table with an autogenerated key. + experiences a network error, it cannot be sure if this error happened before or after the message was committed. This is similar to the semantics of inserting into a database table with an autogenerated key.

              Prior to 0.11.0.0, if a producer failed to receive a response indicating that a message was committed, it had little choice but to resend the message. This provides at-least-once delivery semantics since the message may be written to the log again during resending if the original request had in fact succeeded. Since 0.11.0.0, the Kafka producer also supports an idempotent delivery option which guarantees that resending will not result in duplicate entries in the log. To achieve this, the broker assigns each producer an ID and deduplicates messages using a sequence number that is sent by the producer along with every message. - Also beginning with 0.11.0.0, the producer supports the ability to send messages to multiple topic partitions using transaction-like semantics: i.e. either all messages are successfully written or none of them are. - The main use case for this is exactly-once processing between Kafka topics (described below). + Also beginning with 0.11.0.0, the producer supports the ability to send messages atomically to multiple topic partitions using transactions, so that either all messages are successfully written or none of them are.

              - Not all use cases require such strong guarantees. For uses which are latency sensitive we allow the producer to specify the durability level it desires. If the producer specifies that it wants to wait on the message - being committed this can take on the order of 10 ms. However the producer can also specify that it wants to perform the send completely asynchronously or that it wants to wait only until the leader (but not + Not all use cases require such strong guarantees. For use cases which are latency-sensitive, we allow the producer to specify the durability level it desires. If the producer specifies that it wants to wait on the message + being committed, this can take on the order of 10 ms. However the producer can also specify that it wants to perform the send completely asynchronously or that it wants to wait only until the leader (but not necessarily the followers) have the message.

              - Now let's describe the semantics from the point-of-view of the consumer. All replicas have the exact same log with the same offsets. The consumer controls its position in this log. If the consumer never crashed it - could just store this position in memory, but if the consumer fails and we want this topic partition to be taken over by another process the new process will need to choose an appropriate position from which to start + Now let's describe the semantics from the point of view of the consumer. All replicas have the exact same log with the same offsets. The consumer controls its position in this log. If the consumer never crashed it + could just store this position in memory, but if the consumer fails and we want this topic partition to be taken over by another process, the new process will need to choose an appropriate position from which to start processing. Let's say the consumer reads some messages -- it has several options for processing the messages and updating its position.

              1. It can read the messages, then save its position in the log, and finally process the messages. In this case there is a possibility that the consumer process crashes after saving its position but before saving @@ -290,24 +289,61 @@

                Kafka Streams - application), we can leverage the new transactional producer capabilities in 0.11.0.0 that were mentioned above. The consumer's position is stored as a message in a topic, so we can write the offset to Kafka in the - same transaction as the output topics receiving the processed data. If the transaction is aborted, the consumer's position will revert to its old value and the produced data on the output topics will not be visible - to other consumers, depending on their "isolation level." In the default "read_uncommitted" isolation level, all messages are visible to consumers even if they were part of an aborted transaction, - but in "read_committed," the consumer will only return messages from transactions which were committed (and any messages which were not part of a transaction). + So what about exactly-once semantics? When consuming from a Kafka topic and producing to another topic (as in a Kafka Streams application), we can + leverage the new transactional producer capabilities in 0.11.0.0 that were mentioned above. The consumer's position is stored as a message in an internal topic, so we can write the offset to Kafka in the + same transaction as the output topics receiving the processed data. If the transaction is aborted, the consumer's stored position will revert to its old value (although the consumer has to refetch the + committed offset because it does not automatically rewind) and the produced data on the output topics will not be visible to other consumers, depending on their "isolation level". In the default + "read_uncommitted" isolation level, all messages are visible to consumers even if they were part of an aborted transaction, but in "read_committed" isolation level, the consumer will only return messages + from transactions which were committed (and any messages which were not part of a transaction).

                When writing to an external system, the limitation is in the need to coordinate the consumer's position with what is actually stored as output. The classic way of achieving this would be to introduce a two-phase - commit between the storage of the consumer position and the storage of the consumers output. But this can be handled more simply and generally by letting the consumer store its offset in the same place as + commit between the storage of the consumer position and the storage of the consumers output. This can be handled more simply and generally by letting the consumer store its offset in the same place as its output. This is better because many of the output systems a consumer might want to write to will not support a two-phase commit. As an example of this, consider a Kafka Connect connector which populates data in HDFS along with the offsets of the data it reads so that it is guaranteed that either data and offsets are both updated or neither is. We follow similar patterns for many other data systems which require these stronger semantics and for which the messages do not have a primary key to allow for deduplication.

                - So effectively Kafka supports exactly-once delivery in Kafka Streams, and the transactional producer/consumer can be used generally to provide - exactly-once delivery when transferring and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the - offset which makes implementing this feasible (see also Kafka Connect). Otherwise, Kafka guarantees at-least-once delivery by default, and allows - the user to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages. + As a result, Kafka supports exactly-once delivery in Kafka Streams, and the transactional producer and the consumer using read-committed isolation level + can be used generally to provide exactly-once delivery when reading, processing and writing data on Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, + but Kafka provides the primitives which makes implementing this feasible (see also Kafka Connect). Otherwise, Kafka guarantees at-least-once delivery + by default, and allows the user to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages. -

                4.7 Replication

                +

                4.7 Using Transactions

                +

                + As mentioned above, the simplest way to get exactly-once semantics from Kafka is to use Kafka Streams. However, it is also possible to achieve the same transactional guarantees with the Kafka producer and consumer directly, by using them in the same way as Kafka Streams does.

                + Kafka transactions are a bit different from transactions in other messaging systems. In Kafka, the consumer and producer are separate, and it is only the producer which is transactional. It is however able to + make transactional updates to the consumer's position (confusingly called the "committed offset"), and it is this which gives the overall exactly-once behavior. +

                + There are three key aspects to exactly-once processing using the producer and consumer, which match how Kafka Streams works. +

                  +
                1. The consumer uses partition assignment to ensure that it is the only consumer in the consumer group currently processing each partition.
                2. +
                3. The producer uses transactions so that all the records it produces, and any offsets it updates on behalf of the consumer, are performed atomically.
                4. +
                5. In order to handle transactions properly in combination with rebalancing, it is advisable to use one producer instance for each consumer instance. More complicated and efficient schemes are possible, + but at the cost of greater complexity.
                6. +
                +

                + In addition, it is generally considered a good practice to use the read-committed isolation level if trying to achieve exactly-once processing. Strictly speaking, the consumer doesn't have to use read-committed + isolation level, but if it does not, it will see records from aborted transactions and also open transactions which have not yet completed. +

                + The consumer configuration must include isolation.level=read_committed and enable.auto.commit=false. The producer configuration must set transactional.id + to the name of the transactional ID to be used, which configures the producer for transactional delivery and also makes sure that a restarted application causes any in-flight transaction from + the previous instance to abort. Only the producer has the transactional.id configuration. +

                + Here's an example of a transactional message copier + which uses these principles. It uses a KafkaConsumer to consume records from one topic and a KafkaProducer to produce records to another topic. It uses transactions to ensure + that there is no duplication or loss of records as they are copied, provided that the --use-group-metadata option is set. +

                + It is important to handle exceptions and aborted transactions correctly. Any records written by the transactional producer are marked as being part of the transaction, and when the transaction commits or aborts, transaction marker records are written to indicate its outcome. This is how the read-committed consumer avoids returning records from aborted transactions. However, in the event of a transaction abort, the application's state, and in particular the current position of the consumer, must be reset explicitly so that it can reprocess the records processed by the aborted transaction.

                + A simple policy for handling exceptions and aborted transactions is to discard and recreate the Kafka producer and consumer objects and start afresh. As part of recreating the consumer, the consumer group will rebalance and fetch the last committed offset, which has the effect of rewinding back to the state before the transaction aborted. Alternatively, a more sophisticated application (such as the transactional message copier) can choose not to recreate the objects and instead use KafkaConsumer.committed to retrieve the committed offset from Kafka, followed by KafkaConsumer.seek to rewind the current position.
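
                To make this concrete, here is a minimal consume-process-produce sketch along the same lines. It is not the transactional message copier itself: the bootstrap address, topics, group id and transactional id are placeholders, and the "processing" step is a plain pass-through.

```
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

public class TransactionalCopySketch {
    public static void main(String[] args) {
        Properties c = new Properties();
        c.put("bootstrap.servers", "localhost:9092");   // placeholder
        c.put("group.id", "copy-group");                // placeholder
        c.put("isolation.level", "read_committed");
        c.put("enable.auto.commit", "false");
        c.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        c.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        Properties p = new Properties();
        p.put("bootstrap.servers", "localhost:9092");   // placeholder
        p.put("transactional.id", "copy-sketch-1");     // placeholder, one per consumer instance
        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(c);
             KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
            consumer.subscribe(List.of("input-topic"));  // placeholder topic
            producer.initTransactions();

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                if (records.isEmpty()) {
                    continue;
                }
                producer.beginTransaction();
                try {
                    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(new ProducerRecord<>("output-topic", record.key(), record.value()));
                        offsets.put(new TopicPartition(record.topic(), record.partition()),
                                new OffsetAndMetadata(record.offset() + 1));
                    }
                    // Commit the consumer's position atomically with the produced records.
                    producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
                    producer.commitTransaction();
                } catch (ProducerFencedException e) {
                    // Fatal for this producer: the simple policy is to close and recreate both clients.
                    throw e;
                } catch (KafkaException e) {
                    producer.abortTransaction();
                    // Rewind to the last committed position so the aborted batch is reprocessed.
                    for (TopicPartition tp : records.partitions()) {
                        OffsetAndMetadata committed = consumer.committed(Set.of(tp)).get(tp);
                        consumer.seek(tp, committed == null ? 0L : committed.offset());
                    }
                }
            }
        }
    }
}
```

                The rewind-on-abort step is the "simple policy" described above; a production application would also need to handle rebalances and fatal producer errors more carefully.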

                4.8 Replication

                Kafka replicates the log for each topic's partitions across a configurable number of servers (you can set this replication factor on a topic-by-topic basis). This allows automatic failover to these replicas when a server in the cluster fails so messages remain available in the presence of failures. @@ -334,11 +370,6 @@

                < The result is that we are able to batch together many of the required leadership change notifications which makes the election process far cheaper and faster for a large number of partitions. If the controller itself fails, then another controller will be elected. -

                4.8 Log Compaction

                +

                4.9 Log Compaction

                Log compaction ensures that Kafka will always retain at least the last known value for each message key within the log of data for a single topic partition. It addresses use cases and scenarios such as restoring state after application crashes or system failure, or reloading caches after application restarts during operational maintenance. Let's dive into these use cases in more detail and then describe how compaction works. @@ -591,7 +622,7 @@

                <

                Further cleaner configurations are described here. -

                4.9 Quotas

                +

                4.10 Quotas

                Kafka cluster has the ability to enforce quotas on requests to control the broker resources used by clients. Two types of client quotas can be enforced by Kafka brokers for each group of clients sharing a quota: @@ -624,7 +655,7 @@

                <

                Quota configuration may be defined for (user, client-id), user and client-id groups. It is possible to override the default quota at any of the quota levels that needs a higher (or even lower) quota. The mechanism is similar to the per-topic log config overrides. - User and (user, client-id) quota overrides are written to ZooKeeper under /config/users and client-id quota overrides are written under /config/clients. + User and (user, client-id) quota overrides are written to the metadata log. These overrides are read by all brokers and are effective immediately. This lets us change quotas without having to do a rolling restart of the entire cluster. See here for details. Default quotas for each group may also be updated dynamically using the same mechanism.

                @@ -632,14 +663,14 @@

                < The order of precedence for quota configuration is:

                  -
                - /config/users/<user>/clients/<client-id>
                - /config/users/<user>/clients/<default>
                - /config/users/<user>
                - /config/users/<default>/clients/<client-id>
                - /config/users/<default>/clients/<default>
                - /config/users/<default>
                - /config/clients/<client-id>
                - /config/clients/<default>
                + matching user and client-id quotas
                + matching user and default client-id quotas
                + matching user quota
                + default user and matching client-id quotas
                + default user and default client-id quotas
                + default user quota
                + matching client-id quota
                + default client-id quota
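
                As an illustration of setting a quota at the first (most specific) of these levels, the sketch below uses the Java Admin client; the bootstrap address, principal, client id and byte rates are placeholders, and kafka-configs.sh can be used to the same effect.

```
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

public class ClientQuotaSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            // Quota entity at the (user, client-id) level, the highest-precedence entry above.
            ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(
                    ClientQuotaEntity.USER, "user1",           // placeholder principal
                    ClientQuotaEntity.CLIENT_ID, "clientA"));  // placeholder client id

            ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, List.of(
                    new ClientQuotaAlteration.Op("producer_byte_rate", 1_048_576.0),
                    new ClientQuotaAlteration.Op("consumer_byte_rate", 2_097_152.0)));

            admin.alterClientQuotas(List.of(alteration)).all().get();
        }
    }
}
```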

                Network Bandwidth Quotas

                diff --git a/docs/images/kafka-logo-readme-dark.svg b/docs/images/kafka-logo-readme-dark.svg new file mode 100644 index 0000000000000..00d131bf2e1f7 --- /dev/null +++ b/docs/images/kafka-logo-readme-dark.svg @@ -0,0 +1,217 @@ + + + + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + + + + + diff --git a/docs/images/kafka-logo-readme-light.svg b/docs/images/kafka-logo-readme-light.svg new file mode 100644 index 0000000000000..91e55d2fd1f49 --- /dev/null +++ b/docs/images/kafka-logo-readme-light.svg @@ -0,0 +1,217 @@ + + + + + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + + + + + + diff --git a/docs/implementation.html b/docs/implementation.html index 93c9aa60c4cf6..25a7f60b18f8a 100644 --- a/docs/implementation.html +++ b/docs/implementation.html @@ -255,45 +255,6 @@

                < CoordinatorLoadInProgressException and the consumer may retry the OffsetFetchRequest after backing off.

                -

                ZooKeeper Directories

                -

                - The following gives the ZooKeeper structures and algorithms used for co-ordination between consumers and brokers. -

                - -

                Notation

                -

                - When an element in a path is denoted [xyz], that means that the value of xyz is not fixed and there is in fact a ZooKeeper znode for each possible value of xyz. For example /topics/[topic] would be a directory named /topics containing a sub-directory for each topic name. Numerical ranges are also given such as [0...5] to indicate the subdirectories 0, 1, 2, 3, 4. An arrow -> is used to indicate the contents of a znode. For example /hello -> world would indicate a znode /hello containing the value "world". -

                - -

                Broker Node Registry

                -
                /brokers/ids/[0...N] --> {"jmx_port":...,"timestamp":...,"endpoints":[...],"host":...,"version":...,"port":...} (ephemeral node)
                -

                - This is a list of all present broker nodes, each of which provides a unique logical broker id which identifies it to consumers (which must be given as part of its configuration). On startup, a broker node registers itself by creating a znode with the logical broker id under /brokers/ids. The purpose of the logical broker id is to allow a broker to be moved to a different physical machine without affecting consumers. An attempt to register a broker id that is already in use (say because two servers are configured with the same broker id) results in an error. -

                -

                - Since the broker registers itself in ZooKeeper using ephemeral znodes, this registration is dynamic and will disappear if the broker is shutdown or dies (thus notifying consumers it is no longer available). -

                -

                Broker Topic Registry

                -
                /brokers/topics/[topic]/partitions/[0...N]/state --> {"controller_epoch":...,"leader":...,"version":...,"leader_epoch":...,"isr":[...]} (ephemeral node)
                - -

                - Each broker registers itself under the topics it maintains and stores the number of partitions for that topic. -

                - -

                Cluster Id

                - -

                - The cluster id is a unique and immutable identifier assigned to a Kafka cluster. The cluster id can have a maximum of 22 characters and the allowed characters are defined by the regular expression [a-zA-Z0-9_\-]+, which corresponds to the characters used by the URL-safe Base64 variant with no padding. Conceptually, it is auto-generated when a cluster is started for the first time. -

                -

                - Implementation-wise, it is generated when a broker with version 0.10.1 or later is successfully started for the first time. The broker tries to get the cluster id from the /cluster/id znode during startup. If the znode does not exist, the broker generates a new cluster id and creates the znode with this cluster id. -

                - -

                Broker node registration

                - -

                - The broker nodes are basically independent, so they only publish information about what they have. When a broker joins, it registers itself under the broker node registry directory and writes information about its host name and port. The broker also register the list of existing topics and their logical partitions in the broker topic registry. New topics are registered dynamically when they are created on the broker. -

                diff --git a/docs/js/templateData.js b/docs/js/templateData.js index 34c8a96b7c263..cb834035f44e1 100644 --- a/docs/js/templateData.js +++ b/docs/js/templateData.js @@ -17,8 +17,8 @@ limitations under the License. // Define variables for doc templates var context={ - "version": "40", - "dotVersion": "4.0", - "fullDotVersion": "4.0.0", + "version": "41", + "dotVersion": "4.1", + "fullDotVersion": "4.1.0", "scalaVersion": "2.13" }; diff --git a/docs/migration.html b/docs/migration.html deleted file mode 100644 index 95fc87ffacafe..0000000000000 --- a/docs/migration.html +++ /dev/null @@ -1,34 +0,0 @@ - - - -

                Migrating from 0.7.x to 0.8

                - -0.8 is our first (and hopefully last) release with a non-backwards-compatible wire protocol, ZooKeeper layout, and on-disk data format. This was a chance for us to clean up a lot of cruft and start fresh. This means performing a no-downtime upgrade is more painful than normal—you cannot just swap in the new code in-place. - -

                Migration Steps

                - -
                  -
                1. Setup a new cluster running 0.8. -
                2. Use the 0.7 to 0.8 migration tool to mirror data from the 0.7 cluster into the 0.8 cluster. -
                3. When the 0.8 cluster is fully caught up, redeploy all data consumers running the 0.8 client and reading from the 0.8 cluster. -
                4. Finally migrate all 0.7 producers to 0.8 client publishing data to the 0.8 cluster. -
                5. Decommission the 0.7 cluster. -
                6. Drink. -
                - - diff --git a/docs/ops.html b/docs/ops.html index d3990010cc558..228d17734753a 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -180,7 +180,7 @@

                --reset-offsets also has the following scenarios to choose from (at least one scenario must be selected):

              -

              6.8 Monitoring

              +

              6.7 Monitoring

              Kafka uses Yammer Metrics for metrics reporting in the server. The Java clients use Kafka Metrics, a built-in metrics registry that minimizes transitive dependencies pulled into client applications. Both expose metrics via JMX and can be configured to report stats using pluggable stats reporters to hook up to your monitoring system.
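
              As a small illustration of the JMX exposure mentioned above, the sketch below connects to a broker's JMX endpoint and reads one attribute of a Yammer meter. It assumes JMX was enabled on the broker (for example by exporting JMX_PORT=9999 before startup); the host, port and chosen MBean are examples only.

```
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class BrokerMetricsProbe {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint; matches a broker started with JMX_PORT=9999.
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection connection = connector.getMBeanServerConnection();
            ObjectName messagesIn = new ObjectName(
                    "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec");
            Object oneMinuteRate = connection.getAttribute(messagesIn, "OneMinuteRate");
            System.out.println("MessagesInPerSec, one-minute rate: " + oneMinuteRate);
        }
    }
}
```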

              @@ -1723,17 +1690,6 @@

              -
            22. 0 - NONE, cluster created in KRaft mode;
            23. -
            24. 4 - ZK, Migration has not started, controller is a ZK controller;
            25. -
            26. 2 - PRE_MIGRATION, the KRaft Controller is waiting for all ZK brokers to register in migration mode;
            27. -
            28. 1 - MIGRATION, ZK metadata has been migrated, but some broker is still running in ZK mode;
            29. -
            30. 3 - POST_MIGRATION, the cluster migration is complete;
            31. - - - kafka.controller:type=KafkaController,name=ZkMigrationState - Global Topic Count The number of global topics as observed by this Controller. @@ -2157,22 +2095,6 @@
              metrics.recording.level="info"
              Client Metrics
              -All of the following metrics have a recording level of info: +All the following metrics have a recording level of info: @@ -2861,7 +2783,17 @@
              Thread Metrics
              -All of the following metrics have a recording level of info: +All the following metrics have a recording level of info:
              @@ -2881,6 +2818,16 @@
              Task Metrics
              -All of the following metrics have a recording level of debug, except for the dropped-records-* and +All the following metrics have a recording level of debug, except for the dropped-records-* and active-process-ratio metrics which have a recording level of info:
              @@ -3016,7 +2963,7 @@
              @@ -3099,17 +3046,17 @@
              RocksDB may have an impact on performance. Statistics-based metrics are collected every minute from the RocksDB state stores. @@ -3428,7 +3375,7 @@
              Record Cache Metrics
              - All of the following metrics have a recording level of debug: + All the following metrics have a recording level of debug:
              @@ -3703,37 +3650,7 @@

              6.9 ZooKeeper

              - -

              Stable version

              - The current stable branch is 3.8. Kafka is regularly updated to include the latest release in the 3.8 series. - -

              ZooKeeper Deprecation

              -

              With the release of Apache Kafka 3.5, Zookeeper is now marked deprecated. Removal of ZooKeeper is planned in the next major release of Apache Kafka (version 4.0), - which is scheduled to happen no sooner than April 2024. During the deprecation phase, ZooKeeper is still supported for metadata management of Kafka clusters, - but it is not recommended for new deployments. There is a small subset of features that remain to be implemented in KRaft - see current missing features for more information.

              - -
              Migration
              -

              Users are recommended to begin planning for migration to KRaft and also begin testing to provide any feedback. Refer to ZooKeeper to KRaft Migration for details on how to perform a live migration from ZooKeeper to KRaft and current limitations.

              - -
              3.x and ZooKeeper Support
              -

              The final 3.x minor release, that supports ZooKeeper mode, will receive critical bug fixes and security fixes for 12 months after its release.

              - -

              Operationalizing ZooKeeper

              - Operationally, we do the following for a healthy ZooKeeper installation: -
                -
              • Redundancy in the physical/hardware/network layout: try not to put them all in the same rack, decent (but don't go nuts) hardware, try to keep redundant power and network paths, etc. A typical ZooKeeper ensemble has 5 or 7 servers, which tolerates 2 and 3 servers down, respectively. If you have a small deployment, then using 3 servers is acceptable, but keep in mind that you'll only be able to tolerate 1 server down in this case.
              • -
              • I/O segregation: if you do a lot of write type traffic you'll almost definitely want the transaction logs on a dedicated disk group. Writes to the transaction log are synchronous (but batched for performance), and consequently, concurrent writes can significantly affect performance. ZooKeeper snapshots can be one such a source of concurrent writes, and ideally should be written on a disk group separate from the transaction log. Snapshots are written to disk asynchronously, so it is typically ok to share with the operating system and message log files. You can configure a server to use a separate disk group with the dataLogDir parameter.
              • -
              • Application segregation: Unless you really understand the application patterns of other apps that you want to install on the same box, it can be a good idea to run ZooKeeper in isolation (though this can be a balancing act with the capabilities of the hardware).
              • -
              • Use care with virtualization: It can work, depending on your cluster layout and read/write patterns and SLAs, but the tiny overheads introduced by the virtualization layer can add up and throw off ZooKeeper, as it can be very time sensitive
              • -
              • ZooKeeper configuration: It's java, make sure you give it 'enough' heap space (We usually run them with 3-5G, but that's mostly due to the data set size we have here). Unfortunately we don't have a good formula for it, but keep in mind that allowing for more ZooKeeper state means that snapshots can become large, and large snapshots affect recovery time. In fact, if the snapshot becomes too large (a few gigabytes), then you may need to increase the initLimit parameter to give enough time for servers to recover and join the ensemble.
              • -
              • Monitoring: Both JMX and the 4 letter words (4lw) commands are very useful, they do overlap in some cases (and in those cases we prefer the 4 letter commands, they seem more predictable, or at the very least, they work better with the LI monitoring infrastructure)
              • -
              • Don't overbuild the cluster: large clusters, especially in a write heavy usage pattern, means a lot of intracluster communication (quorums on the writes and subsequent cluster member updates), but don't underbuild it (and risk swamping the cluster). Having more servers adds to your read capacity.
              • -
              - Overall, we try to keep the ZooKeeper system as small as will handle the load (plus standard growth capacity planning) and as simple as possible. We try not to do anything fancy with the configuration or application layout as compared to the official release as well as keep it as self contained as possible. For these reasons, we tend to skip the OS packaged versions, since it has a tendency to try to put things in the OS standard hierarchy, which can be 'messy', for want of a better way to word it. - -

              6.10 KRaft

              +

              6.8 KRaft

              Configuration

              @@ -3745,7 +3662,6 @@
              @@ -3753,7 +3669,7 @@
              Controllers
              -

              In KRaft mode, specific Kafka servers are selected to be controllers (unlike the ZooKeeper-based mode, where any server can become the Controller). The servers selected to be controllers will participate in the metadata quorum. Each controller is either an active or a hot standby for the current active controller.

              +

              In KRaft mode, specific Kafka servers are selected to be controllers. The servers selected to be controllers will participate in the metadata quorum. Each controller is either an active or a hot standby for the current active controller.

              A Kafka admin will typically select 3 or 5 servers for this role, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. A majority of the controllers must be alive in order to maintain availability. With 3 controllers, the cluster can tolerate 1 controller failure; with 5 controllers, the cluster can tolerate 2 controller failures.
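
              To check which controller is currently active and which voters make up the quorum, the Admin client can describe the metadata quorum, as in this minimal sketch (the bootstrap address is a placeholder; bin/kafka-metadata-quorum.sh provides the same information from the command line).

```
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.QuorumInfo;

public class QuorumStatusSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder

        try (Admin admin = Admin.create(props)) {
            QuorumInfo quorum = admin.describeMetadataQuorum().quorumInfo().get();
            System.out.println("Active controller (leader) id: " + quorum.leaderId());
            quorum.voters().forEach(v ->
                    System.out.println("Voter " + v.replicaId()
                            + ", log end offset " + v.logEndOffset()));
        }
    }
}
```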

              @@ -3857,7 +3773,7 @@
              - $ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 -c controller_static.properties + $ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 -c controller.properties Cannot set kraft.version to 1 unless KIP-853 configuration is present. Try removing the --feature flag for kraft.version.

              @@ -3941,7 +3857,7 @@

            32. Kafka server's process.role should be set to either broker or controller but not both. Combined mode can be used in development environments, but it should be avoided in critical deployment environments.
            33. -
            34. For redundancy, a Kafka cluster should use 3 or 5 controllers, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. In the rare case of a partial network failure it is possible for the cluster metadata quorum to become unavailable. This limitation will be addressed in a future release of Kafka.
            35. +
            36. For redundancy, a Kafka cluster should use 3 or more controllers, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. For the KRaft controller cluster to withstand N concurrent failures the controller cluster must include 2N + 1 controllers.
            37. The Kafka controllers store all the metadata for the cluster in memory and on disk. We believe that for a typical Kafka cluster, 5GB of main memory and 5GB of disk space on the metadata log directory are sufficient.
            38. @@ -3955,358 +3871,10 @@

              ZooKeeper to KRaft Migration

              -

              Terminology

              -
                -
              • Brokers that are in ZK mode store their metadata in Apache ZooKepeer. This is the old mode of handling metadata.
              • -
              • Brokers that are in KRaft mode store their metadata in a KRaft quorum. This is the new and improved mode of handling metadata.
              • -
              • Migration is the process of moving cluster metadata from ZooKeeper into a KRaft quorum.
              • -
              - -

              Migration Phases

              - In general, the migration process passes through several phases. - -
                -
              • In the initial phase, all the brokers are in ZK mode, and there is a ZK-based controller.
              • -
              • During the initial metadata load, a KRaft quorum loads the metadata from ZooKeeper,
              • -
              • In hybrid phase, some brokers are in ZK mode, but there is a KRaft controller.
              • -
              • In dual-write phase, all brokers are KRaft, but the KRaft controller is continuing to write to ZK.
              • -
              • When the migration has been finalized, we no longer write metadata to ZooKeeper.
              • -
              - -

              Limitations

              -
                -
              • While a cluster is being migrated from ZK mode to KRaft mode, we do not support changing the metadata - version (also known as the inter.broker.protocol.version.) Please do not attempt to do this during - a migration, or you may break the cluster.
              • -
              • After the migration has been finalized, it is not possible to revert back to ZooKeeper mode.
              • -
              • - During the migration, if a ZK broker is running with multiple log directories, - any directory failure will cause the broker to shutdown. - Brokers with broken log directories will only be able to migrate to KRaft once the directories are repaired. - For further details refer to KAFKA-16431. -
              • -
              • As noted above, some features are not fully implemented in KRaft mode. If you are - using one of those features, you will not be able to migrate to KRaft yet.
              • -
              - -

              Preparing for migration

              -

              - Before beginning the migration, the Kafka brokers must be upgraded to software version {{fullDotVersion}} and have the - "inter.broker.protocol.version" configuration set to "{{dotVersion}}". -

              - -

              - It is recommended to enable TRACE level logging for the migration components while the migration is active. This can - be done by adding the following log4j configuration to each KRaft controller's "log4j.properties" file. -

              - -
              log4j.logger.org.apache.kafka.metadata.migration=TRACE
              - -

              - It is generally useful to enable DEBUG logging on the KRaft controllers and the ZK brokers during the migration. -

              - -

              Provisioning the KRaft controller quorum

              -

              - Two things are needed before the migration can begin. First, the brokers must be configured to support the migration and second, - a KRaft controller quorum must be deployed. The KRaft controllers should be provisioned with the same cluster ID as - the existing Kafka cluster. This can be found by examining one of the "meta.properties" files in the data directories - of the brokers, or by running the following command. -

              - -
              $ bin/zookeeper-shell.sh localhost:2181 get /cluster/id
              - -

              - The KRaft controller quorum should also be provisioned with the latest metadata.version. - This is done automatically when you format the node with the kafka-storage.sh tool. - For further instructions on KRaft deployment, please refer to the above documentation. -

              - -

              - In addition to the standard KRaft configuration, the KRaft controllers will need to enable support for the migration - as well as provide ZooKeeper connection configuration. -

              - -

              - Here is a sample config for a KRaft controller that is ready for migration: -

              -
              # Sample KRaft cluster controller.properties listening on 9093
              -process.roles=controller
              -node.id=3000
              -controller.quorum.bootstrap.servers=localhost:9093
              -controller.listener.names=CONTROLLER
              -listeners=CONTROLLER://:9093
              -
              -# Enable the migration
              -zookeeper.metadata.migration.enable=true
              -
              -# ZooKeeper client configuration
              -zookeeper.connect=localhost:2181
              -
              -# The inter broker listener in brokers to allow KRaft controller send RPCs to brokers
              -inter.broker.listener.name=PLAINTEXT
              -
              -# Other configs ...
              - -

              The new standalone controller in the example configuration above should be formatted using the bin/kafka-storage.sh format --standalone command.

              - -

              Note: The KRaft cluster node.id values must be different from any existing ZK broker broker.id. - In KRaft-mode, the brokers and controllers share the same Node ID namespace.

              - -

              Enter Migration Mode on the Brokers

              -

              - Once the KRaft controller quorum has been started, the brokers will need to be reconfigured and restarted. Brokers - may be restarted in a rolling fashion to avoid impacting cluster availability. Each broker requires the - following configuration to communicate with the KRaft controllers and to enable the migration. -

              - - - -

              Here is a sample config for a broker that is ready for migration:

              - -
              # Sample ZK broker server.properties listening on 9092
              -broker.id=0
              -listeners=PLAINTEXT://:9092
              -advertised.listeners=PLAINTEXT://localhost:9092
              -listener.security.protocol.map=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
              -
              -# Set the IBP
              -inter.broker.protocol.version={{dotVersion}}
              -
              -# Enable the migration
              -zookeeper.metadata.migration.enable=true
              -
              -# ZooKeeper client configuration
              -zookeeper.connect=localhost:2181
              -
              -# KRaft controller quorum configuration
              -controller.quorum.bootstrap.servers=localhost:9093
              -controller.listener.names=CONTROLLER
              - -

              - Note: Once the final ZK broker has been restarted with the necessary configuration, the migration will automatically begin. - When the migration is complete, an INFO level log can be observed on the active controller: -

              - -
              Completed migration of metadata from Zookeeper to KRaft
              - -

              Migrating brokers to KRaft

              -

              - Once the KRaft controller completes the metadata migration, the brokers will still be running - in ZooKeeper mode. While the KRaft controller is in migration mode, it will continue sending - controller RPCs to the ZooKeeper mode brokers. This includes RPCs like UpdateMetadata and - LeaderAndIsr. -

              - -

              - To migrate the brokers to KRaft, they simply need to be reconfigured as KRaft brokers and restarted. Using the above - broker configuration as an example, we would replace the broker.id with node.id and add - process.roles=broker. It is important that the broker maintain the same Broker/Node ID when it is restarted. - The zookeeper configurations should be removed at this point. -

              - -

              - If your broker has authorization configured via the authorizer.class.name property - using kafka.security.authorizer.AclAuthorizer, this is also the time to change it - to use org.apache.kafka.metadata.authorizer.StandardAuthorizer instead. -

              - -
              # Sample KRaft broker server.properties listening on 9092
              -process.roles=broker
              -node.id=0
              -listeners=PLAINTEXT://:9092
              -advertised.listeners=PLAINTEXT://localhost:9092
              -listener.security.protocol.map=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
              -
              -# Don't set the IBP, KRaft uses "metadata.version" feature flag
              -# inter.broker.protocol.version={{dotVersion}}
              -
              -# Remove the migration enabled flag
              -# zookeeper.metadata.migration.enable=true
              -
              -# Remove ZooKeeper client configuration
              -# zookeeper.connect=localhost:2181
              -
              -# Keep the KRaft controller quorum configuration
              -controller.quorum.bootstrap.servers=localhost:9093
              -controller.listener.names=CONTROLLER
              - -

              - Each broker is restarted with a KRaft configuration until the entire cluster is running in KRaft mode. -

              - -

              Finalizing the migration

              -

              - Once all brokers have been restarted in KRaft mode, the last step to finalize the migration is to take the - KRaft controllers out of migration mode. This is done by removing the "zookeeper.metadata.migration.enable" - property from each of their configs and restarting them one at a time. -

              -

              - Once the migration has been finalized, you can safely deprovision your ZooKeeper cluster, assuming you are - not using it for anything else. After this point, it is no longer possible to revert to ZooKeeper mode. -

              - -
              # Sample KRaft cluster controller.properties listening on 9093
              -process.roles=controller
              -node.id=3000
              -controller.quorum.bootstrap.servers=localhost:9093
              -controller.listener.names=CONTROLLER
              -listeners=CONTROLLER://:9093
              -
              -# Disable the migration
              -# zookeeper.metadata.migration.enable=true
              -
              -# Remove ZooKeeper client configuration
              -# zookeeper.connect=localhost:2181
              -
              -# Other configs ...
              - -

              Reverting to ZooKeeper mode During the Migration

              -

              - While the cluster is still in migration mode, it is possible to revert to ZooKeeper mode. The process - to follow depends on how far the migration has progressed. In order to find out how to revert, - select the final migration step that you have completed in this table. -

              -

              - Note that the directions given here assume that each step was fully completed, and they were - done in order. So, for example, we assume that if "Enter Migration Mode on the Brokers" was - completed, "Provisioning the KRaft controller quorum" was also fully completed previously. -

              -

              - If you did not fully complete any step, back out whatever you have done and then follow revert - directions for the last fully completed step. -

              - -
              - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
              Final Migration Section CompletedDirections for RevertingNotes
              Preparing for migration - The preparation section does not involve leaving ZooKeeper mode. So there is nothing to do in the - case of a revert. - -
              Provisioning the KRaft controller quorum -
                -
              • - Deprovision the KRaft controller quorum. -
              • -
              • - Then you are done. -
              • -
              -
              -
              Enter Migration Mode on the brokers -
                -
              • - Deprovision the KRaft controller quorum. -
              • -
              • - Using zookeeper-shell.sh, run rmr /controller so that one - of the brokers can become the new old-style controller. Additionally, run - get /migration followed by rmr /migration to clear the - migration state from ZooKeeper. This will allow you to re-attempt the migration - in the future. The data read from "/migration" can be useful for debugging. -
              • -
              • - On each broker, remove the zookeeper.metadata.migration.enable, - controller.listener.names, and controller.quorum.bootstrap.servers - configurations, and replace node.id with broker.id. - Then perform a rolling restart of all brokers. -
              • -
              • - Then you are done. -
              • -
              -
              - It is important to perform the zookeeper-shell.sh step quickly, to minimize the amount of - time that the cluster lacks a controller. Until the /controller znode is deleted, - you can also ignore any errors in the broker log about failing to connect to the KRaft controller. - Those error logs should disappear after the second roll to pure ZooKeeper mode. -
              Migrating brokers to KRaft -
                -
              • - On each broker, remove the process.roles configuration, - replace the node.id with broker.id and - restore the zookeeper.connect configuration to its previous value. - If your cluster requires other ZooKeeper configurations for brokers, such as - zookeeper.ssl.protocol, re-add those configurations as well. - Then perform a rolling restart of all brokers. -
              • -
              • - Deprovision the KRaft controller quorum. -
              • -
              • - Using zookeeper-shell.sh, run rmr /controller so that one - of the brokers can become the new old-style controller. -
              • -
              • - On each broker, remove the zookeeper.metadata.migration.enable, - controller.listener.names, and controller.quorum.bootstrap.servers - configurations. - Then perform a second rolling restart of all brokers. -
              • -
              • - Then you are done. -
              • -
              -
              -
                -
              • - It is important to perform the zookeeper-shell.sh step quickly, to minimize the amount of - time that the cluster lacks a controller. Until the /controller znode is deleted, - you can also ignore any errors in the broker log about failing to connect to the KRaft controller. - Those error logs should disappear after the second roll to pure ZooKeeper mode. -
              • -
              • - Make sure that on the first cluster roll, zookeeper.metadata.migration.enable remains set to - true. Do not set it to false until the second cluster roll. -
              • -
              -
              Finalizing the migration - If you have finalized the ZK migration, then you cannot revert. - - Some users prefer to wait for a week or two before finalizing the migration. While this - requires you to keep the ZooKeeper cluster running for a while longer, it may be helpful - in validating KRaft mode in your cluster. -
              - +

              In order to migrate from ZooKeeper to KRaft, you need to use a bridge release. The last bridge release is Kafka 3.9. + See the ZooKeeper to KRaft Migration steps in the 3.9 documentation.

              -

              6.11 Tiered Storage

              +

              6.9 Tiered Storage

              Tiered Storage Overview

              @@ -4371,7 +3939,7 @@

              After the build completes successfully, there should be a `kafka-storage-x.x.x-test.jar` file under `storage/build/libs`. Next, set the following configurations on the broker side to enable the tiered storage feature.

              -
              # Sample Zookeeper/Kraft broker server.properties listening on PLAINTEXT://:9092
              +
              # Sample KRaft broker server.properties listening on PLAINTEXT://:9092
               remote.log.storage.system.enable=true
               
               # Setting the listener for the clients in RemoteLogMetadataManager to talk to the brokers.
              diff --git a/docs/protocol.html b/docs/protocol.html
              index 9a21639dba147..84f4aec4b0d4f 100644
              --- a/docs/protocol.html
              +++ b/docs/protocol.html
              @@ -45,11 +45,6 @@ 

              Kafka protocol guide

            39. Record Batch
            40. -
            41. Evolving the Protocol -
            42. Constants
              • Error Codes @@ -125,7 +120,7 @@
                The intention is that clients will support a range of API versions. When communicating with a particular broker, a given client should use the highest API version supported by both and indicate this version in their requests.

                -

                The server will reject requests with a version it does not support, and will always respond to the client with exactly the protocol format it expects based on the version it included in its request. The intended upgrade path is that new features would first be rolled out on the server (with the older clients not making use of them) and then as newer clients are deployed these new features would gradually be taken advantage of.

                +

                The server will reject requests with a version it does not support, and will always respond to the client with exactly the protocol format it expects based on the version it included in its request. The intended upgrade path is that new features would first be rolled out on the server (with the older clients not making use of them) and then as newer clients are deployed these new features would gradually be taken advantage of. Note there is an exceptional case while retrieving supported API versions where the server can respond with a different version.

                Note that KIP-482 tagged fields can be added to a request without incrementing the version number. This offers an additional way of evolving the message schema without breaking compatibility. Tagged fields do not take up any space when the field is not set. Therefore, if a field is rarely used, it is more efficient to make it a tagged field than to put it in the mandatory schema. However, tagged fields are ignored by recipients that don't know about them, which could pose a challenge if this is not the behavior that the sender wants. In such cases, a version bump may be more appropriate. @@ -143,7 +138,13 @@

                ApiVersionRequest is not available. Also, note that broker versions older - than 0.10.0.0 do not support this API and will either ignore the request or close connection in response to the request.
              • + than 0.10.0.0 do not support this API and will either ignore the request or close connection in response to the request. Also + note that if the client ApiVersionsRequest version is unsupported by the broker (client is ahead), and the broker + version is 2.4.0 or greater, then the broker will respond with a version 0 ApiVersionsResponse + with the error code set to UNSUPPORTED_VERSION and the api_versions + field populated with the supported version of the ApiVersionsRequest. It is then up to the client to retry, making + another ApiVersionsRequest using the highest version supported by the client and broker. + See KIP-511: Collect and Expose Client's Name and Version in the Brokers
              • If multiple versions of an API are supported by broker and client, clients are recommended to use the latest version supported by the broker and itself.
              • Deprecation of a protocol version is done by marking an API version as deprecated in the protocol documentation.
              • diff --git a/docs/quickstart.html b/docs/quickstart.html index 64a7a23c6b901..1ded73e22565d 100644 --- a/docs/quickstart.html +++ b/docs/quickstart.html @@ -42,15 +42,11 @@

                Step 2: Start the Kafka environment

                -

                NOTE: Your local environment must have Java 11+ installed.

                +

                NOTE: Your local environment must have Java 17+ installed.

                -

                Apache Kafka can be started using KRaft or ZooKeeper. To get started with either configuration follow one of the sections below but not both.

                +

                Kafka can be run using local scripts and downloaded files or the docker image.

                -
                Kafka with KRaft
                - -

                Kafka can be run using KRaft mode using local scripts and downloaded files or the docker image. Follow one of the sections below but not both to start the kafka server.

                - -
                Using downloaded files
                +
                Using downloaded files

                Generate a Cluster UUID

                $ KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
                @@ -63,7 +59,7 @@
                Using downloaded files

                Once the Kafka server has successfully launched, you will have a basic Kafka environment running and ready to use.

                -
                Using JVM Based Apache Kafka Docker Image
                +
                Using JVM Based Apache Kafka Docker Image

                Get the Docker image:

                $ docker pull apache/kafka:{{fullDotVersion}}
                @@ -71,7 +67,7 @@
                Using JVM Based Apache Kafka Docker Image

                Start the Kafka Docker container:

                $ docker run -p 9092:9092 apache/kafka:{{fullDotVersion}}
                -
                Using GraalVM Based Native Apache Kafka Docker Image
                +
                Using GraalVM Based Native Apache Kafka Docker Image

                Get the Docker image:

                $ docker pull apache/kafka-native:{{fullDotVersion}}
                @@ -79,18 +75,6 @@
                Using GraalVM Based Native Apache Kafka Docker Image

                Start the Kafka Docker container:

                $ docker run -p 9092:9092 apache/kafka-native:{{fullDotVersion}}
                -
                Kafka with ZooKeeper
                - -

                Run the following commands in order to start all services in the correct order:

                -
                # Start the ZooKeeper service
                -$ bin/zookeeper-server-start.sh config/zookeeper.properties
                - -

                Open another terminal session and run:

                -
                # Start the Kafka broker service
                -$ bin/kafka-server-start.sh config/server.properties
                - -

                Once all services have successfully launched, you will have a basic Kafka environment running and ready to use.

                -
                @@ -320,9 +304,6 @@

              • Stop the Kafka broker with Ctrl-C.
              • -
              • - Lastly, if the Kafka with ZooKeeper section was followed, stop the ZooKeeper server with Ctrl-C. -
              • @@ -330,7 +311,7 @@

                along the way, run the command:

                -
                $ rm -rf /tmp/kafka-logs /tmp/zookeeper /tmp/kraft-combined-logs
                +
                $ rm -rf /tmp/kafka-logs /tmp/kraft-combined-logs

                diff --git a/docs/security.html b/docs/security.html index f9e7a5ba69a79..1b82dfc1223f4 100644 --- a/docs/security.html +++ b/docs/security.html @@ -17,7 +17,7 @@
                diff --git a/docs/streams/developer-guide/app-reset-tool.html b/docs/streams/developer-guide/app-reset-tool.html index bf157e3866920..48a40043e701a 100644 --- a/docs/streams/developer-guide/app-reset-tool.html +++ b/docs/streams/developer-guide/app-reset-tool.html @@ -45,7 +45,7 @@

              The application reset tool does not:

              diff --git a/docs/streams/developer-guide/config-streams.html b/docs/streams/developer-guide/config-streams.html index bd9452827ae96..4e77fb7de5d81 100644 --- a/docs/streams/developer-guide/config-streams.html +++ b/docs/streams/developer-guide/config-streams.html @@ -63,8 +63,10 @@
            43. Recommended configuration parameters for resiliency
            44. Optional configuration parameters @@ -76,6 +78,7 @@
            45. default.timestamp.extractor
            46. default.value.serde
            47. deserialization.exception.handler
            48. +
            49. enable.metrics.push
            50. log.summary.interval.ms
            51. max.task.idle.ms
            52. max.warmup.replicas
            53. @@ -84,6 +87,7 @@
            54. probing.rebalance.interval.ms
            55. processing.exception.handler
            56. processing.guarantee
            57. +
            58. processor.wrapper.class
            59. production.exception.handler
            60. rack.aware.assignment.non_overlap_cost
            61. rack.aware.assignment.strategy
            62. @@ -153,7 +157,7 @@

              application.id

              bootstrap.servers

              -

              (Required) The Kafka bootstrap servers. This is the same setting that is used by the underlying producer and consumer clients to connect to the Kafka cluster. +

              (Required) The Kafka bootstrap servers. This is the same setting that is used by the underlying producer and consumer clients to connect to the Kafka cluster. Example: "kafka-broker1:9092,kafka-broker2:9092".
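              As an illustration only (a minimal sketch; the application id, topic names, and broker addresses below are placeholder assumptions, not values from this documentation), the two required settings are typically wired into a Kafka Streams application like this:

              import java.util.Properties;
              import org.apache.kafka.common.serialization.Serdes;
              import org.apache.kafka.streams.KafkaStreams;
              import org.apache.kafka.streams.StreamsBuilder;
              import org.apache.kafka.streams.StreamsConfig;

              Properties props = new Properties();
              // Required: a unique id for the application and the brokers to bootstrap from.
              props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");
              props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-broker1:9092,kafka-broker2:9092");
              props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
              props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

              StreamsBuilder builder = new StreamsBuilder();
              builder.stream("input-topic").to("output-topic");   // hypothetical topology

              KafkaStreams streams = new KafkaStreams(builder.build(), props);
              streams.start();
              Runtime.getRuntime().addShutdownHook(new Thread(streams::close));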

              @@ -170,59 +174,70 @@

              Recommended configuration parameters for - acks - Producer - acks=1 - acks=all + + acks + Producer (for version <=2.8) + acks="1" + acks="all" - replication.factor (for broker version 2.3 or older) + + replication.factor (for broker version 2.3 or older) Streams -1 - 3 + 3 (broker 2.4+: ensure broker config default.replication.factor=3) - min.insync.replicas + + min.insync.replicas Broker 1 2 - num.standby.replicas + + num.standby.replicas Streams 0 1 -

              Increasing the replication factor to 3 ensures that the internal Kafka Streams topic can tolerate up to 2 broker failures. Changing the acks setting to “all” - guarantees that a record will not be lost as long as one replica is alive. The tradeoff from moving to the default values to the recommended ones is +

              Increasing the replication factor to 3 ensures that the internal Kafka Streams topics can tolerate up to 2 broker failures. + The tradeoff of moving from the default values to the recommended ones is that some performance and more storage space (3x with the replication factor of 3) are sacrificed for more resiliency.

              -
              +

              acks

              The number of acknowledgments that the leader must have received before considering a request complete. This controls the durability of records that are sent. The possible values are:

                -
              • acks=0 The producer does not wait for acknowledgment from the server and the record is immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and the producer won’t generally know of any failures. The offset returned for each record will always be set to -1.
              • -
              • acks=1 The leader writes the record to its local log and responds without waiting for full acknowledgement from all followers. If the leader immediately fails after acknowledging the record, but before the followers have replicated it, then the record will be lost.
              • -
              • acks=all The leader waits for the full set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost if there is at least one in-sync replica alive. This is the strongest available guarantee.
              • +
              • acks="0" The producer does not wait for acknowledgment from the server and the record is immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and the producer won’t generally know of any failures. The offset returned for each record will always be set to -1.
              • +
              • acks="1" The leader writes the record to its local log and responds without waiting for full acknowledgement from all followers. If the leader immediately fails after acknowledging the record, but before the followers have replicated it, then the record will be lost.
              • +
              • acks="all" (default since 3.0 release) The leader waits for the full set of in-sync replicas to acknowledge the record. This guarantees that the record will not be lost if there is at least one in-sync replica alive. This is the strongest available guarantee.

              For more information, see the Kafka Producer documentation.

              -
              -

              replication.factor

              + -
              -

              num.standby.replicas

              +
              +

              min.insync.replicas

              +

              The minimum number of in-sync replicas available for replication if the producer is configured with acks="all" + (see topic configs). +

              +
              +
              Properties streamsSettings = new Properties();
               // for broker version 2.3 or older
               //streamsSettings.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 3);
              +// for version 2.8 or older
              +//streamsSettings.put(StreamsConfig.producerPrefix(ProducerConfig.ACKS_CONFIG), "all");
               streamsSettings.put(StreamsConfig.topicPrefix(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG), 2);
              -streamsSettings.put(StreamsConfig.producerPrefix(ProducerConfig.ACKS_CONFIG), "all");
               streamsSettings.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
              @@ -231,9 +246,9 @@

              num.standby.replicasStreams javadocs, sorted by level of importance:

                -
              • High: These parameters can have a significant impact on performance. Take care when deciding the values of these parameters.
              • -
              • Medium: These parameters can have some impact on performance. Your specific environment will determine how much tuning effort should be focused on these parameters.
              • -
              • Low: These parameters have a less general or less significant impact on performance.
              • +
              • High: These are parameters with a default value which is most likely not a good fit for production use. It's highly recommended to revisit these parameters for production usage.
              • +
              • Medium: The default values of these parameters should work for production for many cases, but it's not uncommon that they are changed, for example to tune performance.
              • +
              • Low: It should rarely be necessary to change the value for these parameters. It's only recommended to change them if there is a very specific issue you want to address.
              @@ -265,12 +280,12 @@

              num.standby.replicas

              - + - + @@ -281,7 +296,7 @@

              num.standby.replicas

              - + @@ -301,8 +316,9 @@

              num.standby.replicas

              - - + + @@ -316,7 +332,7 @@

              num.standby.replicas"ROCKS_DB"

              @@ -335,9 +351,14 @@

              num.standby.replicas

              - + - + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + + + . + + + - . - + @@ -416,68 +444,91 @@

              num.standby.replicas

              - + - - + + + + + + List of tag keys used to distribute standby replicas across Kafka Streams clients. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over - clients with different tag values. + clients with different tag values. + See Rack Aware Assignment Tags. - + + + + + + + + + + + - - + + - + - + - + - + (10 minutes) - + - + - + - + (5 minutes) - + - + - + - - + + - + - + (1 day) - + @@ -559,8 +610,9 @@

              acceptable.recovery.lagDefaultProductionExceptionHandler that always fails when these exceptions occur.

              -

              Each exception handler can return a FAIL or CONTINUE depending on the record and the exception thrown. Returning FAIL will signal that Streams should shut down and CONTINUE will signal that Streams - should ignore the issue and continue processing. If you want to provide an exception handler that always ignores records that are too large, you could implement something like the following:

              +

              An exception handler can return FAIL, CONTINUE, or RETRY depending on the record and the exception thrown. Returning FAIL will signal that Streams should shut down. CONTINUE will signal that Streams + should ignore the issue and continue processing. For RetriableException the handler may return RETRY to tell the runtime to retry sending the failed record (Note: If RETRY is returned for a non-RetriableException + it will be treated as FAIL.) If you want to provide an exception handler that always ignores records that are too large, you could implement something like the following:

              import java.util.Properties;
               import org.apache.kafka.streams.StreamsConfig;
              @@ -803,6 +855,18 @@ 

              log.summary.interval.ms +

              enable.metrics.push

              +
              +
              +

              + Kafka Streams metrics can be pushed to the brokers in the same way as client metrics. + Additionally, Kafka Streams allows enabling or disabling metric pushing for each embedded client individually. + However, pushing Kafka Streams metrics requires that enable.metrics.push is enabled on the main consumer and admin client. +

              +
              +
              +
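              A minimal sketch, assuming the standard client-level enable.metrics.push property and the StreamsConfig prefix helpers (the specific true/false choices below are illustrative, not a recommendation from this documentation):

              import java.util.Properties;
              import org.apache.kafka.streams.StreamsConfig;

              Properties props = new Properties();
              // Keep Kafka Streams metrics flowing: the main consumer and admin client must have push enabled.
              props.put(StreamsConfig.mainConsumerPrefix("enable.metrics.push"), "true");
              props.put(StreamsConfig.adminClientPrefix("enable.metrics.push"), "true");
              // Opt a single embedded client out of metrics push, for example the producer.
              props.put(StreamsConfig.producerPrefix("enable.metrics.push"), "false");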

              max.task.idle.ms

              @@ -869,7 +933,7 @@

              log.summary.interval.msStream Task. Furthermore, note that each warmup task can only be promoted to an active task during + Note that one warmup replica corresponds to one Stream Task. Furthermore, note that each warmup task can only be promoted to an active task during a rebalance (normally during a so-called probing rebalance, which occur at a frequency specified by the probing.rebalance.interval.ms config). This means that the maximum rate at which active tasks can be migrated from one Kafka Streams instance to another instance can be determined by @@ -998,6 +1062,23 @@

              probing.rebalance.interval.ms

              +
              +

              processor.wrapper.class

              +
              +
              +

              + A class or class name implementing the ProcessorWrapper interface. This feature allows you to wrap any of the + processors in the compiled topology, including both custom processor implementations and those created by Streams for DSL operators. This can be useful for logging or tracing + implementations since it allows access to the otherwise-hidden processor context for DSL operators, and also allows for injecting additional debugging information to an entire + application topology with just a single config. +

              +

              + IMPORTANT: This MUST be passed in when creating the topology, and will not be applied unless passed in to the appropriate topology-building constructor. You should + use the StreamsBuilder#new(TopologyConfig) constructor for DSL applications, and the + Topology#new(TopologyConfig) constructor for PAPI applications. +

              +
              +
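              A minimal sketch of passing the config through a TopologyConfig as described above (com.example.MyTracingWrapper is a hypothetical ProcessorWrapper implementation, and the literal config name is used instead of any constant):

              import java.util.Properties;
              import org.apache.kafka.streams.StreamsBuilder;
              import org.apache.kafka.streams.StreamsConfig;
              import org.apache.kafka.streams.TopologyConfig;

              Properties props = new Properties();
              props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wrapped-app");            // hypothetical id
              props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
              // Hypothetical wrapper class implementing the ProcessorWrapper interface.
              props.put("processor.wrapper.class", "com.example.MyTracingWrapper");

              // The wrapper only takes effect when the config reaches the topology-building constructor.
              StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props)));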

              replication.factor

              @@ -1182,8 +1263,8 @@

              Naming

              Default Values

              Kafka Streams uses different default values for some of the underlying client configs, which are summarized below. For detailed descriptions - of these configs, see Producer Configs - and Consumer Configs.

              + of these configs, see Producer Configs + and Consumer Configs.

              statestore.cache.max.bytes Medium Maximum number of memory bytes to be used for record caches across all threads.1048576010485760
              cache.max.bytes.buffering (Deprecated. Use statestore.cache.max.bytes instead.) Medium Maximum number of memory bytes to be used for record caches across all threads.10485760 bytes10485760
              client.id Mediumcommit.interval.ms Low The frequency in milliseconds with which to save the position (offsets in source topics) of tasks.30000 milliseconds (30 seconds)30000 (30 seconds)
              default.deserialization.exception.handler (Deprecated. Use deserialization.exception.handler instead.) Mediumdefault.timestamp.extractor MediumTimestamp extractor class that implements the TimestampExtractor interface.See Timestamp ExtractorTimestamp extractor class that implements the TimestampExtractor interface. + See Timestamp ExtractorFailOnInvalidTimestamp
              default.value.serde Medium
              deserialization.exception.handler Mediumlog.summary.interval.ms Low The output interval in milliseconds for logging summary information (disabled if negative).120000 milliseconds (2 minutes)120000 (2 minutes)
              max.task.idle.ms
              enable.metrics.pushLowWhether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client.true
              max.task.idle.ms Medium

              @@ -354,59 +375,66 @@

              num.standby.replicas0

              max.warmup.replicas
              max.warmup.replicas Medium The maximum number of warmup replicas (extra standbys beyond the configured num.standbys) that can be assigned at once. 2
              metric.reporters
              metric.reporters Low A list of classes to use as metrics reporters. the empty list
              metrics.num.samples
              metrics.num.samples Low The number of samples maintained to compute metrics. 2
              metrics.recording.level
              metrics.recording.level Low The highest recording level for metrics. INFO
              metrics.sample.window.ms
              metrics.sample.window.ms Low The window of time in milliseconds a metrics sample is computed over.30000 milliseconds (30 seconds)30000 (30 seconds)
              num.standby.replicas
              num.standby.replicas High The number of standby replicas for each task. 0
              num.stream.threads
              num.stream.threads Medium The number of threads to execute stream processing. 1
              probing.rebalance.interval.ms
              probing.rebalance.interval.ms Low The maximum time in milliseconds to wait before triggering a rebalance to probe for warmup replicas that have sufficiently caught up.600000 milliseconds (10 minutes)600000 (10 minutes)
              processing.exception.handler
              processing.exception.handler Medium Exception handling class that implements the ProcessingExceptionHandler interface. LogAndFailProcessingExceptionHandler
              processing.guarantee
              processing.guaranteeMediumThe processing mode. Can be either "at_least_once" + or "exactly_once_v2" (for EOS version 2, requires broker version 2.5+). + See Processing Guarantee."at_least_once"
              processor.wrapper.class MediumThe processing mode. Can be either "at_least_once" (default) - or "exactly_once_v2" (for EOS version 2, requires broker version 2.5+). Deprecated config options are - "exactly_once" (for EOS version 1) and "exactly_once_beta" (for EOS version 2, requires broker version 2.5+)See Processing GuaranteeA class or class name implementing the ProcessorWrapper interface. + Must be passed in when creating the topology, and will not be applied unless passed in to the appropriate constructor as a TopologyConfig. You should + use the StreamsBuilder#new(TopologyConfig) constructor for DSL applications, and the + Topology#new(TopologyConfig) constructor for PAPI applications.
              production.exception.handler Mediumpoll.ms Low The amount of time in milliseconds to block waiting for input.100 milliseconds100
              rack.aware.assignment.tagsMedium
              rack.aware.assignment.strategyLowThe strategy used for rack aware assignment. Acceptable values are + "none" (default), + "min_traffic", and + "balance_subtopology". + See Rack Aware Assignment Strategy."none"
              the empty list
              replication.factor
              rack.aware.assignment.non_overlap_costLowCost associated with moving tasks from existing assignment. + See Rack Aware Assignment Non-Overlap-Cost.null
              rack.aware.assignment.traffic_costLowCost associated with cross rack traffic. + See Rack Aware Assignment Traffic-Cost.null
              replication.factor Medium The replication factor for changelog topics and repartition topics created by the application. The default of -1 (meaning: use broker default replication factor) requires broker version 2.4 or newer. -1
              retry.backoff.msMedium
              retry.backoff.msLow The amount of time in milliseconds, before a request is retried. 100
              rocksdb.config.setter
              rocksdb.config.setter Medium The RocksDB configuration.null
              state.cleanup.delay.ms
              state.cleanup.delay.ms Low The amount of time in milliseconds to wait before deleting state when a partition has migrated.600000 milliseconds (10 minutes)600000
              state.dir
              state.dir High Directory location for state stores. /${java.io.tmpdir}/kafka-streams
              task.assignor.class
              task.assignor.class Medium A task assignor class or class name implementing the TaskAssignor interface. The high-availability task assignor.
              task.timeout.ms
              task.timeout.ms Medium The maximum amount of time in milliseconds a task might stall due to internal errors and retries until an error is raised. For a timeout of 0 ms, a task would raise an error for the first internal error. For any timeout larger than 0 ms, a task will retry at least once before an error is raised.300000 milliseconds (5 minutes)300000
              topology.optimization
              topology.optimization Medium A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: StreamsConfig.NO_OPTIMIZATION (none), StreamsConfig.OPTIMIZE (all) or a comma separated list of specific optimizations: StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS (reuse.ktable.source.topics), StreamsConfig.MERGE_REPARTITION_TOPICS (merge.repartition.topics), StreamsConfig.SINGLE_STORE_SELF_JOIN (single.store.self.join). NO_OPTIMIZATION"NO_OPTIMIZATION"
              upgrade.from
              upgrade.from MediumThe version you are upgrading from during a rolling upgrade.See Upgrade FromThe version you are upgrading from during a rolling upgrade. + See Upgrade Fromnull
              windowstore.changelog.additional.retention.ms
              windowstore.changelog.additional.retention.ms Low Added to a windows maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift.86400000 milliseconds (1 day)86400000
              window.size.ms
              window.size.ms Low Sets window size for the deserializer in order to calculate window end times. null
              diff --git a/docs/streams/developer-guide/dsl-api.html b/docs/streams/developer-guide/dsl-api.html index 32822c2709451..97dc644e83905 100644 --- a/docs/streams/developer-guide/dsl-api.html +++ b/docs/streams/developer-guide/dsl-api.html @@ -386,7 +386,30 @@

              + + + - - - - - - - + @@ -1636,37 +1739,56 @@

              Kafka Streams API (rows)

              - + + + + + + + + + + + + + + + + + + - + + - + +
              Parameter Name

              Filter

              +

              Broadcast/Multicast

              +
                +
              • no operator
              • +
              +

              Broadcasting a KStream into multiple downstream operators.

              +

              A record is sent to more than one operator by applying multiple operators to the same KStream instance.

              +
              KStream<String, Long> stream = ...;
              +KStream<...> stream1 = stream.map(...);
              +KStream<...> stream2 = stream.mapValues(...);
              +KStream<...> stream3 = stream.flatMap(...);
              +
              +

              Multicasting a KStream into multiple downstream operators.

              +

              In contrast to branching, which sends each record to at most one downstream branch, + a multicast may send a record to any number of downstream KStream instances.

              +

              A multicast is implemented as a broadcast plus filters.

              +
              KStream<String, Long> stream = ...;
              +KStream<...> stream1 = stream.filter((key, value) -> key.startsWith("A")); // contains all records whose keys start with "A"
              +KStream<...> stream2 = stream.filter((key, value) -> key.startsWith("AB")); // contains all records whose keys start with "AB" (subset of stream1)
              +KStream<...> stream3 = stream.filter((key, value) -> key.contains("B")); // contains all records whose keys contain a "B" (superset of stream2)
              +
              +

              Filter

              Inverse Filter

              +

              Inverse Filter

              FlatMap

              +

              FlatMap

              • KStream → KStream
              @@ -447,7 +468,7 @@

              FlatMapValues

              +

              FlatMapValues

              • KStream → KStream
              @@ -463,7 +484,7 @@

              Foreach

              +

              Foreach

              GroupByKey

              +

              GroupByKey

              • KStream → KGroupedStream
              @@ -524,7 +544,7 @@

              GroupBy

              +

              GroupBy

              Cogroup

              +

              Cogroup

              • KGroupedStream → CogroupedKStream
              • CogroupedKStream → CogroupedKStream
              • @@ -601,7 +619,7 @@

              Map

              +

              Map

              • KStream → KStream
              @@ -613,7 +631,6 @@

              mapValues instead, which will not cause data re-partitioning.

              KStream<byte[], String> stream = ...;
               
              -// Java 8+ example, using lambda expressions
               // Note how we change the key and the key type (similar to `selectKey`)
               // as well as the value and the value type.
               KStream<String, Integer> transformed = stream.map(
              @@ -621,7 +638,7 @@ 

              Map (values only)

              +

              Map (values only)

              Merge

              +

              Merge

              • KStream → KStream
              @@ -657,7 +673,7 @@

              Peek

              +

              Peek

              • KStream → KStream
              @@ -672,13 +688,12 @@

              KStream<byte[], String> stream = ...; -// Java 8+ example, using lambda expressions KStream<byte[], String> unmodifiedStream = stream.peek( (key, value) -> System.out.println("key=" + key + ", value=" + value));

              Print

              +

              Print

              • KStream → void
              @@ -696,7 +711,7 @@

              SelectKey

              +

              SelectKey

              • KStream → KStream
              @@ -709,12 +724,11 @@

              KStream<byte[], String> stream = ...; // Derive a new record key from the record's value. Note how the key type changes, too. -// Java 8+ example, using lambda expressions KStream<String, String> rekeyed = stream.selectKey((key, value) -> value.split(" ")[0])

              Table to Stream

              +

              Table to Stream

              • KTable → KStream
              @@ -728,7 +742,7 @@

              Stream to Table

              +

              Stream to Table

              • KStream → KTable
              @@ -740,7 +754,7 @@

              +

              Repartition

              Kafka Broker (columns)Kafka Broker (columns)
              0.10.0.x 0.10.1.x and 0.10.2.x0.11.0.x and
              1.0.x and
              1.1.x and
              2.0.x and
              2.1.x and
              2.2.x and
              2.3.x and
              2.4.x and
              2.5.x and
              2.6.x and
              2.7.x and
              2.8.x and
              3.0.x and
              3.1.x and
              3.2.x and
              3.3.x and
              3.4.x and
              3.5.x and
              3.6.x and
              3.7.x
              0.11.0.x and
              1.0.x and
              1.1.x and
              2.0.x
              2.1.x and
              2.2.x and
              2.3.x and
              2.4.x and
              2.5.x and
              2.6.x and
              2.7.x and
              2.8.x and
              3.0.x and
              3.1.x and
              3.2.x and
              3.3.x and
              3.4.x and
              3.5.x and
              3.6.x and
              3.7.x and
              3.8.x and
              3.9.x
              4.0.x
              0.10.0.x compatible compatible compatiblecompatible
              0.10.1.x and 0.10.2.x compatible compatiblecompatible
              0.11.0.x compatible with exactly-once turned off
              (requires broker version 0.11.0.x or higher)
              compatible
              1.0.x and
              1.1.x and
              2.0.x and
              2.1.x and
              2.2.0 and
              2.2.0
              compatible with exactly-once turned off
              (requires broker version 0.11.0.x or higher);
              requires message format 0.10 or higher;
              message headers are not supported
              (requires broker version 0.11.0.x or higher
              with message format 0.11 or higher)
              compatible; requires message format 0.10 or higher;
              if message headers are used, message format 0.11
              or higher required
              compatible
              2.2.1 and
              2.3.x and
              2.4.x and
              2.5.x and
              2.6.x and
              2.7.x and
              2.8.x and
              3.0.x and
              3.1.x and
              3.2.x and
              3.3.x and
              3.4.x and
              3.5.x and
              3.6.x and
              3.7.x and
              3.8.x and
              3.9.x
              compatible; requires message format 0.11 or higher;
              enabling exactly-once v2 requires 2.5.x or higher
              compatible
              2.2.1 and
              2.3.x and
              2.4.x and
              2.5.x and
              2.6.x and
              2.7.x and
              2.8.x and
              3.0.x and
              3.1.x and
              3.2.x and
              3.3.x and
              3.4.x and
              3.5.x and
              3.6.x and
              3.7.x
              4.0.x compatible; requires message format 0.11 or higher;
              enabling exactly-once v2 requires 2.4.x or higher
              compatible; enabling exactly-once v2 requires broker version 2.5.x or highercompatible
              diff --git a/docs/toc.html b/docs/toc.html index af670a8de7e1c..a32ee18fc13ea 100644 --- a/docs/toc.html +++ b/docs/toc.html @@ -74,9 +74,10 @@
            63. 4.4 The Producer
            64. 4.5 The Consumer
            65. 4.6 Message Delivery Semantics -
            66. 4.7 Replication -
            67. 4.8 Log Compaction -
            68. 4.9 Quotas +
            69. 4.7 Using Transactions +
            70. 4.8 Replication +
            71. 4.9 Log Compaction +
            72. 4.10 Quotas
            73. 5. Implementation @@ -131,14 +132,8 @@
            74. Further considerations -
            75. 6.5 Important Configs - - -
            76. 6.6 Java Version -
            77. 6.7 Hardware and OS +
            78. 6.5 Java Version +
            79. 6.6 Hardware and OS -
            80. 6.8 Monitoring +
            81. 6.7 Monitoring -
            82. 6.9 ZooKeeper - - -
            83. 6.10 KRaft +
            84. 6.8 KRaft -
            85. 6.11 Tiered Storage +
            86. 6.9 Tiered Storage
            87. 8. Kafka Connect diff --git a/docs/upgrade.html b/docs/upgrade.html index 29129e565718f..1be5e1836c771 100644 --- a/docs/upgrade.html +++ b/docs/upgrade.html @@ -19,9 +19,32 @@ + + + +
              +

              Significant Changes in Kafka 4.0 Release

              +

              The following are some of the updates in the Kafka 4.0 release:

              +
              Removal Zookeeper configs
              +
                +
              • +

                + The password encoder-related configurations have been removed. These configurations were used in + ZooKeeper mode to define the key and backup key for encrypting sensitive data (e.g., passwords), + specify the algorithm and key generation method for password encryption (e.g., AES, RSA), and control + the key length and encryption strength. +

                +
                  +
                • password.encoder.secret
                • +
                • password.encoder.old.secret
                • +
                • password.encoder.keyfactory.algorithm
                • +
                • password.encoder.cipher.algorithm
                • +
                • password.encoder.key.length
                • +
                • password.encoder.iterations
                • +
                +

                + In KRaft mode, Kafka stores sensitive data in records, and the data is not encrypted in Kafka. +

                +
              • +
              • +

                + Removed control.plane.listener.name. In ZooKeeper mode, Kafka relied on ZooKeeper to manage metadata, and some + internal operations (e.g., communication between the controller (a.k.a., broker controller) and brokers) + still required Kafka’s internal control plane for coordination. +

                +

                + In KRaft mode, Kafka eliminates its dependency on ZooKeeper, and the control plane functionality is + fully integrated into Kafka itself. The process roles are clearly separated: brokers handle data-related + requests, while the controller (a.k.a., quorum controller) manages metadata-related requests. The controllers + use the Raft protocol for internal communication, which operates differently from the ZooKeeper model. Use the + following parameters to configure the control plane listener: +

                +
                  +
                • controller.listener.names
                • +
                • listeners
                • +
                • listener.security.protocol.map
                • +
                +
              • +
              • +

                + Removed graceful broker shutdown-related configurations. These configurations were used in ZooKeeper mode + to define the maximum number of retries and the retry backoff time for controlled shutdowns, which + reduced the risk of unplanned leader changes and data inconsistencies. +

                +
                  +
                • controlled.shutdown.max.retries
                • +
                • controlled.shutdown.retry.backoff.ms
                • +
                +

                + In KRaft mode, Kafka uses the Raft protocol to manage metadata. The broker shutdown process differs from + ZooKeeper mode as it is managed by the quorum-based controller. The shutdown process is more reliable + and efficient due to automated leader transfers and metadata updates handled by the controller. +

                +
              • +
              • +

                + Removed the broker id generation-related configurations. These configurations were used in ZooKeeper mode + to define the broker id, specify whether broker ids are auto-generated, and control the broker id generation process. +

                +
                  +
                • reserved.broker.max.id
                • +
                • broker.id.generation.enable
                • +
                • broker.id
                • +
                +

                + Kafka uses the node id in KRaft mode to identify servers. +

                +
                  +
                • node.id
                • +
                +
              • +
              • +

                + Removed dynamic configurations which rely on ZooKeeper. In KRaft mode, to change these configurations, + you need to restart the broker/controller. +

                +
                  +
                • advertised.listeners
                • +
                +
              • +
              • +

                + Removed ZooKeeper-related configurations. +

                +
                  +
                • zookeeper.connect
                • +
                • zookeeper.session.timeout.ms
                • +
                • zookeeper.connection.timeout.ms
                • +
                • zookeeper.set.acl
                • +
                • zookeeper.max.in.flight.requests
                • +
                • zookeeper.ssl.client.enable
                • +
                • zookeeper.clientCnxnSocket
                • +
                • zookeeper.ssl.keystore.location
                • +
                • zookeeper.ssl.keystore.password
                • +
                • zookeeper.ssl.keystore.type
                • +
                • zookeeper.ssl.truststore.location
                • +
                • zookeeper.ssl.truststore.password
                • +
                • zookeeper.ssl.truststore.type
                • +
                • zookeeper.ssl.protocol
                • +
                • zookeeper.ssl.enabled.protocols
                • +
                • zookeeper.ssl.cipher.suites
                • +
                • zookeeper.ssl.endpoint.identification.algorithm
                • +
                • zookeeper.ssl.crl.enable
                • +
                • zookeeper.ssl.ocsp.enable
                • +
                +
              • +
              +
              Removal metrics
              +
                +
              • +

                + Removed the following metrics related to ZooKeeper mode. + ControlPlaneNetworkProcessorAvgIdlePercent + monitored the average fraction of time the control-plane network processors were idle; ControlPlaneExpiredConnectionsKilledCount + monitored the total number of connections disconnected, across all control-plane processors. +

                • ControlPlaneNetworkProcessorAvgIdlePercent
                • ControlPlaneExpiredConnectionsKilledCount

                In KRaft mode, Kafka still provides equivalent metrics for monitoring the network processors and expired connections. Use the following metrics instead (a JMX reading sketch follows this list):

                • NetworkProcessorAvgIdlePercent
                • ExpiredConnectionsKilledCount
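                A hedged sketch of reading the replacement metrics over JMX follows. The JMX endpoint is a placeholder, and the MBean object names assume the usual kafka.network:type=SocketServer naming; verify both against your broker's JMX tree before relying on this.

                    import javax.management.MBeanServerConnection;
                    import javax.management.ObjectName;
                    import javax.management.remote.JMXConnector;
                    import javax.management.remote.JMXConnectorFactory;
                    import javax.management.remote.JMXServiceURL;

                    public class ReadSocketServerMetrics {
                        public static void main(String[] args) throws Exception {
                            // Placeholder endpoint; the broker must be started with JMX enabled (e.g. JMX_PORT=9999).
                            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9999/jmxrmi");
                            try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
                                MBeanServerConnection conn = connector.getMBeanServerConnection();
                                // Assumed object names following the kafka.network:type=SocketServer convention.
                                ObjectName idle = new ObjectName("kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent");
                                ObjectName expired = new ObjectName("kafka.network:type=SocketServer,name=ExpiredConnectionsKilledCount");
                                // Gauge-style metrics typically expose their reading via the "Value" attribute.
                                System.out.println("NetworkProcessorAvgIdlePercent=" + conn.getAttribute(idle, "Value"));
                                System.out.println("ExpiredConnectionsKilledCount=" + conn.getAttribute(expired, "Value"));
                            }
                        }
                    }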

                Removed the metric that monitored the latency in milliseconds of ZooKeeper requests from the broker.

                • kafka.server:type=ZooKeeperClientMetrics,name=ZooKeeperRequestLatencyMs

                In KRaft mode, ZooKeeper is not used, so this metric has been removed.

              \ No newline at end of file diff --git a/generator/src/main/java/org/apache/kafka/message/CodeBuffer.java b/generator/src/main/java/org/apache/kafka/message/CodeBuffer.java index 77febc957d9af..46b82a3ee182b 100644 --- a/generator/src/main/java/org/apache/kafka/message/CodeBuffer.java +++ b/generator/src/main/java/org/apache/kafka/message/CodeBuffer.java @@ -58,11 +58,7 @@ public void write(CodeBuffer other) { } private String indentSpaces() { - StringBuilder bld = new StringBuilder(); - for (int i = 0; i < indent; i++) { - bld.append(" "); - } - return bld.toString(); + return " ".repeat(Math.max(0, indent)); } @Override diff --git a/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordJsonConvertersGenerator.java b/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordJsonConvertersGenerator.java new file mode 100644 index 0000000000000..484049b3c181e --- /dev/null +++ b/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordJsonConvertersGenerator.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.message; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; + +public class CoordinatorRecordJsonConvertersGenerator implements TypeClassGenerator { + private final HeaderGenerator headerGenerator; + private final CodeBuffer buffer; + private final TreeMap records; + + private static final class CoordinatorRecord { + final short id; + MessageSpec key; + MessageSpec value; + + CoordinatorRecord(short id) { + this.id = id; + } + } + + public CoordinatorRecordJsonConvertersGenerator(String packageName) { + this.headerGenerator = new HeaderGenerator(packageName); + this.records = new TreeMap<>(); + this.buffer = new CodeBuffer(); + } + + @Override + public String outputName() { + return MessageGenerator.COORDINATOR_RECORD_JSON_CONVERTERS_JAVA; + } + + @Override + public void registerMessageType(MessageSpec spec) { + switch (spec.type()) { + case COORDINATOR_KEY: { + short id = spec.apiKey().get(); + CoordinatorRecord record = records.computeIfAbsent(id, __ -> new CoordinatorRecord(id)); + if (record.key != null) { + throw new RuntimeException("Duplicate coordinator record key for type " + + id + ". Original claimant: " + record.key.name() + ". New " + + "claimant: " + spec.name()); + } + record.key = spec; + break; + } + + case COORDINATOR_VALUE: { + short id = spec.apiKey().get(); + CoordinatorRecord record = records.computeIfAbsent(id, __ -> new CoordinatorRecord(id)); + if (record.value != null) { + throw new RuntimeException("Duplicate coordinator record value for type " + + id + ". Original claimant: " + record.key.name() + ". 
New " + + "claimant: " + spec.name()); + } + record.value = spec; + break; + } + + default: + // Ignore + } + } + + @Override + public void generateAndWrite(BufferedWriter writer) throws IOException { + buffer.printf("public class CoordinatorRecordJsonConverters {%n"); + buffer.incrementIndent(); + generateWriteKeyJson(); + buffer.printf("%n"); + generateWriteValueJson(); + buffer.printf("%n"); + generateReadKeyFromJson(); + buffer.printf("%n"); + generateReadValueFromJson(); + buffer.printf("%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + headerGenerator.generate(); + + headerGenerator.buffer().write(writer); + buffer.write(writer); + } + + private void generateWriteKeyJson() { + headerGenerator.addImport(MessageGenerator.JSON_NODE_CLASS); + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + + buffer.printf("public static JsonNode writeRecordKeyAsJson(ApiMessage key) {%n"); + buffer.incrementIndent(); + buffer.printf("switch (key.apiKey()) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + String apiMessageClassName = MessageGenerator.capitalizeFirst(entry.getValue().key.name()); + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return %sJsonConverter.write((%s) key, (short) 0);%n", apiMessageClassName, apiMessageClassName); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + key.apiKey());%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateWriteValueJson() { + headerGenerator.addImport(MessageGenerator.JSON_NODE_CLASS); + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + + buffer.printf("public static JsonNode writeRecordValueAsJson(ApiMessage value, short version) {%n"); + buffer.incrementIndent(); + buffer.printf("switch (value.apiKey()) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + String apiMessageClassName = MessageGenerator.capitalizeFirst(entry.getValue().value.name()); + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return %sJsonConverter.write((%s) value, version);%n", apiMessageClassName, apiMessageClassName); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + value.apiKey());%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateReadKeyFromJson() { + headerGenerator.addImport(MessageGenerator.JSON_NODE_CLASS); + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + + buffer.printf("public static ApiMessage readRecordKeyFromJson(JsonNode json, short apiKey) {%n"); + buffer.incrementIndent(); + buffer.printf("switch (apiKey) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + String apiMessageClassName = MessageGenerator.capitalizeFirst(entry.getValue().key.name()); + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return %sJsonConverter.read(json, (short) 0);%n", 
apiMessageClassName); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + apiKey);%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateReadValueFromJson() { + headerGenerator.addImport(MessageGenerator.JSON_NODE_CLASS); + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + + buffer.printf("public static ApiMessage readRecordValueFromJson(JsonNode json, short apiKey, short version) {%n"); + buffer.incrementIndent(); + buffer.printf("switch (apiKey) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + String apiMessageClassName = MessageGenerator.capitalizeFirst(entry.getValue().value.name()); + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return %sJsonConverter.read(json, version);%n", apiMessageClassName); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + apiKey);%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } +} diff --git a/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordTypeGenerator.java b/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordTypeGenerator.java new file mode 100644 index 0000000000000..5a7ce9c1307ba --- /dev/null +++ b/generator/src/main/java/org/apache/kafka/message/CoordinatorRecordTypeGenerator.java @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.message; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.TreeMap; + +public class CoordinatorRecordTypeGenerator implements TypeClassGenerator { + private final HeaderGenerator headerGenerator; + private final CodeBuffer buffer; + private final TreeMap records; + + private static final class CoordinatorRecord { + final short id; + MessageSpec key; + MessageSpec value; + + CoordinatorRecord(short id) { + this.id = id; + } + } + + public CoordinatorRecordTypeGenerator(String packageName) { + this.headerGenerator = new HeaderGenerator(packageName); + this.records = new TreeMap<>(); + this.buffer = new CodeBuffer(); + } + + @Override + public String outputName() { + return MessageGenerator.COORDINATOR_RECORD_TYPE_JAVA; + } + + @Override + public void registerMessageType(MessageSpec spec) { + switch (spec.type()) { + case COORDINATOR_KEY: { + short id = spec.apiKey().get(); + CoordinatorRecord record = records.computeIfAbsent(id, __ -> new CoordinatorRecord(id)); + if (record.key != null) { + throw new RuntimeException("Duplicate coordinator record key for type " + + id + ". Original claimant: " + record.key.name() + ". New " + + "claimant: " + spec.name()); + } + record.key = spec; + break; + } + + case COORDINATOR_VALUE: { + short id = spec.apiKey().get(); + CoordinatorRecord record = records.computeIfAbsent(id, __ -> new CoordinatorRecord(id)); + if (record.value != null) { + throw new RuntimeException("Duplicate coordinator record value for type " + + id + ". Original claimant: " + record.key.name() + ". New " + + "claimant: " + spec.name()); + } + record.value = spec; + break; + } + + default: + // Ignore + } + } + + @Override + public void generateAndWrite(BufferedWriter writer) throws IOException { + generate(); + write(writer); + } + + private void generate() { + buffer.printf("public enum CoordinatorRecordType {%n"); + buffer.incrementIndent(); + generateEnumValues(); + buffer.printf("%n"); + generateInstanceVariables(); + buffer.printf("%n"); + generateEnumConstructor(); + buffer.printf("%n"); + generateFromApiKey(); + buffer.printf("%n"); + generateNewRecordKey(); + buffer.printf("%n"); + generateNewRecordValue(); + buffer.printf("%n"); + generateAccessor("id", "short"); + buffer.printf("%n"); + generateAccessor("lowestSupportedVersion", "short"); + buffer.printf("%n"); + generateAccessor("highestSupportedVersion", "short"); + buffer.printf("%n"); + generateToString(); + buffer.decrementIndent(); + buffer.printf("}%n"); + headerGenerator.generate(); + } + + private String cleanName(String name) { + return name + .replace("Key", "") + .replace("Value", ""); + } + + private void generateEnumValues() { + int numProcessed = 0; + for (Map.Entry entry : records.entrySet()) { + MessageSpec key = entry.getValue().key; + if (key == null) { + throw new RuntimeException("Coordinator record " + entry.getKey() + " has not key."); + } + MessageSpec value = entry.getValue().value; + if (value == null) { + throw new RuntimeException("Coordinator record " + entry.getKey() + " has not key."); + } + String name = cleanName(key.name()); + numProcessed++; + buffer.printf("%s(\"%s\", (short) %d, (short) %d, (short) %d)%s%n", + MessageGenerator.toSnakeCase(name).toUpperCase(Locale.ROOT), + MessageGenerator.capitalizeFirst(name), + entry.getKey(), + value.validVersions().lowest(), + value.validVersions().highest(), + (numProcessed == records.size()) ? 
";" : ","); + } + } + + private void generateInstanceVariables() { + buffer.printf("private final String name;%n"); + buffer.printf("private final short id;%n"); + buffer.printf("private final short lowestSupportedVersion;%n"); + buffer.printf("private final short highestSupportedVersion;%n"); + } + + private void generateEnumConstructor() { + buffer.printf("CoordinatorRecordType(String name, short id, short lowestSupportedVersion, short highestSupportedVersion) {%n"); + buffer.incrementIndent(); + buffer.printf("this.name = name;%n"); + buffer.printf("this.id = id;%n"); + buffer.printf("this.lowestSupportedVersion = lowestSupportedVersion;%n"); + buffer.printf("this.highestSupportedVersion = highestSupportedVersion;%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateFromApiKey() { + buffer.printf("public static CoordinatorRecordType fromId(short id) {%n"); + buffer.incrementIndent(); + buffer.printf("switch (id) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return %s;%n", MessageGenerator. + toSnakeCase(cleanName(entry.getValue().key.name())).toUpperCase(Locale.ROOT)); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + id);%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateNewRecordKey() { + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + buffer.printf("public ApiMessage newRecordKey() {%n"); + buffer.incrementIndent(); + buffer.printf("switch (id) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return new %s();%n", + MessageGenerator.capitalizeFirst(entry.getValue().key.name())); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + id);%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateNewRecordValue() { + headerGenerator.addImport(MessageGenerator.API_MESSAGE_CLASS); + buffer.printf("public ApiMessage newRecordValue() {%n"); + buffer.incrementIndent(); + buffer.printf("switch (id) {%n"); + buffer.incrementIndent(); + for (Map.Entry entry : records.entrySet()) { + buffer.printf("case %d:%n", entry.getKey()); + buffer.incrementIndent(); + buffer.printf("return new %s();%n", + MessageGenerator.capitalizeFirst(entry.getValue().value.name())); + buffer.decrementIndent(); + } + buffer.printf("default:%n"); + buffer.incrementIndent(); + headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS); + buffer.printf("throw new UnsupportedVersionException(\"Unknown record id \"" + + " + id);%n"); + buffer.decrementIndent(); + buffer.decrementIndent(); + buffer.printf("}%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateAccessor(String name, String type) { + buffer.printf("public %s %s() {%n", type, name); 
+ buffer.incrementIndent(); + buffer.printf("return this.%s;%n", name); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void generateToString() { + buffer.printf("@Override%n"); + buffer.printf("public String toString() {%n"); + buffer.incrementIndent(); + buffer.printf("return this.name();%n"); + buffer.decrementIndent(); + buffer.printf("}%n"); + } + + private void write(BufferedWriter writer) throws IOException { + headerGenerator.buffer().write(writer); + buffer.write(writer); + } +} diff --git a/generator/src/main/java/org/apache/kafka/message/FieldType.java b/generator/src/main/java/org/apache/kafka/message/FieldType.java index a0fa201100e2c..4b204fb4950a5 100644 --- a/generator/src/main/java/org/apache/kafka/message/FieldType.java +++ b/generator/src/main/java/org/apache/kafka/message/FieldType.java @@ -504,7 +504,7 @@ default Optional fixedLength() { } default boolean isVariableLength() { - return !fixedLength().isPresent(); + return fixedLength().isEmpty(); } /** diff --git a/generator/src/main/java/org/apache/kafka/message/MessageDataGenerator.java b/generator/src/main/java/org/apache/kafka/message/MessageDataGenerator.java index 9267b48358606..802ac703cefd6 100644 --- a/generator/src/main/java/org/apache/kafka/message/MessageDataGenerator.java +++ b/generator/src/main/java/org/apache/kafka/message/MessageDataGenerator.java @@ -485,7 +485,7 @@ private void generateClassReader(String className, StructSpec struct, for (FieldSpec field : struct.fields()) { Versions validTaggedVersions = field.versions().intersect(field.taggedVersions()); if (!validTaggedVersions.empty()) { - if (!field.tag().isPresent()) { + if (field.tag().isEmpty()) { throw new RuntimeException("Field " + field.name() + " has tagged versions, but no tag."); } buffer.printf("case %d: {%n", field.tag().get()); @@ -1332,19 +1332,24 @@ private void generateVariableLengthFieldSize(FieldSpec field, }). generate(buffer); } else if (field.type().isStruct()) { - // Adding a byte if the field is nullable. A byte works for both regular and tagged struct fields. - VersionConditional.forVersions(field.nullableVersions(), possibleVersions). - ifMember(__ -> { - buffer.printf("_size.addBytes(1);%n"); - }). - generate(buffer); - if (tagged) { buffer.printf("int _sizeBeforeStruct = _size.totalSize();%n", field.camelCaseName()); + // Add a byte if the field is nullable. + VersionConditional.forVersions(field.nullableVersions(), possibleVersions). + ifMember(__ -> { + buffer.printf("_size.addBytes(1);%n"); + }). + generate(buffer); buffer.printf("this.%s.addSize(_size, _cache, _version);%n", field.camelCaseName()); buffer.printf("int _structSize = _size.totalSize() - _sizeBeforeStruct;%n", field.camelCaseName()); buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize));%n"); } else { + // Add a byte if the field is nullable. + VersionConditional.forVersions(field.nullableVersions(), possibleVersions). + ifMember(__ -> { + buffer.printf("_size.addBytes(1);%n"); + }). 
+ generate(buffer); buffer.printf("this.%s.addSize(_size, _cache, _version);%n", field.camelCaseName()); } } else { diff --git a/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java b/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java index 4d7903cd6affd..875b41ec540e8 100644 --- a/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java +++ b/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java @@ -56,6 +56,10 @@ public final class MessageGenerator { static final String API_SCOPE_JAVA = "ApiScope.java"; + static final String COORDINATOR_RECORD_TYPE_JAVA = "CoordinatorRecordType.java"; + + static final String COORDINATOR_RECORD_JSON_CONVERTERS_JAVA = "CoordinatorRecordJsonConverters.java"; + static final String METADATA_RECORD_TYPE_JAVA = "MetadataRecordType.java"; static final String METADATA_JSON_CONVERTERS_JAVA = "MetadataJsonConverters.java"; @@ -193,6 +197,12 @@ private static List createTypeClassGenerators(String package case "MetadataJsonConvertersGenerator": generators.add(new MetadataJsonConvertersGenerator(packageName)); break; + case "CoordinatorRecordTypeGenerator": + generators.add(new CoordinatorRecordTypeGenerator(packageName)); + break; + case "CoordinatorRecordJsonConvertersGenerator": + generators.add(new CoordinatorRecordJsonConvertersGenerator(packageName)); + break; default: throw new RuntimeException("Unknown type class generator type '" + type + "'"); } diff --git a/generator/src/main/java/org/apache/kafka/message/MessageSpec.java b/generator/src/main/java/org/apache/kafka/message/MessageSpec.java index 66a720f2ffd8f..c30ef02b19855 100644 --- a/generator/src/main/java/org/apache/kafka/message/MessageSpec.java +++ b/generator/src/main/java/org/apache/kafka/message/MessageSpec.java @@ -20,7 +20,6 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -42,6 +41,7 @@ public final class MessageSpec { private final boolean latestVersionUnstable; @JsonCreator + @SuppressWarnings({"NPathComplexity", "CyclomaticComplexity"}) public MessageSpec(@JsonProperty("name") String name, @JsonProperty("validVersions") String validVersions, @JsonProperty("deprecatedVersions") String deprecatedVersions, @@ -57,7 +57,7 @@ public MessageSpec(@JsonProperty("name") String name, this.apiKey = apiKey == null ? Optional.empty() : Optional.of(apiKey); this.type = Objects.requireNonNull(type); this.commonStructs = commonStructs == null ? Collections.emptyList() : - Collections.unmodifiableList(new ArrayList<>(commonStructs)); + List.copyOf(commonStructs); if (flexibleVersions == null) { throw new RuntimeException("You must specify a value for flexibleVersions. 
" + "Please use 0+ for all new messages."); @@ -81,6 +81,24 @@ public MessageSpec(@JsonProperty("name") String name, "messages with type `request`"); } this.latestVersionUnstable = latestVersionUnstable; + + if (type == MessageSpecType.COORDINATOR_KEY) { + if (this.apiKey.isEmpty()) { + throw new RuntimeException("The ApiKey must be set for messages " + name + " with type `coordinator-key`"); + } + if (!this.validVersions().equals(new Versions((short) 0, ((short) 0)))) { + throw new RuntimeException("The Versions must be set to `0` for messages " + name + " with type `coordinator-key`"); + } + if (!this.flexibleVersions.empty()) { + throw new RuntimeException("The FlexibleVersions are not supported for messages " + name + " with type `coordinator-key`"); + } + } + + if (type == MessageSpecType.COORDINATOR_VALUE) { + if (this.apiKey.isEmpty()) { + throw new RuntimeException("The ApiKey must be set for messages with type `coordinator-value`"); + } + } } public StructSpec struct() { diff --git a/generator/src/main/java/org/apache/kafka/message/MessageSpecType.java b/generator/src/main/java/org/apache/kafka/message/MessageSpecType.java index a7e7b5e7bd452..3bc9bf6a9339a 100644 --- a/generator/src/main/java/org/apache/kafka/message/MessageSpecType.java +++ b/generator/src/main/java/org/apache/kafka/message/MessageSpecType.java @@ -48,5 +48,11 @@ public enum MessageSpecType { * Other message spec types. */ @JsonProperty("data") - DATA + DATA, + + @JsonProperty("coordinator-key") + COORDINATOR_KEY, + + @JsonProperty("coordinator-value") + COORDINATOR_VALUE } diff --git a/generator/src/main/java/org/apache/kafka/message/checker/CheckerUtils.java b/generator/src/main/java/org/apache/kafka/message/checker/CheckerUtils.java index d4b89f540484c..06d6fe3d552cd 100644 --- a/generator/src/main/java/org/apache/kafka/message/checker/CheckerUtils.java +++ b/generator/src/main/java/org/apache/kafka/message/checker/CheckerUtils.java @@ -22,8 +22,22 @@ import org.apache.kafka.message.MessageSpec; import org.apache.kafka.message.Versions; +import org.eclipse.jgit.api.Git; +import org.eclipse.jgit.lib.ObjectId; +import org.eclipse.jgit.lib.ObjectLoader; +import org.eclipse.jgit.lib.Ref; +import org.eclipse.jgit.lib.Repository; +import org.eclipse.jgit.revwalk.RevCommit; +import org.eclipse.jgit.revwalk.RevTree; +import org.eclipse.jgit.revwalk.RevWalk; +import org.eclipse.jgit.treewalk.TreeWalk; +import org.eclipse.jgit.treewalk.filter.PathFilter; + import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; /** @@ -65,7 +79,7 @@ static void validateTaggedVersions( FieldSpec field, Versions topLevelFlexibleVersions ) { - if (!field.flexibleVersions().isPresent()) { + if (field.flexibleVersions().isEmpty()) { if (!topLevelFlexibleVersions.contains(field.taggedVersions())) { throw new RuntimeException("Tagged versions for " + what + " " + field.name() + " are " + field.taggedVersions() + ", but top " + @@ -91,4 +105,50 @@ static MessageSpec readMessageSpecFromFile(String schemaPath) { throw new RuntimeException("Unable to parse file as MessageSpec: " + schemaPath, e); } } + + /** + * Return a MessageSpec file give file contents. + * + * @param contents The path to read the file from. + * @return The MessageSpec. 
+ */ + static MessageSpec readMessageSpecFromString(String contents) { + try { + return MessageGenerator.JSON_SERDE.readValue(contents, MessageSpec.class); + } catch (Exception e) { + throw new RuntimeException("Unable to parse string as MessageSpec: " + contents, e); + } + } + + /** + * Read a MessageSpec file from remote git repo. + * + * @param filePath The file to read from remote git repo. + * @param ref The specific git reference to be used for testing. + * @return The file contents. + */ + static String getDataFromGit(String filePath, Path gitPath, String ref) throws IOException { + Git git = Git.open(new File(gitPath + "/.git")); + Repository repository = git.getRepository(); + Ref head = repository.getRefDatabase().findRef(ref); + if (head == null) { + throw new IllegalStateException("Cannot find " + ref + " in the repository."); + } + + try (RevWalk revWalk = new RevWalk(repository)) { + RevCommit commit = revWalk.parseCommit(head.getObjectId()); + RevTree tree = commit.getTree(); + try (TreeWalk treeWalk = new TreeWalk(repository)) { + treeWalk.addTree(tree); + treeWalk.setRecursive(true); + treeWalk.setFilter(PathFilter.create(String.valueOf(Paths.get(filePath.substring(1))))); + if (!treeWalk.next()) { + throw new IllegalStateException("Did not find expected file " + filePath.substring(1)); + } + ObjectId objectId = treeWalk.getObjectId(0); + ObjectLoader loader = repository.open(objectId); + return new String(loader.getBytes(), StandardCharsets.UTF_8); + } + } + } } diff --git a/generator/src/main/java/org/apache/kafka/message/checker/MetadataSchemaCheckerTool.java b/generator/src/main/java/org/apache/kafka/message/checker/MetadataSchemaCheckerTool.java index 93cc343a82774..9901db59031fa 100644 --- a/generator/src/main/java/org/apache/kafka/message/checker/MetadataSchemaCheckerTool.java +++ b/generator/src/main/java/org/apache/kafka/message/checker/MetadataSchemaCheckerTool.java @@ -25,6 +25,11 @@ import net.sourceforge.argparse4j.internal.HelpScreenException; import java.io.PrintStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.apache.kafka.message.checker.CheckerUtils.getDataFromGit; public class MetadataSchemaCheckerTool { public static void main(String[] args) throws Exception { @@ -56,6 +61,15 @@ public static void run( evolutionVerifierParser.addArgument("--path2", "-2"). required(true). help("The final schema JSON path."); + Subparser evolutionGitVerifierParser = subparsers.addParser("verify-evolution-git"). + help(" Verify that an evolution of a JSON file is valid using git."); + evolutionGitVerifierParser.addArgument("--file", "-3"). + required(true). + help("The edited JSON file"); + evolutionGitVerifierParser.addArgument("--ref", "-4") + .required(false) + .setDefault("refs/heads/trunk") + .help("Optional Git reference to be used for testing. 
Defaults to 'refs/heads/trunk' if not specified."); Namespace namespace; if (args.length == 0) { namespace = argumentParser.parseArgs(new String[] {"--help"}); @@ -81,6 +95,23 @@ public static void run( ", and path2: " + path2); break; } + case "verify-evolution-git": { + String filePath = "/metadata/src/main/resources/common/metadata/" + namespace.getString("file"); + Path rootKafkaDirectory = Paths.get("").toAbsolutePath(); + while (!Files.exists(rootKafkaDirectory.resolve(".git"))) { + rootKafkaDirectory = rootKafkaDirectory.getParent(); + if (rootKafkaDirectory == null) { + throw new RuntimeException("Invalid directory, need to be within a Git repository"); + } + } + String gitContent = getDataFromGit(filePath, rootKafkaDirectory, namespace.getString("ref")); + EvolutionVerifier verifier = new EvolutionVerifier( + CheckerUtils.readMessageSpecFromFile(rootKafkaDirectory + filePath), + CheckerUtils.readMessageSpecFromString(gitContent)); + verifier.verify(); + writer.println("Successfully verified evolution of file: " + namespace.getString("file")); + break; + } default: throw new RuntimeException("Unknown command " + command); } diff --git a/generator/src/test/java/org/apache/kafka/message/checker/MetadataSchemaCheckerToolTest.java b/generator/src/test/java/org/apache/kafka/message/checker/MetadataSchemaCheckerToolTest.java index 6cedfdbc112f4..615dfe4255e25 100644 --- a/generator/src/test/java/org/apache/kafka/message/checker/MetadataSchemaCheckerToolTest.java +++ b/generator/src/test/java/org/apache/kafka/message/checker/MetadataSchemaCheckerToolTest.java @@ -26,6 +26,20 @@ import static org.junit.jupiter.api.Assertions.assertEquals; public class MetadataSchemaCheckerToolTest { + @Test + public void testVerifyEvolutionGit() throws Exception { + try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { + MetadataSchemaCheckerTool.run( + // In the CI environment because the CI fetch command only creates HEAD and refs/remotes/pull/... references. + // Since there may not be other branches like refs/heads/trunk in CI, HEAD serves as the baseline reference. 
+ new String[]{"verify-evolution-git", "--file", "AbortTransactionRecord.json", "--ref", "HEAD"}, + new PrintStream(stream) + ); + assertEquals("Successfully verified evolution of file: AbortTransactionRecord.json", + stream.toString().trim()); + } + } + @Test public void testSuccessfulParse() throws Exception { try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { diff --git a/gradle.properties b/gradle.properties index 290ec0d5c21b5..46bfc41b7e1aa 100644 --- a/gradle.properties +++ b/gradle.properties @@ -23,10 +23,10 @@ group=org.apache.kafka # - streams/quickstart/pom.xml # - streams/quickstart/java/src/main/resources/archetype-resources/pom.xml # - streams/quickstart/java/pom.xml -version=4.0.0-SNAPSHOT +version=4.1.0-SNAPSHOT scalaVersion=2.13.15 # Adding swaggerVersion in gradle.properties to have a single version in place for swagger swaggerVersion=2.2.25 task=build -org.gradle.jvmargs=-Xmx2g -Xss4m -XX:+UseParallelGC +org.gradle.jvmargs=-Xmx4g -Xss4m -XX:+UseParallelGC org.gradle.parallel=true diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle index f02f2d5b5ea21..456bc3d430940 100644 --- a/gradle/dependencies.gradle +++ b/gradle/dependencies.gradle @@ -54,13 +54,12 @@ versions += [ apacheds: "2.0.0-M24", argparse4j: "0.7.0", bcpkix: "1.78.1", - caffeine: "2.9.3", // 3.x supports JDK 11 and above - // when updating checkstyle, check whether the exclusion of - // CVE-2023-2976 and CVE-2020-8908 can be dropped from - // gradle/resources/dependencycheck-suppressions.xml - checkstyle: "8.36.2", - commonsCli: "1.4", - commonsIo: "2.14.0", // ZooKeeper dependency. Do not use, this is going away. + // Version >=3.1.2 includes an improvement to prevent hash DOS attacks, + // but currently, tests are failing in >=3.1.2. Therefore, we are temporarily using version 3.1.1. + // The failing tests should be fixed under KAFKA-18089, allowing us to upgrade to >=3.1.2. + caffeine: "3.1.1", + bndlib: "7.0.0", + checkstyle: project.hasProperty('checkstyleVersion') ? checkstyleVersion : "10.20.2", commonsValidator: "1.9.0", classgraph: "4.8.173", dropwizardMetrics: "4.1.12.1", @@ -70,23 +69,21 @@ versions += [ jackson: "2.16.2", jacoco: "0.8.10", javassist: "3.29.2-GA", - jetty: "9.4.56.v20240826", - jersey: "2.39.1", + jetty: "12.0.15", + jersey: "3.1.9", jline: "3.25.1", jmh: "1.37", hamcrest: "2.2", scalaLogging: "3.9.5", jaxAnnotation: "1.3.2", jaxb: "2.3.1", - jaxrs: "2.1.1", + jakartaRs: "3.1.0", + jakartaServletApi: "6.1.0", jfreechart: "1.0.0", jopt: "5.0.4", jose4j: "0.9.4", junit: "5.10.2", jqwik: "1.8.3", - kafka_0100: "0.10.0.1", - kafka_0101: "0.10.1.1", - kafka_0102: "0.10.2.2", kafka_0110: "0.11.0.3", kafka_10: "1.0.2", kafka_11: "1.1.1", @@ -106,20 +103,20 @@ versions += [ kafka_34: "3.4.1", kafka_35: "3.5.2", kafka_36: "3.6.2", - kafka_37: "3.7.1", + kafka_37: "3.7.2", kafka_38: "3.8.1", + kafka_39: "3.9.0", + log4j2: "2.24.1", // When updating lz4 make sure the compression levels in org.apache.kafka.common.record.CompressionType are still valid lz4: "1.8.0", mavenArtifact: "3.9.6", metrics: "2.2.0", mockito: "5.14.2", - netty: "4.1.111.Final", opentelemetryProto: "1.0.0-alpha", protobuf: "3.25.5", // a dependency of opentelemetryProto pcollections: "4.0.1", - reload4j: "1.2.25", re2j: "1.7", - rocksDB: "7.9.2", + rocksDB: "9.7.3", // When updating the scalafmt version please also update the version field in checkstyle/.scalafmt.conf. 
scalafmt now // has the version field as mandatory in its configuration, see // https://github.com/scalameta/scalafmt/releases/tag/v3.1.0. @@ -129,7 +126,6 @@ versions += [ snappy: "1.1.10.5", spotbugs: "4.8.6", zinc: "1.9.2", - zookeeper: "3.8.4", // When updating the zstd version, please do as well in docker/native/native-image-configs/resource-config.json // Also make sure the compression levels in org.apache.kafka.common.record.CompressionType are still valid zstd: "1.5.6-6", @@ -150,26 +146,28 @@ libs += [ apachedsJdbmPartition: "org.apache.directory.server:apacheds-jdbm-partition:$versions.apacheds", argparse4j: "net.sourceforge.argparse4j:argparse4j:$versions.argparse4j", bcpkix: "org.bouncycastle:bcpkix-jdk18on:$versions.bcpkix", + bndlib:"biz.aQute.bnd:biz.aQute.bndlib:$versions.bndlib", caffeine: "com.github.ben-manes.caffeine:caffeine:$versions.caffeine", classgraph: "io.github.classgraph:classgraph:$versions.classgraph", - commonsCli: "commons-cli:commons-cli:$versions.commonsCli", - commonsIo: "commons-io:commons-io:$versions.commonsIo", commonsValidator: "commons-validator:commons-validator:$versions.commonsValidator", jacksonAnnotations: "com.fasterxml.jackson.core:jackson-annotations:$versions.jackson", jacksonDatabind: "com.fasterxml.jackson.core:jackson-databind:$versions.jackson", + jacksonDatabindYaml: "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:$versions.jackson", jacksonDataformatCsv: "com.fasterxml.jackson.dataformat:jackson-dataformat-csv:$versions.jackson", jacksonModuleScala: "com.fasterxml.jackson.module:jackson-module-scala_$versions.baseScala:$versions.jackson", jacksonJDK8Datatypes: "com.fasterxml.jackson.datatype:jackson-datatype-jdk8:$versions.jackson", - jacksonAfterburner: "com.fasterxml.jackson.module:jackson-module-afterburner:$versions.jackson", - jacksonJaxrsJsonProvider: "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:$versions.jackson", + jacksonBlackbird: "com.fasterxml.jackson.module:jackson-module-blackbird:$versions.jackson", + jacksonJakartarsJsonProvider: "com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-json-provider:$versions.jackson", jaxAnnotationApi: "javax.annotation:javax.annotation-api:$versions.jaxAnnotation", jaxbApi: "javax.xml.bind:jaxb-api:$versions.jaxb", - jaxrsApi: "javax.ws.rs:javax.ws.rs-api:$versions.jaxrs", + jakartaRsApi: "jakarta.ws.rs:jakarta.ws.rs-api:$versions.jakartaRs", + jakartaServletApi: "jakarta.servlet:jakarta.servlet-api:$versions.jakartaServletApi", + jaxrs2Jakarta: "io.swagger.core.v3:swagger-jaxrs2-jakarta:$swaggerVersion", javassist: "org.javassist:javassist:$versions.javassist", jettyServer: "org.eclipse.jetty:jetty-server:$versions.jetty", jettyClient: "org.eclipse.jetty:jetty-client:$versions.jetty", - jettyServlet: "org.eclipse.jetty:jetty-servlet:$versions.jetty", - jettyServlets: "org.eclipse.jetty:jetty-servlets:$versions.jetty", + jettyServlet: "org.eclipse.jetty.ee10:jetty-ee10-servlet:$versions.jetty", + jettyServlets: "org.eclipse.jetty.ee10:jetty-ee10-servlets:$versions.jetty", jerseyContainerServlet: "org.glassfish.jersey.containers:jersey-container-servlet:$versions.jersey", jerseyHk2: "org.glassfish.jersey.inject:jersey-hk2:$versions.jersey", jline: "org.jline:jline:$versions.jline", @@ -184,9 +182,6 @@ libs += [ junitPlatformLanucher: "org.junit.platform:junit-platform-launcher:$versions.junitPlatform", jqwik: "net.jqwik:jqwik:$versions.jqwik", hamcrest: "org.hamcrest:hamcrest:$versions.hamcrest", - kafkaStreams_0100: 
"org.apache.kafka:kafka-streams:$versions.kafka_0100", - kafkaStreams_0101: "org.apache.kafka:kafka-streams:$versions.kafka_0101", - kafkaStreams_0102: "org.apache.kafka:kafka-streams:$versions.kafka_0102", kafkaStreams_0110: "org.apache.kafka:kafka-streams:$versions.kafka_0110", kafkaStreams_10: "org.apache.kafka:kafka-streams:$versions.kafka_10", kafkaStreams_11: "org.apache.kafka:kafka-streams:$versions.kafka_11", @@ -208,28 +203,28 @@ libs += [ kafkaStreams_36: "org.apache.kafka:kafka-streams:$versions.kafka_36", kafkaStreams_37: "org.apache.kafka:kafka-streams:$versions.kafka_37", kafkaStreams_38: "org.apache.kafka:kafka-streams:$versions.kafka_38", + kafkaStreams_39: "org.apache.kafka:kafka-streams:$versions.kafka_39", + log4j1Bridge2Api: "org.apache.logging.log4j:log4j-1.2-api:$versions.log4j2", + log4j2Api: "org.apache.logging.log4j:log4j-api:$versions.log4j2", + log4j2Core: "org.apache.logging.log4j:log4j-core:$versions.log4j2", lz4: "org.lz4:lz4-java:$versions.lz4", metrics: "com.yammer.metrics:metrics-core:$versions.metrics", dropwizardMetrics: "io.dropwizard.metrics:metrics-core:$versions.dropwizardMetrics", mockitoCore: "org.mockito:mockito-core:$versions.mockito", mockitoJunitJupiter: "org.mockito:mockito-junit-jupiter:$versions.mockito", - nettyHandler: "io.netty:netty-handler:$versions.netty", - nettyTransportNativeEpoll: "io.netty:netty-transport-native-epoll:$versions.netty", pcollections: "org.pcollections:pcollections:$versions.pcollections", opentelemetryProto: "io.opentelemetry.proto:opentelemetry-proto:$versions.opentelemetryProto", protobuf: "com.google.protobuf:protobuf-java:$versions.protobuf", - reload4j: "ch.qos.reload4j:reload4j:$versions.reload4j", re2j: "com.google.re2j:re2j:$versions.re2j", rocksDBJni: "org.rocksdb:rocksdbjni:$versions.rocksDB", scalaLibrary: "org.scala-lang:scala-library:$versions.scala", scalaLogging: "com.typesafe.scala-logging:scala-logging_$versions.baseScala:$versions.scalaLogging", scalaReflect: "org.scala-lang:scala-reflect:$versions.scala", slf4jApi: "org.slf4j:slf4j-api:$versions.slf4j", - slf4jReload4j: "org.slf4j:slf4j-reload4j:$versions.slf4j", + slf4jLog4j2: "org.apache.logging.log4j:log4j-slf4j-impl:$versions.log4j2", snappy: "org.xerial.snappy:snappy-java:$versions.snappy", + spotbugs: "com.github.spotbugs:spotbugs-annotations:$versions.spotbugs", swaggerAnnotations: "io.swagger.core.v3:swagger-annotations:$swaggerVersion", - swaggerJaxrs2: "io.swagger.core.v3:swagger-jaxrs2:$swaggerVersion", - zookeeper: "org.apache.zookeeper:zookeeper:$versions.zookeeper", jfreechart: "jfreechart:jfreechart:$versions.jfreechart", mavenArtifact: "org.apache.maven:maven-artifact:$versions.mavenArtifact", zstd: "com.github.luben:zstd-jni:$versions.zstd", diff --git a/gradle/resources/dependencycheck-suppressions.xml b/gradle/resources/dependencycheck-suppressions.xml index 5ce34df1d2ef4..b28adc9002875 100644 --- a/gradle/resources/dependencycheck-suppressions.xml +++ b/gradle/resources/dependencycheck-suppressions.xml @@ -23,17 +23,6 @@ ]]> CVE-2023-35116 - - - CVE-2020-8908 - CVE-2023-2976 - + + + + + @@ -264,6 +271,12 @@ For a detailed description of spotbugs bug categories, see https://spotbugs.read + + + + + + - - - - - - - - diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.java index 
f46860f33a50b..b0febdfcff3c7 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/ConsumerGroupPartitionAssignor.java @@ -16,14 +16,8 @@ */ package org.apache.kafka.coordinator.group.api.assignor; -import org.apache.kafka.common.annotation.InterfaceStability; - /** * Server-side partition assignor for consumer groups used by the GroupCoordinator. - * - * The new consumer group protocol is in preview so this interface is considered - * unstable until Apache Kafka 4.0. */ -@InterfaceStability.Unstable public interface ConsumerGroupPartitionAssignor extends PartitionAssignor { } diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.java index bf126334a17e0..4464ddded42e4 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupAssignment.java @@ -16,15 +16,12 @@ */ package org.apache.kafka.coordinator.group.api.assignor; -import org.apache.kafka.common.annotation.InterfaceStability; - import java.util.Map; import java.util.Objects; /** * The partition assignment for a consumer group. */ -@InterfaceStability.Unstable public class GroupAssignment { /** * The member assignments keyed by member id. diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.java index ec417099629db..54b5690d74d88 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/GroupSpec.java @@ -17,14 +17,12 @@ package org.apache.kafka.coordinator.group.api.assignor; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Collection; /** * The group metadata specifications required to compute the target assignment. */ -@InterfaceStability.Unstable public interface GroupSpec { /** * @return All the member Ids of the consumer group. diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.java index 52b5c564e7df2..07f48649d72be 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberAssignment.java @@ -17,7 +17,6 @@ package org.apache.kafka.coordinator.group.api.assignor; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Map; import java.util.Set; @@ -25,7 +24,6 @@ /** * The partition assignment for a consumer group member. 
*/ -@InterfaceStability.Unstable public interface MemberAssignment { /** * @return The assigned partitions keyed by topic Ids. diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.java index fcf7b84baf0b7..9de2b14cfead4 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/MemberSubscription.java @@ -17,7 +17,6 @@ package org.apache.kafka.coordinator.group.api.assignor; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Optional; import java.util.Set; @@ -25,7 +24,6 @@ /** * Interface representing the subscription metadata for a group member. */ -@InterfaceStability.Unstable public interface MemberSubscription { /** * Gets the rack Id if present. diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.java index 3d4f8efbaa698..10e7ecdd6cf8e 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/PartitionAssignor.java @@ -16,15 +16,9 @@ */ package org.apache.kafka.coordinator.group.api.assignor; -import org.apache.kafka.common.annotation.InterfaceStability; - /** * Server-side partition assignor used by the GroupCoordinator. - * - * The new consumer group protocol is in preview so this interface is considered - * unstable until Apache Kafka 4.0. */ -@InterfaceStability.Unstable public interface PartitionAssignor { /** * Unique name for this assignor. diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.java index ca1e5a514b85e..b677384a91562 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscribedTopicDescriber.java @@ -17,18 +17,13 @@ package org.apache.kafka.coordinator.group.api.assignor; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Set; /** * The subscribed topic describer is used by the {@link PartitionAssignor} * to obtain topic and partition metadata of the subscribed topics. - * - * The interface is kept in an internal module until KIP-848 is fully - * implemented and ready to be released. */ -@InterfaceStability.Unstable public interface SubscribedTopicDescriber { /** * The number of partitions for the given topic Id. 
diff --git a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.java b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.java index cab35bbf3dbf9..bd429eb9d150b 100644 --- a/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.java +++ b/group-coordinator/group-coordinator-api/src/main/java/org/apache/kafka/coordinator/group/api/assignor/SubscriptionType.java @@ -16,21 +16,18 @@ */ package org.apache.kafka.coordinator.group.api.assignor; -import org.apache.kafka.common.annotation.InterfaceStability; - /** * The subscription type followed by a consumer group. */ -@InterfaceStability.Unstable public enum SubscriptionType { /** * A homogeneous subscription type means that all the members - * of the group are subscribed to the same set of topics. + * of the group use the same subscription. */ HOMOGENEOUS("Homogeneous"), /** * A heterogeneous subscription type means that not all the members - * of the group are subscribed to the same set of topics. + * of the group use the same subscription. */ HETEROGENEOUS("Heterogeneous"); diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/Group.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/Group.java index ab69970140e91..b5d634997510d 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/Group.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/Group.java @@ -67,6 +67,13 @@ public static GroupType parse(String name) { return type == null ? UNKNOWN : type; } + + static String[] documentValidValues() { + return Arrays.stream(GroupType.values()) + .filter(type -> type != UNKNOWN) + .map(GroupType::toString) + .toArray(String[]::new); + } } /** diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupConfig.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupConfig.java index 934055d9d5bf2..03f0af738d209 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupConfig.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupConfig.java @@ -21,10 +21,8 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.errors.InvalidConfigurationException; -import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig; -import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Properties; @@ -34,7 +32,6 @@ import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Type.INT; import static org.apache.kafka.common.config.ConfigDef.Type.STRING; -import static org.apache.kafka.common.config.ConfigDef.ValidString.in; /** * Group configuration related parameters and supporting methods like validation, etc. 
are @@ -53,8 +50,14 @@ public final class GroupConfig extends AbstractConfig { public static final String SHARE_RECORD_LOCK_DURATION_MS_CONFIG = "share.record.lock.duration.ms"; public static final String SHARE_AUTO_OFFSET_RESET_CONFIG = "share.auto.offset.reset"; - public static final String SHARE_AUTO_OFFSET_RESET_DEFAULT = ShareGroupAutoOffsetReset.LATEST.toString(); - public static final String SHARE_AUTO_OFFSET_RESET_DOC = "The strategy to initialize the share-partition start offset."; + public static final String SHARE_AUTO_OFFSET_RESET_DEFAULT = ShareGroupAutoOffsetResetStrategy.LATEST.name(); + public static final String SHARE_AUTO_OFFSET_RESET_DOC = "The strategy to initialize the share-partition start offset. " + + "
              • earliest: automatically reset the offset to the earliest offset
              • " + + "
              • latest: automatically reset the offset to the latest offset
              • " + + "
              • by_duration:<duration>: automatically reset the offset to a configured duration from the current timestamp. " + + "<duration> must be specified in ISO8601 format (PnDTnHnMn.nS). " + + "Negative duration is not allowed.
              • " + + "
              • anything else: throw exception to the share consumer.
              "; public final int consumerSessionTimeoutMs; @@ -102,7 +105,7 @@ public final class GroupConfig extends AbstractConfig { .define(SHARE_AUTO_OFFSET_RESET_CONFIG, STRING, SHARE_AUTO_OFFSET_RESET_DEFAULT, - in(Utils.enumOptions(ShareGroupAutoOffsetReset.class)), + new ShareGroupAutoOffsetResetStrategy.Validator(), MEDIUM, SHARE_AUTO_OFFSET_RESET_DOC); @@ -223,8 +226,8 @@ public static GroupConfig fromProps(Map defaults, Properties overrides) { /** * The default share group auto offset reset strategy. */ - public static ShareGroupAutoOffsetReset defaultShareAutoOffsetReset() { - return ShareGroupAutoOffsetReset.valueOf(SHARE_AUTO_OFFSET_RESET_DEFAULT.toUpperCase(Locale.ROOT)); + public static ShareGroupAutoOffsetResetStrategy defaultShareAutoOffsetReset() { + return ShareGroupAutoOffsetResetStrategy.fromString(SHARE_AUTO_OFFSET_RESET_DEFAULT); } /** @@ -265,16 +268,7 @@ public int shareRecordLockDurationMs() { /** * The share group auto offset reset strategy. */ - public ShareGroupAutoOffsetReset shareAutoOffsetReset() { - return ShareGroupAutoOffsetReset.valueOf(shareAutoOffsetReset.toUpperCase(Locale.ROOT)); - } - - public enum ShareGroupAutoOffsetReset { - LATEST, EARLIEST; - - @Override - public String toString() { - return super.toString().toLowerCase(Locale.ROOT); - } + public ShareGroupAutoOffsetResetStrategy shareAutoOffsetReset() { + return ShareGroupAutoOffsetResetStrategy.fromString(shareAutoOffsetReset); } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinator.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinator.java index 87efb530dc8fa..b57963aed9247 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinator.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinator.java @@ -339,11 +339,18 @@ CompletableFuture completeTransaction( /** * Commit or abort the pending transactional offsets for the given partitions. * + * This method is only used by the old group coordinator. Internally, the old + * group coordinator completes the transaction asynchronously in order to + * avoid deadlocks. Hence, this method returns a future that the caller + * can wait on. + * * @param producerId The producer id. * @param partitions The partitions. * @param transactionResult The result of the transaction. + * + * @return A future yielding the result. */ - void onTransactionCompleted( + CompletableFuture onTransactionCompleted( long producerId, Iterable partitions, TransactionResult transactionResult @@ -420,7 +427,6 @@ void onNewMetadataImage( * * @param groupId The group id. 
* @param newGroupConfig The new group config - * @return void */ void updateGroupConfig(String groupId, Properties newGroupConfig); diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfig.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfig.java index efe01ddd53763..804d80b226686 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfig.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfig.java @@ -25,13 +25,11 @@ import org.apache.kafka.coordinator.group.assignor.UniformAssignor; import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.stream.Collectors; import static org.apache.kafka.common.config.ConfigDef.Importance.HIGH; import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM; @@ -75,15 +73,16 @@ public class GroupCoordinatorConfig { public static final boolean NEW_GROUP_COORDINATOR_ENABLE_DEFAULT = true; public static final String GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG = "group.coordinator.rebalance.protocols"; - public static final String GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DOC = "The list of enabled rebalance protocols. Supported protocols: " + - Arrays.stream(Group.GroupType.values()).map(Group.GroupType::toString).collect(Collectors.joining(",")) + ". " + + public static final String GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DOC = "The list of enabled rebalance protocols." + "The " + Group.GroupType.SHARE + " rebalance protocol is in early access and therefore must not be used in production."; - public static final List GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DEFAULT = - Collections.unmodifiableList(Arrays.asList(Group.GroupType.CLASSIC.toString(), Group.GroupType.CONSUMER.toString())); + public static final List GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DEFAULT = List.of( + Group.GroupType.CLASSIC.toString(), + Group.GroupType.CONSUMER.toString() + ); public static final String GROUP_COORDINATOR_APPEND_LINGER_MS_CONFIG = "group.coordinator.append.linger.ms"; public static final String GROUP_COORDINATOR_APPEND_LINGER_MS_DOC = "The duration in milliseconds that the coordinator will " + "wait for writes to accumulate before flushing them to disk. Transactional writes are not accumulated."; - public static final int GROUP_COORDINATOR_APPEND_LINGER_MS_DEFAULT = 10; + public static final int GROUP_COORDINATOR_APPEND_LINGER_MS_DEFAULT = 5; public static final String GROUP_COORDINATOR_NUM_THREADS_CONFIG = "group.coordinator.threads"; public static final String GROUP_COORDINATOR_NUM_THREADS_DOC = "The number of threads used by the group coordinator."; @@ -115,15 +114,18 @@ public class GroupCoordinatorConfig { public static final int CONSUMER_GROUP_MAX_HEARTBEAT_INTERVAL_MS_DEFAULT = 15000; public static final String CONSUMER_GROUP_MAX_SIZE_CONFIG = "group.consumer.max.size"; - public static final String CONSUMER_GROUP_MAX_SIZE_DOC = "The maximum number of consumers that a single consumer group can accommodate. This value will only impact the new consumer coordinator. To configure the classic consumer coordinator check " + GROUP_MAX_SIZE_CONFIG + " instead."; + public static final String CONSUMER_GROUP_MAX_SIZE_DOC = "The maximum number of consumers " + + "that a single consumer group can accommodate. 
This value will only impact groups under " + + "the CONSUMER group protocol. To configure the max group size when using the CLASSIC " + + "group protocol use " + GROUP_MAX_SIZE_CONFIG + " " + "instead."; public static final int CONSUMER_GROUP_MAX_SIZE_DEFAULT = Integer.MAX_VALUE; public static final String CONSUMER_GROUP_ASSIGNORS_CONFIG = "group.consumer.assignors"; public static final String CONSUMER_GROUP_ASSIGNORS_DOC = "The server side assignors as a list of full class names. The first one in the list is considered as the default assignor to be used in the case where the consumer does not specify an assignor."; - public static final List CONSUMER_GROUP_ASSIGNORS_DEFAULT = Collections.unmodifiableList(Arrays.asList( - UniformAssignor.class.getName(), - RangeAssignor.class.getName() - )); + public static final List CONSUMER_GROUP_ASSIGNORS_DEFAULT = List.of( + UniformAssignor.class.getName(), + RangeAssignor.class.getName() + ); public static final String CONSUMER_GROUP_MIGRATION_POLICY_CONFIG = "group.consumer.migration.policy"; public static final String CONSUMER_GROUP_MIGRATION_POLICY_DEFAULT = ConsumerGroupMigrationPolicy.BIDIRECTIONAL.toString(); @@ -205,21 +207,21 @@ public class GroupCoordinatorConfig { public static final String OFFSET_COMMIT_TIMEOUT_MS_DOC = "Offset commit will be delayed until all replicas for the offsets topic receive the commit " + "or this timeout is reached. This is similar to the producer request timeout."; - public static final ConfigDef GROUP_COORDINATOR_CONFIG_DEF = new ConfigDef() + public static final ConfigDef GROUP_COORDINATOR_CONFIG_DEF = new ConfigDef() .define(GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, INT, GROUP_MIN_SESSION_TIMEOUT_MS_DEFAULT, MEDIUM, GROUP_MIN_SESSION_TIMEOUT_MS_DOC) .define(GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, INT, GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT, MEDIUM, GROUP_MAX_SESSION_TIMEOUT_MS_DOC) .define(GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, INT, GROUP_INITIAL_REBALANCE_DELAY_MS_DEFAULT, MEDIUM, GROUP_INITIAL_REBALANCE_DELAY_MS_DOC) .define(GROUP_MAX_SIZE_CONFIG, INT, GROUP_MAX_SIZE_DEFAULT, atLeast(1), MEDIUM, GROUP_MAX_SIZE_DOC); - public static final ConfigDef NEW_GROUP_CONFIG_DEF = new ConfigDef() + public static final ConfigDef NEW_GROUP_CONFIG_DEF = new ConfigDef() .define(GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, LIST, GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DEFAULT, - ConfigDef.ValidList.in(Utils.enumOptions(Group.GroupType.class)), MEDIUM, GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DOC) + ConfigDef.ValidList.in(Group.GroupType.documentValidValues()), MEDIUM, GROUP_COORDINATOR_REBALANCE_PROTOCOLS_DOC) .define(GROUP_COORDINATOR_NUM_THREADS_CONFIG, INT, GROUP_COORDINATOR_NUM_THREADS_DEFAULT, atLeast(1), MEDIUM, GROUP_COORDINATOR_NUM_THREADS_DOC) .define(GROUP_COORDINATOR_APPEND_LINGER_MS_CONFIG, INT, GROUP_COORDINATOR_APPEND_LINGER_MS_DEFAULT, atLeast(0), MEDIUM, GROUP_COORDINATOR_APPEND_LINGER_MS_DOC) // Internal configuration used by integration and system tests. 
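The reworded doc string above draws the line between the two size limits: group.consumer.max.size caps groups on the CONSUMER group protocol, while the CLASSIC protocol limit is configured through GROUP_MAX_SIZE_CONFIG (assumed here to be group.max.size, the existing broker config). A small illustrative snippet, expressed as java.util.Properties rather than a server.properties file:

import java.util.Properties;

public class GroupSizeConfigSketch {
    public static void main(String[] args) {
        Properties brokerProps = new Properties();
        // Caps groups using the CONSUMER group protocol (the config documented above).
        brokerProps.setProperty("group.consumer.max.size", "500");
        // Caps groups using the CLASSIC group protocol.
        brokerProps.setProperty("group.max.size", "250");
        brokerProps.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}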
.defineInternal(NEW_GROUP_COORDINATOR_ENABLE_CONFIG, BOOLEAN, NEW_GROUP_COORDINATOR_ENABLE_DEFAULT, null, MEDIUM, NEW_GROUP_COORDINATOR_ENABLE_DOC); - public static final ConfigDef OFFSET_MANAGEMENT_CONFIG_DEF = new ConfigDef() + public static final ConfigDef OFFSET_MANAGEMENT_CONFIG_DEF = new ConfigDef() .define(OFFSET_METADATA_MAX_SIZE_CONFIG, INT, OFFSET_METADATA_MAX_SIZE_DEFAULT, HIGH, OFFSET_METADATA_MAX_SIZE_DOC) .define(OFFSETS_LOAD_BUFFER_SIZE_CONFIG, INT, OFFSETS_LOAD_BUFFER_SIZE_DEFAULT, atLeast(1), HIGH, OFFSETS_LOAD_BUFFER_SIZE_DOC) .define(OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, SHORT, OFFSETS_TOPIC_REPLICATION_FACTOR_DEFAULT, atLeast(1), HIGH, OFFSETS_TOPIC_REPLICATION_FACTOR_DOC) @@ -229,7 +231,8 @@ public class GroupCoordinatorConfig { .define(OFFSETS_RETENTION_MINUTES_CONFIG, INT, OFFSETS_RETENTION_MINUTES_DEFAULT, atLeast(1), HIGH, OFFSETS_RETENTION_MINUTES_DOC) .define(OFFSETS_RETENTION_CHECK_INTERVAL_MS_CONFIG, LONG, OFFSETS_RETENTION_CHECK_INTERVAL_MS_DEFAULT, atLeast(1), HIGH, OFFSETS_RETENTION_CHECK_INTERVAL_MS_DOC) .define(OFFSET_COMMIT_TIMEOUT_MS_CONFIG, INT, OFFSET_COMMIT_TIMEOUT_MS_DEFAULT, atLeast(1), HIGH, OFFSET_COMMIT_TIMEOUT_MS_DOC); - public static final ConfigDef CONSUMER_GROUP_CONFIG_DEF = new ConfigDef() + + public static final ConfigDef CONSUMER_GROUP_CONFIG_DEF = new ConfigDef() .define(CONSUMER_GROUP_SESSION_TIMEOUT_MS_CONFIG, INT, CONSUMER_GROUP_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, CONSUMER_GROUP_SESSION_TIMEOUT_MS_DOC) .define(CONSUMER_GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, INT, CONSUMER_GROUP_MIN_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, CONSUMER_GROUP_MIN_SESSION_TIMEOUT_MS_DOC) .define(CONSUMER_GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, INT, CONSUMER_GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, CONSUMER_GROUP_MAX_SESSION_TIMEOUT_MS_DOC) @@ -238,15 +241,16 @@ public class GroupCoordinatorConfig { .define(CONSUMER_GROUP_MAX_HEARTBEAT_INTERVAL_MS_CONFIG, INT, CONSUMER_GROUP_MAX_HEARTBEAT_INTERVAL_MS_DEFAULT, atLeast(1), MEDIUM, CONSUMER_GROUP_MAX_HEARTBEAT_INTERVAL_MS_DOC) .define(CONSUMER_GROUP_MAX_SIZE_CONFIG, INT, CONSUMER_GROUP_MAX_SIZE_DEFAULT, atLeast(1), MEDIUM, CONSUMER_GROUP_MAX_SIZE_DOC) .define(CONSUMER_GROUP_ASSIGNORS_CONFIG, LIST, CONSUMER_GROUP_ASSIGNORS_DEFAULT, null, MEDIUM, CONSUMER_GROUP_ASSIGNORS_DOC) - .defineInternal(CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, STRING, CONSUMER_GROUP_MIGRATION_POLICY_DEFAULT, ConfigDef.CaseInsensitiveValidString.in(Utils.enumOptions(ConsumerGroupMigrationPolicy.class)), MEDIUM, CONSUMER_GROUP_MIGRATION_POLICY_DOC); - public static final ConfigDef SHARE_GROUP_CONFIG_DEF = new ConfigDef() + .define(CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, STRING, CONSUMER_GROUP_MIGRATION_POLICY_DEFAULT, ConfigDef.CaseInsensitiveValidString.in(Utils.enumOptions(ConsumerGroupMigrationPolicy.class)), MEDIUM, CONSUMER_GROUP_MIGRATION_POLICY_DOC); + + public static final ConfigDef SHARE_GROUP_CONFIG_DEF = new ConfigDef() .define(SHARE_GROUP_SESSION_TIMEOUT_MS_CONFIG, INT, SHARE_GROUP_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, SHARE_GROUP_SESSION_TIMEOUT_MS_DOC) .define(SHARE_GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, INT, SHARE_GROUP_MIN_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, SHARE_GROUP_MIN_SESSION_TIMEOUT_MS_DOC) .define(SHARE_GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, INT, SHARE_GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT, atLeast(1), MEDIUM, SHARE_GROUP_MAX_SESSION_TIMEOUT_MS_DOC) .define(SHARE_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, INT, SHARE_GROUP_HEARTBEAT_INTERVAL_MS_DEFAULT, atLeast(1), MEDIUM, 
SHARE_GROUP_HEARTBEAT_INTERVAL_MS_DOC) .define(SHARE_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, INT, SHARE_GROUP_MIN_HEARTBEAT_INTERVAL_MS_DEFAULT, atLeast(1), MEDIUM, SHARE_GROUP_MIN_HEARTBEAT_INTERVAL_MS_DOC) .define(SHARE_GROUP_MAX_HEARTBEAT_INTERVAL_MS_CONFIG, INT, SHARE_GROUP_MAX_HEARTBEAT_INTERVAL_MS_DEFAULT, atLeast(1), MEDIUM, SHARE_GROUP_MAX_HEARTBEAT_INTERVAL_MS_DOC) - .define(SHARE_GROUP_MAX_SIZE_CONFIG, INT, SHARE_GROUP_MAX_SIZE_DEFAULT, between(10, 1000), MEDIUM, SHARE_GROUP_MAX_SIZE_DOC); + .define(SHARE_GROUP_MAX_SIZE_CONFIG, INT, SHARE_GROUP_MAX_SIZE_DEFAULT, between(1, 1000), MEDIUM, SHARE_GROUP_MAX_SIZE_DOC); /** * The timeout used to wait for a new member in milliseconds. @@ -286,14 +290,14 @@ public class GroupCoordinatorConfig { private final int shareGroupMinHeartbeatIntervalMs; private final int shareGroupMaxHeartbeatIntervalMs; + @SuppressWarnings("this-escape") public GroupCoordinatorConfig(AbstractConfig config) { this.numThreads = config.getInt(GroupCoordinatorConfig.GROUP_COORDINATOR_NUM_THREADS_CONFIG); this.appendLingerMs = config.getInt(GroupCoordinatorConfig.GROUP_COORDINATOR_APPEND_LINGER_MS_CONFIG); this.consumerGroupSessionTimeoutMs = config.getInt(GroupCoordinatorConfig.CONSUMER_GROUP_SESSION_TIMEOUT_MS_CONFIG); this.consumerGroupHeartbeatIntervalMs = config.getInt(GroupCoordinatorConfig.CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG); this.consumerGroupMaxSize = config.getInt(GroupCoordinatorConfig.CONSUMER_GROUP_MAX_SIZE_CONFIG); - this.consumerGroupAssignors = Collections.unmodifiableList( - config.getConfiguredInstances(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, ConsumerGroupPartitionAssignor.class)); + this.consumerGroupAssignors = consumerGroupAssignors(config); this.offsetsTopicSegmentBytes = config.getInt(GroupCoordinatorConfig.OFFSETS_TOPIC_SEGMENT_BYTES_CONFIG); this.offsetMetadataMaxSize = config.getInt(GroupCoordinatorConfig.OFFSET_METADATA_MAX_SIZE_CONFIG); this.classicGroupMaxSize = config.getInt(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG); @@ -366,6 +370,34 @@ public GroupCoordinatorConfig(AbstractConfig config) { SHARE_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, SHARE_GROUP_SESSION_TIMEOUT_MS_CONFIG)); } + public static GroupCoordinatorConfig fromProps( + Map props + ) { + return new GroupCoordinatorConfig( + new AbstractConfig( + Utils.mergeConfigs(List.of( + GroupCoordinatorConfig.GROUP_COORDINATOR_CONFIG_DEF, + GroupCoordinatorConfig.NEW_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.OFFSET_MANAGEMENT_CONFIG_DEF, + GroupCoordinatorConfig.CONSUMER_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.SHARE_GROUP_CONFIG_DEF + )), + props + ) + ); + } + + protected List consumerGroupAssignors( + AbstractConfig config + ) { + return Collections.unmodifiableList( + config.getConfiguredInstances( + GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, + ConsumerGroupPartitionAssignor.class + ) + ); + } + /** * Copy the subset of properties that are relevant to consumer group and share group. */ @@ -380,10 +412,9 @@ public Map extractGroupConfigMap(ShareGroupConfig shareGroupCon * Copy the subset of properties that are relevant to consumer group. 
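The new fromProps factory shown above merges the individual ConfigDefs before handing the overrides to AbstractConfig, which is mostly a convenience for tests. A hedged usage sketch, assuming the factory accepts a plain map of overrides as the hunk suggests and that string values are parsed the same way as for other Kafka configs:

import java.util.Map;

import org.apache.kafka.coordinator.group.GroupCoordinatorConfig;

public class GroupCoordinatorConfigSketch {
    public static void main(String[] args) {
        // Overrides only; every key left out falls back to the defaults declared
        // in the merged ConfigDefs.
        Map<String, Object> overrides = Map.of(
            GroupCoordinatorConfig.GROUP_COORDINATOR_APPEND_LINGER_MS_CONFIG, "5",
            GroupCoordinatorConfig.CONSUMER_GROUP_MAX_SIZE_CONFIG, "500"
        );
        GroupCoordinatorConfig config = GroupCoordinatorConfig.fromProps(overrides);
        System.out.println("append linger ms = " + config.appendLingerMs());
    }
}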
*/ public Map extractConsumerGroupConfigMap() { - Map groupProps = new HashMap<>(); - groupProps.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, consumerGroupSessionTimeoutMs()); - groupProps.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, consumerGroupHeartbeatIntervalMs()); - return Collections.unmodifiableMap(groupProps); + return Map.of( + GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, consumerGroupSessionTimeoutMs(), + GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, consumerGroupHeartbeatIntervalMs()); } /** diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpers.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpers.java index b3aa3b9db77fb..f56c4a5b6831b 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpers.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpers.java @@ -35,6 +35,7 @@ import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.group.generated.GroupMetadataKey; import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; @@ -56,7 +57,6 @@ import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.MetadataVersion; import java.util.ArrayList; import java.util.Collections; @@ -69,6 +69,9 @@ * the __consumer_offsets topic. */ public class GroupCoordinatorRecordHelpers { + + private static final short GROUP_METADATA_VALUE_VERSION = 3; + private GroupCoordinatorRecordHelpers() {} /** @@ -89,7 +92,7 @@ public static CoordinatorRecord newConsumerGroupMemberSubscriptionRecord( new ConsumerGroupMemberMetadataKey() .setGroupId(groupId) .setMemberId(member.memberId()), - (short) 5 + CoordinatorRecordType.CONSUMER_GROUP_MEMBER_METADATA.id() ), new ApiMessageAndVersion( new ConsumerGroupMemberMetadataValue() @@ -123,7 +126,7 @@ public static CoordinatorRecord newConsumerGroupMemberSubscriptionTombstoneRecor new ConsumerGroupMemberMetadataKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 5 + CoordinatorRecordType.CONSUMER_GROUP_MEMBER_METADATA.id() ), null // Tombstone. 
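From here on the record helpers replace hard-coded key versions such as (short) 5 with CoordinatorRecordType.<TYPE>.id(), so each record id lives in one generated place instead of being scattered as magic shorts. MiniRecordType below is a hypothetical miniature of that enum, just to show the id()/fromId() round trip; the three ids shown (1, 2, 3) match the ones visible in this diff.

public class RecordTypeSketch {

    enum MiniRecordType {
        OFFSET_COMMIT((short) 1),
        GROUP_METADATA((short) 2),
        CONSUMER_GROUP_METADATA((short) 3);

        private final short id;

        MiniRecordType(short id) {
            this.id = id;
        }

        short id() {
            return id;
        }

        static MiniRecordType fromId(short id) {
            for (MiniRecordType type : values()) {
                if (type.id == id) return type;
            }
            throw new IllegalArgumentException("Unknown record type id " + id);
        }
    }

    public static void main(String[] args) {
        // Writers reference the symbolic id instead of a literal "(short) 2" ...
        short keyVersion = MiniRecordType.GROUP_METADATA.id();
        // ... and readers can dispatch on the enum rather than a bare switch over shorts.
        System.out.println(keyVersion + " -> " + MiniRecordType.fromId(keyVersion));
    }
}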
); @@ -141,19 +144,19 @@ public static CoordinatorRecord newConsumerGroupSubscriptionMetadataRecord( Map newSubscriptionMetadata ) { ConsumerGroupPartitionMetadataValue value = new ConsumerGroupPartitionMetadataValue(); - newSubscriptionMetadata.forEach((topicName, topicMetadata) -> { + newSubscriptionMetadata.forEach((topicName, topicMetadata) -> value.topics().add(new ConsumerGroupPartitionMetadataValue.TopicMetadata() .setTopicId(topicMetadata.id()) .setTopicName(topicMetadata.name()) .setNumPartitions(topicMetadata.numPartitions()) - ); - }); + ) + ); return new CoordinatorRecord( new ApiMessageAndVersion( new ConsumerGroupPartitionMetadataKey() .setGroupId(groupId), - (short) 4 + CoordinatorRecordType.CONSUMER_GROUP_PARTITION_METADATA.id() ), new ApiMessageAndVersion( value, @@ -175,7 +178,7 @@ public static CoordinatorRecord newConsumerGroupSubscriptionMetadataTombstoneRec new ApiMessageAndVersion( new ConsumerGroupPartitionMetadataKey() .setGroupId(groupId), - (short) 4 + CoordinatorRecordType.CONSUMER_GROUP_PARTITION_METADATA.id() ), null // Tombstone. ); @@ -196,7 +199,7 @@ public static CoordinatorRecord newConsumerGroupEpochRecord( new ApiMessageAndVersion( new ConsumerGroupMetadataKey() .setGroupId(groupId), - (short) 3 + CoordinatorRecordType.CONSUMER_GROUP_METADATA.id() ), new ApiMessageAndVersion( new ConsumerGroupMetadataValue() @@ -219,7 +222,7 @@ public static CoordinatorRecord newConsumerGroupEpochTombstoneRecord( new ApiMessageAndVersion( new ConsumerGroupMetadataKey() .setGroupId(groupId), - (short) 3 + CoordinatorRecordType.CONSUMER_GROUP_METADATA.id() ), null // Tombstone. ); @@ -254,7 +257,7 @@ public static CoordinatorRecord newConsumerGroupTargetAssignmentRecord( new ConsumerGroupTargetAssignmentMemberKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 7 + CoordinatorRecordType.CONSUMER_GROUP_TARGET_ASSIGNMENT_MEMBER.id() ), new ApiMessageAndVersion( new ConsumerGroupTargetAssignmentMemberValue() @@ -280,7 +283,7 @@ public static CoordinatorRecord newConsumerGroupTargetAssignmentTombstoneRecord( new ConsumerGroupTargetAssignmentMemberKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 7 + CoordinatorRecordType.CONSUMER_GROUP_TARGET_ASSIGNMENT_MEMBER.id() ), null // Tombstone. ); @@ -301,7 +304,7 @@ public static CoordinatorRecord newConsumerGroupTargetAssignmentEpochRecord( new ApiMessageAndVersion( new ConsumerGroupTargetAssignmentMetadataKey() .setGroupId(groupId), - (short) 6 + CoordinatorRecordType.CONSUMER_GROUP_TARGET_ASSIGNMENT_METADATA.id() ), new ApiMessageAndVersion( new ConsumerGroupTargetAssignmentMetadataValue() @@ -324,7 +327,7 @@ public static CoordinatorRecord newConsumerGroupTargetAssignmentEpochTombstoneRe new ApiMessageAndVersion( new ConsumerGroupTargetAssignmentMetadataKey() .setGroupId(groupId), - (short) 6 + CoordinatorRecordType.CONSUMER_GROUP_TARGET_ASSIGNMENT_METADATA.id() ), null // Tombstone. 
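Several helpers above and below build tombstones by pairing a key with a null value. The self-contained sketch below (MiniRecord and the map-backed state are hypothetical) shows what that convention means on replay: a non-null value upserts the keyed state, a null value deletes it.

import java.util.HashMap;
import java.util.Map;

public class TombstoneSketch {

    record MiniRecord(String key, String value) { }

    private final Map<String, String> state = new HashMap<>();

    void replay(MiniRecord rec) {
        if (rec.value() == null) {
            state.remove(rec.key());            // tombstone: forget this key
        } else {
            state.put(rec.key(), rec.value());  // upsert the keyed state
        }
    }

    public static void main(String[] args) {
        TombstoneSketch shard = new TombstoneSketch();
        shard.replay(new MiniRecord("group:member-1", "subscribed to topic-a"));
        shard.replay(new MiniRecord("group:member-1", null));  // member removed
        System.out.println(shard.state);                        // prints {}
    }
}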
); @@ -346,7 +349,7 @@ public static CoordinatorRecord newConsumerGroupCurrentAssignmentRecord( new ConsumerGroupCurrentMemberAssignmentKey() .setGroupId(groupId) .setMemberId(member.memberId()), - (short) 8 + CoordinatorRecordType.CONSUMER_GROUP_CURRENT_MEMBER_ASSIGNMENT.id() ), new ApiMessageAndVersion( new ConsumerGroupCurrentMemberAssignmentValue() @@ -376,7 +379,7 @@ public static CoordinatorRecord newConsumerGroupCurrentAssignmentTombstoneRecord new ConsumerGroupCurrentMemberAssignmentKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 8 + CoordinatorRecordType.CONSUMER_GROUP_CURRENT_MEMBER_ASSIGNMENT.id() ), null // Tombstone ); @@ -403,7 +406,7 @@ public static CoordinatorRecord newConsumerGroupRegularExpressionRecord( new ConsumerGroupRegularExpressionKey() .setGroupId(groupId) .setRegularExpression(regex), - (short) 16 + CoordinatorRecordType.CONSUMER_GROUP_REGULAR_EXPRESSION.id() ), new ApiMessageAndVersion( new ConsumerGroupRegularExpressionValue() @@ -431,7 +434,7 @@ public static CoordinatorRecord newConsumerGroupRegularExpressionTombstone( new ConsumerGroupRegularExpressionKey() .setGroupId(groupId) .setRegularExpression(regex), - (short) 16 + CoordinatorRecordType.CONSUMER_GROUP_REGULAR_EXPRESSION.id() ), null // Tombstone ); @@ -442,13 +445,11 @@ public static CoordinatorRecord newConsumerGroupRegularExpressionTombstone( * * @param group The classic group. * @param assignment The classic group assignment. - * @param metadataVersion The metadata version. * @return The record. */ public static CoordinatorRecord newGroupMetadataRecord( ClassicGroup group, - Map assignment, - MetadataVersion metadataVersion + Map assignment ) { List members = new ArrayList<>(group.allMembers().size()); group.allMembers().forEach(member -> { @@ -480,7 +481,7 @@ public static CoordinatorRecord newGroupMetadataRecord( new ApiMessageAndVersion( new GroupMetadataKey() .setGroup(group.groupId()), - (short) 2 + CoordinatorRecordType.GROUP_METADATA.id() ), new ApiMessageAndVersion( new GroupMetadataValue() @@ -490,7 +491,7 @@ public static CoordinatorRecord newGroupMetadataRecord( .setLeader(group.leaderOrNull()) .setCurrentStateTimestamp(group.currentStateTimestampOrDefault()) .setMembers(members), - metadataVersion.groupMetadataValueVersion() + GROUP_METADATA_VALUE_VERSION ) ); } @@ -508,7 +509,7 @@ public static CoordinatorRecord newGroupMetadataTombstoneRecord( new ApiMessageAndVersion( new GroupMetadataKey() .setGroup(groupId), - (short) 2 + CoordinatorRecordType.GROUP_METADATA.id() ), null // Tombstone ); @@ -518,18 +519,16 @@ public static CoordinatorRecord newGroupMetadataTombstoneRecord( * Creates an empty GroupMetadata record. * * @param group The classic group. - * @param metadataVersion The metadata version. * @return The record. */ public static CoordinatorRecord newEmptyGroupMetadataRecord( - ClassicGroup group, - MetadataVersion metadataVersion + ClassicGroup group ) { return new CoordinatorRecord( new ApiMessageAndVersion( new GroupMetadataKey() .setGroup(group.groupId()), - (short) 2 + CoordinatorRecordType.GROUP_METADATA.id() ), new ApiMessageAndVersion( new GroupMetadataValue() @@ -539,7 +538,7 @@ public static CoordinatorRecord newEmptyGroupMetadataRecord( .setLeader(null) .setCurrentStateTimestamp(group.currentStateTimestampOrDefault()) .setMembers(Collections.emptyList()), - metadataVersion.groupMetadataValueVersion() + GROUP_METADATA_VALUE_VERSION ) ); } @@ -551,17 +550,15 @@ public static CoordinatorRecord newEmptyGroupMetadataRecord( * @param topic The topic name. 
* @param partitionId The partition id. * @param offsetAndMetadata The offset and metadata. - * @param metadataVersion The metadata version. * @return The record. */ public static CoordinatorRecord newOffsetCommitRecord( String groupId, String topic, int partitionId, - OffsetAndMetadata offsetAndMetadata, - MetadataVersion metadataVersion + OffsetAndMetadata offsetAndMetadata ) { - short version = metadataVersion.offsetCommitValueVersion(offsetAndMetadata.expireTimestampMs.isPresent()); + short version = offsetCommitValueVersion(offsetAndMetadata.expireTimestampMs.isPresent()); return new CoordinatorRecord( new ApiMessageAndVersion( @@ -569,7 +566,7 @@ public static CoordinatorRecord newOffsetCommitRecord( .setGroup(groupId) .setTopic(topic) .setPartition(partitionId), - (short) 1 + CoordinatorRecordType.OFFSET_COMMIT.id() ), new ApiMessageAndVersion( new OffsetCommitValue() @@ -584,6 +581,16 @@ public static CoordinatorRecord newOffsetCommitRecord( ); } + static short offsetCommitValueVersion(boolean expireTimestampMs) { + if (expireTimestampMs) { + return 1; + } else { + // Serialize with the highest supported non-flexible version + // until a tagged field is introduced or the version is bumped. + return 3; + } + } + /** * Creates an OffsetCommit tombstone record. * @@ -603,7 +610,7 @@ public static CoordinatorRecord newOffsetCommitTombstoneRecord( .setGroup(groupId) .setTopic(topic) .setPartition(partitionId), - (short) 1 + CoordinatorRecordType.OFFSET_COMMIT.id() ), null ); @@ -627,7 +634,7 @@ public static CoordinatorRecord newShareGroupMemberSubscriptionRecord( new ShareGroupMemberMetadataKey() .setGroupId(groupId) .setMemberId(member.memberId()), - (short) 10 + CoordinatorRecordType.SHARE_GROUP_MEMBER_METADATA.id() ), new ApiMessageAndVersion( new ShareGroupMemberMetadataValue() @@ -656,7 +663,7 @@ public static CoordinatorRecord newShareGroupMemberSubscriptionTombstoneRecord( new ShareGroupMemberMetadataKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 10 + CoordinatorRecordType.SHARE_GROUP_MEMBER_METADATA.id() ), null // Tombstone. ); @@ -674,19 +681,19 @@ public static CoordinatorRecord newShareGroupSubscriptionMetadataRecord( Map newSubscriptionMetadata ) { ShareGroupPartitionMetadataValue value = new ShareGroupPartitionMetadataValue(); - newSubscriptionMetadata.forEach((topicName, topicMetadata) -> { + newSubscriptionMetadata.forEach((topicName, topicMetadata) -> value.topics().add(new ShareGroupPartitionMetadataValue.TopicMetadata() .setTopicId(topicMetadata.id()) .setTopicName(topicMetadata.name()) .setNumPartitions(topicMetadata.numPartitions()) - ); - }); + ) + ); return new CoordinatorRecord( new ApiMessageAndVersion( new ShareGroupPartitionMetadataKey() .setGroupId(groupId), - (short) 9 + CoordinatorRecordType.SHARE_GROUP_PARTITION_METADATA.id() ), new ApiMessageAndVersion( value, @@ -708,7 +715,7 @@ public static CoordinatorRecord newShareGroupSubscriptionMetadataTombstoneRecord new ApiMessageAndVersion( new ShareGroupPartitionMetadataKey() .setGroupId(groupId), - (short) 9 + CoordinatorRecordType.SHARE_GROUP_PARTITION_METADATA.id() ), null // Tombstone. 
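The offsetCommitValueVersion helper above replaces the MetadataVersion-derived lookup with a fixed rule: version 1 when an explicit expire timestamp must be carried, otherwise the highest supported non-flexible version. Restated standalone with both branches exercised (the constants mirror the hunk; nothing new is introduced):

public class OffsetCommitVersionSketch {

    // Version 1 is the last one that can carry an explicit expire timestamp;
    // otherwise serialize with the highest supported non-flexible version (3).
    static short offsetCommitValueVersion(boolean hasExpireTimestamp) {
        return hasExpireTimestamp ? (short) 1 : (short) 3;
    }

    public static void main(String[] args) {
        System.out.println("with expire timestamp    -> v" + offsetCommitValueVersion(true));
        System.out.println("without expire timestamp -> v" + offsetCommitValueVersion(false));
    }
}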
); @@ -729,7 +736,7 @@ public static CoordinatorRecord newShareGroupEpochRecord( new ApiMessageAndVersion( new ShareGroupMetadataKey() .setGroupId(groupId), - (short) 11 + CoordinatorRecordType.SHARE_GROUP_METADATA.id() ), new ApiMessageAndVersion( new ShareGroupMetadataValue() @@ -752,7 +759,7 @@ public static CoordinatorRecord newShareGroupEpochTombstoneRecord( new ApiMessageAndVersion( new ShareGroupMetadataKey() .setGroupId(groupId), - (short) 11 + CoordinatorRecordType.SHARE_GROUP_METADATA.id() ), null // Tombstone. ); @@ -787,7 +794,7 @@ public static CoordinatorRecord newShareGroupTargetAssignmentRecord( new ShareGroupTargetAssignmentMemberKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 13 + CoordinatorRecordType.SHARE_GROUP_TARGET_ASSIGNMENT_MEMBER.id() ), new ApiMessageAndVersion( new ShareGroupTargetAssignmentMemberValue() @@ -813,7 +820,7 @@ public static CoordinatorRecord newShareGroupTargetAssignmentTombstoneRecord( new ShareGroupTargetAssignmentMemberKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 13 + CoordinatorRecordType.SHARE_GROUP_TARGET_ASSIGNMENT_MEMBER.id() ), null // Tombstone. ); @@ -834,7 +841,7 @@ public static CoordinatorRecord newShareGroupTargetAssignmentEpochRecord( new ApiMessageAndVersion( new ShareGroupTargetAssignmentMetadataKey() .setGroupId(groupId), - (short) 12 + CoordinatorRecordType.SHARE_GROUP_TARGET_ASSIGNMENT_METADATA.id() ), new ApiMessageAndVersion( new ShareGroupTargetAssignmentMetadataValue() @@ -857,7 +864,7 @@ public static CoordinatorRecord newShareGroupTargetAssignmentEpochTombstoneRecor new ApiMessageAndVersion( new ShareGroupTargetAssignmentMetadataKey() .setGroupId(groupId), - (short) 12 + CoordinatorRecordType.SHARE_GROUP_TARGET_ASSIGNMENT_METADATA.id() ), null // Tombstone. 
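The GroupCoordinatorRecordSerde rewrite a little further below applies the same idea as these helpers: let the generated CoordinatorRecordType resolve the id, and translate its UnsupportedVersionException into the loader's UnknownRecordTypeException so callers keep seeing the pre-refactor error type. A dependency-free sketch of that translation pattern follows; both exception classes and the lookup method here are stand-ins, not the real Kafka types.

public class UnknownRecordSketch {

    static class UnsupportedVersionException extends RuntimeException { }

    static class UnknownRecordTypeException extends RuntimeException {
        UnknownRecordTypeException(short id) {
            super("Unknown record id " + id);
        }
    }

    // Let the lookup fail with its own exception, then translate it into the
    // type the loader has always thrown for unknown record ids.
    static String keyFor(short recordType) {
        try {
            return lookup(recordType);
        } catch (UnsupportedVersionException ex) {
            throw new UnknownRecordTypeException(recordType);
        }
    }

    private static String lookup(short recordType) {
        if (recordType == 2) return "GroupMetadataKey";
        throw new UnsupportedVersionException();
    }

    public static void main(String[] args) {
        System.out.println(keyFor((short) 2));
        try {
            keyFor((short) 99);
        } catch (UnknownRecordTypeException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}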
); @@ -879,7 +886,7 @@ public static CoordinatorRecord newShareGroupCurrentAssignmentRecord( new ShareGroupCurrentMemberAssignmentKey() .setGroupId(groupId) .setMemberId(member.memberId()), - (short) 14 + CoordinatorRecordType.SHARE_GROUP_CURRENT_MEMBER_ASSIGNMENT.id() ), new ApiMessageAndVersion( new ShareGroupCurrentMemberAssignmentValue() @@ -908,7 +915,7 @@ public static CoordinatorRecord newShareGroupCurrentAssignmentTombstoneRecord( new ShareGroupCurrentMemberAssignmentKey() .setGroupId(groupId) .setMemberId(memberId), - (short) 14 + CoordinatorRecordType.SHARE_GROUP_CURRENT_MEMBER_ASSIGNMENT.id() ), null // Tombstone ); diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerde.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerde.java index 9affee45ca69a..9143079aa115b 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerde.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerde.java @@ -16,116 +16,31 @@ */ package org.apache.kafka.coordinator.group; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; -import org.apache.kafka.coordinator.group.generated.GroupMetadataKey; -import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; -import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataValue; -import 
org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; +/** + * Please ensure any new record added here stays in sync with DumpLogSegments. + */ public class GroupCoordinatorRecordSerde extends CoordinatorRecordSerde { @Override - protected ApiMessage apiMessageKeyFor(short recordVersion) { - switch (recordVersion) { - case 0: - case 1: - return new OffsetCommitKey(); - case 2: - return new GroupMetadataKey(); - case 3: - return new ConsumerGroupMetadataKey(); - case 4: - return new ConsumerGroupPartitionMetadataKey(); - case 5: - return new ConsumerGroupMemberMetadataKey(); - case 6: - return new ConsumerGroupTargetAssignmentMetadataKey(); - case 7: - return new ConsumerGroupTargetAssignmentMemberKey(); - case 8: - return new ConsumerGroupCurrentMemberAssignmentKey(); - case 9: - return new ShareGroupPartitionMetadataKey(); - case 10: - return new ShareGroupMemberMetadataKey(); - case 11: - return new ShareGroupMetadataKey(); - case 12: - return new ShareGroupTargetAssignmentMetadataKey(); - case 13: - return new ShareGroupTargetAssignmentMemberKey(); - case 14: - return new ShareGroupCurrentMemberAssignmentKey(); - case 15: - return new ShareGroupStatePartitionMetadataKey(); - default: - throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); + protected ApiMessage apiMessageKeyFor(short recordType) { + try { + return CoordinatorRecordType.fromId(recordType).newRecordKey(); + } catch (UnsupportedVersionException ex) { + throw new CoordinatorLoader.UnknownRecordTypeException(recordType); } } @Override protected ApiMessage apiMessageValueFor(short recordVersion) { - switch (recordVersion) { - case 0: - case 1: - return new OffsetCommitValue(); - case 2: - return new GroupMetadataValue(); - case 3: - return new ConsumerGroupMetadataValue(); - case 4: - return new ConsumerGroupPartitionMetadataValue(); - case 5: - return new ConsumerGroupMemberMetadataValue(); - case 6: - return new ConsumerGroupTargetAssignmentMetadataValue(); - case 7: - return new ConsumerGroupTargetAssignmentMemberValue(); - case 8: - return new ConsumerGroupCurrentMemberAssignmentValue(); - case 9: - return new ShareGroupPartitionMetadataValue(); - case 10: - return new ShareGroupMemberMetadataValue(); - case 11: - return new ShareGroupMetadataValue(); - case 12: - return new ShareGroupTargetAssignmentMetadataValue(); - case 13: - return new ShareGroupTargetAssignmentMemberValue(); - case 14: - return new ShareGroupCurrentMemberAssignmentValue(); - case 15: - return new ShareGroupStatePartitionMetadataValue(); - default: - throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); + try { + return CoordinatorRecordType.fromId(recordVersion).newRecordValue(); + } catch (UnsupportedVersionException ex) { + throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); } } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java 
b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java index 8f6a2bf1fed1b..8b8c4bb0f9985 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorService.java @@ -92,6 +92,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.IntSupplier; import java.util.stream.Collectors; @@ -205,6 +206,7 @@ public GroupCoordinatorService build() { .withSerializer(new GroupCoordinatorRecordSerde()) .withCompression(Compression.of(config.offsetTopicCompressionType()).build()) .withAppendLingerMs(config.appendLingerMs()) + .withExecutorService(Executors.newSingleThreadExecutor()) .build(); return new GroupCoordinatorService( @@ -1084,7 +1086,7 @@ public CompletableFuture completeTransaction( * See {@link GroupCoordinator#onTransactionCompleted(long, Iterable, TransactionResult)}. */ @Override - public void onTransactionCompleted( + public CompletableFuture onTransactionCompleted( long producerId, Iterable partitions, TransactionResult transactionResult diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorShard.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorShard.java index 173ea366c7f1d..5f8dd6089bbb3 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorShard.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorShard.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; @@ -48,6 +49,7 @@ import org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.coordinator.common.runtime.CoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.CoordinatorMetrics; import org.apache.kafka.coordinator.common.runtime.CoordinatorMetricsShard; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; @@ -69,8 +71,11 @@ import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.group.generated.GroupMetadataKey; import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; +import org.apache.kafka.coordinator.group.generated.LegacyOffsetCommitKey; +import org.apache.kafka.coordinator.group.generated.LegacyOffsetCommitValue; import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import 
org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentKey; @@ -116,11 +121,12 @@ public class GroupCoordinatorShard implements CoordinatorShard { private final GroupCoordinatorConfig config; + private final GroupConfigManager groupConfigManager; private LogContext logContext; private SnapshotRegistry snapshotRegistry; private Time time; private CoordinatorTimer timer; - private GroupConfigManager groupConfigManager; + private CoordinatorExecutor executor; private CoordinatorMetrics coordinatorMetrics; private TopicPartition topicPartition; @@ -156,6 +162,14 @@ public CoordinatorShardBuilder withTim return this; } + @Override + public CoordinatorShardBuilder withExecutor( + CoordinatorExecutor executor + ) { + this.executor = executor; + return this; + } + @Override public CoordinatorShardBuilder withCoordinatorMetrics( CoordinatorMetrics coordinatorMetrics @@ -178,6 +192,7 @@ public CoordinatorShardBuilder withSna return this; } + @SuppressWarnings("NPathComplexity") @Override public GroupCoordinatorShard build() { if (logContext == null) logContext = new LogContext(); @@ -189,6 +204,8 @@ public GroupCoordinatorShard build() { throw new IllegalArgumentException("Time must be set."); if (timer == null) throw new IllegalArgumentException("Timer must be set."); + if (executor == null) + throw new IllegalArgumentException("Executor must be set."); if (coordinatorMetrics == null || !(coordinatorMetrics instanceof GroupCoordinatorMetrics)) throw new IllegalArgumentException("CoordinatorMetrics must be set and be of type GroupCoordinatorMetrics."); if (topicPartition == null) @@ -204,20 +221,9 @@ public GroupCoordinatorShard build() { .withSnapshotRegistry(snapshotRegistry) .withTime(time) .withTimer(timer) + .withExecutor(executor) + .withConfig(config) .withGroupConfigManager(groupConfigManager) - .withConsumerGroupAssignors(config.consumerGroupAssignors()) - .withConsumerGroupMaxSize(config.consumerGroupMaxSize()) - .withConsumerGroupSessionTimeout(config.consumerGroupSessionTimeoutMs()) - .withConsumerGroupHeartbeatInterval(config.consumerGroupHeartbeatIntervalMs()) - .withClassicGroupMaxSize(config.classicGroupMaxSize()) - .withClassicGroupInitialRebalanceDelayMs(config.classicGroupInitialRebalanceDelayMs()) - .withClassicGroupNewMemberJoinTimeoutMs(config.classicGroupNewMemberJoinTimeoutMs()) - .withClassicGroupMinSessionTimeoutMs(config.classicGroupMinSessionTimeoutMs()) - .withClassicGroupMaxSessionTimeoutMs(config.classicGroupMaxSessionTimeoutMs()) - .withConsumerGroupMigrationPolicy(config.consumerGroupMigrationPolicy()) - .withShareGroupMaxSize(config.shareGroupMaxSize()) - .withShareGroupSessionTimeout(config.shareGroupSessionTimeoutMs()) - .withShareGroupHeartbeatInterval(config.shareGroupHeartbeatIntervalMs()) .withGroupCoordinatorMetricsShard(metricsShard) .build(); @@ -601,7 +607,7 @@ public List describeGroups( List groupIds, long committedOffset ) { - return groupMetadataManager.describeGroups(groupIds, committedOffset); + return groupMetadataManager.describeGroups(context, groupIds, committedOffset); } /** @@ -725,7 +731,6 @@ private void cancelClassicGroupSizeCounter() { public void onLoaded(MetadataImage newImage) { MetadataDelta emptyDelta = new MetadataDelta(newImage); groupMetadataManager.onNewMetadataImage(newImage, emptyDelta); - offsetMetadataManager.onNewMetadataImage(newImage, emptyDelta); coordinatorMetrics.activateMetricsShard(metricsShard); groupMetadataManager.onLoaded(); @@ -750,7 +755,26 @@ public void onUnloaded() { @Override public void 
onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { groupMetadataManager.onNewMetadataImage(newImage, delta); - offsetMetadataManager.onNewMetadataImage(newImage, delta); + } + + private static OffsetCommitKey convertLegacyOffsetCommitKey( + LegacyOffsetCommitKey key + ) { + return new OffsetCommitKey() + .setGroup(key.group()) + .setTopic(key.topic()) + .setPartition(key.partition()); + } + + private static OffsetCommitValue convertLegacyOffsetCommitValue( + LegacyOffsetCommitValue value + ) { + if (value == null) return null; + + return new OffsetCommitValue() + .setOffset(value.offset()) + .setCommitTimestamp(value.commitTimestamp()) + .setMetadata(value.metadata()); } /** @@ -773,9 +797,25 @@ public void replay( ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); - switch (key.version()) { - case 0: - case 1: + CoordinatorRecordType recordType; + try { + recordType = CoordinatorRecordType.fromId(key.version()); + } catch (UnsupportedVersionException ex) { + throw new IllegalStateException("Received an unknown record type " + key.version() + + " in " + record, ex); + } + + switch (recordType) { + case LEGACY_OFFSET_COMMIT: + offsetMetadataManager.replay( + offset, + producerId, + convertLegacyOffsetCommitKey((LegacyOffsetCommitKey) key.message()), + convertLegacyOffsetCommitValue((LegacyOffsetCommitValue) Utils.messageOrNull(value)) + ); + break; + + case OFFSET_COMMIT: offsetMetadataManager.replay( offset, producerId, @@ -784,98 +824,98 @@ public void replay( ); break; - case 2: + case GROUP_METADATA: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; - case 3: + case CONSUMER_GROUP_METADATA: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; - case 4: + case CONSUMER_GROUP_PARTITION_METADATA: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; - case 5: + case CONSUMER_GROUP_MEMBER_METADATA: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; - case 6: + case CONSUMER_GROUP_TARGET_ASSIGNMENT_METADATA: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; - case 7: + case CONSUMER_GROUP_TARGET_ASSIGNMENT_MEMBER: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; - case 8: + case CONSUMER_GROUP_CURRENT_MEMBER_ASSIGNMENT: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; - case 9: + case SHARE_GROUP_PARTITION_METADATA: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; - case 10: + case SHARE_GROUP_MEMBER_METADATA: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; - case 11: + case SHARE_GROUP_METADATA: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; - case 12: + case 
SHARE_GROUP_TARGET_ASSIGNMENT_METADATA: groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; - case 13: + case SHARE_GROUP_TARGET_ASSIGNMENT_MEMBER: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; - case 14: + case SHARE_GROUP_CURRENT_MEMBER_ASSIGNMENT: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; - case 16: + case CONSUMER_GROUP_REGULAR_EXPRESSION: groupMetadataManager.replay( (ConsumerGroupRegularExpressionKey) key.message(), (ConsumerGroupRegularExpressionValue) Utils.messageOrNull(value) diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java index f2169740e3ba1..2d5fd1e3288ca 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java @@ -34,6 +34,7 @@ import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnreleasedInstanceIdException; import org.apache.kafka.common.errors.UnsupportedAssignorException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; @@ -62,6 +63,7 @@ import org.apache.kafka.common.requests.ShareGroupHeartbeatRequest; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.coordinator.common.runtime.CoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.common.runtime.CoordinatorResult; import org.apache.kafka.coordinator.common.runtime.CoordinatorTimer; @@ -106,6 +108,7 @@ import org.apache.kafka.coordinator.group.modern.Assignment; import org.apache.kafka.coordinator.group.modern.MemberState; import org.apache.kafka.coordinator.group.modern.ModernGroup; +import org.apache.kafka.coordinator.group.modern.SubscriptionCount; import org.apache.kafka.coordinator.group.modern.TargetAssignmentBuilder; import org.apache.kafka.coordinator.group.modern.TopicMetadata; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup; @@ -118,6 +121,7 @@ import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.TopicImage; +import org.apache.kafka.image.TopicsDelta; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; import org.apache.kafka.timeline.TimelineHashSet; @@ -134,6 +138,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -160,6 +165,7 @@ import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord; import static 
org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord; +import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newShareGroupCurrentAssignmentRecord; @@ -181,6 +187,7 @@ import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.STABLE; import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME; import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CONSUMER_GROUP_REBALANCES_SENSOR_NAME; +import static org.apache.kafka.coordinator.group.modern.ModernGroupMember.hasAssignedPartitionsChanged; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember.hasAssignedPartitionsChanged; /** @@ -193,30 +200,34 @@ * handling as well as during the initial loading of the records from the partitions. */ public class GroupMetadataManager { + private static final int METADATA_REFRESH_INTERVAL_MS = Integer.MAX_VALUE; + + private static class UpdateSubscriptionMetadataResult { + private final int groupEpoch; + private final Map subscriptionMetadata; + private final SubscriptionType subscriptionType; + + UpdateSubscriptionMetadataResult( + int groupEpoch, + Map subscriptionMetadata, + SubscriptionType subscriptionType + ) { + this.groupEpoch = groupEpoch; + this.subscriptionMetadata = Objects.requireNonNull(subscriptionMetadata); + this.subscriptionType = Objects.requireNonNull(subscriptionType); + } + } public static class Builder { private LogContext logContext = null; private SnapshotRegistry snapshotRegistry = null; private Time time = null; private CoordinatorTimer timer = null; - private List consumerGroupAssignors = null; + private CoordinatorExecutor executor = null; + private GroupCoordinatorConfig config = null; private GroupConfigManager groupConfigManager = null; - private int consumerGroupMaxSize = Integer.MAX_VALUE; - private int consumerGroupHeartbeatIntervalMs = 5000; - private int consumerGroupMetadataRefreshIntervalMs = Integer.MAX_VALUE; private MetadataImage metadataImage = null; - private int consumerGroupSessionTimeoutMs = 45000; - private int classicGroupMaxSize = Integer.MAX_VALUE; - private int classicGroupInitialRebalanceDelayMs = 3000; - private int classicGroupNewMemberJoinTimeoutMs = 5 * 60 * 1000; - private int classicGroupMinSessionTimeoutMs; - private int classicGroupMaxSessionTimeoutMs; - private ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy; private ShareGroupPartitionAssignor shareGroupAssignor = null; - private int shareGroupMaxSize = Integer.MAX_VALUE; - private int shareGroupHeartbeatIntervalMs = 5 * 1000; - private int shareGroupSessionTimeoutMs = 45 * 1000; - private int shareGroupMetadataRefreshIntervalMs = Integer.MAX_VALUE; private GroupCoordinatorMetricsShard metrics; Builder withLogContext(LogContext logContext) { @@ -239,33 +250,18 @@ Builder withTimer(CoordinatorTimer timer) { return this; } - Builder withConsumerGroupAssignors(List consumerGroupAssignors) { - this.consumerGroupAssignors = consumerGroupAssignors; - return this; - } - - Builder 
withGroupConfigManager(GroupConfigManager groupConfigManager) { - this.groupConfigManager = groupConfigManager; - return this; - } - - Builder withConsumerGroupMaxSize(int consumerGroupMaxSize) { - this.consumerGroupMaxSize = consumerGroupMaxSize; - return this; - } - - Builder withConsumerGroupSessionTimeout(int consumerGroupSessionTimeoutMs) { - this.consumerGroupSessionTimeoutMs = consumerGroupSessionTimeoutMs; + Builder withExecutor(CoordinatorExecutor executor) { + this.executor = executor; return this; } - Builder withConsumerGroupHeartbeatInterval(int consumerGroupHeartbeatIntervalMs) { - this.consumerGroupHeartbeatIntervalMs = consumerGroupHeartbeatIntervalMs; + Builder withConfig(GroupCoordinatorConfig config) { + this.config = config; return this; } - Builder withConsumerGroupMetadataRefreshIntervalMs(int consumerGroupMetadataRefreshIntervalMs) { - this.consumerGroupMetadataRefreshIntervalMs = consumerGroupMetadataRefreshIntervalMs; + Builder withGroupConfigManager(GroupConfigManager groupConfigManager) { + this.groupConfigManager = groupConfigManager; return this; } @@ -274,36 +270,6 @@ Builder withMetadataImage(MetadataImage metadataImage) { return this; } - Builder withClassicGroupMaxSize(int classicGroupMaxSize) { - this.classicGroupMaxSize = classicGroupMaxSize; - return this; - } - - Builder withClassicGroupInitialRebalanceDelayMs(int classicGroupInitialRebalanceDelayMs) { - this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs; - return this; - } - - Builder withClassicGroupNewMemberJoinTimeoutMs(int classicGroupNewMemberJoinTimeoutMs) { - this.classicGroupNewMemberJoinTimeoutMs = classicGroupNewMemberJoinTimeoutMs; - return this; - } - - Builder withClassicGroupMinSessionTimeoutMs(int classicGroupMinSessionTimeoutMs) { - this.classicGroupMinSessionTimeoutMs = classicGroupMinSessionTimeoutMs; - return this; - } - - Builder withClassicGroupMaxSessionTimeoutMs(int classicGroupMaxSessionTimeoutMs) { - this.classicGroupMaxSessionTimeoutMs = classicGroupMaxSessionTimeoutMs; - return this; - } - - Builder withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy) { - this.consumerGroupMigrationPolicy = consumerGroupMigrationPolicy; - return this; - } - Builder withGroupCoordinatorMetricsShard(GroupCoordinatorMetricsShard metrics) { this.metrics = metrics; return this; @@ -314,26 +280,6 @@ Builder withShareGroupAssignor(ShareGroupPartitionAssignor shareGroupAssignor) { return this; } - public Builder withShareGroupMaxSize(int shareGroupMaxSize) { - this.shareGroupMaxSize = shareGroupMaxSize; - return this; - } - - Builder withShareGroupSessionTimeout(int shareGroupSessionTimeoutMs) { - this.shareGroupSessionTimeoutMs = shareGroupSessionTimeoutMs; - return this; - } - - Builder withShareGroupHeartbeatInterval(int shareGroupHeartbeatIntervalMs) { - this.shareGroupHeartbeatIntervalMs = shareGroupHeartbeatIntervalMs; - return this; - } - - Builder withShareGroupMetadataRefreshIntervalMs(int shareGroupMetadataRefreshIntervalMs) { - this.shareGroupMetadataRefreshIntervalMs = shareGroupMetadataRefreshIntervalMs; - return this; - } - GroupMetadataManager build() { if (logContext == null) logContext = new LogContext(); if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext); @@ -342,8 +288,10 @@ GroupMetadataManager build() { if (timer == null) throw new IllegalArgumentException("Timer must be set."); - if (consumerGroupAssignors == null || consumerGroupAssignors.isEmpty()) - throw new 
IllegalArgumentException("Assignors must be set before building."); + if (executor == null) + throw new IllegalArgumentException("Executor must be set."); + if (config == null) + throw new IllegalArgumentException("Config must be set."); if (shareGroupAssignor == null) shareGroupAssignor = new SimpleAssignor(); if (metrics == null) @@ -356,29 +304,22 @@ GroupMetadataManager build() { logContext, time, timer, + executor, metrics, - consumerGroupAssignors, metadataImage, + config, groupConfigManager, - consumerGroupMaxSize, - consumerGroupSessionTimeoutMs, - consumerGroupHeartbeatIntervalMs, - consumerGroupMetadataRefreshIntervalMs, - classicGroupMaxSize, - classicGroupInitialRebalanceDelayMs, - classicGroupNewMemberJoinTimeoutMs, - classicGroupMinSessionTimeoutMs, - classicGroupMaxSessionTimeoutMs, - consumerGroupMigrationPolicy, - shareGroupAssignor, - shareGroupMaxSize, - shareGroupSessionTimeoutMs, - shareGroupHeartbeatIntervalMs, - shareGroupMetadataRefreshIntervalMs + shareGroupAssignor ); } } + /** + * The minimum amount of time between two consecutive refreshes of + * the regular expressions within a single group. + */ + private static final long REGEX_BATCH_REFRESH_INTERVAL_MS = 10_000L; + /** * The log context. */ @@ -404,11 +345,21 @@ GroupMetadataManager build() { */ private final CoordinatorTimer timer; + /** + * The executor to executor asynchronous tasks. + */ + private final CoordinatorExecutor executor; + /** * The coordinator metrics. */ private final GroupCoordinatorMetricsShard metrics; + /** + * The group coordinator config. + */ + private final GroupCoordinatorConfig config; + /** * The supported consumer group partition assignors keyed by their name. */ @@ -435,29 +386,15 @@ GroupMetadataManager build() { private final GroupConfigManager groupConfigManager; /** - * The maximum number of members allowed in a single consumer group. - */ - private final int consumerGroupMaxSize; - - /** - * The default heartbeat interval for consumer groups. - */ - private final int consumerGroupHeartbeatIntervalMs; - - /** - * The default session timeout for consumer groups. + * The metadata image. */ - private final int consumerGroupSessionTimeoutMs; + private MetadataImage metadataImage; /** - * The metadata refresh interval. + * This tracks the version (or the offset) of the last metadata image + * with newly created topics. */ - private final int consumerGroupMetadataRefreshIntervalMs; - - /** - * The metadata image. - */ - private MetadataImage metadataImage; + private long lastMetadataImageWithNewTopics = -1L; /** * An empty result returned to the state machine. This means that @@ -468,113 +405,41 @@ GroupMetadataManager build() { static final CoordinatorResult EMPTY_RESULT = new CoordinatorResult<>(Collections.emptyList(), CompletableFuture.completedFuture(null), false); - /** - * The maximum number of members allowed in a single classic group. - */ - private final int classicGroupMaxSize; - - /** - * Initial rebalance delay for members joining a classic group. - */ - private final int classicGroupInitialRebalanceDelayMs; - - /** - * The timeout used to wait for a new member in milliseconds. - */ - private final int classicGroupNewMemberJoinTimeoutMs; - - /** - * The group minimum session timeout. - */ - private final int classicGroupMinSessionTimeoutMs; - - /** - * The group maximum session timeout. - */ - private final int classicGroupMaxSessionTimeoutMs; - /** * The share group partition assignor. 
*/ private final ShareGroupPartitionAssignor shareGroupAssignor; - /** - * The maximum number of members allowed in a single share group. - */ - private final int shareGroupMaxSize; - - /** - * The heartbeat interval for share groups. - */ - private final int shareGroupHeartbeatIntervalMs; - - /** - * The session timeout for share groups. - */ - private final int shareGroupSessionTimeoutMs; - - /** - * The share group metadata refresh interval. - */ - private final int shareGroupMetadataRefreshIntervalMs; - - /** - * The config indicating whether group protocol upgrade/downgrade is allowed. - */ - private final ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy; - private GroupMetadataManager( SnapshotRegistry snapshotRegistry, LogContext logContext, Time time, CoordinatorTimer timer, + CoordinatorExecutor executor, GroupCoordinatorMetricsShard metrics, - List consumerGroupAssignors, MetadataImage metadataImage, + GroupCoordinatorConfig config, GroupConfigManager groupConfigManager, - int consumerGroupMaxSize, - int consumerGroupSessionTimeoutMs, - int consumerGroupHeartbeatIntervalMs, - int consumerGroupMetadataRefreshIntervalMs, - int classicGroupMaxSize, - int classicGroupInitialRebalanceDelayMs, - int classicGroupNewMemberJoinTimeoutMs, - int classicGroupMinSessionTimeoutMs, - int classicGroupMaxSessionTimeoutMs, - ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy, - ShareGroupPartitionAssignor shareGroupAssignor, - int shareGroupMaxSize, - int shareGroupSessionTimeoutMs, - int shareGroupHeartbeatIntervalMs, - int shareGroupMetadataRefreshIntervalMs + ShareGroupPartitionAssignor shareGroupAssignor ) { this.logContext = logContext; this.log = logContext.logger(GroupMetadataManager.class); this.snapshotRegistry = snapshotRegistry; this.time = time; this.timer = timer; + this.executor = executor; this.metrics = metrics; + this.config = config; this.metadataImage = metadataImage; - this.consumerGroupAssignors = consumerGroupAssignors.stream().collect(Collectors.toMap(ConsumerGroupPartitionAssignor::name, Function.identity())); - this.defaultConsumerGroupAssignor = consumerGroupAssignors.get(0); + this.consumerGroupAssignors = config + .consumerGroupAssignors() + .stream() + .collect(Collectors.toMap(ConsumerGroupPartitionAssignor::name, Function.identity())); + this.defaultConsumerGroupAssignor = config.consumerGroupAssignors().get(0); this.groups = new TimelineHashMap<>(snapshotRegistry, 0); this.groupsByTopics = new TimelineHashMap<>(snapshotRegistry, 0); this.groupConfigManager = groupConfigManager; - this.consumerGroupMaxSize = consumerGroupMaxSize; - this.consumerGroupSessionTimeoutMs = consumerGroupSessionTimeoutMs; - this.consumerGroupHeartbeatIntervalMs = consumerGroupHeartbeatIntervalMs; - this.consumerGroupMetadataRefreshIntervalMs = consumerGroupMetadataRefreshIntervalMs; - this.classicGroupMaxSize = classicGroupMaxSize; - this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs; - this.classicGroupNewMemberJoinTimeoutMs = classicGroupNewMemberJoinTimeoutMs; - this.classicGroupMinSessionTimeoutMs = classicGroupMinSessionTimeoutMs; - this.classicGroupMaxSessionTimeoutMs = classicGroupMaxSessionTimeoutMs; - this.consumerGroupMigrationPolicy = consumerGroupMigrationPolicy; this.shareGroupAssignor = shareGroupAssignor; - this.shareGroupMaxSize = shareGroupMaxSize; - this.shareGroupSessionTimeoutMs = shareGroupSessionTimeoutMs; - this.shareGroupHeartbeatIntervalMs = shareGroupHeartbeatIntervalMs; - this.shareGroupMetadataRefreshIntervalMs = 
shareGroupMetadataRefreshIntervalMs; } /** @@ -675,6 +540,7 @@ public List consumerGroupDescr describedGroups.add(new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage(exception.getMessage()) ); } }); @@ -706,6 +572,7 @@ public List shareGroupDescribe( describedGroups.add(new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage(exception.getMessage()) ); } }); @@ -716,12 +583,14 @@ public List shareGroupDescribe( /** * Handles a DescribeGroup request. * + * @param context The request context. * @param groupIds The IDs of the groups to describe. * @param committedOffset A specified committed offset corresponding to this shard. * * @return A list containing the DescribeGroupsResponseData.DescribedGroup. */ public List describeGroups( + RequestContext context, List groupIds, long committedOffset ) { @@ -731,7 +600,7 @@ public List describeGroups( ClassicGroup group = classicGroup(groupId, committedOffset); if (group.isInState(STABLE)) { - if (!group.protocolName().isPresent()) { + if (group.protocolName().isEmpty()) { throw new IllegalStateException("Invalid null group protocol for stable group"); } @@ -751,16 +620,25 @@ public List describeGroups( .setGroupState(group.stateAsString()) .setProtocolType(group.protocolType().orElse("")) .setMembers(group.allMembers().stream() - .map(member -> member.describeNoMetadata()) + .map(ClassicGroupMember::describeNoMetadata) .collect(Collectors.toList()) ) ); } } catch (GroupIdNotFoundException exception) { - describedGroups.add(new DescribeGroupsResponseData.DescribedGroup() - .setGroupId(groupId) - .setGroupState(DEAD.toString()) - ); + if (context.header.apiVersion() >= 6) { + describedGroups.add(new DescribeGroupsResponseData.DescribedGroup() + .setGroupId(groupId) + .setGroupState(DEAD.toString()) + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage(exception.getMessage()) + ); + } else { + describedGroups.add(new DescribeGroupsResponseData.DescribedGroup() + .setGroupId(groupId) + .setGroupState(DEAD.toString()) + ); + } } }); return describedGroups; @@ -793,15 +671,19 @@ ConsumerGroup getOrMaybeCreateConsumerGroup( throw new GroupIdNotFoundException(String.format("Consumer group %s not found.", groupId)); } - if (group == null || (createIfNotExists && maybeDeleteEmptyClassicGroup(group, records))) { + if (group == null) { + return new ConsumerGroup(snapshotRegistry, groupId, metrics); + } else if (createIfNotExists && maybeDeleteEmptyClassicGroup(group, records)) { + log.info("[GroupId {}] Converted the empty classic group to a consumer group.", groupId); return new ConsumerGroup(snapshotRegistry, groupId, metrics); } else { if (group.type() == CONSUMER) { return (ConsumerGroup) group; - } else if (createIfNotExists && group.type() == CLASSIC && validateOnlineUpgrade((ClassicGroup) group)) { + } else if (createIfNotExists && group.type() == CLASSIC) { + validateOnlineUpgrade((ClassicGroup) group); return convertToConsumerGroup((ClassicGroup) group, records); } else { - throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group", groupId)); + throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.", groupId)); } } } @@ -824,7 +706,7 @@ public ConsumerGroup consumerGroup( if (group.type() == CONSUMER) { return (ConsumerGroup) group; } else { - throw new GroupIdNotFoundException(String.format("Group %s is not a 
consumer group", groupId)); + throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.", groupId)); } } @@ -858,7 +740,7 @@ ConsumerGroup getOrMaybeCreatePersistedConsumerGroup( Group group = groups.get(groupId); if (group == null && !createIfNotExists) { - throw new GroupIdNotFoundException(String.format("Consumer group %s not found", groupId)); + throw new GroupIdNotFoundException(String.format("Consumer group %s not found.", groupId)); } if (group == null) { @@ -1049,24 +931,24 @@ ShareGroup shareGroup( } /** - * Validates the online downgrade if a consumer member is fenced from the consumer group. + * Validates the online downgrade if consumer members are fenced from the consumer group. * * @param consumerGroup The ConsumerGroup. - * @param fencedMemberId The fenced member id. + * @param fencedMembers The fenced members. * @return A boolean indicating whether it's valid to online downgrade the consumer group. */ - private boolean validateOnlineDowngradeWithFencedMember(ConsumerGroup consumerGroup, String fencedMemberId) { - if (!consumerGroup.allMembersUseClassicProtocolExcept(fencedMemberId)) { + private boolean validateOnlineDowngradeWithFencedMembers(ConsumerGroup consumerGroup, Set fencedMembers) { + if (!consumerGroup.allMembersUseClassicProtocolExcept(fencedMembers)) { return false; - } else if (consumerGroup.numMembers() <= 1) { + } else if (consumerGroup.numMembers() - fencedMembers.size() <= 0) { log.debug("Skip downgrading the consumer group {} to classic group because it's empty.", consumerGroup.groupId()); return false; - } else if (!consumerGroupMigrationPolicy.isDowngradeEnabled()) { + } else if (!config.consumerGroupMigrationPolicy().isDowngradeEnabled()) { log.info("Cannot downgrade consumer group {} to classic group because the online downgrade is disabled.", consumerGroup.groupId()); return false; - } else if (consumerGroup.numMembers() - 1 > classicGroupMaxSize) { + } else if (consumerGroup.numMembers() - fencedMembers.size() > config.classicGroupMaxSize()) { log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.", consumerGroup.groupId()); return false; @@ -1079,21 +961,21 @@ private boolean validateOnlineDowngradeWithFencedMember(ConsumerGroup consumerGr * static member is replaced by another new one uses the classic protocol. * * @param consumerGroup The group to downgrade. - * @param replacedMemberId The replaced member id. + * @param replacedMember The replaced member. * * @return A boolean indicating whether it's valid to online downgrade the consumer group. 
*/ - private boolean validateOnlineDowngradeWithReplacedMemberId( + private boolean validateOnlineDowngradeWithReplacedMember( ConsumerGroup consumerGroup, - String replacedMemberId + ConsumerGroupMember replacedMember ) { - if (!consumerGroup.allMembersUseClassicProtocolExcept(replacedMemberId)) { + if (!consumerGroup.allMembersUseClassicProtocolExcept(replacedMember)) { return false; - } else if (!consumerGroupMigrationPolicy.isDowngradeEnabled()) { + } else if (!config.consumerGroupMigrationPolicy().isDowngradeEnabled()) { log.info("Cannot downgrade consumer group {} to classic group because the online downgrade is disabled.", consumerGroup.groupId()); return false; - } else if (consumerGroup.numMembers() > classicGroupMaxSize) { + } else if (consumerGroup.numMembers() > config.classicGroupMaxSize()) { log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.", consumerGroup.groupId()); return false; @@ -1105,27 +987,34 @@ private boolean validateOnlineDowngradeWithReplacedMemberId( * Creates a ClassicGroup corresponding to the given ConsumerGroup. * * @param consumerGroup The converted ConsumerGroup. - * @param leavingMemberId The leaving member that triggers the downgrade validation. + * @param leavingMembers The leaving member(s) that triggered the downgrade validation. * @param joiningMember The newly joined member if the downgrade is triggered by static member replacement. + * When not null, must have an instanceId that matches an existing member. * @param records The record list to which the conversion records are added. */ private void convertToClassicGroup( ConsumerGroup consumerGroup, - String leavingMemberId, + Set leavingMembers, ConsumerGroupMember joiningMember, List records ) { if (joiningMember == null) { consumerGroup.createGroupTombstoneRecords(records); } else { - consumerGroup.createGroupTombstoneRecordsWithReplacedMember(records, leavingMemberId, joiningMember.memberId()); + // We've already generated the records to replace replacedMember with joiningMember, + // so we need to tombstone joiningMember instead. + ConsumerGroupMember replacedMember = consumerGroup.staticMember(joiningMember.instanceId()); + if (replacedMember == null) { + throw new IllegalArgumentException("joiningMember must be a static member when not null."); + } + consumerGroup.createGroupTombstoneRecordsWithReplacedMember(records, replacedMember.memberId(), joiningMember.memberId()); } ClassicGroup classicGroup; try { classicGroup = ClassicGroup.fromConsumerGroup( consumerGroup, - leavingMemberId, + leavingMembers, joiningMember, logContext, time, @@ -1138,7 +1027,7 @@ private void convertToClassicGroup( throw new GroupIdNotFoundException(String.format("Cannot downgrade the classic group %s: %s.", consumerGroup.groupId(), e.getMessage())); } - classicGroup.createClassicGroupRecords(metadataImage.features().metadataVersion(), records); + classicGroup.createClassicGroupRecords(records); // Directly update the states instead of replaying the records because // the classicGroup reference is needed for triggering the rebalance. @@ -1151,29 +1040,36 @@ private void convertToClassicGroup( if (joiningMember == null) { prepareRebalance(classicGroup, String.format("Downgrade group %s from consumer to classic.", classicGroup.groupId())); } + + log.info("[GroupId {}] Converted the consumer group to a classic group.", consumerGroup.groupId()); } /** * Validates the online upgrade if the Classic Group receives a ConsumerGroupHeartbeat request. 
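// --------------------------------------------------------------------------------------
// Illustrative sketch: the online-downgrade validation on member fencing above reduces to
// a few checks. The parameters here are plain values rather than the real ConsumerGroup
// and config types, so this is only a simplified model of the logic, not the implementation.
final class DowngradeValidationSketch {

    static boolean canDowngrade(
        int remainingMembers,            // group size after removing the fenced/replaced members
        boolean allRemainingUseClassic,  // every remaining member uses the classic protocol
        boolean downgradeEnabled,        // the migration policy allows online downgrade
        int classicGroupMaxSize          // maximum size allowed for a classic group
    ) {
        if (!allRemainingUseClassic) return false;      // mixed protocols cannot become a classic group
        if (remainingMembers <= 0) return false;        // an empty group is not downgraded
        if (!downgradeEnabled) return false;            // downgrade disabled by configuration
        return remainingMembers <= classicGroupMaxSize; // must still fit the classic size limit
    }

    public static void main(String[] args) {
        System.out.println(canDowngrade(3, true, true, 100));  // true
        System.out.println(canDowngrade(3, false, true, 100)); // false
    }
}
// --------------------------------------------------------------------------------------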
* * @param classicGroup A ClassicGroup. - * @return A boolean indicating whether it's valid to online upgrade the classic group. + * @throws GroupIdNotFoundException if the group cannot be upgraded. */ - private boolean validateOnlineUpgrade(ClassicGroup classicGroup) { - if (!consumerGroupMigrationPolicy.isUpgradeEnabled()) { - log.info("Cannot upgrade classic group {} to consumer group because the online upgrade is disabled.", + private void validateOnlineUpgrade(ClassicGroup classicGroup) { + if (!config.consumerGroupMigrationPolicy().isUpgradeEnabled()) { + log.info("Cannot upgrade classic group {} to consumer group because online upgrade is disabled.", classicGroup.groupId()); - return false; + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because online upgrade is disabled.", classicGroup.groupId()) + ); } else if (!classicGroup.usesConsumerGroupProtocol()) { log.info("Cannot upgrade classic group {} to consumer group because the group does not use the consumer embedded protocol.", classicGroup.groupId()); - return false; - } else if (classicGroup.numMembers() > consumerGroupMaxSize) { + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because the group does not use the consumer embedded protocol.", classicGroup.groupId()) + ); + } else if (classicGroup.numMembers() > config.consumerGroupMaxSize()) { log.info("Cannot upgrade classic group {} to consumer group because the group size exceeds the consumer group maximum size.", classicGroup.groupId()); - return false; + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because the group size exceeds the consumer group maximum size.", classicGroup.groupId()) + ); } - return true; } /** @@ -1202,12 +1098,21 @@ ConsumerGroup convertToConsumerGroup(ClassicGroup classicGroup, List= consumerGroupMaxSize && (memberId.isEmpty() || !group.hasMember(memberId))) { + if (group.numMembers() >= config.consumerGroupMaxSize() && (memberId.isEmpty() || !group.hasMember(memberId))) { throw new GroupMaxSizeReachedException("The consumer group has reached its maximum capacity of " - + consumerGroupMaxSize + " members."); + + config.consumerGroupMaxSize() + " members."); } } @@ -1432,9 +1340,9 @@ private void throwIfShareGroupIsFull( ) throws GroupMaxSizeReachedException { // The member is rejected, if the share group has reached its maximum capacity, or it is not // a member of the share group. - if (group.numMembers() >= shareGroupMaxSize && (memberId.isEmpty() || !group.hasMember(memberId))) { + if (group.numMembers() >= config.shareGroupMaxSize() && (memberId.isEmpty() || !group.hasMember(memberId))) { throw new GroupMaxSizeReachedException("The share group has reached its maximum capacity of " - + shareGroupMaxSize + " members."); + + config.shareGroupMaxSize() + " members."); } } @@ -1812,56 +1720,43 @@ private CoordinatorResult .setClassicMemberMetadata(null) .build(); - boolean bumpGroupEpoch = hasMemberSubscriptionChanged( + // If the group is newly created, we must ensure that it moves away from + // epoch 0 and that it is fully initialized. 
+ boolean bumpGroupEpoch = group.groupEpoch() == 0; + + bumpGroupEpoch |= hasMemberSubscriptionChanged( groupId, member, updatedMember, records ); + bumpGroupEpoch |= maybeUpdateRegularExpressions( + group, + member, + updatedMember, + records + ); + int groupEpoch = group.groupEpoch(); Map subscriptionMetadata = group.subscriptionMetadata(); - Map subscribedTopicNamesMap = group.subscribedTopicNames(); SubscriptionType subscriptionType = group.subscriptionType(); if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) { // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. - subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); - subscriptionMetadata = group.computeSubscriptionMetadata( - subscribedTopicNamesMap, - metadataImage.topics(), - metadataImage.cluster() - ); - - int numMembers = group.numMembers(); - if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { - numMembers++; - } - - subscriptionType = ModernGroup.subscriptionType( - subscribedTopicNamesMap, - numMembers + UpdateSubscriptionMetadataResult result = updateSubscriptionMetadata( + group, + bumpGroupEpoch, + member, + updatedMember, + records ); - if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { - if (log.isDebugEnabled()) { - log.debug("[GroupId {}] Computed new subscription metadata: {}.", - groupId, subscriptionMetadata); - } - bumpGroupEpoch = true; - records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); - } - - if (bumpGroupEpoch) { - groupEpoch += 1; - records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); - log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); - metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); - } - - group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch); + groupEpoch = result.groupEpoch; + subscriptionMetadata = result.subscriptionMetadata; + subscriptionType = result.subscriptionType; } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between @@ -1908,11 +1803,11 @@ private CoordinatorResult // The assignment is only provided in the following cases: // 1. The member sent a full request. It does so when joining or rejoining the group with zero // as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields - // (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request - // as those must be set in a full request. + // (rebalanceTimeoutMs, (subscribedTopicNames or subscribedTopicRegex) and ownedTopicPartitions) + // to detect a full request as those must be set in a full request. // 2. The member's assignment has been updated. 
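// --------------------------------------------------------------------------------------
// Illustrative sketch: the "full request" detection described above, with the heartbeat
// fields passed in as plain values (null meaning the field was omitted). A joining or
// rejoining member (member epoch 0) always receives its assignment, as does a member whose
// assignment has changed.
import java.util.List;

final class FullRequestSketch {

    static boolean shouldReturnAssignment(
        int memberEpoch,
        int rebalanceTimeoutMs,              // -1 when omitted
        List<String> subscribedTopicNames,   // null when omitted
        String subscribedTopicRegex,         // null when omitted
        List<Integer> ownedTopicPartitions,  // null when omitted (simplified element type)
        boolean assignmentChanged
    ) {
        boolean isFullRequest = rebalanceTimeoutMs != -1
            && (subscribedTopicNames != null || subscribedTopicRegex != null)
            && ownedTopicPartitions != null;
        return memberEpoch == 0 || isFullRequest || assignmentChanged;
    }

    public static void main(String[] args) {
        // Rejoin with epoch 0: the assignment is always returned.
        System.out.println(shouldReturnAssignment(0, -1, null, null, null, false)); // true
        // Partial heartbeat with no assignment change: nothing to return.
        System.out.println(shouldReturnAssignment(5, -1, null, null, null, false)); // false
    }
}
// --------------------------------------------------------------------------------------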
- boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null); - if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) { + boolean isFullRequest = rebalanceTimeoutMs != -1 && (subscribedTopicNames != null || subscribedTopicRegex != null) && ownedTopicPartitions != null; + if (memberEpoch == 0 || isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) { response.setAssignment(createConsumerGroupResponseAssignment(updatedMember)); } @@ -1987,7 +1882,6 @@ private CoordinatorResult classicGroupJoinToConsumerGro int groupEpoch = group.groupEpoch(); Map subscriptionMetadata = group.subscriptionMetadata(); - Map subscribedTopicNamesMap = group.subscribedTopicNames(); SubscriptionType subscriptionType = group.subscriptionType(); final ConsumerProtocolSubscription subscription = deserializeSubscription(protocols); @@ -2021,40 +1915,17 @@ private CoordinatorResult classicGroupJoinToConsumerGro // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. - subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); - subscriptionMetadata = group.computeSubscriptionMetadata( - subscribedTopicNamesMap, - metadataImage.topics(), - metadataImage.cluster() - ); - - int numMembers = group.numMembers(); - if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { - numMembers++; - } - - subscriptionType = ConsumerGroup.subscriptionType( - subscribedTopicNamesMap, - numMembers + UpdateSubscriptionMetadataResult result = updateSubscriptionMetadata( + group, + bumpGroupEpoch, + member, + updatedMember, + records ); - if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { - if (log.isDebugEnabled()) { - log.debug("[GroupId {}] Computed new subscription metadata: {}.", - groupId, subscriptionMetadata); - } - bumpGroupEpoch = true; - records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); - } - - if (bumpGroupEpoch) { - groupEpoch += 1; - records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); - log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); - metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); - } - - group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch); + groupEpoch = result.groupEpoch; + subscriptionMetadata = result.subscriptionMetadata; + subscriptionType = result.subscriptionType; } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between @@ -2093,13 +1964,13 @@ private CoordinatorResult classicGroupJoinToConsumerGro // 4. Maybe downgrade the consumer group if the last static member using the // consumer protocol is replaced by the joining static member. 
- String existingStaticMemberIdOrNull = group.staticMemberId(request.groupInstanceId()); - boolean downgrade = existingStaticMemberIdOrNull != null && - validateOnlineDowngradeWithReplacedMemberId(group, existingStaticMemberIdOrNull); + ConsumerGroupMember existingStaticMemberOrNull = group.staticMember(request.groupInstanceId()); + boolean downgrade = existingStaticMemberOrNull != null && + validateOnlineDowngradeWithReplacedMember(group, existingStaticMemberOrNull); if (downgrade) { convertToClassicGroup( group, - existingStaticMemberIdOrNull, + Collections.emptySet(), updatedMember, records ); @@ -2200,7 +2071,7 @@ private CoordinatorResult sh // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. - Map subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); + Map subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); subscriptionMetadata = group.computeSubscriptionMetadata( subscribedTopicNamesMap, metadataImage.topics(), @@ -2230,7 +2101,7 @@ private CoordinatorResult sh log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); } - group.setMetadataRefreshDeadline(currentTimeMs + shareGroupMetadataRefreshIntervalMs, groupEpoch); + group.setMetadataRefreshDeadline(currentTimeMs + METADATA_REFRESH_INTERVAL_MS, groupEpoch); } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between @@ -2361,7 +2232,8 @@ private ConsumerGroupMember getOrMaybeSubscribeStaticConsumerGroupMember( .setPreviousMemberEpoch(0) .build(); - // Generate the records to replace the member. + // Generate the records to replace the member. We don't care about the regular expression + // here because it is taken care of later after the static membership replacement. replaceMember(records, group, existingStaticMemberOrNull, newMember); log.info("[GroupId {}] Static member with instance id {} re-joins the consumer group " + @@ -2407,15 +2279,14 @@ private ShareGroupMember getOrMaybeSubscribeShareGroupMember( /** * Creates the member subscription record if the updatedMember is different from - * the old member. Returns true if the subscribedTopicNames/subscribedTopicRegex - * has changed. + * the old member. Returns true if the subscribedTopicNames has changed. * * @param groupId The group id. * @param member The old member. * @param updatedMember The updated member. * @param records The list to accumulate any new records. * @return A boolean indicating whether the updatedMember has a different - * subscribedTopicNames/subscribedTopicRegex from the old member. + * subscribedTopicNames from the old member. * @throws InvalidRegularExpression if the regular expression is invalid. 
*/ private boolean hasMemberSubscriptionChanged( @@ -2427,25 +2298,281 @@ private boolean hasMemberSubscriptionChanged( String memberId = updatedMember.memberId(); if (!updatedMember.equals(member)) { records.add(newConsumerGroupMemberSubscriptionRecord(groupId, updatedMember)); - if (!updatedMember.subscribedTopicNames().equals(member.subscribedTopicNames())) { log.debug("[GroupId {}] Member {} updated its subscribed topics to: {}.", groupId, memberId, updatedMember.subscribedTopicNames()); return true; } + } + return false; + } + + private static boolean isNotEmpty(String value) { + return value != null && !value.isEmpty(); + } - if (!updatedMember.subscribedTopicRegex().equals(member.subscribedTopicRegex())) { - log.debug("[GroupId {}] Member {} updated its subscribed regex to: {}.", - groupId, memberId, updatedMember.subscribedTopicRegex()); - // If the regular expression has changed, we compile it to ensure that - // its syntax is valid. - if (updatedMember.subscribedTopicRegex() != null) { + /** + * Check whether the member has updated its subscribed topic regular expression and + * may trigger the resolution/the refresh of all the regular expressions in the + * group. We align the refreshment of the regular expression in order to have + * them trigger only one rebalance per update. + * + * @param group The consumer group. + * @param member The old member. + * @param updatedMember The new member. + * @param records The records accumulator. + * @return Whether a rebalance must be triggered. + */ + private boolean maybeUpdateRegularExpressions( + ConsumerGroup group, + ConsumerGroupMember member, + ConsumerGroupMember updatedMember, + List records + ) { + String groupId = group.groupId(); + String memberId = updatedMember.memberId(); + String oldSubscribedTopicRegex = member.subscribedTopicRegex(); + String newSubscribedTopicRegex = updatedMember.subscribedTopicRegex(); + + boolean bumpGroupEpoch = false; + boolean requireRefresh = false; + + // Check whether the member has changed its subscribed regex. + if (!Objects.equals(oldSubscribedTopicRegex, newSubscribedTopicRegex)) { + log.debug("[GroupId {}] Member {} updated its subscribed regex to: {}.", + groupId, memberId, newSubscribedTopicRegex); + + if (isNotEmpty(oldSubscribedTopicRegex) && group.numSubscribedMembers(oldSubscribedTopicRegex) == 1) { + // If the member was the last one subscribed to the regex, we delete the + // resolved regular expression. + records.add(newConsumerGroupRegularExpressionTombstone( + groupId, + oldSubscribedTopicRegex + )); + } + + if (isNotEmpty(newSubscribedTopicRegex)) { + if (group.numSubscribedMembers(newSubscribedTopicRegex) == 0) { + // If the member subscribed to a new regex, we compile it to ensure its validity. + // We also trigger a refresh of the regexes in order to resolve it. throwIfRegularExpressionIsInvalid(updatedMember.subscribedTopicRegex()); + requireRefresh = true; + } else { + // If the new regex is already resolved, we trigger a rebalance + // by bumping the group epoch. + bumpGroupEpoch = group.resolvedRegularExpression(newSubscribedTopicRegex).isPresent(); } - return true; } } - return false; + + // Conditions to trigger a refresh: + // 0. The group is subscribed to regular expressions. + // 1. There is no ongoing refresh for the group. + // 2. The last refresh is older than 10s. + // 3. The group has unresolved regular expressions. + // 4. The metadata image has new topics. + + // 0. The group is subscribed to regular expressions. 
We also take the one + // that the current may have just introduced. + if (!requireRefresh && group.subscribedRegularExpressions().isEmpty()) { + return bumpGroupEpoch; + } + + // 1. There is no ongoing refresh for the group. + String key = group.groupId() + "-regex"; + if (executor.isScheduled(key)) { + return bumpGroupEpoch; + } + + // 2. The last refresh is older than 10s. If the group does not have any regular + // expressions but the current member just brought a new one, we should continue. + long lastRefreshTimeMs = group.lastResolvedRegularExpressionRefreshTimeMs(); + if (time.milliseconds() <= lastRefreshTimeMs + REGEX_BATCH_REFRESH_INTERVAL_MS) { + return bumpGroupEpoch; + } + + // 3. The group has unresolved regular expressions. + Map subscribedRegularExpressions = new HashMap<>(group.subscribedRegularExpressions()); + if (isNotEmpty(oldSubscribedTopicRegex)) { + subscribedRegularExpressions.compute(oldSubscribedTopicRegex, Utils::decValue); + } + if (isNotEmpty(newSubscribedTopicRegex)) { + subscribedRegularExpressions.compute(newSubscribedTopicRegex, Utils::incValue); + } + + requireRefresh |= subscribedRegularExpressions.size() != group.numResolvedRegularExpressions(); + + // 4. The metadata has new topics that we must consider. + requireRefresh |= group.lastResolvedRegularExpressionVersion() < lastMetadataImageWithNewTopics; + + if (requireRefresh && !subscribedRegularExpressions.isEmpty()) { + Set regexes = Collections.unmodifiableSet(subscribedRegularExpressions.keySet()); + executor.schedule( + key, + () -> refreshRegularExpressions(groupId, log, time, metadataImage, regexes), + (result, exception) -> handleRegularExpressionsResult(groupId, result, exception) + ); + } + + return bumpGroupEpoch; + } + + /** + * Resolves the provided regular expressions. Note that this static method is executed + * as an asynchronous task in the executor. Hence, it should not access any state from + * the manager. + * + * @param groupId The group id. + * @param log The log instance. + * @param time The time instance. + * @param image The metadata image to use for listing the topics. + * @param regexes The list of regular expressions that must be resolved. + * @return The list of resolved regular expressions. + * + * public for benchmarks. + */ + public static Map refreshRegularExpressions( + String groupId, + Logger log, + Time time, + MetadataImage image, + Set regexes + ) { + long startTimeMs = time.milliseconds(); + log.debug("[GroupId {}] Refreshing regular expressions: {}", groupId, regexes); + + Map> resolvedRegexes = new HashMap<>(regexes.size()); + List compiledRegexes = new ArrayList<>(regexes.size()); + for (String regex : regexes) { + resolvedRegexes.put(regex, new HashSet<>()); + try { + compiledRegexes.add(Pattern.compile(regex)); + } catch (PatternSyntaxException ex) { + // This should not happen because the regular expressions are validated + // when received from the members. If for some reason, it would + // happen, we log it and ignore it. + log.error("[GroupId {}] Couldn't parse regular expression '{}' due to `{}`. 
Ignoring it.", + groupId, regex, ex.getDescription()); + } + } + + for (String topicName : image.topics().topicsByName().keySet()) { + for (Pattern regex : compiledRegexes) { + if (regex.matcher(topicName).matches()) { + resolvedRegexes.get(regex.pattern()).add(topicName); + } + } + } + + long version = image.provenance().lastContainedOffset(); + Map result = new HashMap<>(resolvedRegexes.size()); + for (Map.Entry> resolvedRegex : resolvedRegexes.entrySet()) { + result.put( + resolvedRegex.getKey(), + new ResolvedRegularExpression(resolvedRegex.getValue(), version, startTimeMs) + ); + } + + log.info("[GroupId {}] Scanned {} topics to refresh regular expressions {} in {}ms.", + groupId, image.topics().topicsByName().size(), resolvedRegexes.keySet(), + time.milliseconds() - startTimeMs); + + return result; + } + + /** + * Handle the result of the asynchronous tasks which resolves the regular expressions. + * + * @param resolvedRegularExpressions The resolved regular expressions. + * @param exception The exception if the resolution failed. + * @return A CoordinatorResult containing the records to mutate the group state. + */ + private CoordinatorResult handleRegularExpressionsResult( + String groupId, + Map resolvedRegularExpressions, + Throwable exception + ) { + if (exception != null) { + log.error("[GroupId {}] Couldn't update regular expression due to: {}", + groupId, exception.getMessage()); + return new CoordinatorResult<>(Collections.emptyList()); + } + + if (log.isDebugEnabled()) { + log.debug("[GroupId {}] Received updated regular expressions: {}.", + groupId, resolvedRegularExpressions); + } + + List records = new ArrayList<>(); + try { + ConsumerGroup group = consumerGroup(groupId); + Map subscribedTopicNames = new HashMap<>(group.subscribedTopicNames()); + + boolean bumpGroupEpoch = false; + for (Map.Entry entry : resolvedRegularExpressions.entrySet()) { + String regex = entry.getKey(); + + // We can skip the regex if the group is no longer + // subscribed to it. + if (group.numSubscribedMembers(regex) == 0) continue; + + ResolvedRegularExpression newResolvedRegularExpression = entry.getValue(); + ResolvedRegularExpression oldResolvedRegularExpression = group + .resolvedRegularExpression(regex) + .orElse(ResolvedRegularExpression.EMPTY); + + if (!oldResolvedRegularExpression.topics.equals(newResolvedRegularExpression.topics)) { + bumpGroupEpoch = true; + + oldResolvedRegularExpression.topics.forEach(topicName -> + subscribedTopicNames.compute(topicName, SubscriptionCount::decRegexCount) + ); + + newResolvedRegularExpression.topics.forEach(topicName -> + subscribedTopicNames.compute(topicName, SubscriptionCount::incRegexCount) + ); + } + + // Add the record to persist the change. + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + regex, + newResolvedRegularExpression + )); + } + + // Compute the subscription metadata. 
+ Map subscriptionMetadata = group.computeSubscriptionMetadata( + subscribedTopicNames, + metadataImage.topics(), + metadataImage.cluster() + ); + + if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { + if (log.isDebugEnabled()) { + log.debug("[GroupId {}] Computed new subscription metadata: {}.", + groupId, subscriptionMetadata); + } + bumpGroupEpoch = true; + records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); + } + + if (bumpGroupEpoch) { + int groupEpoch = group.groupEpoch() + 1; + records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); + log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); + metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); + group.setMetadataRefreshDeadline( + time.milliseconds() + METADATA_REFRESH_INTERVAL_MS, + groupEpoch + ); + } + } catch (GroupIdNotFoundException ex) { + log.debug("[GroupId {}] Received result of regular expression resolution but " + + "it no longer exists.", groupId); + } + + return new CoordinatorResult<>(records); } /** @@ -2580,6 +2707,77 @@ private ShareGroupMember maybeReconcile( return updatedMember; } + /** + * Updates the subscription metadata and bumps the group epoch if needed. + * + * @param group The consumer group. + * @param bumpGroupEpoch Whether the group epoch must be bumped. + * @param member The old member. + * @param updatedMember The new member. + * @param records The record accumulator. + * @return The result of the update. + */ + private UpdateSubscriptionMetadataResult updateSubscriptionMetadata( + ConsumerGroup group, + boolean bumpGroupEpoch, + ConsumerGroupMember member, + ConsumerGroupMember updatedMember, + List records + ) { + final long currentTimeMs = time.milliseconds(); + final String groupId = group.groupId(); + int groupEpoch = group.groupEpoch(); + + Map subscribedRegularExpressions = group.computeSubscribedRegularExpressions( + member, + updatedMember + ); + Map subscribedTopicNamesMap = group.computeSubscribedTopicNames( + member, + updatedMember + ); + Map subscriptionMetadata = group.computeSubscriptionMetadata( + subscribedTopicNamesMap, + metadataImage.topics(), + metadataImage.cluster() + ); + + int numMembers = group.numMembers(); + if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { + numMembers++; + } + + SubscriptionType subscriptionType = ConsumerGroup.subscriptionType( + subscribedRegularExpressions, + subscribedTopicNamesMap, + numMembers + ); + + if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { + if (log.isDebugEnabled()) { + log.debug("[GroupId {}] Computed new subscription metadata: {}.", + groupId, subscriptionMetadata); + } + bumpGroupEpoch = true; + records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); + } + + if (bumpGroupEpoch) { + groupEpoch += 1; + records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); + log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); + metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); + } + + group.setMetadataRefreshDeadline(currentTimeMs + METADATA_REFRESH_INTERVAL_MS, groupEpoch); + + return new UpdateSubscriptionMetadataResult( + groupEpoch, + subscriptionMetadata, + subscriptionType + ); + } + /** * Updates the target assignment according to the updated member and subscription metadata. 
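// --------------------------------------------------------------------------------------
// Illustrative sketch: the updateSubscriptionMetadata helper above bumps the group epoch at
// most once per update, either because a bump was already requested or because the newly
// computed subscription metadata differs from the stored one, and then re-arms the metadata
// refresh deadline. GroupState here is a simplified stand-in, not the real ConsumerGroup.
import java.util.Map;

final class EpochBumpSketch {

    static final class GroupState {
        int groupEpoch;
        Map<String, Integer> subscriptionMetadata; // topic name -> partition count, simplified
        long metadataRefreshDeadlineMs;
    }

    static int maybeBumpEpoch(
        GroupState group,
        boolean bumpRequested,
        Map<String, Integer> newSubscriptionMetadata,
        long nowMs,
        long refreshIntervalMs
    ) {
        if (!newSubscriptionMetadata.equals(group.subscriptionMetadata)) {
            // In the patch this is where the subscription metadata record is written.
            group.subscriptionMetadata = newSubscriptionMetadata;
            bumpRequested = true;
        }
        if (bumpRequested) {
            // In the patch this is where the epoch record is written and the rebalance
            // sensor is recorded.
            group.groupEpoch += 1;
        }
        group.metadataRefreshDeadlineMs = nowMs + refreshIntervalMs;
        return group.groupEpoch;
    }
}
// --------------------------------------------------------------------------------------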
* @@ -2606,8 +2804,8 @@ private Assignment updateTargetAssignment( updatedMember ).orElse(defaultConsumerGroupAssignor.name()); try { - TargetAssignmentBuilder assignmentResultBuilder = - new TargetAssignmentBuilder(group.groupId(), groupEpoch, consumerGroupAssignors.get(preferredServerAssignor)) + TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder assignmentResultBuilder = + new TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder(group.groupId(), groupEpoch, consumerGroupAssignors.get(preferredServerAssignor)) .withMembers(group.members()) .withStaticMembers(group.staticMembers()) .withSubscriptionMetadata(subscriptionMetadata) @@ -2615,6 +2813,7 @@ private Assignment updateTargetAssignment( .withTargetAssignment(group.targetAssignment()) .withInvertedTargetAssignment(group.invertedTargetAssignment()) .withTopicsImage(metadataImage.topics()) + .withResolvedRegularExpressions(group.resolvedRegularExpressions()) .addOrUpdateMember(updatedMember.memberId(), updatedMember); // If the instance id was associated to a different member, it means that the @@ -2673,16 +2872,14 @@ private Assignment updateTargetAssignment( List records ) { try { - TargetAssignmentBuilder assignmentResultBuilder = - new TargetAssignmentBuilder(group.groupId(), groupEpoch, shareGroupAssignor) + TargetAssignmentBuilder.ShareTargetAssignmentBuilder assignmentResultBuilder = + new TargetAssignmentBuilder.ShareTargetAssignmentBuilder(group.groupId(), groupEpoch, shareGroupAssignor) .withMembers(group.members()) .withSubscriptionMetadata(subscriptionMetadata) .withSubscriptionType(subscriptionType) .withTargetAssignment(group.targetAssignment()) .withInvertedTargetAssignment(group.invertedTargetAssignment()) .withTopicsImage(metadataImage.topics()) - .withTargetAssignmentRecordBuilder(GroupCoordinatorRecordHelpers::newShareGroupTargetAssignmentRecord) - .withTargetAssignmentEpochRecordBuilder(GroupCoordinatorRecordHelpers::newShareGroupTargetAssignmentEpochRecord) .addOrUpdateMember(updatedMember.memberId(), updatedMember); long startTimeMs = time.milliseconds(); @@ -2818,16 +3015,47 @@ private CoordinatorResult consumerGroupFenceMember( ConsumerGroupMember member, T response ) { + return consumerGroupFenceMembers(group, Set.of(member), response); + } + + /** + * Fences members from a consumer group and maybe downgrade the consumer group to a classic group. + * + * @param group The group. + * @param members The members. + * @param response The response of the CoordinatorResult. + * + * @return The CoordinatorResult to be applied. + */ + private CoordinatorResult consumerGroupFenceMembers( + ConsumerGroup group, + Set members, + T response + ) { + if (members.isEmpty()) { + // No members to fence. Don't bump the group epoch. + return new CoordinatorResult<>(Collections.emptyList(), response); + } + List records = new ArrayList<>(); - if (validateOnlineDowngradeWithFencedMember(group, member.memberId())) { - convertToClassicGroup(group, member.memberId(), null, records); + if (validateOnlineDowngradeWithFencedMembers(group, members)) { + convertToClassicGroup(group, members, null, records); return new CoordinatorResult<>(records, response, null, false); } else { - removeMember(records, group.groupId(), member.memberId()); + for (ConsumerGroupMember member : members) { + removeMember(records, group.groupId(), member.memberId()); + } - // We update the subscription metadata without the leaving member. + // Check whether resolved regular expressions could be deleted. 
+ Set deletedRegexes = maybeDeleteResolvedRegularExpressions( + records, + group, + members + ); + + // We update the subscription metadata without the leaving members. Map subscriptionMetadata = group.computeSubscriptionMetadata( - group.computeSubscribedTopicNames(member, null), + group.computeSubscribedTopicNamesWithoutDeletedMembers(members, deletedRegexes), metadataImage.topics(), metadataImage.cluster() ); @@ -2843,8 +3071,11 @@ private CoordinatorResult consumerGroupFenceMember( // We bump the group epoch. int groupEpoch = group.groupEpoch() + 1; records.add(newConsumerGroupEpochRecord(group.groupId(), groupEpoch)); + log.info("[GroupId {}] Bumped group epoch to {}.", group.groupId(), groupEpoch); - cancelTimers(group.groupId(), member.memberId()); + for (ConsumerGroupMember member : members) { + cancelTimers(group.groupId(), member.memberId()); + } return new CoordinatorResult<>(records, response); } @@ -2926,6 +3157,38 @@ private void replaceMember( )); } + /** + * Maybe delete the resolved regular expressions associated with the provided members + * if they were the last ones subscribed to them. + * + * @param records The record accumulator. + * @param group The group. + * @param members The member removed from the group. + * @return The set of deleted regular expressions. + */ + private Set maybeDeleteResolvedRegularExpressions( + List records, + ConsumerGroup group, + Set members + ) { + Map counts = new HashMap<>(); + members.forEach(member -> { + if (isNotEmpty(member.subscribedTopicRegex())) { + counts.compute(member.subscribedTopicRegex(), Utils::incValue); + } + }); + + Set deletedRegexes = new HashSet<>(); + counts.forEach((regex, count) -> { + if (group.numSubscribedMembers(regex) == count) { + records.add(newConsumerGroupRegularExpressionTombstone(group.groupId(), regex)); + deletedRegexes.add(regex); + } + }); + + return deletedRegexes; + } + /** * Write tombstones for the member. The order matters here. * @@ -3738,9 +4001,9 @@ public void replay( if (value != null) { Map subscriptionMetadata = new HashMap<>(); - value.topics().forEach(topicMetadata -> { - subscriptionMetadata.put(topicMetadata.topicName(), TopicMetadata.fromRecord(topicMetadata)); - }); + value.topics().forEach(topicMetadata -> + subscriptionMetadata.put(topicMetadata.topicName(), TopicMetadata.fromRecord(topicMetadata)) + ); group.setSubscriptionMetadata(subscriptionMetadata); } else { group.setSubscriptionMetadata(Collections.emptyMap()); @@ -3836,24 +4099,39 @@ public void replay( public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { metadataImage = newImage; + // Initialize the last offset if it was not yet. + if (lastMetadataImageWithNewTopics == -1L) { + lastMetadataImageWithNewTopics = metadataImage.provenance().lastContainedOffset(); + } + + TopicsDelta topicsDelta = delta.topicsDelta(); + if (topicsDelta == null) return; + + // Updated the last offset of the image with newly created topics. This is used to + // trigger a refresh of all the regular expressions when topics are created. Note + // that we don't trigger a refresh when topics are deleted. Those are removed from + // the subscription metadata (and the assignment) via the above mechanism. The + // resolved regular expressions are cleaned up on the next refresh. + if (!topicsDelta.createdTopicIds().isEmpty()) { + lastMetadataImageWithNewTopics = metadataImage.provenance().lastContainedOffset(); + } + // Notify all the groups subscribed to the created, updated or // deleted topics. 
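// --------------------------------------------------------------------------------------
// Illustrative sketch: when members are fenced, the patch deletes a resolved regular
// expression only if every member still subscribed to that regex is among the removed
// members. Here the group is reduced to a map of regex -> current subscriber count.
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class RegexCleanupSketch {

    static Set<String> regexesToDelete(
        Map<String, Integer> subscriberCountByRegex,  // current counts in the group
        List<String> removedMemberRegexes             // regex of each removed member, null/empty if none
    ) {
        Map<String, Integer> removedCounts = new HashMap<>();
        for (String regex : removedMemberRegexes) {
            if (regex != null && !regex.isEmpty()) {
                removedCounts.merge(regex, 1, Integer::sum);
            }
        }
        Set<String> deleted = new HashSet<>();
        removedCounts.forEach((regex, count) -> {
            if (count.equals(subscriberCountByRegex.get(regex))) {
                deleted.add(regex); // the patch also writes a tombstone record at this point
            }
        });
        return deleted;
    }

    public static void main(String[] args) {
        System.out.println(regexesToDelete(Map.of("foo.*", 1, "bar.*", 2), List.of("foo.*", "bar.*")));
        // prints [foo.*]: bar.* still has another subscriber, so its resolution is kept
    }
}
// --------------------------------------------------------------------------------------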
- Optional.ofNullable(delta.topicsDelta()).ifPresent(topicsDelta -> { - Set allGroupIds = new HashSet<>(); - topicsDelta.changedTopics().forEach((topicId, topicDelta) -> { - String topicName = topicDelta.name(); - allGroupIds.addAll(groupsSubscribedToTopic(topicName)); - }); - topicsDelta.deletedTopicIds().forEach(topicId -> { - TopicImage topicImage = delta.image().topics().getTopic(topicId); - allGroupIds.addAll(groupsSubscribedToTopic(topicImage.name())); - }); - allGroupIds.forEach(groupId -> { - Group group = groups.get(groupId); - if (group != null && (group.type() == CONSUMER || group.type() == SHARE)) { - ((ModernGroup) group).requestMetadataRefresh(); - } - }); + Set allGroupIds = new HashSet<>(); + topicsDelta.changedTopics().forEach((topicId, topicDelta) -> { + String topicName = topicDelta.name(); + allGroupIds.addAll(groupsSubscribedToTopic(topicName)); + }); + topicsDelta.deletedTopicIds().forEach(topicId -> { + TopicImage topicImage = delta.image().topics().getTopic(topicId); + allGroupIds.addAll(groupsSubscribedToTopic(topicImage.name())); + }); + allGroupIds.forEach(groupId -> { + Group group = groups.get(groupId); + if (group != null && (group.type() == CONSUMER || group.type() == SHARE)) { + ((ModernGroup) group).requestMetadataRefresh(); + } }); } @@ -3903,10 +4181,10 @@ public void onLoaded() { rescheduleClassicGroupMemberHeartbeat(classicGroup, member); }); - if (classicGroup.numMembers() > classicGroupMaxSize) { + if (classicGroup.numMembers() > config.classicGroupMaxSize()) { // In case the max size config has changed. prepareRebalance(classicGroup, "Freshly-loaded group " + groupId + - " (size " + classicGroup.numMembers() + ") is over capacity " + classicGroupMaxSize + + " (size " + classicGroup.numMembers() + ") is over capacity " + config.classicGroupMaxSize() + ". Rebalancing in order to give a chance for consumers to commit offsets"); } @@ -3948,19 +4226,19 @@ public void onUnloaded() { case DEAD: break; case PREPARING_REBALANCE: - classicGroup.allMembers().forEach(member -> { + classicGroup.allMembers().forEach(member -> classicGroup.completeJoinFuture(member, new JoinGroupResponseData() .setMemberId(member.memberId()) - .setErrorCode(NOT_COORDINATOR.code())); - }); + .setErrorCode(NOT_COORDINATOR.code())) + ); break; case COMPLETING_REBALANCE: case STABLE: - classicGroup.allMembers().forEach(member -> { + classicGroup.allMembers().forEach(member -> classicGroup.completeSyncFuture(member, new SyncGroupResponseData() - .setErrorCode(NOT_COORDINATOR.code())); - }); + .setErrorCode(NOT_COORDINATOR.code())) + ); } break; case SHARE: @@ -4108,7 +4386,9 @@ CoordinatorResult classicGroupJoinToClassicGroup( // Group is created if it does not exist and the member id is UNKNOWN. 
if member // is specified but group does not exist, request is rejected with GROUP_ID_NOT_FOUND ClassicGroup group; - maybeDeleteEmptyConsumerGroup(groupId, records); + if (maybeDeleteEmptyConsumerGroup(groupId, records)) { + log.info("[GroupId {}] Converted the empty consumer group to a classic group.", groupId); + } boolean isNewGroup = !groups.containsKey(groupId); try { group = getOrMaybeCreateClassicGroup(groupId, isUnknownMember); @@ -4158,7 +4438,7 @@ CoordinatorResult classicGroupJoinToClassicGroup( }); records.add( - GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group, metadataImage.features().metadataVersion()) + GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group) ); return new CoordinatorResult<>(records, appendFuture, false); @@ -4588,7 +4868,7 @@ private CoordinatorResult completeClassicGroupJoin( }); List records = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord( - group, Collections.emptyMap(), metadataImage.features().metadataVersion())); + group, Collections.emptyMap())); return new CoordinatorResult<>(records, appendFuture, false); @@ -4818,7 +5098,7 @@ private CoordinatorResult addMemberThenRebalanceOrCompl // timeout during a long rebalance), they may simply retry which will lead to a lot of defunct // members in the rebalance. To prevent this going on indefinitely, we time out JoinGroup requests // for new members. If the new member is still there, we expect it to retry. - rescheduleClassicGroupMemberHeartbeat(group, member, classicGroupNewMemberJoinTimeoutMs); + rescheduleClassicGroupMemberHeartbeat(group, member, config.classicGroupNewMemberJoinTimeoutMs()); return maybePrepareRebalanceOrCompleteJoin(group, "Adding new member " + memberId + " with group instance id " + request.groupInstanceId() + "; client reason: " + JoinGroupRequest.joinReason(request)); @@ -4869,8 +5149,8 @@ CoordinatorResult prepareRebalance( boolean isInitialRebalance = group.isInState(EMPTY); if (isInitialRebalance) { // The group is new. Provide more time for the members to join. - int delayMs = classicGroupInitialRebalanceDelayMs; - int remainingMs = Math.max(group.rebalanceTimeoutMs() - classicGroupInitialRebalanceDelayMs, 0); + int delayMs = config.classicGroupInitialRebalanceDelayMs(); + int remainingMs = Math.max(group.rebalanceTimeoutMs() - config.classicGroupInitialRebalanceDelayMs(), 0); timer.schedule( classicGroupJoinKey(group.groupId()), @@ -4939,7 +5219,7 @@ private CoordinatorResult tryCompleteInitialRebalanceEl if (group.newMemberAdded() && remainingMs != 0) { // A new member was added. Extend the delay. group.setNewMemberAdded(false); - int newDelayMs = Math.min(classicGroupInitialRebalanceDelayMs, remainingMs); + int newDelayMs = Math.min(config.classicGroupInitialRebalanceDelayMs(), remainingMs); int newRemainingMs = Math.max(remainingMs - delayMs, 0); timer.schedule( @@ -5141,13 +5421,13 @@ private boolean acceptJoiningMember(ClassicGroup group, String memberId) { // 2) using the number of awaiting members allows to kick out the last rejoining // members of the group. return (group.hasMember(memberId) && group.member(memberId).isAwaitingJoin()) || - group.numAwaitingJoinResponse() < classicGroupMaxSize; + group.numAwaitingJoinResponse() < config.classicGroupMaxSize(); case COMPLETING_REBALANCE: case STABLE: // An existing member is accepted. New members are accepted up to the max group size. // Note that the group size is used here. 
When the group transitions to CompletingRebalance, // members who haven't rejoined are removed. - return group.hasMember(memberId) || group.numMembers() < classicGroupMaxSize; + return group.hasMember(memberId) || group.numMembers() < config.classicGroupMaxSize(); default: throw new IllegalStateException("Unknown group state: " + group.stateAsString()); } @@ -5250,7 +5530,7 @@ private CoordinatorResult updateStaticMemberThenRebalan }); List records = Collections.singletonList( - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment, metadataImage.features().metadataVersion()) + GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment) ); return new CoordinatorResult<>(records, appendFuture, false); @@ -5397,7 +5677,7 @@ private CoordinatorResult classicGroupSyncToClassicGrou }); List records = Collections.singletonList( - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignment, metadataImage.features().metadataVersion()) + GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignment) ); return new CoordinatorResult<>(records, appendFuture, false); } @@ -5767,9 +6047,9 @@ public CoordinatorResult classicGroup } if (group.type() == CLASSIC) { - return classicGroupLeaveToClassicGroup((ClassicGroup) group, context, request); + return classicGroupLeaveToClassicGroup((ClassicGroup) group, request); } else if (group.type() == CONSUMER) { - return classicGroupLeaveToConsumerGroup((ConsumerGroup) group, context, request); + return classicGroupLeaveToConsumerGroup((ConsumerGroup) group, request); } else { throw new UnknownMemberIdException(String.format("Group %s not found.", request.groupId())); } @@ -5779,95 +6059,68 @@ public CoordinatorResult classicGroup * Handle a classic LeaveGroupRequest to a ConsumerGroup. * * @param group The ConsumerGroup. - * @param context The request context. * @param request The actual LeaveGroup request. * * @return The LeaveGroup response and the records to append. */ private CoordinatorResult classicGroupLeaveToConsumerGroup( ConsumerGroup group, - RequestContext context, LeaveGroupRequestData request ) throws UnknownMemberIdException { String groupId = group.groupId(); List memberResponses = new ArrayList<>(); Set validLeaveGroupMembers = new HashSet<>(); - List records = new ArrayList<>(); for (MemberIdentity memberIdentity : request.members()) { - String memberId = memberIdentity.memberId(); - String instanceId = memberIdentity.groupInstanceId(); String reason = memberIdentity.reason() != null ? memberIdentity.reason() : "not provided"; - ConsumerGroupMember member; try { - if (instanceId == null) { - member = group.getOrMaybeCreateMember(memberId, false); - throwIfMemberDoesNotUseClassicProtocol(member); + ConsumerGroupMember member; + + if (memberIdentity.groupInstanceId() == null) { + member = group.getOrMaybeCreateMember(memberIdentity.memberId(), false); - log.info("[Group {}] Dynamic member {} has left group " + + log.info("[GroupId {}] Dynamic member {} has left group " + "through explicit `LeaveGroup` request; client reason: {}", - groupId, memberId, reason); + groupId, memberIdentity.memberId(), reason); } else { - member = group.staticMember(instanceId); - throwIfStaticMemberIsUnknown(member, instanceId); + member = group.staticMember(memberIdentity.groupInstanceId()); + throwIfStaticMemberIsUnknown(member, memberIdentity.groupInstanceId()); // The LeaveGroup API allows administrative removal of members by GroupInstanceId // in which case we expect the MemberId to be undefined. 
- if (!UNKNOWN_MEMBER_ID.equals(memberId)) { - throwIfInstanceIdIsFenced(member, groupId, memberId, instanceId); + if (!UNKNOWN_MEMBER_ID.equals(memberIdentity.memberId())) { + throwIfInstanceIdIsFenced(member, groupId, memberIdentity.memberId(), memberIdentity.groupInstanceId()); } - throwIfMemberDoesNotUseClassicProtocol(member); - memberId = member.memberId(); - log.info("[Group {}] Static member {} with instance id {} has left group " + + log.info("[GroupId {}] Static member {} with instance id {} has left group " + "through explicit `LeaveGroup` request; client reason: {}", - groupId, memberId, instanceId, reason); + groupId, memberIdentity.memberId(), memberIdentity.groupInstanceId(), reason); } - removeMember(records, groupId, memberId); - cancelTimers(groupId, memberId); memberResponses.add( new MemberResponse() - .setMemberId(memberId) - .setGroupInstanceId(instanceId) + .setMemberId(memberIdentity.memberId()) + .setGroupInstanceId(memberIdentity.groupInstanceId()) ); + validLeaveGroupMembers.add(member); } catch (KafkaException e) { memberResponses.add( new MemberResponse() - .setMemberId(memberId) - .setGroupInstanceId(instanceId) + .setMemberId(memberIdentity.memberId()) + .setGroupInstanceId(memberIdentity.groupInstanceId()) .setErrorCode(Errors.forException(e).code()) ); } } - if (!records.isEmpty()) { - // Maybe update the subscription metadata. - Map subscriptionMetadata = group.computeSubscriptionMetadata( - group.computeSubscribedTopicNames(validLeaveGroupMembers), - metadataImage.topics(), - metadataImage.cluster() - ); - - if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { - log.info("[GroupId {}] Computed new subscription metadata: {}.", - group.groupId(), subscriptionMetadata); - records.add(newConsumerGroupSubscriptionMetadataRecord(group.groupId(), subscriptionMetadata)); - } - - // Bump the group epoch. - records.add(newConsumerGroupEpochRecord(groupId, group.groupEpoch() + 1)); - } - - return new CoordinatorResult<>(records, new LeaveGroupResponseData().setMembers(memberResponses)); + return consumerGroupFenceMembers(group, validLeaveGroupMembers, new LeaveGroupResponseData().setMembers(memberResponses)); } /** * Handle a classic LeaveGroupRequest to a ClassicGroup. * * @param group The ClassicGroup. - * @param context The request context. * @param request The actual LeaveGroup request. * * @return The LeaveGroup response and the GroupMetadata record to append if the group @@ -5875,7 +6128,6 @@ private CoordinatorResult classicGrou */ private CoordinatorResult classicGroupLeaveToClassicGroup( ClassicGroup group, - RequestContext context, LeaveGroupRequestData request ) throws UnknownMemberIdException { if (group.isInState(DEAD)) { @@ -6081,13 +6333,13 @@ private static boolean isEmptyConsumerGroup(Group group) { * @param group The group to be deleted. * @param records The list of records to delete the group. * - * @return true if the group is empty + * @return true if the group is an empty classic group. */ private boolean maybeDeleteEmptyClassicGroup(Group group, List records) { if (isEmptyClassicGroup(group)) { // Delete the classic group by adding tombstones. // There's no need to remove the group as the replay of tombstones removes it. 
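// --------------------------------------------------------------------------------------
// Illustrative sketch: the LeaveGroup handling above now collects the members that can
// legally leave, answers every requested member individually, and then removes the valid
// ones in a single fencing step so the group epoch is bumped only once. The types below are
// simplified stand-ins for the real request and group classes.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;

final class LeaveGroupSketch {

    record MemberResponse(String memberId, boolean error) {}

    static List<MemberResponse> handleLeave(
        List<String> requestedMemberIds,
        Predicate<String> isKnownMember,  // stands in for the group lookup and validation
        Set<String> membersToFence        // filled with the members to remove atomically
    ) {
        List<MemberResponse> responses = new ArrayList<>();
        for (String memberId : requestedMemberIds) {
            if (isKnownMember.test(memberId)) {
                membersToFence.add(memberId); // fenced together after the loop, one epoch bump
                responses.add(new MemberResponse(memberId, false));
            } else {
                responses.add(new MemberResponse(memberId, true));
            }
        }
        return responses;
    }

    public static void main(String[] args) {
        Set<String> toFence = new HashSet<>();
        System.out.println(handleLeave(List.of("a", "b"), "a"::equals, toFence)); // b gets an error
        System.out.println(toFence); // [a]
    }
}
// --------------------------------------------------------------------------------------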
- if (group != null) createGroupTombstoneRecords(group, records); + createGroupTombstoneRecords(group, records); return true; } return false; @@ -6098,15 +6350,19 @@ private boolean maybeDeleteEmptyClassicGroup(Group group, List records) { + private boolean maybeDeleteEmptyConsumerGroup(String groupId, List records) { Group group = groups.get(groupId, Long.MAX_VALUE); if (isEmptyConsumerGroup(group)) { // Add tombstones for the previous consumer group. The tombstones won't actually be // replayed because its coordinator result has a non-null appendFuture. createGroupTombstoneRecords(group, records); removeGroup(groupId); + return true; } + return false; } /** @@ -6140,7 +6396,7 @@ public Set groupIds() { private int consumerGroupSessionTimeoutMs(String groupId) { Optional groupConfig = groupConfigManager.groupConfig(groupId); return groupConfig.map(GroupConfig::consumerSessionTimeoutMs) - .orElse(consumerGroupSessionTimeoutMs); + .orElse(config.consumerGroupSessionTimeoutMs()); } /** @@ -6149,7 +6405,7 @@ private int consumerGroupSessionTimeoutMs(String groupId) { private int consumerGroupHeartbeatIntervalMs(String groupId) { Optional groupConfig = groupConfigManager.groupConfig(groupId); return groupConfig.map(GroupConfig::consumerHeartbeatIntervalMs) - .orElse(consumerGroupHeartbeatIntervalMs); + .orElse(config.consumerGroupHeartbeatIntervalMs()); } /** @@ -6158,7 +6414,7 @@ private int consumerGroupHeartbeatIntervalMs(String groupId) { private int shareGroupSessionTimeoutMs(String groupId) { Optional groupConfig = groupConfigManager.groupConfig(groupId); return groupConfig.map(GroupConfig::shareSessionTimeoutMs) - .orElse(shareGroupSessionTimeoutMs); + .orElse(config.shareGroupSessionTimeoutMs()); } /** @@ -6167,7 +6423,7 @@ private int shareGroupSessionTimeoutMs(String groupId) { private int shareGroupHeartbeatIntervalMs(String groupId) { Optional groupConfig = groupConfigManager.groupConfig(groupId); return groupConfig.map(GroupConfig::shareHeartbeatIntervalMs) - .orElse(shareGroupHeartbeatIntervalMs); + .orElse(config.shareGroupHeartbeatIntervalMs()); } /** diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetAndMetadata.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetAndMetadata.java index 38d241e86a2aa..a5635f58ac565 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetAndMetadata.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetAndMetadata.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.TxnOffsetCommitRequestData; -import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import java.util.Objects; @@ -166,8 +165,7 @@ public static OffsetAndMetadata fromRequest( ofSentinel(partition.committedLeaderEpoch()), partition.committedMetadata() == null ? OffsetAndMetadata.NO_METADATA : partition.committedMetadata(), - partition.commitTimestamp() == OffsetCommitRequest.DEFAULT_TIMESTAMP ? 
- currentTimeMs : partition.commitTimestamp(), + currentTimeMs, expireTimestampMs ); } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetMetadataManager.java index 032957062c789..5e0cf7589e14c 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetMetadataManager.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetMetadataManager.java @@ -47,7 +47,6 @@ import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard; -import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; @@ -168,11 +167,6 @@ public OffsetMetadataManager build() { */ private final Time time; - /** - * The metadata image. - */ - private MetadataImage metadataImage; - /** * The group metadata manager. */ @@ -284,7 +278,6 @@ private OffsetAndMetadata remove( this.snapshotRegistry = snapshotRegistry; this.log = logContext.logger(OffsetMetadataManager.class); this.time = time; - this.metadataImage = metadataImage; this.groupMetadataManager = groupMetadataManager; this.config = config; this.metrics = metrics; @@ -498,8 +491,7 @@ public CoordinatorResult commitOffs request.groupId(), topic.name(), partition.partitionIndex(), - offsetAndMetadata, - metadataImage.features().metadataVersion() + offsetAndMetadata )); } }); @@ -558,8 +550,7 @@ public CoordinatorResult commitT request.groupId(), topic.name(), partition.partitionIndex(), - offsetAndMetadata, - metadataImage.features().metadataVersion() + offsetAndMetadata )); } }); @@ -872,7 +863,7 @@ public boolean cleanupExpiredOffsets(String groupId, List rec long currentTimestampMs = time.milliseconds(); Optional offsetExpirationCondition = group.offsetExpirationCondition(); - if (!offsetExpirationCondition.isPresent()) { + if (offsetExpirationCondition.isEmpty()) { return false; } @@ -1111,16 +1102,6 @@ public void replayEndTransactionMarker( } } - /** - * A new metadata image is available. - * - * @param newImage The new metadata image. - * @param delta The delta image. - */ - public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { - metadataImage = newImage; - } - /** * @return The offset for the provided groupId and topic partition or null * if it does not exist. diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategy.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategy.java new file mode 100644 index 0000000000000..c2ab20b7569e8 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategy.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group; + +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigException; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Represents the strategy for resetting offsets in share consumer groups when no previous offset is found + * for a partition or when an offset is out of range. + * + * Supports three strategies: + *
<ul>
+ *   <li>{@code EARLIEST} - Reset the offset to the earliest available offset</li>
+ *   <li>{@code LATEST} - Reset the offset to the latest available offset</li>
+ *   <li>{@code BY_DURATION} - Reset the offset to a timestamp that is the specified duration before the current time</li>
+ * </ul>
              + */ +public class ShareGroupAutoOffsetResetStrategy { + + public static final ShareGroupAutoOffsetResetStrategy EARLIEST = new ShareGroupAutoOffsetResetStrategy(AutoOffsetResetStrategy.EARLIEST, StrategyType.EARLIEST); + public static final ShareGroupAutoOffsetResetStrategy LATEST = new ShareGroupAutoOffsetResetStrategy(AutoOffsetResetStrategy.LATEST, StrategyType.LATEST); + + public enum StrategyType { + LATEST, EARLIEST, BY_DURATION; + + @Override + public String toString() { + return super.toString().toLowerCase(Locale.ROOT); + } + } + + private final AutoOffsetResetStrategy delegate; + private final StrategyType type; + + private ShareGroupAutoOffsetResetStrategy(AutoOffsetResetStrategy delegate, StrategyType type) { + this.delegate = delegate; + this.type = type; + } + + /** + * Factory method to create a ShareGroupAutoOffsetResetStrategy from a string representation. + */ + public static ShareGroupAutoOffsetResetStrategy fromString(String offsetStrategy) { + AutoOffsetResetStrategy baseStrategy = AutoOffsetResetStrategy.fromString(offsetStrategy); + AutoOffsetResetStrategy.StrategyType baseType = baseStrategy.type(); + + StrategyType shareGroupType; + switch (baseType) { + case EARLIEST: + shareGroupType = StrategyType.EARLIEST; + break; + case LATEST: + shareGroupType = StrategyType.LATEST; + break; + case BY_DURATION: + shareGroupType = StrategyType.BY_DURATION; + break; + default: + throw new IllegalArgumentException("Unsupported strategy for ShareGroup: " + baseType); + } + + return new ShareGroupAutoOffsetResetStrategy(baseStrategy, shareGroupType); + } + + /** + * Returns the share group strategy type. + */ + public StrategyType type() { + return type; + } + + /** + * Returns the name of the share group offset reset strategy. + */ + public String name() { + return type.toString(); + } + + /** + * Delegates the timestamp calculation to the base strategy. + * @return the timestamp for the OffsetResetStrategy, + * if the strategy is EARLIEST or LATEST or duration is provided + * else return Optional.empty() + */ + public Long timestamp() { + return delegate.timestamp().get(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShareGroupAutoOffsetResetStrategy that = (ShareGroupAutoOffsetResetStrategy) o; + return type == that.type && Objects.equals(delegate, that.delegate); + } + + @Override + public int hashCode() { + return Objects.hash(delegate, type); + } + + @Override + public String toString() { + return "ShareGroupAutoOffsetResetStrategy{" + + "type=" + type + + ", delegate=" + delegate + + '}'; + } + + /** + * Factory method for creating EARLIEST strategy. + */ + public static ShareGroupAutoOffsetResetStrategy earliest() { + return new ShareGroupAutoOffsetResetStrategy(AutoOffsetResetStrategy.EARLIEST, StrategyType.EARLIEST); + } + + /** + * Factory method for creating LATEST strategy. + */ + public static ShareGroupAutoOffsetResetStrategy latest() { + return new ShareGroupAutoOffsetResetStrategy(AutoOffsetResetStrategy.LATEST, StrategyType.LATEST); + } + + public static class Validator implements ConfigDef.Validator { + @Override + public void ensureValid(String name, Object value) { + String offsetStrategy = (String) value; + try { + fromString(offsetStrategy); + } catch (Exception e) { + throw new ConfigException(name, value, "Invalid value `" + offsetStrategy + "` for configuration " + + name + ". 
The value must be either 'earliest', 'latest' or of the format 'by_duration:'."); + } + } + + public String toString() { + String values = Arrays.stream(StrategyType.values()) + .map(strategyType -> { + if (strategyType == StrategyType.BY_DURATION) { + return strategyType + ":PnDTnHnMn.nS"; + } + return strategyType.toString(); + }).collect(Collectors.joining(", ")); + return "[" + values + "]"; + } + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/RangeSet.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/RangeSet.java index 867cdd9c55619..6bde9cddd967a 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/RangeSet.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/RangeSet.java @@ -63,7 +63,7 @@ public boolean contains(Object o) { @Override public Iterator iterator() { - return new Iterator() { + return new Iterator<>() { private int current = from; @Override @@ -160,14 +160,12 @@ public String toString() { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof Set)) return false; + if (!(o instanceof Set otherSet)) return false; - if (o instanceof RangeSet) { - RangeSet other = (RangeSet) o; + if (o instanceof RangeSet other) { return this.from == other.from && this.to == other.to; } - Set otherSet = (Set) o; if (otherSet.size() != this.size()) return false; for (int i = from; i < to; i++) { diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/UniformHeterogeneousAssignmentBuilder.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/UniformHeterogeneousAssignmentBuilder.java index ca5ba77fbbdb5..3166d775c27e0 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/UniformHeterogeneousAssignmentBuilder.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/UniformHeterogeneousAssignmentBuilder.java @@ -24,9 +24,6 @@ import org.apache.kafka.coordinator.group.api.assignor.SubscribedTopicDescriber; import org.apache.kafka.coordinator.group.modern.MemberAssignmentImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -55,7 +52,6 @@ * Balance > Stickiness. */ public class UniformHeterogeneousAssignmentBuilder { - private static final Logger LOG = LoggerFactory.getLogger(UniformHeterogeneousAssignmentBuilder.class); /** * The maximum number of iterations to perform in the final iterative balancing phase. @@ -181,50 +177,44 @@ public UniformHeterogeneousAssignmentBuilder(GroupSpec groupSpec, SubscribedTopi } } - this.topicComparator = new Comparator() { - @Override - public int compare(final Uuid topic1Id, final Uuid topic2Id) { - int topic1PartitionCount = subscribedTopicDescriber.numPartitions(topic1Id); - int topic2PartitionCount = subscribedTopicDescriber.numPartitions(topic2Id); - int topic1SubscriberCount = topicSubscribers.get(topic1Id).size(); - int topic2SubscriberCount = topicSubscribers.get(topic2Id).size(); - - // Order by partitions per subscriber, descending. - int order = Double.compare( - (double) topic2PartitionCount / topic2SubscriberCount, - (double) topic1PartitionCount / topic1SubscriberCount - ); - - // Then order by subscriber count, ascending. 
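For the ShareGroupAutoOffsetResetStrategy introduced above, a short usage sketch. The config name in the last call is illustrative, and the printed sentinel value assumes the delegate strategy's usual earliest timestamp of -2:

    import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy;

    public class ResetStrategyExample {
        public static void main(String[] args) {
            // Named strategies.
            ShareGroupAutoOffsetResetStrategy earliest = ShareGroupAutoOffsetResetStrategy.fromString("earliest");
            System.out.println(earliest.type());      // earliest
            System.out.println(earliest.timestamp()); // the delegate's "earliest" sentinel (assumed -2)

            // Duration-based strategy: reset to "now minus an ISO-8601 duration".
            ShareGroupAutoOffsetResetStrategy byDuration =
                ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H");
            System.out.println(byDuration.type());      // by_duration
            System.out.println(byDuration.timestamp()); // roughly current time minus one hour, in ms

            // The Validator accepts earliest, latest and by_duration:<ISO-8601 duration>,
            // and throws a ConfigException otherwise. The config name is illustrative.
            new ShareGroupAutoOffsetResetStrategy.Validator()
                .ensureValid("share.auto.offset.reset", "by_duration:PT1H");
        }
    }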
- if (order == 0) { - order = Integer.compare(topic1SubscriberCount, topic2SubscriberCount); - } - - // Then order by topic id, ascending. - if (order == 0) { - order = topic1Id.compareTo(topic2Id); - } + this.topicComparator = (topic1Id, topic2Id) -> { + int topic1PartitionCount = subscribedTopicDescriber.numPartitions(topic1Id); + int topic2PartitionCount = subscribedTopicDescriber.numPartitions(topic2Id); + int topic1SubscriberCount = topicSubscribers.get(topic1Id).size(); + int topic2SubscriberCount = topicSubscribers.get(topic2Id).size(); + + // Order by partitions per subscriber, descending. + int order = Double.compare( + (double) topic2PartitionCount / topic2SubscriberCount, + (double) topic1PartitionCount / topic1SubscriberCount + ); + + // Then order by subscriber count, ascending. + if (order == 0) { + order = Integer.compare(topic1SubscriberCount, topic2SubscriberCount); + } - return order; + // Then order by topic id, ascending. + if (order == 0) { + order = topic1Id.compareTo(topic2Id); } - }; - this.memberComparator = new Comparator() { - @Override - public int compare(final Integer memberIndex1, final Integer memberIndex2) { - // Order by number of assigned partitions, ascending. - int order = Integer.compare( - memberTargetAssignmentSizes[memberIndex1], - memberTargetAssignmentSizes[memberIndex2] - ); + return order; + }; - // Then order by member index, ascending. - if (order == 0) { - order = memberIndex1.compareTo(memberIndex2); - } + this.memberComparator = (memberIndex1, memberIndex2) -> { + // Order by number of assigned partitions, ascending. + int order = Integer.compare( + memberTargetAssignmentSizes[memberIndex1], + memberTargetAssignmentSizes[memberIndex2] + ); - return order; + // Then order by member index, ascending. + if (order == 0) { + order = memberIndex1.compareTo(memberIndex2); } + + return order; }; // Initialize partition owners for the target assignments. @@ -851,14 +841,6 @@ private void assignPartition(Uuid topicId, int partition, int memberIndex) { addPartitionToTargetAssignment(topicId, partition, memberIndex); } - /** - * @param memberIndex The member index. - * @return The current assignment size for the given member. - */ - private int targetAssignmentSize(int memberIndex) { - return memberTargetAssignmentSizes[memberIndex]; - } - /** * Assigns a partition to a member and updates the current assignment size. * diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroup.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroup.java index 819eb53be38b3..b35f392053286 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroup.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroup.java @@ -41,7 +41,6 @@ import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.server.common.MetadataVersion; import org.slf4j.Logger; @@ -287,7 +286,7 @@ public Optional protocolType() { * @return True if the group is a simple group. 
*/ public boolean isSimpleGroup() { - return !protocolType.isPresent() && isEmpty() && pendingJoinMembers.isEmpty(); + return protocolType.isEmpty() && isEmpty() && pendingJoinMembers.isEmpty(); } /** @@ -448,7 +447,7 @@ public void add(ClassicGroupMember member, CompletableFuture statesFilter, long committedOffset) { return statesFilter.contains(state.toLowerCaseString()); } - /** - * Verify the member id is up to date for static members. Return true if both conditions met: - * 1. given member is a known static member to group - * 2. group stored member id doesn't match with given member id - * - * @param groupInstanceId the group instance id. - * @param memberId the member id. - * @return whether the static member is fenced based on the condition above. - */ - public boolean isStaticMemberFenced( - String groupInstanceId, - String memberId - ) { - String existingMemberId = staticMemberId(groupInstanceId); - return existingMemberId != null && !existingMemberId.equals(memberId); - } - /** * @return whether the group can rebalance. */ @@ -1160,7 +1142,7 @@ public boolean isSubscribedToTopic(String topic) { * @return the subscribed topics or Empty based on the condition above. */ public Optional> computeSubscribedTopics() { - if (!protocolType.isPresent()) { + if (protocolType.isEmpty()) { return Optional.empty(); } String type = protocolType.get(); @@ -1353,12 +1335,11 @@ public Map groupAssignment() { /** * Convert the given ConsumerGroup to a corresponding ClassicGroup. - * The member with leavingMemberId will not be converted to the new ClassicGroup as it's the last - * member using new consumer protocol that left and triggered the downgrade. * * @param consumerGroup The converted ConsumerGroup. - * @param leavingMemberId The member that will not be converted in the ClassicGroup. + * @param leavingMembers The members that will not be converted in the ClassicGroup. * @param joiningMember The member that needs to be converted and added to the ClassicGroup. + * When not null, must have an instanceId that matches an existing member. * @param logContext The logContext to create the ClassicGroup. * @param time The time to create the ClassicGroup. * @param metadataImage The MetadataImage. @@ -1366,7 +1347,7 @@ public Map groupAssignment() { */ public static ClassicGroup fromConsumerGroup( ConsumerGroup consumerGroup, - String leavingMemberId, + Set leavingMembers, ConsumerGroupMember joiningMember, LogContext logContext, Time time, @@ -1378,14 +1359,15 @@ public static ClassicGroup fromConsumerGroup( ClassicGroupState.STABLE, time, consumerGroup.groupEpoch(), - Optional.ofNullable(ConsumerProtocol.PROTOCOL_TYPE), + Optional.of(ConsumerProtocol.PROTOCOL_TYPE), Optional.empty(), Optional.empty(), Optional.of(time.milliseconds()) ); consumerGroup.members().forEach((memberId, member) -> { - if (!memberId.equals(leavingMemberId)) { + if (!leavingMembers.contains(member) && + (joiningMember == null || joiningMember.instanceId() == null || !joiningMember.instanceId().equals(member.instanceId()))) { classicGroup.add( new ClassicGroupMember( memberId, @@ -1429,7 +1411,11 @@ public static ClassicGroup fromConsumerGroup( // If the downgraded is triggered by the joining static member replacing // the leaving static member, the joining member should take the assignment // of the leaving one. 
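A hypothetical call site for the downgrade conversion above, sketching how a caller could turn a ConsumerGroup into a ClassicGroup without the leaving members and then emit the corresponding records. The wrapper class and method name are invented for illustration; only the ClassicGroup calls mirror the patch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    import org.apache.kafka.common.utils.LogContext;
    import org.apache.kafka.common.utils.Time;
    import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
    import org.apache.kafka.coordinator.group.classic.ClassicGroup;
    import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup;
    import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
    import org.apache.kafka.image.MetadataImage;

    public final class DowngradeSketch {

        public static List<CoordinatorRecord> downgrade(
            ConsumerGroup consumerGroup,
            Set<ConsumerGroupMember> leavingMembers,
            ConsumerGroupMember joiningMember, // null, or a static member replacing an existing one
            LogContext logContext,
            Time time,
            MetadataImage metadataImage
        ) {
            // Leaving members are dropped; a joining static member takes over the
            // assignment of the static member it replaces.
            ClassicGroup classicGroup = ClassicGroup.fromConsumerGroup(
                consumerGroup, leavingMembers, joiningMember, logContext, time, metadataImage);

            List<CoordinatorRecord> records = new ArrayList<>();
            classicGroup.createClassicGroupRecords(records);
            return records;
        }
    }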
- memberId = leavingMemberId; + ConsumerGroupMember replacedMember = consumerGroup.staticMember(joiningMember.instanceId()); + if (replacedMember == null) { + throw new IllegalArgumentException("joiningMember must be a static member when not null."); + } + memberId = replacedMember.memberId(); } byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment( toConsumerProtocolAssignment( @@ -1450,11 +1436,9 @@ public static ClassicGroup fromConsumerGroup( /** * Populate the record list with the records needed to create the given classic group. * - * @param metadataVersion The MetadataVersion. * @param records The list to which the new records are added. */ public void createClassicGroupRecords( - MetadataVersion metadataVersion, List records ) { Map assignments = new HashMap<>(); @@ -1462,7 +1446,7 @@ public void createClassicGroupRecords( assignments.put(classicGroupMember.memberId(), classicGroupMember.assignment()) ); - records.add(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(this, assignments, metadataVersion)); + records.add(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(this, assignments)); } /** diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/ModernGroup.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/ModernGroup.java index 8b920a7e0510c..cf291e0306bf7 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/ModernGroup.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/ModernGroup.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.coordinator.group.Group; -import org.apache.kafka.coordinator.group.Utils; import org.apache.kafka.coordinator.group.api.assignor.SubscriptionType; import org.apache.kafka.image.ClusterImage; import org.apache.kafka.image.TopicImage; @@ -82,7 +81,7 @@ public static class DeadlineAndEpoch { /** * The number of subscribers or regular expressions per topic. */ - protected final TimelineHashMap subscribedTopicNames; + protected final TimelineHashMap subscribedTopicNames; /** * The metadata associated with each subscribed topic name. @@ -221,7 +220,7 @@ public Map members() { * @return An immutable map containing all the subscribed topic names * with the subscribers counts per topic. */ - public Map subscribedTopicNames() { + public Map subscribedTopicNames() { return Collections.unmodifiableMap(subscribedTopicNames); } @@ -378,7 +377,7 @@ public void setSubscriptionMetadata( * @return An immutable map of subscription metadata for each topic that the consumer group is subscribed to. */ public Map computeSubscriptionMetadata( - Map subscribedTopicNames, + Map subscribedTopicNames, TopicsImage topicsImage, ClusterImage clusterImage ) { @@ -440,19 +439,24 @@ public DeadlineAndEpoch metadataRefreshDeadline() { return metadataRefreshDeadline; } + /** + * Updates the subscription type. + */ + protected void maybeUpdateGroupSubscriptionType() { + subscriptionType.set(subscriptionType(subscribedTopicNames, members.size())); + } + /** * Updates the subscribed topic names count. - * The subscription type is updated as a consequence. * * @param oldMember The old member. * @param newMember The new member. 
*/ - protected void maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType( + protected void maybeUpdateSubscribedTopicNames( ModernGroupMember oldMember, ModernGroupMember newMember ) { maybeUpdateSubscribedTopicNames(subscribedTopicNames, oldMember, newMember); - subscriptionType.set(subscriptionType(subscribedTopicNames, members.size())); } /** @@ -463,19 +467,19 @@ protected void maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType( * @param newMember The new member. */ private static void maybeUpdateSubscribedTopicNames( - Map subscribedTopicCount, + Map subscribedTopicCount, ModernGroupMember oldMember, ModernGroupMember newMember ) { if (oldMember != null) { oldMember.subscribedTopicNames().forEach(topicName -> - subscribedTopicCount.compute(topicName, Utils::decValue) + subscribedTopicCount.compute(topicName, SubscriptionCount::decNameCount) ); } if (newMember != null) { newMember.subscribedTopicNames().forEach(topicName -> - subscribedTopicCount.compute(topicName, Utils::incValue) + subscribedTopicCount.compute(topicName, SubscriptionCount::incNameCount) ); } } @@ -488,11 +492,11 @@ private static void maybeUpdateSubscribedTopicNames( * * @return Copy of the map of topics to the count of number of subscribers. */ - public Map computeSubscribedTopicNames( + public Map computeSubscribedTopicNames( ModernGroupMember oldMember, ModernGroupMember newMember ) { - Map subscribedTopicNames = new HashMap<>(this.subscribedTopicNames); + Map subscribedTopicNames = new HashMap<>(this.subscribedTopicNames); maybeUpdateSubscribedTopicNames( subscribedTopicNames, oldMember, @@ -508,10 +512,10 @@ public Map computeSubscribedTopicNames( * * @return Copy of the map of topics to the count of number of subscribers. */ - public Map computeSubscribedTopicNames( + public Map computeSubscribedTopicNames( Set removedMembers ) { - Map subscribedTopicNames = new HashMap<>(this.subscribedTopicNames); + Map subscribedTopicNames = new HashMap<>(this.subscribedTopicNames); if (removedMembers != null) { removedMembers.forEach(removedMember -> maybeUpdateSubscribedTopicNames( @@ -533,15 +537,15 @@ public Map computeSubscribedTopicNames( * otherwise, {@link SubscriptionType#HETEROGENEOUS}. */ public static SubscriptionType subscriptionType( - Map subscribedTopicNames, + Map subscribedTopicNames, int numberOfMembers ) { if (subscribedTopicNames.isEmpty()) { return HOMOGENEOUS; } - for (int subscriberCount : subscribedTopicNames.values()) { - if (subscriberCount != numberOfMembers) { + for (SubscriptionCount subscriberCount : subscribedTopicNames.values()) { + if (subscriberCount.byNameCount != numberOfMembers) { return HETEROGENEOUS; } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/SubscriptionCount.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/SubscriptionCount.java new file mode 100644 index 0000000000000..ef75a3c3eca34 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/SubscriptionCount.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.modern; + +/** + * A class which holds two counters. One to count subscription by name and + * another one to count subscription by regex. + */ +public class SubscriptionCount { + public final int byNameCount; + public final int byRegexCount; + + public SubscriptionCount(int byNameCount, int byRegexCount) { + this.byNameCount = byNameCount; + this.byRegexCount = byRegexCount; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SubscriptionCount that = (SubscriptionCount) o; + + if (byNameCount != that.byNameCount) return false; + return byRegexCount == that.byRegexCount; + } + + @Override + public int hashCode() { + int result = byNameCount; + result = 31 * result + byRegexCount; + return result; + } + + @Override + public String toString() { + return "SubscriptionCount(" + + "byNameCount=" + byNameCount + + ", byRegexCount=" + byRegexCount + + ')'; + } + + /** + * Increments the name count by 1; This helper is meant to be used with Map#compute. + */ + public static SubscriptionCount incNameCount(String key, SubscriptionCount count) { + if (count == null) { + return new SubscriptionCount(1, 0); + } else { + return new SubscriptionCount(count.byNameCount + 1, count.byRegexCount); + } + } + + /** + * Decrements the name count by 1; This helper is meant to be used with Map#compute. + */ + public static SubscriptionCount decNameCount(String key, SubscriptionCount count) { + if (count == null || (count.byNameCount == 1 && count.byRegexCount == 0)) { + return null; + } else { + return new SubscriptionCount(count.byNameCount - 1, count.byRegexCount); + } + } + + /** + * Increments the regex count by 1; This helper is meant to be used with Map#compute. + */ + public static SubscriptionCount incRegexCount(String key, SubscriptionCount count) { + if (count == null) { + return new SubscriptionCount(0, 1); + } else { + return new SubscriptionCount(count.byNameCount, count.byRegexCount + 1); + } + } + + /** + * Decrements the regex count by 1; This helper is meant to be used with Map#compute. 
+ */ + public static SubscriptionCount decRegexCount(String key, SubscriptionCount count) { + if (count == null || (count.byRegexCount == 1 && count.byNameCount == 0)) { + return null; + } else { + return new SubscriptionCount(count.byNameCount, count.byRegexCount - 1); + } + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilder.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilder.java index ba08a236ba679..63bf81f1e0801 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilder.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilder.java @@ -24,6 +24,9 @@ import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor; import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException; import org.apache.kafka.coordinator.group.api.assignor.SubscriptionType; +import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; +import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; +import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember; import org.apache.kafka.image.TopicsImage; import java.util.ArrayList; @@ -47,7 +50,7 @@ * is deleted as part of the member deletion process. In other words, this class * does not yield a tombstone for removed members. */ -public class TargetAssignmentBuilder { +public abstract class TargetAssignmentBuilder> { /** * The assignment result returned by {{@link TargetAssignmentBuilder#build()}}. @@ -89,6 +92,144 @@ public Map targetAssignment() { } } + public static class ConsumerTargetAssignmentBuilder extends TargetAssignmentBuilder { + + /** + * The resolved regular expressions. + */ + private Map resolvedRegularExpressions = Collections.emptyMap(); + + public ConsumerTargetAssignmentBuilder( + String groupId, + int groupEpoch, + PartitionAssignor assignor + ) { + super(groupId, groupEpoch, assignor); + } + + /** + * Adds all the existing resolved regular expressions. + * + * @param resolvedRegularExpressions The resolved regular expressions. + * @return This object. + */ + public ConsumerTargetAssignmentBuilder withResolvedRegularExpressions( + Map resolvedRegularExpressions + ) { + this.resolvedRegularExpressions = resolvedRegularExpressions; + return self(); + } + + @Override + protected ConsumerTargetAssignmentBuilder self() { + return this; + } + + @Override + protected CoordinatorRecord newTargetAssignmentRecord( + String groupId, + String memberId, + Map> partitions + ) { + return GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord( + groupId, + memberId, + partitions + ); + } + + @Override + protected CoordinatorRecord newTargetAssignmentEpochRecord(String groupId, int assignmentEpoch) { + return GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord( + groupId, + assignmentEpoch + ); + } + + @Override + protected MemberSubscriptionAndAssignmentImpl newMemberSubscriptionAndAssignment( + ConsumerGroupMember member, + Assignment memberAssignment, + TopicIds.TopicResolver topicResolver + ) { + Set subscriptions = member.subscribedTopicNames(); + + // Check whether the member is also subscribed to a regular expression. If it is, + // create the union of the two subscriptions. 
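The SubscriptionCount helpers above are written for Map#compute: incrementing a missing key starts the counter at one, and decrementing the last remaining reference returns null so the entry is removed from the map. A small sketch of that lifecycle with an arbitrary topic name:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.kafka.coordinator.group.modern.SubscriptionCount;

    public class SubscriptionCountExample {
        public static void main(String[] args) {
            Map<String, SubscriptionCount> counts = new HashMap<>();

            counts.compute("orders", SubscriptionCount::incNameCount);   // (byName=1, byRegex=0)
            counts.compute("orders", SubscriptionCount::incRegexCount);  // (byName=1, byRegex=1)
            System.out.println(counts.get("orders"));

            counts.compute("orders", SubscriptionCount::decNameCount);   // (byName=0, byRegex=1)
            counts.compute("orders", SubscriptionCount::decRegexCount);  // returns null, entry is removed
            System.out.println(counts.containsKey("orders"));            // false
        }
    }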
+ String subscribedTopicRegex = member.subscribedTopicRegex(); + if (subscribedTopicRegex != null && !subscribedTopicRegex.isEmpty()) { + ResolvedRegularExpression resolvedRegularExpression = resolvedRegularExpressions.get(subscribedTopicRegex); + if (resolvedRegularExpression != null) { + if (subscriptions.isEmpty()) { + subscriptions = resolvedRegularExpression.topics; + } else if (!resolvedRegularExpression.topics.isEmpty()) { + // We only use a UnionSet when the member uses both type of subscriptions. The + // protocol allows it. However, the Apache Kafka Consumer does not support it. + // Other clients such as librdkafka may support it. + subscriptions = new UnionSet<>(subscriptions, resolvedRegularExpression.topics); + } + } + } + + return new MemberSubscriptionAndAssignmentImpl( + Optional.ofNullable(member.rackId()), + Optional.ofNullable(member.instanceId()), + new TopicIds(subscriptions, topicResolver), + memberAssignment + ); + } + } + + public static class ShareTargetAssignmentBuilder extends TargetAssignmentBuilder { + public ShareTargetAssignmentBuilder( + String groupId, + int groupEpoch, + PartitionAssignor assignor + ) { + super(groupId, groupEpoch, assignor); + } + + @Override + protected ShareTargetAssignmentBuilder self() { + return this; + } + + @Override + protected CoordinatorRecord newTargetAssignmentRecord( + String groupId, + String memberId, + Map> partitions + ) { + return GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentRecord( + groupId, + memberId, + partitions + ); + } + + @Override + protected CoordinatorRecord newTargetAssignmentEpochRecord(String groupId, int assignmentEpoch) { + return GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentEpochRecord( + groupId, + assignmentEpoch + ); + } + + @Override + protected MemberSubscriptionAndAssignmentImpl newMemberSubscriptionAndAssignment( + ShareGroupMember member, + Assignment memberAssignment, + TopicIds.TopicResolver topicResolver + ) { + return new MemberSubscriptionAndAssignmentImpl( + Optional.ofNullable(member.rackId()), + Optional.ofNullable(member.instanceId()), + new TopicIds(member.subscribedTopicNames(), topicResolver), + memberAssignment + ); + } + } + /** * The group id. */ @@ -146,27 +287,6 @@ public Map targetAssignment() { */ private Map staticMembers = new HashMap<>(); - public interface TargetAssignmentRecordBuilder { - CoordinatorRecord build( - final String groupId, - final String memberId, - final Map> partitions - ); - } - - public interface TargetAssignmentEpochRecordBuilder { - CoordinatorRecord build( - final String groupId, - final int assignmentEpoch - ); - } - - private TargetAssignmentRecordBuilder targetAssignmentRecordBuilder = - GroupCoordinatorRecordHelpers::newConsumerGroupTargetAssignmentRecord; - - private TargetAssignmentEpochRecordBuilder targetAssignmentEpochRecordBuilder = - GroupCoordinatorRecordHelpers::newConsumerGroupTargetAssignmentEpochRecord; - /** * Constructs the object. * @@ -190,11 +310,11 @@ public TargetAssignmentBuilder( * @param members The existing members in the consumer group. * @return This object. */ - public TargetAssignmentBuilder withMembers( + public U withMembers( Map members ) { this.members = members; - return this; + return self(); } /** @@ -203,11 +323,11 @@ public TargetAssignmentBuilder withMembers( * @param staticMembers The existing static members in the consumer group. * @return This object. 
*/ - public TargetAssignmentBuilder withStaticMembers( + public U withStaticMembers( Map staticMembers ) { this.staticMembers = staticMembers; - return this; + return self(); } /** @@ -216,11 +336,11 @@ public TargetAssignmentBuilder withStaticMembers( * @param subscriptionMetadata The subscription metadata. * @return This object. */ - public TargetAssignmentBuilder withSubscriptionMetadata( + public U withSubscriptionMetadata( Map subscriptionMetadata ) { this.subscriptionMetadata = subscriptionMetadata; - return this; + return self(); } /** @@ -229,11 +349,11 @@ public TargetAssignmentBuilder withSubscriptionMetadata( * @param subscriptionType Subscription type of the group. * @return This object. */ - public TargetAssignmentBuilder withSubscriptionType( + public U withSubscriptionType( SubscriptionType subscriptionType ) { this.subscriptionType = subscriptionType; - return this; + return self(); } /** @@ -242,11 +362,11 @@ public TargetAssignmentBuilder withSubscriptionType( * @param targetAssignment The existing target assignment. * @return This object. */ - public TargetAssignmentBuilder withTargetAssignment( + public U withTargetAssignment( Map targetAssignment ) { this.targetAssignment = targetAssignment; - return this; + return self(); } /** @@ -255,11 +375,11 @@ public TargetAssignmentBuilder withTargetAssignment( * @param invertedTargetAssignment The reverse lookup map of the current target assignment. * @return This object. */ - public TargetAssignmentBuilder withInvertedTargetAssignment( + public U withInvertedTargetAssignment( Map> invertedTargetAssignment ) { this.invertedTargetAssignment = invertedTargetAssignment; - return this; + return self(); } /** @@ -268,25 +388,11 @@ public TargetAssignmentBuilder withInvertedTargetAssignment( * @param topicsImage The topics image. * @return This object. */ - public TargetAssignmentBuilder withTopicsImage( + public U withTopicsImage( TopicsImage topicsImage ) { this.topicsImage = topicsImage; - return this; - } - - public TargetAssignmentBuilder withTargetAssignmentRecordBuilder( - TargetAssignmentRecordBuilder targetAssignmentRecordBuilder - ) { - this.targetAssignmentRecordBuilder = targetAssignmentRecordBuilder; - return this; - } - - public TargetAssignmentBuilder withTargetAssignmentEpochRecordBuilder( - TargetAssignmentEpochRecordBuilder targetAssignmentEpochRecordBuilder - ) { - this.targetAssignmentEpochRecordBuilder = targetAssignmentEpochRecordBuilder; - return this; + return self(); } /** @@ -297,12 +403,12 @@ public TargetAssignmentBuilder withTargetAssignmentEpochRecordBuilder( * @param member The member to add or update. * @return This object. */ - public TargetAssignmentBuilder addOrUpdateMember( + public U addOrUpdateMember( String memberId, T member ) { this.updatedMembers.put(memberId, member); - return this; + return self(); } /** @@ -312,7 +418,7 @@ public TargetAssignmentBuilder addOrUpdateMember( * @param memberId The member id. * @return This object. */ - public TargetAssignmentBuilder removeMember( + public U removeMember( String memberId ) { return addOrUpdateMember(memberId, null); @@ -331,7 +437,7 @@ public TargetAssignmentResult build() throws PartitionAssignorException { // Prepare the member spec for all members. 
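The with* methods above now return U via self(), the usual self-typed builder pattern that lets ConsumerTargetAssignmentBuilder and ShareTargetAssignmentBuilder keep their concrete type through a fluent chain. A minimal standalone sketch of the pattern with invented names:

    abstract class BaseBuilder<T, U extends BaseBuilder<T, U>> {
        protected T payload;

        // Returns the concrete subclass type, not the base type.
        public U withPayload(T payload) {
            this.payload = payload;
            return self();
        }

        protected abstract U self();
    }

    final class TextBuilder extends BaseBuilder<String, TextBuilder> {
        @Override
        protected TextBuilder self() {
            return this;
        }

        public String build() {
            return "built: " + payload;
        }
    }

    public class SelfTypeDemo {
        public static void main(String[] args) {
            // withPayload returns TextBuilder, so build() chains without a cast.
            System.out.println(new TextBuilder().withPayload("hello").build());
        }
    }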
members.forEach((memberId, member) -> - memberSpecs.put(memberId, createMemberSubscriptionAndAssignment( + memberSpecs.put(memberId, newMemberSubscriptionAndAssignment( member, targetAssignment.getOrDefault(memberId, Assignment.EMPTY), topicResolver @@ -353,7 +459,7 @@ public TargetAssignmentResult build() throws PartitionAssignorException { } } - memberSpecs.put(memberId, createMemberSubscriptionAndAssignment( + memberSpecs.put(memberId, newMemberSubscriptionAndAssignment( updatedMemberOrNull, assignment, topicResolver @@ -391,7 +497,7 @@ public TargetAssignmentResult build() throws PartitionAssignorException { if (!newMemberAssignment.equals(oldMemberAssignment)) { // If the member had no assignment or had a different assignment, we // create a record for the new assignment. - records.add(targetAssignmentRecordBuilder.build( + records.add(newTargetAssignmentRecord( groupId, memberId, newMemberAssignment.partitions() @@ -400,11 +506,30 @@ public TargetAssignmentResult build() throws PartitionAssignorException { } // Bump the target assignment epoch. - records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch)); + records.add(newTargetAssignmentEpochRecord(groupId, groupEpoch)); return new TargetAssignmentResult(records, newGroupAssignment.members()); } + protected abstract U self(); + + protected abstract CoordinatorRecord newTargetAssignmentRecord( + String groupId, + String memberId, + Map> partitions + ); + + protected abstract CoordinatorRecord newTargetAssignmentEpochRecord( + String groupId, + int assignmentEpoch + ); + + protected abstract MemberSubscriptionAndAssignmentImpl newMemberSubscriptionAndAssignment( + T member, + Assignment memberAssignment, + TopicIds.TopicResolver topicResolver + ); + private Assignment newMemberAssignment( GroupAssignment newGroupAssignment, String memberId @@ -416,18 +541,4 @@ private Assignment newMemberAssignment( return Assignment.EMPTY; } } - - // private for testing - static MemberSubscriptionAndAssignmentImpl createMemberSubscriptionAndAssignment( - T member, - Assignment memberAssignment, - TopicIds.TopicResolver topicResolver - ) { - return new MemberSubscriptionAndAssignmentImpl( - Optional.ofNullable(member.rackId()), - Optional.ofNullable(member.instanceId()), - new TopicIds(member.subscribedTopicNames(), topicResolver), - memberAssignment - ); - } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java index f45735a527c8f..c92493cc1cb00 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java @@ -190,8 +190,7 @@ public boolean isEmpty() { @Override public boolean contains(Object o) { - if (o instanceof Uuid) { - Uuid topicId = (Uuid) o; + if (o instanceof Uuid topicId) { String topicName = resolver.name(topicId); if (topicName == null) return false; return topicNames.contains(topicName); diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/UnionSet.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/UnionSet.java new file mode 100644 index 0000000000000..95c6d9bd86354 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/UnionSet.java @@ -0,0 +1,218 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.modern; + +import java.lang.reflect.Array; +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Set; + +/** + * A set which presents the union of two underlying sets without + * materializing it. This class expects the underlying sets to + * be immutable. + * + * @param The set type. + */ +public class UnionSet implements Set { + private final Set largeSet; + private final Set smallSet; + private int size = -1; + + public UnionSet(Set s1, Set s2) { + Objects.requireNonNull(s1); + Objects.requireNonNull(s2); + + if (s1.size() > s2.size()) { + largeSet = s1; + smallSet = s2; + } else { + largeSet = s2; + smallSet = s1; + } + } + + @Override + public int size() { + if (size == -1) { + size = largeSet.size(); + for (T item : smallSet) { + if (!largeSet.contains(item)) { + size++; + } + } + } + return size; + } + + @Override + public boolean isEmpty() { + return largeSet.isEmpty() && smallSet.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return largeSet.contains(o) || smallSet.contains(o); + } + + @Override + public Iterator iterator() { + return new Iterator<>() { + private final Iterator largeSetIterator = largeSet.iterator(); + private final Iterator smallSetIterator = smallSet.iterator(); + private T next = null; + + @Override + public boolean hasNext() { + if (next != null) return true; + if (largeSetIterator.hasNext()) { + next = largeSetIterator.next(); + return true; + } + while (smallSetIterator.hasNext()) { + next = smallSetIterator.next(); + if (!largeSet.contains(next)) { + return true; + } + } + next = null; + return false; + } + + @Override + public T next() { + if (!hasNext()) throw new NoSuchElementException(); + T result = next; + next = null; + return result; + } + }; + } + + @Override + public Object[] toArray() { + Object[] array = new Object[size()]; + int index = 0; + for (T item : largeSet) { + array[index] = item; + index++; + } + for (T item : smallSet) { + if (!largeSet.contains(item)) { + array[index] = item; + index++; + } + } + return array; + } + + @Override + @SuppressWarnings("unchecked") + public U[] toArray(U[] array) { + int size = size(); + if (array.length < size) { + // Create a new array of the same type with the correct size + array = (U[]) Array.newInstance(array.getClass().getComponentType(), size); + } + int index = 0; + for (T item : largeSet) { + array[index] = (U) item; + index++; + } + for (T item : smallSet) { + if (!largeSet.contains(item)) { + array[index] = (U) item; + index++; + } + } + if (array.length > size) { + array[size] = null; + } + return array; + } + + @Override + public boolean add(T t) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean 
remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(Collection c) { + for (Object o : c) { + if (!contains(o)) return false; + } + return true; + } + + @Override + public boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Set set)) return false; + + if (set.size() != size()) return false; + return containsAll(set); + } + + @Override + public int hashCode() { + int h = 0; + for (T item : largeSet) { + h += item.hashCode(); + } + for (T item : smallSet) { + if (!largeSet.contains(item)) { + h += item.hashCode(); + } + } + return h; + } + + @Override + public String toString() { + return "UnionSet(" + + "largeSet=" + largeSet + + ", smallSet=" + smallSet + + ')'; + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroup.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroup.java index 3be2124c058db..58d70ffe99aff 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroup.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroup.java @@ -24,13 +24,17 @@ import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; +import org.apache.kafka.common.message.ConsumerProtocolAssignment; import org.apache.kafka.common.message.ConsumerProtocolSubscription; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.types.SchemaException; +import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers; import org.apache.kafka.coordinator.group.OffsetExpirationCondition; import org.apache.kafka.coordinator.group.OffsetExpirationConditionImpl; import org.apache.kafka.coordinator.group.Utils; +import org.apache.kafka.coordinator.group.api.assignor.SubscriptionType; import org.apache.kafka.coordinator.group.classic.ClassicGroup; import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard; @@ -38,6 +42,7 @@ import org.apache.kafka.coordinator.group.modern.MemberState; import org.apache.kafka.coordinator.group.modern.ModernGroup; import org.apache.kafka.coordinator.group.modern.ModernGroupMember; +import org.apache.kafka.coordinator.group.modern.SubscriptionCount; import org.apache.kafka.image.TopicsImage; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; @@ -45,8 +50,10 @@ import org.apache.kafka.timeline.TimelineObject; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -56,10 +63,14 @@ import static 
org.apache.kafka.coordinator.group.Utils.toOptional; import static org.apache.kafka.coordinator.group.Utils.toTopicPartitionMap; +import static org.apache.kafka.coordinator.group.api.assignor.SubscriptionType.HETEROGENEOUS; +import static org.apache.kafka.coordinator.group.api.assignor.SubscriptionType.HOMOGENEOUS; +import static org.apache.kafka.coordinator.group.classic.ClassicGroupMember.EMPTY_ASSIGNMENT; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState.ASSIGNING; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState.EMPTY; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState.RECONCILING; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState.STABLE; +import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember.subscribedTopicRegexOrNull; /** * A Consumer Group. All the metadata in this class are backed by @@ -303,12 +314,13 @@ public void updateMember(ConsumerGroupMember newMember) { throw new IllegalArgumentException("newMember cannot be null."); } ConsumerGroupMember oldMember = members.put(newMember.memberId(), newMember); - maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, newMember); + maybeUpdateSubscribedTopicNames(oldMember, newMember); maybeUpdateServerAssignors(oldMember, newMember); maybeUpdatePartitionEpoch(oldMember, newMember); maybeUpdateSubscribedRegularExpression(oldMember, newMember); updateStaticMember(newMember); maybeUpdateGroupState(); + maybeUpdateGroupSubscriptionType(); maybeUpdateNumClassicProtocolMembers(oldMember, newMember); maybeUpdateClassicProtocolMembersSupportedProtocols(oldMember, newMember); } @@ -327,12 +339,13 @@ private void updateStaticMember(ConsumerGroupMember newMember) { @Override public void removeMember(String memberId) { ConsumerGroupMember oldMember = members.remove(memberId); - maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, null); + maybeUpdateSubscribedTopicNames(oldMember, null); maybeUpdateServerAssignors(oldMember, null); maybeRemovePartitionEpoch(oldMember); maybeUpdateSubscribedRegularExpression(oldMember, null); removeStaticMember(oldMember); maybeUpdateGroupState(); + maybeUpdateGroupSubscriptionType(); maybeUpdateNumClassicProtocolMembers(oldMember, null); maybeUpdateClassicProtocolMembersSupportedProtocols(oldMember, null); } @@ -348,6 +361,91 @@ private void removeStaticMember(ConsumerGroupMember oldMember) { } } + /** + * Updates the subscription count. + * + * @param oldMember The old member. + * @param newMember The new member. + * + * @return Copy of the map of topics to the count of number of subscribers. + */ + public Map computeSubscribedTopicNames( + ConsumerGroupMember oldMember, + ConsumerGroupMember newMember + ) { + Map subscribedTopicsNames = super.computeSubscribedTopicNames(oldMember, newMember); + String oldSubscribedTopicRegex = subscribedTopicRegexOrNull(oldMember); + + if (oldSubscribedTopicRegex != null) { + String newSubscribedTopicRegex = subscribedTopicRegexOrNull(newMember); + + // If the old member was the last one subscribed to the regex and the new member + // is not subscribed to it, we must remove it from the subscribed topic names. 
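Because a member may subscribe both by topic name and by regular expression, the UnionSet added earlier in this patch presents the two subscription sets as a single read-only view without copying either of them. An illustrative use with made-up topic names:

    import java.util.Set;

    import org.apache.kafka.coordinator.group.modern.UnionSet;

    public class UnionSetExample {
        public static void main(String[] args) {
            // Topics a member subscribed to by name, and topics matched by its regex.
            Set<String> byName = Set.of("orders", "payments");
            Set<String> byRegex = Set.of("payments", "shipments");

            // The union is computed lazily; neither input set is materialized or modified.
            Set<String> subscription = new UnionSet<>(byName, byRegex);

            System.out.println(subscription.size());                 // 3
            System.out.println(subscription.contains("shipments"));  // true
            System.out.println(subscription.contains("refunds"));    // false

            // The view is read-only: mutators throw UnsupportedOperationException.
        }
    }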
+ if (!oldSubscribedTopicRegex.equals(newSubscribedTopicRegex) && numSubscribedMembers(oldSubscribedTopicRegex) == 1) { + resolvedRegularExpression(oldSubscribedTopicRegex).ifPresent(resolvedRegularExpression -> + resolvedRegularExpression.topics.forEach(topic -> subscribedTopicsNames.compute(topic, SubscriptionCount::decRegexCount)) + ); + } + } + + return subscribedTopicsNames; + } + + /** + * Computes an updated version of the subscribed regular expressions based on + * the new/old members. + * + * @param oldMember The old member. + * @param newMember The new member. + * @return An unmodifiable and updated copy of the map. + */ + public Map computeSubscribedRegularExpressions( + ConsumerGroupMember oldMember, + ConsumerGroupMember newMember + ) { + String oldRegex = subscribedTopicRegexOrNull(oldMember); + String newRegex = subscribedTopicRegexOrNull(newMember); + + if (!Objects.equals(oldRegex, newRegex)) { + Map newSubscribedRegularExpressions = new HashMap<>(subscribedRegularExpressions); + if (oldRegex != null) { + newSubscribedRegularExpressions.compute(oldRegex, Utils::decValue); + } + if (newRegex != null) { + newSubscribedRegularExpressions.compute(newRegex, Utils::incValue); + } + return Collections.unmodifiableMap(newSubscribedRegularExpressions); + } else { + return Collections.unmodifiableMap(subscribedRegularExpressions); + } + } + + /** + * Computes an updated copy of the subscribed topic names without the provided + * removed members and removed regular expressions. + * + * @param removedMembers The set of removed members. + * @param removedRegexes The set of removed regular expressions. + * + * @return Copy of the map of topics to the count of number of subscribers. + */ + public Map computeSubscribedTopicNamesWithoutDeletedMembers( + Set removedMembers, + Set removedRegexes + ) { + Map subscribedTopicsNames = super.computeSubscribedTopicNames(removedMembers); + + removedRegexes.forEach(regex -> + resolvedRegularExpression(regex).ifPresent(resolvedRegularExpression -> + resolvedRegularExpression.topics.forEach(topic -> + subscribedTopicsNames.compute(topic, SubscriptionCount::decRegexCount) + ) + ) + ); + + return subscribedTopicsNames; + } + /** * Update the resolved regular expression. * @@ -361,7 +459,7 @@ public void updateResolvedRegularExpression( removeResolvedRegularExpression(regex); if (newResolvedRegularExpression != null) { resolvedRegularExpressions.put(regex, newResolvedRegularExpression); - newResolvedRegularExpression.topics.forEach(topicName -> subscribedTopicNames.compute(topicName, Utils::incValue)); + newResolvedRegularExpression.topics.forEach(topicName -> subscribedTopicNames.compute(topicName, SubscriptionCount::incRegexCount)); } } @@ -373,7 +471,33 @@ public void updateResolvedRegularExpression( public void removeResolvedRegularExpression(String regex) { ResolvedRegularExpression oldResolvedRegularExpression = resolvedRegularExpressions.remove(regex); if (oldResolvedRegularExpression != null) { - oldResolvedRegularExpression.topics.forEach(topicName -> subscribedTopicNames.compute(topicName, Utils::decValue)); + oldResolvedRegularExpression.topics.forEach(topicName -> subscribedTopicNames.compute(topicName, SubscriptionCount::decRegexCount)); + } + } + + /** + * @return The last time resolved regular expressions were refreshed or Long.MIN_VALUE if + * there are no resolved regular expression. Note that we use the timestamp of the first + * entry as a proxy for all of them. They are always resolved together. 
+ */ + public long lastResolvedRegularExpressionRefreshTimeMs() { + Iterator iterator = resolvedRegularExpressions.values().iterator(); + if (iterator.hasNext()) { + return iterator.next().timestamp; + } else { + return Long.MIN_VALUE; + } + } + + /** + * @return The version of the regular expressions or Zero if there are no resolved regular expression. + */ + public long lastResolvedRegularExpressionVersion() { + Iterator iterator = resolvedRegularExpressions.values().iterator(); + if (iterator.hasNext()) { + return iterator.next().version; + } else { + return 0L; } } @@ -384,10 +508,17 @@ public void removeResolvedRegularExpression(String regex) { * @param regex The regular expression. * @return The optional containing the resolved regular expression or an empty optional. */ - public Optional regularExpression(String regex) { + public Optional resolvedRegularExpression(String regex) { return Optional.ofNullable(resolvedRegularExpressions.get(regex)); } + /** + * @return The number of resolved regular expressions. + */ + public int numResolvedRegularExpressions() { + return resolvedRegularExpressions.size(); + } + /** * @return The number of members subscribed to the provided regex. */ @@ -395,6 +526,14 @@ public int numSubscribedMembers(String regex) { return subscribedRegularExpressions.getOrDefault(regex, 0); } + /** + * @return An immutable map containing all the subscribed regular expressions + * with the subscribers counts. + */ + public Map subscribedRegularExpressions() { + return Collections.unmodifiableMap(subscribedRegularExpressions); + } + /** * @return The number of members that use the classic protocol. */ @@ -416,6 +555,13 @@ public Map staticMembers() { return Collections.unmodifiableMap(staticMembers); } + /** + * @return An immutable Map containing all the resolved regular expressions. + */ + public Map resolvedRegularExpressions() { + return Collections.unmodifiableMap(resolvedRegularExpressions); + } + /** * Returns the current epoch of a partition or -1 if the partition * does not have one. @@ -505,6 +651,12 @@ public void validateOffsetCommit( // the request can commit offsets if the group is empty. if (memberEpoch < 0 && members().isEmpty()) return; + // The TxnOffsetCommit API does not require the member id, the generation id and the group instance id fields. 
+ // Hence, they are only validated if any of them is provided + if (isTransactional && memberEpoch == JoinGroupRequest.UNKNOWN_GENERATION_ID && + memberId.equals(JoinGroupRequest.UNKNOWN_MEMBER_ID) && groupInstanceId == null) + return; + final ConsumerGroupMember member = getOrMaybeCreateMember(memberId, false); // If the commit is not transactional and the member uses the new consumer protocol (KIP-848), @@ -573,21 +725,25 @@ public void validateDeleteGroup() throws ApiException { */ @Override public void createGroupTombstoneRecords(List records) { - members().forEach((memberId, member) -> - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId(), memberId)) + members.keySet().forEach(memberId -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId)) + ); + + members.keySet().forEach(memberId -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId)) ); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)); - members().forEach((memberId, member) -> - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId(), memberId)) + members.keySet().forEach(memberId -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId)) ); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId())); - members().forEach((memberId, member) -> - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId(), memberId)) + resolvedRegularExpressions.keySet().forEach(regex -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, regex)) ); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId())); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId())); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)); } /** @@ -604,24 +760,28 @@ public void createGroupTombstoneRecordsWithReplacedMember( String leavingMemberId, String joiningMemberId ) { - members().forEach((memberId, __) -> { + members.keySet().forEach(memberId -> { String removedMemberId = memberId.equals(leavingMemberId) ? joiningMemberId : memberId; - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId(), removedMemberId)); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, removedMemberId)); }); - members().forEach((memberId, __) -> { + members.keySet().forEach(memberId -> { String removedMemberId = memberId.equals(leavingMemberId) ? 
joiningMemberId : memberId; - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId(), removedMemberId)); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, removedMemberId)); }); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId())); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)); - members().forEach((memberId, __) -> { + members.keySet().forEach(memberId -> { String removedMemberId = memberId.equals(leavingMemberId) ? joiningMemberId : memberId; - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId(), removedMemberId)); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, removedMemberId)); }); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId())); - records.add(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId())); + resolvedRegularExpressions.keySet().forEach(regex -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, regex)) + ); + + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)); + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)); } @Override @@ -669,6 +829,60 @@ private void validateMemberEpoch( } } + /** + * Computes the subscription type based on the provided information. + * + * @param subscribedRegularExpressions The subscribed regular expression count. + * @param subscribedTopicNames The subscribed topic name count. + * @param numberOfMembers The number of members in the group. + * + * @return The subscription type. + */ + public static SubscriptionType subscriptionType( + Map subscribedRegularExpressions, + Map subscribedTopicNames, + int numberOfMembers + ) { + if (subscribedRegularExpressions.isEmpty()) { + // If the members do not use regular expressions, the subscription is + // considered as homogeneous if all the members are subscribed to the + // same topics. Otherwise, it is considered as heterogeneous. + for (SubscriptionCount subscriberCount : subscribedTopicNames.values()) { + if (subscriberCount.byNameCount != numberOfMembers) { + return HETEROGENEOUS; + } + } + return HOMOGENEOUS; + } else { + int count = subscribedRegularExpressions.values().iterator().next(); + if (count == numberOfMembers) { + // If all the members are subscribed to a single regular expressions + // and none of them are subscribed to topic names, the subscription + // is considered as homogeneous. If some members are subscribed to + // topic names too, the subscription is considered as heterogeneous. + for (SubscriptionCount subscriberCount : subscribedTopicNames.values()) { + if (subscriberCount.byRegexCount != 1 || subscriberCount.byNameCount > 0) { + return HETEROGENEOUS; + } + } + return HOMOGENEOUS; + } else { + // The subscription is considered as heterogeneous because + // there is a mix of regular expressions. 
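+                // For example, if two members subscribe to regex A and a third subscribes to regex B
+                // (or only to topic names), count != numberOfMembers and the group is heterogeneous.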
+ return SubscriptionType.HETEROGENEOUS; + } + } + } + + @Override + protected void maybeUpdateGroupSubscriptionType() { + subscriptionType.set(subscriptionType( + subscribedRegularExpressions, + subscribedTopicNames, + members.size() + )); + } + @Override protected void maybeUpdateGroupState() { ConsumerGroupState previousState = state.get(); @@ -738,11 +952,11 @@ private void maybeUpdateSubscribedRegularExpression( ConsumerGroupMember newMember ) { // Decrement the count of the old regex. - if (oldMember != null && oldMember.subscribedTopicRegex() != null) { + if (oldMember != null && oldMember.subscribedTopicRegex() != null && !oldMember.subscribedTopicRegex().isEmpty()) { subscribedRegularExpressions.compute(oldMember.subscribedTopicRegex(), Utils::decValue); } // Increment the count of the new regex. - if (newMember != null && newMember.subscribedTopicRegex() != null) { + if (newMember != null && newMember.subscribedTopicRegex() != null && !newMember.subscribedTopicRegex().isEmpty()) { subscribedRegularExpressions.compute(newMember.subscribedTopicRegex(), Utils::incValue); } } @@ -919,6 +1133,9 @@ public ConsumerGroupDescribeResponseData.DescribedGroup asDescribedGroup( * @param classicGroup The converted classic group. * @param topicsImage The TopicsImage for topic id and topic name conversion. * @return The created ConsumerGroup. + * + * @throws SchemaException if any member's subscription or assignment cannot be deserialized. + * @throws UnsupportedVersionException if userData from a custom assignor would be lost. */ public static ConsumerGroup fromClassicGroup( SnapshotRegistry snapshotRegistry, @@ -932,12 +1149,23 @@ public static ConsumerGroup fromClassicGroup( consumerGroup.setTargetAssignmentEpoch(classicGroup.generationId()); classicGroup.allMembers().forEach(classicGroupMember -> { - Map> assignedPartitions = toTopicPartitionMap( - ConsumerProtocol.deserializeConsumerProtocolAssignment( + // The assigned partition can be empty if the member just joined and has never synced. + // We should accept the empty assignment. + Map> assignedPartitions; + if (Arrays.equals(classicGroupMember.assignment(), EMPTY_ASSIGNMENT)) { + assignedPartitions = Collections.emptyMap(); + } else { + ConsumerProtocolAssignment assignment = ConsumerProtocol.deserializeConsumerProtocolAssignment( ByteBuffer.wrap(classicGroupMember.assignment()) - ), - topicsImage - ); + ); + if (assignment.userData() != null && assignment.userData().hasRemaining()) { + throw new UnsupportedVersionException("userData from a custom assignor would be lost"); + } + assignedPartitions = toTopicPartitionMap(assignment, topicsImage); + } + + // Every member is guaranteed to have metadata set when it joins, + // so we don't check for empty subscription here. ConsumerProtocolSubscription subscription = ConsumerProtocol.deserializeConsumerProtocolSubscription( ByteBuffer.wrap(classicGroupMember.metadata(classicGroup.protocolName().get())) ); @@ -1027,12 +1255,27 @@ public boolean supportsClassicProtocols(String memberProtocolType, Set m /** * Checks whether all the members use the classic protocol except the given member. * - * @param memberId The member to remove. + * @param member The member to remove. * @return A boolean indicating whether all the members use the classic protocol. 
*/ - public boolean allMembersUseClassicProtocolExcept(String memberId) { - return numClassicProtocolMembers() == members().size() - 1 && - !getOrMaybeCreateMember(memberId, false).useClassicProtocol(); + public boolean allMembersUseClassicProtocolExcept(ConsumerGroupMember member) { + return numClassicProtocolMembers() == members().size() - 1 && !member.useClassicProtocol(); + } + + /** + * Checks whether all the members use the classic protocol except the given members. + * + * @param members The members to remove. + * @return A boolean indicating whether all the members use the classic protocol. + */ + public boolean allMembersUseClassicProtocolExcept(Set members) { + int numExcludedClassicProtocolMembers = 0; + for (ConsumerGroupMember member : members) { + if (member.useClassicProtocol()) { + numExcludedClassicProtocolMembers++; + } + } + return numClassicProtocolMembers() - numExcludedClassicProtocolMembers == members().size() - members.size(); } /** diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMember.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMember.java index 3b59d820a70f6..c96dd277adb73 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMember.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMember.java @@ -253,7 +253,7 @@ public ConsumerGroupMember build() { /** * The rebalance timeout provided by the member. */ - private int rebalanceTimeoutMs; + private final int rebalanceTimeoutMs; /** * The subscription pattern configured by the member. @@ -360,7 +360,7 @@ public JoinGroupRequestData.JoinGroupRequestProtocolCollection supportedJoinGrou */ public Optional classicProtocolSessionTimeout() { if (useClassicProtocol()) { - return Optional.ofNullable(classicMemberMetadata.sessionTimeoutMs()); + return Optional.of(classicMemberMetadata.sessionTimeoutMs()); } else { return Optional.empty(); } @@ -408,7 +408,8 @@ public ConsumerGroupDescribeResponseData.Member asConsumerGroupDescribeMember( .setInstanceId(instanceId) .setRackId(rackId) .setSubscribedTopicNames(subscribedTopicNames == null ? null : new ArrayList<>(subscribedTopicNames)) - .setSubscribedTopicRegex(subscribedTopicRegex); + .setSubscribedTopicRegex(subscribedTopicRegex) + .setMemberType(useClassicProtocol() ? (byte) 0 : (byte) 1); } private static List topicPartitionsFromMap( @@ -517,4 +518,12 @@ public String toString() { ", classicMemberMetadata='" + classicMemberMetadata + '\'' + ')'; } + + public static String subscribedTopicRegexOrNull(ConsumerGroupMember member) { + if (member != null && member.subscribedTopicRegex() != null && !member.subscribedTopicRegex().isEmpty()) { + return member.subscribedTopicRegex(); + } else { + return null; + } + } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ResolvedRegularExpression.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ResolvedRegularExpression.java index d13fb23da2f7e..7cef5602dd65d 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ResolvedRegularExpression.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/consumer/ResolvedRegularExpression.java @@ -24,6 +24,8 @@ * The metadata associated with a regular expression in a Consumer Group. 
*/ public class ResolvedRegularExpression { + public static final ResolvedRegularExpression EMPTY = new ResolvedRegularExpression(Collections.emptySet(), -1L, -1L); + /** * The set of resolved topics. */ diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroup.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroup.java index 1e641dfbe87a8..bbd073c8e760c 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroup.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroup.java @@ -163,8 +163,9 @@ public void updateMember(ShareGroupMember newMember) { } ShareGroupMember oldMember = members.put(newMember.memberId(), newMember); - maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, newMember); + maybeUpdateSubscribedTopicNames(oldMember, newMember); maybeUpdateGroupState(); + maybeUpdateGroupSubscriptionType(); } /** @@ -174,8 +175,9 @@ public void updateMember(ShareGroupMember newMember) { */ public void removeMember(String memberId) { ShareGroupMember oldMember = members.remove(memberId); - maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, null); + maybeUpdateSubscribedTopicNames(oldMember, null); maybeUpdateGroupState(); + maybeUpdateGroupSubscriptionType(); } @Override diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupConfig.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupConfig.java index 8c7e7a15c1f39..4e34e15fee910 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupConfig.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupConfig.java @@ -23,8 +23,6 @@ import org.apache.kafka.coordinator.group.GroupConfig; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -179,8 +177,6 @@ private void validate() { * Copy the subset of properties that are relevant to share group. */ public Map extractShareGroupConfigMap() { - Map groupProps = new HashMap<>(); - groupProps.put(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, shareGroupRecordLockDurationMs()); - return Collections.unmodifiableMap(groupProps); + return Map.of(GroupConfig.SHARE_RECORD_LOCK_DURATION_MS_CONFIG, shareGroupRecordLockDurationMs()); } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/Assignment.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/Assignment.java new file mode 100644 index 0000000000000..da377d19ccd52 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/Assignment.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.coordinator.group.generated.StreamsGroupTargetAssignmentMemberValue; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * An immutable assignment for a member. + * + * @param activeTasks Active tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param standbyTasks Standby tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param warmupTasks Warm-up tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + */ +public record Assignment(Map> activeTasks, + Map> standbyTasks, + Map> warmupTasks) { + + public Assignment { + activeTasks = Collections.unmodifiableMap(Objects.requireNonNull(activeTasks)); + standbyTasks = Collections.unmodifiableMap(Objects.requireNonNull(standbyTasks)); + warmupTasks = Collections.unmodifiableMap(Objects.requireNonNull(warmupTasks)); + } + + /** + * An empty assignment. + */ + public static final Assignment EMPTY = new Assignment( + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + /** + * Creates a {{@link org.apache.kafka.coordinator.group.streams.Assignment}} from a + * {{@link org.apache.kafka.coordinator.group.generated.StreamsGroupTargetAssignmentMemberValue}}. + * + * @param record The record. + * @return A {{@link org.apache.kafka.coordinator.group.streams.Assignment}}. + */ + public static Assignment fromRecord( + StreamsGroupTargetAssignmentMemberValue record + ) { + return new Assignment( + record.activeTasks().stream() + .collect(Collectors.toMap( + StreamsGroupTargetAssignmentMemberValue.TaskIds::subtopologyId, + taskId -> new HashSet<>(taskId.partitions()) + ) + ), + record.standbyTasks().stream() + .collect(Collectors.toMap( + StreamsGroupTargetAssignmentMemberValue.TaskIds::subtopologyId, + taskId -> new HashSet<>(taskId.partitions()) + ) + ), + record.warmupTasks().stream() + .collect(Collectors.toMap( + StreamsGroupTargetAssignmentMemberValue.TaskIds::subtopologyId, + taskId -> new HashSet<>(taskId.partitions()) + ) + ) + ); + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/MemberState.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/MemberState.java new file mode 100644 index 0000000000000..71914da48b2af --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/MemberState.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import java.util.HashMap; +import java.util.Map; + +/** + * The various states that a member can be in. For their definition, refer to the documentation of + * {{@link org.apache.kafka.coordinator.group.streams.CurrentAssignmentBuilder}}. + */ +public enum MemberState { + + /** + * The member is fully reconciled with the desired target assignment. + */ + STABLE((byte) 1), + + /** + * The member must revoke some tasks in order to be able to transition to the next epoch. + */ + UNREVOKED_TASKS((byte) 2), + + /** + * The member transitioned to the last epoch but waits on some tasks which have not been revoked by their previous owners yet. + */ + UNRELEASED_TASKS((byte) 3), + + /** + * The member is in an unknown state. This can only happen if a future version of the software introduces a new state unknown by this + * version. + */ + UNKNOWN((byte) 127); + + private static final Map VALUES_TO_ENUMS = new HashMap<>(); + + static { + for (MemberState state : MemberState.values()) { + VALUES_TO_ENUMS.put(state.value(), state); + } + } + + private final byte value; + + MemberState(byte value) { + this.value = value; + } + + public byte value() { + return value; + } + + public static MemberState fromValue(byte value) { + MemberState state = VALUES_TO_ENUMS.get(value); + if (state == null) { + return UNKNOWN; + } + return state; + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMember.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMember.java new file mode 100644 index 0000000000000..e23df3f5701c4 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMember.java @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; +import org.apache.kafka.coordinator.group.generated.StreamsGroupCurrentMemberAssignmentValue; +import org.apache.kafka.coordinator.group.generated.StreamsGroupMemberMetadataValue; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Contains all information related to a member within a Streams group. + *

              + * This class is immutable and is fully backed by records stored in the __consumer_offsets topic. + * + * @param memberId The ID of the member. + * @param memberEpoch The current epoch of the member. + * @param previousMemberEpoch The previous epoch of the member. + * @param state The current state of the member. + * @param instanceId The instance ID of the member. + * @param rackId The rack ID of the member. + * @param clientId The client ID of the member. + * @param clientHost The host of the member. + * @param rebalanceTimeoutMs The rebalance timeout in milliseconds. + * @param topologyEpoch The epoch of the topology the member uses. + * @param processId The ID of the Streams client that contains the member. + * @param userEndpoint The user endpoint exposed for Interactive Queries by the Streams client that + * contains the member. + * @param clientTags Tags of the client of the member used for rack-aware assignment. + * @param assignedActiveTasks Active tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param assignedStandbyTasks Standby tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param assignedWarmupTasks Warm-up tasks assigned to the member. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param activeTasksPendingRevocation Active tasks assigned to the member pending revocation. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param standbyTasksPendingRevocation Standby tasks assigned to the member pending revocation. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + * @param warmupTasksPendingRevocation Warm-up tasks assigned to the member pending revocation. + * The key of the map is the subtopology ID and the value is the set of partition IDs. + */ +@SuppressWarnings("checkstyle:JavaNCSS") +public record StreamsGroupMember(String memberId, + Integer memberEpoch, + Integer previousMemberEpoch, + MemberState state, + Optional instanceId, + Optional rackId, + String clientId, + String clientHost, + Integer rebalanceTimeoutMs, + Integer topologyEpoch, + String processId, + Optional userEndpoint, + Map clientTags, + Map> assignedActiveTasks, + Map> assignedStandbyTasks, + Map> assignedWarmupTasks, + Map> activeTasksPendingRevocation, + Map> standbyTasksPendingRevocation, + Map> warmupTasksPendingRevocation) { + + public StreamsGroupMember { + Objects.requireNonNull(memberId, "memberId cannot be null"); + clientTags = clientTags != null ? Collections.unmodifiableMap(clientTags) : null; + assignedActiveTasks = assignedActiveTasks != null ? Collections.unmodifiableMap(assignedActiveTasks) : null; + assignedStandbyTasks = assignedStandbyTasks != null ? Collections.unmodifiableMap(assignedStandbyTasks) : null; + assignedWarmupTasks = assignedWarmupTasks != null ? Collections.unmodifiableMap(assignedWarmupTasks) : null; + activeTasksPendingRevocation = activeTasksPendingRevocation != null ? Collections.unmodifiableMap(activeTasksPendingRevocation) : null; + standbyTasksPendingRevocation = standbyTasksPendingRevocation != null ? Collections.unmodifiableMap(standbyTasksPendingRevocation) : null; + warmupTasksPendingRevocation = warmupTasksPendingRevocation != null ? 
Collections.unmodifiableMap(warmupTasksPendingRevocation) : null; + } + + /** + * A builder that facilitates the creation of a new member or the update of an existing one. + *
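+ * For example, a member can be rebuilt from its replayed records:
+ * {@code new StreamsGroupMember.Builder(memberId).updateWith(memberMetadataRecord).updateWith(currentAssignmentRecord).build()},
+ * where the two records are the replayed StreamsGroupMemberMetadataValue and StreamsGroupCurrentMemberAssignmentValue.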

              + * Please refer to the javadoc of {{@link StreamsGroupMember}} for the definition of the fields. + */ + public static class Builder { + + private final String memberId; + private Integer memberEpoch = null; + private Integer previousMemberEpoch = null; + private MemberState state = null; + private Optional instanceId = null; + private Optional rackId = null; + private Integer rebalanceTimeoutMs = null; + private String clientId = null; + private String clientHost = null; + private Integer topologyEpoch = null; + private String processId = null; + private Optional userEndpoint = null; + private Map clientTags = null; + private Map> assignedActiveTasks = null; + private Map> assignedStandbyTasks = null; + private Map> assignedWarmupTasks = null; + private Map> activeTasksPendingRevocation = null; + private Map> standbyTasksPendingRevocation = null; + private Map> warmupTasksPendingRevocation = null; + + public Builder(String memberId) { + this.memberId = Objects.requireNonNull(memberId, "memberId cannot be null"); + } + + public Builder(StreamsGroupMember member) { + Objects.requireNonNull(member, "member cannot be null"); + + this.memberId = member.memberId; + this.memberEpoch = member.memberEpoch; + this.previousMemberEpoch = member.previousMemberEpoch; + this.instanceId = member.instanceId; + this.rackId = member.rackId; + this.rebalanceTimeoutMs = member.rebalanceTimeoutMs; + this.clientId = member.clientId; + this.clientHost = member.clientHost; + this.topologyEpoch = member.topologyEpoch; + this.processId = member.processId; + this.userEndpoint = member.userEndpoint; + this.clientTags = member.clientTags; + this.state = member.state; + this.assignedActiveTasks = member.assignedActiveTasks; + this.assignedStandbyTasks = member.assignedStandbyTasks; + this.assignedWarmupTasks = member.assignedWarmupTasks; + this.activeTasksPendingRevocation = member.activeTasksPendingRevocation; + this.standbyTasksPendingRevocation = member.standbyTasksPendingRevocation; + this.warmupTasksPendingRevocation = member.warmupTasksPendingRevocation; + } + + public Builder updateMemberEpoch(int memberEpoch) { + int currentMemberEpoch = this.memberEpoch; + this.memberEpoch = memberEpoch; + this.previousMemberEpoch = currentMemberEpoch; + return this; + } + + public Builder setMemberEpoch(int memberEpoch) { + this.memberEpoch = memberEpoch; + return this; + } + + public Builder setPreviousMemberEpoch(int previousMemberEpoch) { + this.previousMemberEpoch = previousMemberEpoch; + return this; + } + + public Builder setInstanceId(String instanceId) { + this.instanceId = Optional.ofNullable(instanceId); + return this; + } + + public Builder maybeUpdateInstanceId(Optional instanceId) { + instanceId.ifPresent(this::setInstanceId); + return this; + } + + public Builder setRackId(String rackId) { + this.rackId = Optional.ofNullable(rackId); + return this; + } + + public Builder maybeUpdateRackId(Optional rackId) { + rackId.ifPresent(this::setRackId); + return this; + } + + public Builder setRebalanceTimeoutMs(int rebalanceTimeoutMs) { + this.rebalanceTimeoutMs = rebalanceTimeoutMs; + return this; + } + + public Builder maybeUpdateRebalanceTimeoutMs(OptionalInt rebalanceTimeoutMs) { + this.rebalanceTimeoutMs = rebalanceTimeoutMs.orElse(this.rebalanceTimeoutMs); + return this; + } + + public Builder setClientId(String clientId) { + this.clientId = clientId; + return this; + } + + public Builder setClientHost(String clientHost) { + this.clientHost = clientHost; + return this; + } + + public Builder 
setState(MemberState state) { + this.state = state; + return this; + } + + public Builder setTopologyEpoch(int topologyEpoch) { + this.topologyEpoch = topologyEpoch; + return this; + } + + public Builder maybeUpdateTopologyEpoch(OptionalInt topologyEpoch) { + this.topologyEpoch = topologyEpoch.orElse(this.topologyEpoch); + return this; + } + + public Builder setProcessId(String processId) { + this.processId = processId; + return this; + } + + public Builder maybeUpdateProcessId(Optional processId) { + this.processId = processId.orElse(this.processId); + return this; + } + + public Builder setUserEndpoint(StreamsGroupMemberMetadataValue.Endpoint userEndpoint) { + this.userEndpoint = Optional.ofNullable(userEndpoint); + return this; + } + + public Builder maybeUpdateUserEndpoint(Optional userEndpoint) { + userEndpoint.ifPresent(this::setUserEndpoint); + return this; + } + + public Builder setClientTags(Map clientTags) { + this.clientTags = clientTags; + return this; + } + + public Builder maybeUpdateClientTags(Optional> clientTags) { + this.clientTags = clientTags.orElse(this.clientTags); + return this; + } + + public Builder setAssignment(Assignment assignment) { + this.assignedActiveTasks = assignment.activeTasks(); + this.assignedStandbyTasks = assignment.standbyTasks(); + this.assignedWarmupTasks = assignment.warmupTasks(); + return this; + } + + public Builder setAssignedActiveTasks(Map> assignedActiveTasks) { + this.assignedActiveTasks = assignedActiveTasks; + return this; + } + + public Builder setAssignedStandbyTasks(Map> assignedStandbyTasks) { + this.assignedStandbyTasks = assignedStandbyTasks; + return this; + } + + public Builder setAssignedWarmupTasks(Map> assignedWarmupTasks) { + this.assignedWarmupTasks = assignedWarmupTasks; + return this; + } + + public Builder setAssignmentPendingRevocation(Assignment assignment) { + this.activeTasksPendingRevocation = assignment.activeTasks(); + this.standbyTasksPendingRevocation = assignment.standbyTasks(); + this.warmupTasksPendingRevocation = assignment.warmupTasks(); + return this; + } + + public Builder setActiveTasksPendingRevocation( + Map> activeTasksPendingRevocation) { + this.activeTasksPendingRevocation = activeTasksPendingRevocation; + return this; + } + + public Builder setStandbyTasksPendingRevocation( + Map> standbyTasksPendingRevocation) { + this.standbyTasksPendingRevocation = standbyTasksPendingRevocation; + return this; + } + + public Builder setWarmupTasksPendingRevocation( + Map> warmupTasksPendingRevocation) { + this.warmupTasksPendingRevocation = warmupTasksPendingRevocation; + return this; + } + + public Builder updateWith(StreamsGroupMemberMetadataValue record) { + setInstanceId(record.instanceId()); + setRackId(record.rackId()); + setClientId(record.clientId()); + setClientHost(record.clientHost()); + setRebalanceTimeoutMs(record.rebalanceTimeoutMs()); + setTopologyEpoch(record.topologyEpoch()); + setProcessId(record.processId()); + setUserEndpoint(record.userEndpoint()); + setClientTags(record.clientTags().stream().collect(Collectors.toMap( + StreamsGroupMemberMetadataValue.KeyValue::key, + StreamsGroupMemberMetadataValue.KeyValue::value + ))); + return this; + } + + public Builder updateWith(StreamsGroupCurrentMemberAssignmentValue record) { + setMemberEpoch(record.memberEpoch()); + setPreviousMemberEpoch(record.previousMemberEpoch()); + setState(MemberState.fromValue(record.state())); + setAssignedActiveTasks(assignmentFromTaskIds(record.activeTasks())); + 
setAssignedStandbyTasks(assignmentFromTaskIds(record.standbyTasks())); + setAssignedWarmupTasks(assignmentFromTaskIds(record.warmupTasks())); + setActiveTasksPendingRevocation( + assignmentFromTaskIds(record.activeTasksPendingRevocation())); + setStandbyTasksPendingRevocation( + assignmentFromTaskIds(record.standbyTasksPendingRevocation())); + setWarmupTasksPendingRevocation( + assignmentFromTaskIds(record.warmupTasksPendingRevocation())); + return this; + } + + private static Map> assignmentFromTaskIds( + List topicPartitionsList + ) { + return topicPartitionsList.stream().collect(Collectors.toMap( + StreamsGroupCurrentMemberAssignmentValue.TaskIds::subtopologyId, + taskIds -> Set.copyOf(taskIds.partitions()))); + } + + public StreamsGroupMember build() { + return new StreamsGroupMember( + memberId, + memberEpoch, + previousMemberEpoch, + state, + instanceId, + rackId, + clientId, + clientHost, + rebalanceTimeoutMs, + topologyEpoch, + processId, + userEndpoint, + clientTags, + assignedActiveTasks, + assignedStandbyTasks, + assignedWarmupTasks, + activeTasksPendingRevocation, + standbyTasksPendingRevocation, + warmupTasksPendingRevocation + ); + } + } + + /** + * @return True if the member is in the Stable state and at the desired epoch. + */ + public boolean isReconciledTo(int targetAssignmentEpoch) { + return state == MemberState.STABLE && memberEpoch == targetAssignmentEpoch; + } + + /** + * Creates a member description for the Streams group describe response from this member. + * + * @param targetAssignment The target assignment of this member in the corresponding group. + * + * @return The StreamsGroupMember mapped as StreamsGroupDescribeResponseData.Member. + */ + public StreamsGroupDescribeResponseData.Member asStreamsGroupDescribeMember( + Assignment targetAssignment + ) { + final StreamsGroupDescribeResponseData.Assignment describedTargetAssignment = + new StreamsGroupDescribeResponseData.Assignment(); + + if (targetAssignment != null) { + describedTargetAssignment + .setActiveTasks(taskIdsFromMap(targetAssignment.activeTasks())) + .setStandbyTasks(taskIdsFromMap(targetAssignment.standbyTasks())) + .setWarmupTasks(taskIdsFromMap(targetAssignment.warmupTasks())); + } + + return new StreamsGroupDescribeResponseData.Member() + .setMemberEpoch(memberEpoch) + .setMemberId(memberId) + .setAssignment( + new StreamsGroupDescribeResponseData.Assignment() + .setActiveTasks(taskIdsFromMap(assignedActiveTasks)) + .setStandbyTasks(taskIdsFromMap(assignedStandbyTasks)) + .setWarmupTasks(taskIdsFromMap(assignedWarmupTasks))) + .setTargetAssignment(describedTargetAssignment) + .setClientHost(clientHost) + .setClientId(clientId) + .setInstanceId(instanceId.orElse(null)) + .setRackId(rackId.orElse(null)) + .setClientTags(clientTags.entrySet().stream().map( + entry -> new StreamsGroupDescribeResponseData.KeyValue() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + ).collect(Collectors.toList())) + .setProcessId(processId) + .setTopologyEpoch(topologyEpoch) + .setUserEndpoint( + userEndpoint.map( + endpoint -> new StreamsGroupDescribeResponseData.Endpoint() + .setHost(endpoint.host()) + .setPort(endpoint.port()) + ).orElse(null) + ); + } + + private static List taskIdsFromMap( + Map> tasks + ) { + List taskIds = new ArrayList<>(); + tasks.forEach((subtopologyId, partitionSet) -> { + taskIds.add(new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(subtopologyId) + .setPartitions(new ArrayList<>(partitionSet))); + }); + return taskIds; + } + + /** + * @return True if the two 
provided members have different assigned active tasks. + */ + public static boolean hasAssignedActiveTasksChanged( + StreamsGroupMember member1, + StreamsGroupMember member2 + ) { + return !member1.assignedActiveTasks().equals(member2.assignedActiveTasks()); + } + + /** + * @return True if the two provided members have different assigned standby tasks. + */ + public static boolean hasAssignedStandbyTasksChanged( + StreamsGroupMember member1, + StreamsGroupMember member2 + ) { + return !member1.assignedStandbyTasks().equals(member2.assignedStandbyTasks()); + } + + /** + * @return True if the two provided members have different assigned warm-up tasks. + */ + public static boolean hasAssignedWarmupTasksChanged( + StreamsGroupMember member1, + StreamsGroupMember member2 + ) { + return !member1.assignedWarmupTasks().equals(member2.assignedWarmupTasks()); + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsTopology.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsTopology.java new file mode 100644 index 0000000000000..49ce9f9b4fd99 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/StreamsTopology.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Contains the topology sent by a Streams client in the Streams heartbeat during initialization. + *

              + * This topology is used together with the partition metadata on the broker to create a + * {@link org.apache.kafka.coordinator.group.streams.topics.ConfiguredTopology configured topology}. + * This class allows to look-up subtopologies by subtopology ID in constant time by getting the subtopologies map. + * The information in this class is fully backed by records stored in the __consumer_offsets topic. + * + * @param topologyEpoch The epoch of the topology (must be non-negative). + * @param subtopologies The subtopologies of the topology containing information about source topics, + * repartition topics, changelog topics, co-partition groups etc. (must be non-null) + */ +public record StreamsTopology(int topologyEpoch, + Map subtopologies) { + + public StreamsTopology { + if (topologyEpoch < 0) { + throw new IllegalArgumentException("Topology epoch must be non-negative."); + } + subtopologies = Collections.unmodifiableMap(Objects.requireNonNull(subtopologies, "Subtopologies cannot be null.")); + } + + /** + * Returns the set of topics that are required by the topology. + *
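+ * This covers the source topics, the repartition source topics and the state changelog topics of every subtopology.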

              + * The required topics are used to determine the partition metadata on the brokers needed to configure the topology. + * + * @return set of topics required by the topology + */ + public Set requiredTopics() { + return subtopologies.values().stream() + .flatMap(x -> + Stream.concat( + Stream.concat( + x.sourceTopics().stream(), + x.repartitionSourceTopics().stream().map(TopicInfo::name) + ), + x.stateChangelogTopics().stream().map(TopicInfo::name) + ) + ).collect(Collectors.toSet()); + } + + /** + * Creates an instance of StreamsTopology from a StreamsGroupTopologyValue record. + * + * @param record The StreamsGroupTopologyValue record. + * @return The instance of StreamsTopology created from the record. + */ + public static StreamsTopology fromRecord(StreamsGroupTopologyValue record) { + return new StreamsTopology( + record.epoch(), + record.subtopologies().stream().collect(Collectors.toMap(Subtopology::subtopologyId, x -> x)) + ); + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/AssignmentMemberSpec.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/AssignmentMemberSpec.java new file mode 100644 index 0000000000000..99953b09d7159 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/AssignmentMemberSpec.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +/** + * The assignment specification for a Streams group member. + * + * @param instanceId The instance ID if provided. + * @param rackId The rack ID if provided. + * @param activeTasks Reconciled active tasks + * @param standbyTasks Reconciled standby tasks + * @param warmupTasks Reconciled warm-up tasks + * @param processId The process ID. + * @param clientTags The client tags for a rack-aware assignment. + * @param taskOffsets The last received cumulative task offsets of assigned tasks or dormant tasks. 
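+ * @param taskEndOffsets The last received cumulative task end offsets of assigned tasks or dormant tasks.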
+ */ +public record AssignmentMemberSpec(Optional instanceId, + Optional rackId, + Map> activeTasks, + Map> standbyTasks, + Map> warmupTasks, + String processId, + Map clientTags, + Map taskOffsets, + Map taskEndOffsets +) { + + public AssignmentMemberSpec { + Objects.requireNonNull(instanceId); + Objects.requireNonNull(rackId); + activeTasks = Collections.unmodifiableMap(Objects.requireNonNull(activeTasks)); + standbyTasks = Collections.unmodifiableMap(Objects.requireNonNull(standbyTasks)); + warmupTasks = Collections.unmodifiableMap(Objects.requireNonNull(warmupTasks)); + Objects.requireNonNull(processId); + clientTags = Collections.unmodifiableMap(Objects.requireNonNull(clientTags)); + taskOffsets = Collections.unmodifiableMap(Objects.requireNonNull(taskOffsets)); + taskEndOffsets = Collections.unmodifiableMap(Objects.requireNonNull(taskEndOffsets)); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerLoginCallbackHandler.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupAssignment.java similarity index 69% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerLoginCallbackHandler.java rename to group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupAssignment.java index 68780a2c94e9e..a97cdc33b7909 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/OAuthBearerLoginCallbackHandler.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupAssignment.java @@ -14,14 +14,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.kafka.coordinator.group.streams.assignor; -package org.apache.kafka.common.security.oauthbearer.secured; +import java.util.Map; +import java.util.Objects; /** - * @deprecated See org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler + * The task assignment for a streams group. + * + * @param members The member assignments keyed by member id. */ +public record GroupAssignment(Map members) { -@Deprecated -public class OAuthBearerLoginCallbackHandler extends org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler { + public GroupAssignment { + Objects.requireNonNull(members); + } } diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpec.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpec.java new file mode 100644 index 0000000000000..1a8e7edc01c63 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpec.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Map; + +/** + * The group metadata specifications required to compute the target assignment. + */ +public interface GroupSpec { + + /** + * @return Member metadata keyed by member Id. + */ + Map members(); + + /** + * @return Any configurations passed to the assignor. + */ + Map assignmentConfigs(); + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImpl.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImpl.java new file mode 100644 index 0000000000000..caa82ed2cb21c --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImpl.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Map; +import java.util.Objects; + +/** + * The assignment specification for a Streams group. + * + * @param members The member metadata keyed by member ID. + * @param assignmentConfigs Any configurations passed to the assignor. + */ +public record GroupSpecImpl(Map members, + Map assignmentConfigs) implements GroupSpec { + + public GroupSpecImpl { + Objects.requireNonNull(members); + Objects.requireNonNull(assignmentConfigs); + } + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MemberAssignment.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MemberAssignment.java new file mode 100644 index 0000000000000..22f908b825c52 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MemberAssignment.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * The task assignment for a Streams group member. + * + * @param activeTasks The target tasks assigned to this member keyed by subtopologyId. + */ +public record MemberAssignment(Map> activeTasks, + Map> standbyTasks, + Map> warmupTasks) { + + public MemberAssignment { + Objects.requireNonNull(activeTasks); + Objects.requireNonNull(standbyTasks); + Objects.requireNonNull(warmupTasks); + } + + public static MemberAssignment empty() { + return new MemberAssignment(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignor.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignor.java new file mode 100644 index 0000000000000..ce0bc101101ec --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignor.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Mock implementation of {@link TaskAssignor} that assigns tasks to members in a round-robin fashion, with a bit of stickiness. 
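+ * Existing active task ownership is kept (the sticky part); any still-unassigned tasks are then given to the
+ * members with the fewest tasks, tracked with a priority queue. Standby and warm-up tasks are not assigned.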
+ */ +public class MockAssignor implements TaskAssignor { + + public static final String MOCK_ASSIGNOR_NAME = "mock"; + + @Override + public String name() { + return MOCK_ASSIGNOR_NAME; + } + + @Override + public GroupAssignment assign( + final GroupSpec groupSpec, + final TopologyDescriber topologyDescriber + ) throws TaskAssignorException { + + Map newTargetAssignment = new HashMap<>(); + Map subtopologyToActiveMember = new HashMap<>(); + + for (String subtopology : topologyDescriber.subtopologies()) { + int numberOfPartitions = topologyDescriber.numTasks(subtopology); + subtopologyToActiveMember.put(subtopology, new String[numberOfPartitions]); + } + + // Copy existing assignment and fill temporary data structures + for (Map.Entry memberEntry : groupSpec.members().entrySet()) { + final String memberId = memberEntry.getKey(); + final AssignmentMemberSpec memberSpec = memberEntry.getValue(); + + Map> activeTasks = new HashMap<>(memberSpec.activeTasks()); + + newTargetAssignment.put(memberId, new MemberAssignment(activeTasks, new HashMap<>(), new HashMap<>())); + for (Map.Entry> entry : activeTasks.entrySet()) { + final String subtopologyId = entry.getKey(); + final Set taskIds = entry.getValue(); + final String[] activeMembers = subtopologyToActiveMember.get(subtopologyId); + for (int taskId : taskIds) { + if (activeMembers[taskId] != null) { + throw new TaskAssignorException( + "Task " + taskId + " of subtopology " + subtopologyId + " is assigned to multiple members"); + } + activeMembers[taskId] = memberId; + } + } + } + + // Define priority queue to sort members by task count + PriorityQueue memberAndTaskCount = new PriorityQueue<>(Comparator.comparingInt(m -> m.taskCount)); + memberAndTaskCount.addAll( + newTargetAssignment.keySet().stream() + .map(memberId -> new MemberAndTaskCount(memberId, + newTargetAssignment.get(memberId).activeTasks().values().stream().mapToInt(Set::size).sum())) + .collect(Collectors.toSet()) + ); + + // Assign unassigned tasks to members with the fewest tasks + for (Map.Entry entry : subtopologyToActiveMember.entrySet()) { + final String subtopologyId = entry.getKey(); + final String[] activeMembers = entry.getValue(); + for (int i = 0; i < activeMembers.length; i++) { + if (activeMembers[i] == null) { + final MemberAndTaskCount m = memberAndTaskCount.poll(); + if (m == null) { + throw new TaskAssignorException("No member available to assign task " + i + " of subtopology " + subtopologyId); + } + newTargetAssignment.get(m.memberId).activeTasks().computeIfAbsent(subtopologyId, k -> new HashSet<>()).add(i); + activeMembers[i] = m.memberId; + memberAndTaskCount.add(new MemberAndTaskCount(m.memberId, m.taskCount + 1)); + } + } + } + + return new GroupAssignment(newTargetAssignment); + } + + private record MemberAndTaskCount(String memberId, int taskCount) { + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignor.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignor.java new file mode 100644 index 0000000000000..7b4874a9bf88e --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignor.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +/** + * Server side task assignor used by streams groups. + */ +public interface TaskAssignor { + + /** + * Unique name for this assignor. + */ + String name(); + + /** + * Assigns tasks to group members based on the given assignment specification and topic metadata. + * + * @param groupSpec The assignment spec which includes member metadata. + * @param topologyDescriber The task metadata describer. + * @return The new assignment for the group. + */ + GroupAssignment assign( + GroupSpec groupSpec, + TopologyDescriber topologyDescriber + ) throws TaskAssignorException; + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignorException.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignorException.java new file mode 100644 index 0000000000000..2fda6a9e9ec62 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskAssignorException.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import org.apache.kafka.common.errors.ApiException; + +/** + * Exception thrown by {@link TaskAssignor#assign(GroupSpec, TopologyDescriber)}}. The exception is only used internally. + */ +public class TaskAssignorException extends ApiException { + + public TaskAssignorException(String message) { + super(message); + } + + public TaskAssignorException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskId.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskId.java new file mode 100644 index 0000000000000..3a9e594a82bf7 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TaskId.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.Comparator; +import java.util.Objects; + +/** + * The identifier for a task + * + * @param subtopologyId The unique identifier of the subtopology. + * @param partition The partition of the input topics this task is processing. + */ +public record TaskId(String subtopologyId, int partition) implements Comparable { + + public TaskId { + Objects.requireNonNull(subtopologyId); + } + + @Override + public int compareTo(final TaskId other) { + return Comparator.comparing(TaskId::subtopologyId) + .thenComparingInt(TaskId::partition) + .compare(this, other); + } + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TopologyDescriber.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TopologyDescriber.java new file mode 100644 index 0000000000000..2f913bf5514e8 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/assignor/TopologyDescriber.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import java.util.List; +import java.util.NoSuchElementException; + +/** + * The subscribed topic describer is used by the {@link TaskAssignor} to obtain topic and task metadata of the groups topology. + */ +public interface TopologyDescriber { + + /** + * @return The list of subtopologies IDs. + */ + List subtopologies(); + + /** + * The number of tasks for the given subtopology. + * + * @param subtopologyId String identifying the subtopology. + * + * @return The number of tasks corresponding to the given subtopology ID. + * @throws NoSuchElementException if subtopology does not exist in the topology. + */ + int numTasks(String subtopologyId) throws NoSuchElementException; + + /** + * Whether the given subtopology is stateful. + * + * @param subtopologyId String identifying the subtopology. + * @return true if the subtopology is stateful, false otherwise. 
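TaskId defines its natural ordering by subtopology id first and then by partition. A minimal ordering sketch, assuming the TaskId record from this patch is on the classpath; the subtopology names are illustrative only.

import java.util.List;

import org.apache.kafka.coordinator.group.streams.assignor.TaskId;

public class TaskIdOrderingSketch {
    public static void main(String[] args) {
        // compareTo orders by subtopologyId, then by partition.
        List<TaskId> sorted = List.of(
                new TaskId("subtopology-2", 0),
                new TaskId("subtopology-1", 1),
                new TaskId("subtopology-1", 0))
            .stream().sorted().toList();

        // Expected order: (subtopology-1, 0), (subtopology-1, 1), (subtopology-2, 0).
        sorted.forEach(System.out::println);
    }
}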
+ */ + boolean isStateful(String subtopologyId); + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopics.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopics.java new file mode 100644 index 0000000000000..a7792471e4e84 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopics.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import org.slf4j.Logger; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.OptionalInt; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * This class is responsible for setting up the changelog topics for a topology. For a given topology, which does not have the number + * of partitions specified for changelog partitions, this class will determine the number of partitions for each non-source changelog topic. + */ +public class ChangelogTopics { + + private final Logger log; + private final Collection subtopologies; + private final Function topicPartitionCountProvider; + + /** + * Constructor for ChangelogTopics. + * + * @param logContext The context for emitting log messages. + * @param subtopologies The subtopologies for the requested topology. + * @param topicPartitionCountProvider Returns the number of partitions for a given topic, representing the current state of the broker + * as well as any partition number decisions that have already been made. In particular, we expect + * the number of partitions for all repartition topics defined, even if they do not exist in the + * broker yet. + */ + public ChangelogTopics( + final LogContext logContext, + final Collection subtopologies, + final Function topicPartitionCountProvider + ) { + this.log = logContext.logger(getClass()); + this.subtopologies = subtopologies; + this.topicPartitionCountProvider = topicPartitionCountProvider; + } + + /** + * Determines the number of partitions for each non-source changelog topic in the requested topology. + * + * @return the map of all non-source changelog topics for the requested topology to their required number of partitions. 
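ChangelogTopics, like the other topic configurators in this package, sees the broker state only through a function from topic name to an optional partition count. A minimal sketch of that provider contract, built over a hypothetical map of known partition counts:

import java.util.Map;
import java.util.OptionalInt;
import java.util.function.Function;

public class PartitionCountProviderSketch {
    public static void main(String[] args) {
        // Hypothetical broker-side partition counts; repartition topics sized in an
        // earlier step would be merged into the same map.
        Map<String, Integer> knownPartitionCounts = Map.of("input-topic", 6, "other-input", 3);

        // The provider contract used by ChangelogTopics and the other configurators:
        // a present OptionalInt for known topics, OptionalInt.empty() for unknown ones.
        Function<String, OptionalInt> topicPartitionCountProvider = topic -> {
            Integer count = knownPartitionCounts.get(topic);
            return count == null ? OptionalInt.empty() : OptionalInt.of(count);
        };

        System.out.println(topicPartitionCountProvider.apply("input-topic"));   // present: 6
        System.out.println(topicPartitionCountProvider.apply("missing-topic")); // empty
    }
}

An empty result is what the classes above translate into a TopicConfigurationException or StreamsInvalidTopologyException.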
+ */ + public Map setup() { + final Map changelogTopicPartitions = new HashMap<>(); + for (Subtopology subtopology : subtopologies) { + final Set sourceTopics = new HashSet<>(subtopology.sourceTopics()); + + final OptionalInt maxNumPartitions = + subtopology.sourceTopics().stream().mapToInt(this::getPartitionCountOrFail).max(); + + if (maxNumPartitions.isEmpty()) { + throw new StreamsInvalidTopologyException("No source topics found for subtopology " + subtopology.subtopologyId()); + } + for (final TopicInfo topicInfo : subtopology.stateChangelogTopics()) { + if (!sourceTopics.contains(topicInfo.name())) { + changelogTopicPartitions.put(topicInfo.name(), maxNumPartitions.getAsInt()); + } + } + } + + log.debug("Expecting state changelog topic partitions {} for the requested topology.", + changelogTopicPartitions.entrySet().stream().map(e -> e.getKey() + ":" + e.getValue()).collect(Collectors.joining(", "))); + + return changelogTopicPartitions; + } + + private int getPartitionCountOrFail(String topic) { + final OptionalInt topicPartitionCount = topicPartitionCountProvider.apply(topic); + if (topicPartitionCount.isEmpty()) { + throw TopicConfigurationException.missingSourceTopics("No partition count for source topic " + topic); + } + return topicPartitionCount.getAsInt(); + } +} \ No newline at end of file diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java new file mode 100644 index 0000000000000..855f1ea0b58d1 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopic.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.internals.Topic; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Captures the properties required for configuring the internal topics we create for changelogs and repartitioning etc. + *

              + * It is derived from the topology sent by the client, and the current state of the topics inside the broker. If the topics on the broker + * changes, the internal topic may need to be reconfigured. + * + * @param name The name of the topic. + * @param numberOfPartitions The number of partitions for the topic. + * @param replicationFactor The replication factor of the topic. If undefiend, the broker default is used. + * @param topicConfigs The topic configurations of the topic. + */ +public record ConfiguredInternalTopic(String name, + int numberOfPartitions, + Optional replicationFactor, + Map topicConfigs +) { + + public ConfiguredInternalTopic { + Objects.requireNonNull(name, "name can't be null"); + Topic.validate(name); + if (numberOfPartitions < 1) { + throw new IllegalArgumentException("Number of partitions must be at least 1."); + } + topicConfigs = Collections.unmodifiableMap(Objects.requireNonNull(topicConfigs, "topicConfigs can't be null")); + } + + public StreamsGroupDescribeResponseData.TopicInfo asStreamsGroupDescribeTopicInfo() { + return new StreamsGroupDescribeResponseData.TopicInfo() + .setName(name) + .setPartitions(numberOfPartitions) + .setReplicationFactor(replicationFactor.orElse((short) 0)) + .setTopicConfigs( + topicConfigs != null ? + topicConfigs.entrySet().stream().map( + y -> new StreamsGroupDescribeResponseData.KeyValue() + .setKey(y.getKey()) + .setValue(y.getValue()) + ).collect(Collectors.toList()) : null + ); + } + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopology.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopology.java new file mode 100644 index 0000000000000..bfc1a86a06b9f --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopology.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Internal representation of a subtopology. + *

              + * The subtopology is configured according to the number of partitions available in the source topics. It has regular expressions already + * resolved and defined exactly the information that is being used by streams groups assignment reconciliation. + *

              + * Configured subtopologies may be recreated every time the input topics used by the subtopology are modified. + * + * @param sourceTopics The source topics of the subtopology. + * @param repartitionSourceTopics The repartition source topics of the subtopology. + * @param repartitionSinkTopics The repartition sink topics of the subtopology. + * @param stateChangelogTopics The state changelog topics of the subtopology. + */ +public record ConfiguredSubtopology(Set sourceTopics, + Map repartitionSourceTopics, + Set repartitionSinkTopics, + Map stateChangelogTopics) { + + public ConfiguredSubtopology { + Objects.requireNonNull(sourceTopics, "sourceTopics can't be null"); + Objects.requireNonNull(repartitionSourceTopics, "repartitionSourceTopics can't be null"); + Objects.requireNonNull(repartitionSinkTopics, "repartitionSinkTopics can't be null"); + Objects.requireNonNull(stateChangelogTopics, "stateChangelogTopics can't be null"); + } + + public StreamsGroupDescribeResponseData.Subtopology asStreamsGroupDescribeSubtopology(String subtopologyId) { + return new StreamsGroupDescribeResponseData.Subtopology() + .setSubtopologyId(subtopologyId) + .setSourceTopics(sourceTopics.stream().sorted().collect(Collectors.toList())) + .setRepartitionSinkTopics(repartitionSinkTopics.stream().sorted().collect(Collectors.toList())) + .setRepartitionSourceTopics(repartitionSourceTopics.values().stream() + .map(ConfiguredInternalTopic::asStreamsGroupDescribeTopicInfo).sorted().collect(Collectors.toList())) + .setStateChangelogTopics(stateChangelogTopics.values().stream() + .map(ConfiguredInternalTopic::asStreamsGroupDescribeTopicInfo).sorted().collect(Collectors.toList())); + } + +} \ No newline at end of file diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopology.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopology.java new file mode 100644 index 0000000000000..86f8080421c46 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopology.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * This class captures the result of taking a topology definition sent by the client and using the current state of the topics inside the + * broker to configure the internal topics required for the topology. 
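The ConfiguredInternalTopic record above validates its inputs in the compact constructor. A small construction sketch with a hypothetical changelog topic name; the replication factor is left empty so the broker default applies.

import java.util.Map;
import java.util.Optional;

import org.apache.kafka.coordinator.group.streams.topics.ConfiguredInternalTopic;

public class ConfiguredInternalTopicSketch {
    public static void main(String[] args) {
        // The compact constructor validates the topic name and requires at least one partition.
        ConfiguredInternalTopic changelog = new ConfiguredInternalTopic(
            "my-app-store-changelog",            // hypothetical topic name
            6,                                   // number of partitions
            Optional.empty(),                    // replication factor: use broker default
            Map.of("cleanup.policy", "compact")); // illustrative topic config

        System.out.println(changelog.name() + " -> " + changelog.numberOfPartitions() + " partitions");
    }
}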
+ * + * @param topologyEpoch The epoch of the topology. Same as the topology epoch in the heartbeat request that last initialized + * the topology. + * @param subtopologies Contains the subtopologies that have been configured. This can be used by the task assignors, since it + * specifies the number of tasks available for every subtopology. + * @param internalTopicsToBeCreated Contains a list of internal topics that need to be created. This is used to create the topics in the + * broker. + * @param topicConfigurationException If the topic configuration process failed, e.g. because expected topics are missing or have an + * incorrect number of partitions, this field will store the error that occurred, so that is can be + * reported back to the client. + */ +public record ConfiguredTopology(int topologyEpoch, + Map subtopologies, + Map internalTopicsToBeCreated, + Optional topicConfigurationException) { + + public ConfiguredTopology { + if (topologyEpoch < 0) { + throw new IllegalArgumentException("Topology epoch must be non-negative."); + } + Objects.requireNonNull(subtopologies, "subtopologies can't be null"); + Objects.requireNonNull(internalTopicsToBeCreated, "internalTopicsToBeCreated can't be null"); + Objects.requireNonNull(topicConfigurationException, "topicConfigurationException can't be null"); + } + + public boolean isReady() { + return topicConfigurationException.isEmpty(); + } + + public StreamsGroupDescribeResponseData.Topology asStreamsGroupDescribeTopology() { + return new StreamsGroupDescribeResponseData.Topology() + .setEpoch(topologyEpoch) + .setSubtopologies(subtopologies.entrySet().stream().map( + entry -> entry.getValue().asStreamsGroupDescribeSubtopology(entry.getKey()) + ).collect(Collectors.toList())); + } + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcer.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcer.java new file mode 100644 index 0000000000000..4bcc229ef3432 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcer.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
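Once a ConfiguredTopology has been built, readiness can be checked via isReady(), and any stored TopicConfigurationException carries the status to report back to the client. A hedged usage sketch with empty subtopology and internal-topic maps (values are purely illustrative):

import java.util.Map;
import java.util.Optional;

import org.apache.kafka.coordinator.group.streams.topics.ConfiguredTopology;
import org.apache.kafka.coordinator.group.streams.topics.TopicConfigurationException;

public class ConfiguredTopologyReadinessSketch {
    public static void main(String[] args) {
        // A topology whose configuration failed because a source topic is missing (illustrative message).
        ConfiguredTopology notReady = new ConfiguredTopology(
            0,
            Map.of(),
            Map.of(),
            Optional.of(TopicConfigurationException.missingSourceTopics("Missing source topics: input-topic")));

        if (!notReady.isReady()) {
            // The stored status can be surfaced in the streams group heartbeat response.
            notReady.topicConfigurationException()
                .ifPresent(e -> System.out.println(e.status() + ": " + e.getMessage()));
        }
    }
}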
+ */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.utils.LogContext; + +import org.slf4j.Logger; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.OptionalInt; +import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * This class is responsible for enforcing the number of partitions in copartitioned topics. For each copartition group, it checks whether + * the number of partitions for all repartition topics is the same, and enforces copartitioning for repartition topics whose number of + * partitions is not enforced by the topology. + */ +public class CopartitionedTopicsEnforcer { + + private final Logger log; + private final Function topicPartitionCountProvider; + + /** + * The constructor for the class. + * + * @param logContext The context for emitting log messages. + * @param topicPartitionCountProvider Returns the number of partitions for a given topic, representing the current state of the broker + * as well as any partition number decisions that have already been made. In particular, we expect + * the number of partitions for all repartition topics defined, even if they do not exist in the + * broker yet. + */ + public CopartitionedTopicsEnforcer(final LogContext logContext, + final Function topicPartitionCountProvider) { + this.log = logContext.logger(getClass()); + this.topicPartitionCountProvider = topicPartitionCountProvider; + } + + /** + * Enforces the number of partitions for copartitioned topics. + * + * @param copartitionedTopics The set of copartitioned topics (external source topics and repartition topics). + * @param fixedRepartitionTopics The set of repartition topics whose partition count is fixed by the topology sent by the + * client (in particular, when the user uses `repartition` in the DSL). + * @param flexibleRepartitionTopics The set of repartition topics whose partition count is flexible, and can be changed. + * + * @throws TopicConfigurationException If source topics are missing, or there are topics in copartitionTopics that are not copartitioned + * according to topicPartitionCountProvider. + * + * @return A map from all repartition topics in copartitionedTopics to their updated partition counts. 
+ */ + public Map enforce(final Set copartitionedTopics, + final Set fixedRepartitionTopics, + final Set flexibleRepartitionTopics) throws StreamsInvalidTopologyException { + if (copartitionedTopics.isEmpty()) { + return Collections.emptyMap(); + } + final Map returnedPartitionCounts = new HashMap<>(); + + final Map repartitionTopicPartitionCounts = + copartitionedTopics.stream() + .filter(x -> fixedRepartitionTopics.contains(x) || flexibleRepartitionTopics.contains(x)) + .collect(Collectors.toMap(topic -> topic, this::getPartitionCount)); + + final Map nonRepartitionTopicPartitions = + copartitionedTopics.stream().filter(topic -> !repartitionTopicPartitionCounts.containsKey(topic)) + .collect(Collectors.toMap(topic -> topic, topic -> { + final OptionalInt topicPartitionCount = topicPartitionCountProvider.apply(topic); + if (topicPartitionCount.isEmpty()) { + final String str = String.format("Following topics are missing: [%s]", topic); + log.error(str); + throw TopicConfigurationException.missingSourceTopics(str); + } else { + return topicPartitionCount.getAsInt(); + } + })); + + final int numPartitionsToUseForRepartitionTopics; + + if (copartitionedTopics.equals(repartitionTopicPartitionCounts.keySet())) { + + // if there's at least one repartition topic with fixed number of partitions + // validate that they all have same number of partitions + if (!fixedRepartitionTopics.isEmpty()) { + numPartitionsToUseForRepartitionTopics = validateAndGetNumOfPartitions( + repartitionTopicPartitionCounts, + fixedRepartitionTopics + ); + } else { + // If all topics for this co-partition group are repartition topics, + // then set the number of partitions to be the maximum of the number of partitions. + numPartitionsToUseForRepartitionTopics = getMaxPartitions(repartitionTopicPartitionCounts); + } + } else { + // Otherwise, use the number of partitions from external topics (which must all be the same) + numPartitionsToUseForRepartitionTopics = getSamePartitions(nonRepartitionTopicPartitions); + } + + // coerce all the repartition topics to use the decided number of partitions. 
+ for (final Entry repartitionTopic : repartitionTopicPartitionCounts.entrySet()) { + returnedPartitionCounts.put(repartitionTopic.getKey(), numPartitionsToUseForRepartitionTopics); + if (fixedRepartitionTopics.contains(repartitionTopic.getKey()) + && repartitionTopic.getValue() != numPartitionsToUseForRepartitionTopics) { + final String msg = String.format("Number of partitions [%d] of repartition topic [%s] " + + "doesn't match number of partitions [%d] of the source topic.", + repartitionTopic.getValue(), + repartitionTopic.getKey(), + numPartitionsToUseForRepartitionTopics); + throw TopicConfigurationException.incorrectlyPartitionedTopics(msg); + } + } + + return returnedPartitionCounts; + } + + private int getPartitionCount(final String topicName) { + OptionalInt partitions = topicPartitionCountProvider.apply(topicName); + if (partitions.isPresent()) { + return partitions.getAsInt(); + } else { + throw new StreamsInvalidTopologyException("Number of partitions is not set for topic: " + topicName); + } + } + + private int validateAndGetNumOfPartitions(final Map repartitionTopics, + final Collection fixedRepartitionTopics) { + final String firstTopicName = fixedRepartitionTopics.iterator().next(); + + final int firstNumberOfPartitionsOfInternalTopic = getPartitionCount(firstTopicName); + + for (final String topicName : fixedRepartitionTopics) { + final int numberOfPartitions = getPartitionCount(topicName); + + if (numberOfPartitions != firstNumberOfPartitionsOfInternalTopic) { + final String msg = String.format("Following topics do not have the same number of partitions: [%s]", + new TreeMap<>(repartitionTopics)); + throw TopicConfigurationException.incorrectlyPartitionedTopics(msg); + } + } + + return firstNumberOfPartitionsOfInternalTopic; + } + + private int getSamePartitions(final Map nonRepartitionTopicsInCopartitionGroup) { + final int partitions = nonRepartitionTopicsInCopartitionGroup.values().iterator().next(); + for (final Entry entry : nonRepartitionTopicsInCopartitionGroup.entrySet()) { + if (entry.getValue() != partitions) { + final TreeMap sorted = new TreeMap<>(nonRepartitionTopicsInCopartitionGroup); + throw TopicConfigurationException.incorrectlyPartitionedTopics( + String.format("Following topics do not have the same number of partitions: [%s]", sorted)); + } + } + return partitions; + } + + private int getMaxPartitions(final Map repartitionTopicsInCopartitionGroup) { + int maxPartitions = 0; + + for (final Integer numPartitions : repartitionTopicsInCopartitionGroup.values()) { + maxPartitions = Integer.max(maxPartitions, numPartitions); + } + if (maxPartitions == 0) { + throw new StreamsInvalidTopologyException("All topics in the copartition group had undefined partition number: " + + repartitionTopicsInCopartitionGroup.keySet()); + } + return maxPartitions; + } + +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopics.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopics.java new file mode 100644 index 0000000000000..d1fefe67864ff --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopics.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
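The partition-count decision inside CopartitionedTopicsEnforcer.enforce() above can be restated in isolation: if any repartition topic has a fixed count, all fixed counts must agree; if the copartition group consists only of flexible repartition topics, the maximum count wins; otherwise the external source topics drive the count and must all match. The helper below is a hypothetical, simplified restatement of that rule, not the enforcer itself (it omits the final coercion and validation loop):

import java.util.Map;
import java.util.Set;

public class CopartitionRuleSketch {

    // Hypothetical standalone version of the decision made by enforce() above.
    static int decidePartitionCount(Map<String, Integer> repartitionCounts,
                                    Map<String, Integer> externalCounts,
                                    Set<String> fixedRepartitionTopics) {
        if (externalCounts.isEmpty()) {
            if (!fixedRepartitionTopics.isEmpty()) {
                // All fixed repartition topics must agree; use their common count.
                return fixedRepartitionTopics.stream()
                    .map(repartitionCounts::get)
                    .reduce((a, b) -> {
                        if (!a.equals(b)) throw new IllegalStateException("fixed repartition topics disagree");
                        return a;
                    })
                    .orElseThrow();
            }
            // Only flexible repartition topics: take the maximum.
            return repartitionCounts.values().stream().mapToInt(Integer::intValue).max().orElseThrow();
        }
        // External source topics drive the count and must all agree.
        int count = externalCounts.values().iterator().next();
        if (externalCounts.values().stream().anyMatch(c -> c != count)) {
            throw new IllegalStateException("external copartitioned topics disagree");
        }
        return count;
    }

    public static void main(String[] args) {
        // Two flexible repartition topics with 2 and 4 partitions: the maximum (4) wins.
        System.out.println(decidePartitionCount(
            Map.of("repartition-a", 2, "repartition-b", 4), Map.of(), Set.of()));
    }
}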
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import org.slf4j.Logger; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.OptionalInt; +import java.util.Set; +import java.util.function.Function; + +/** + * Responsible for configuring the number of partitions in repartitioning topics. It computes a fix-point iteration, deriving the number of + * partitions for each repartition topic based on the number of partitions of the source topics of the topology, if the number of + * partitions is not explicitly set in the topology. + */ +public class RepartitionTopics { + + private final Logger log; + private final Collection subtopologies; + private final Function topicPartitionCountProvider; + + /** + * The constructor for the class. + * + * @param logContext The context for emitting log messages. + * @param subtopologies The subtopologies for the requested topology. + * @param topicPartitionCountProvider Returns the number of partitions for a given topic, representing the current state of the + * broker. + */ + public RepartitionTopics(final LogContext logContext, + final Collection subtopologies, + final Function topicPartitionCountProvider) { + this.log = logContext.logger(getClass()); + this.subtopologies = subtopologies; + this.topicPartitionCountProvider = topicPartitionCountProvider; + } + + /** + * Returns the set of the number of partitions for each repartition topic. + * + * @return the map of repartition topics for the requested topology to their required number of partitions. + * + * @throws TopicConfigurationException if no valid configuration can be found given the broker state, for example, if a source topic + * is missing. + * @throws StreamsInvalidTopologyException if the number of partitions for all repartition topics cannot be determined, e.g. + * because of loops, or if a repartition source topic is not a sink topic of any subtopology. 
+ */ + public Map setup() { + final Set missingSourceTopicsForTopology = new HashSet<>(); + + for (final Subtopology subtopology : subtopologies) { + final Set missingSourceTopicsForSubtopology = computeMissingExternalSourceTopics(subtopology); + missingSourceTopicsForTopology.addAll(missingSourceTopicsForSubtopology); + } + + if (!missingSourceTopicsForTopology.isEmpty()) { + throw TopicConfigurationException.missingSourceTopics(String.format("Missing source topics: %s", + String.join(", ", missingSourceTopicsForTopology))); + } + + final Map repartitionTopicPartitionCount = computeRepartitionTopicPartitionCount(); + + for (final Subtopology subtopology : subtopologies) { + if (subtopology.repartitionSourceTopics().stream().anyMatch(repartitionTopic -> !repartitionTopicPartitionCount.containsKey(repartitionTopic.name()))) { + throw new StreamsInvalidTopologyException("Failed to compute number of partitions for all repartition topics, because " + + "a repartition source topic is never used as a sink topic."); + } + } + + return repartitionTopicPartitionCount; + } + + private Set computeMissingExternalSourceTopics(final Subtopology subtopology) { + final Set missingExternalSourceTopics = new HashSet<>(subtopology.sourceTopics()); + for (final TopicInfo topicInfo : subtopology.repartitionSourceTopics()) { + missingExternalSourceTopics.remove(topicInfo.name()); + } + missingExternalSourceTopics.removeIf(x -> topicPartitionCountProvider.apply(x).isPresent()); + return missingExternalSourceTopics; + } + + /** + * Computes the number of partitions and returns it for each repartition topic. + */ + private Map computeRepartitionTopicPartitionCount() { + boolean partitionCountNeeded; + Map repartitionTopicPartitionCounts = new HashMap<>(); + + for (final Subtopology subtopology : subtopologies) { + for (final TopicInfo repartitionSourceTopic : subtopology.repartitionSourceTopics()) { + if (repartitionSourceTopic.partitions() != 0) { + repartitionTopicPartitionCounts.put(repartitionSourceTopic.name(), repartitionSourceTopic.partitions()); + } + } + } + + do { + partitionCountNeeded = false; + // avoid infinitely looping without making any progress on unknown repartitions + boolean progressMadeThisIteration = false; + + for (final Subtopology subtopology : subtopologies) { + for (final String repartitionSinkTopic : subtopology.repartitionSinkTopics()) { + if (!repartitionTopicPartitionCounts.containsKey(repartitionSinkTopic)) { + final Integer numPartitions = computePartitionCount( + repartitionTopicPartitionCounts, + subtopology + ); + + if (numPartitions == null) { + partitionCountNeeded = true; + log.trace("Unable to determine number of partitions for {}, another iteration is needed", + repartitionSinkTopic); + } else { + log.trace("Determined number of partitions for {} to be {}", + repartitionSinkTopic, + numPartitions); + repartitionTopicPartitionCounts.put(repartitionSinkTopic, numPartitions); + progressMadeThisIteration = true; + } + } + } + } + if (!progressMadeThisIteration && partitionCountNeeded) { + throw new StreamsInvalidTopologyException("Failed to compute number of partitions for all " + + "repartition topics. 
There may be loops in the topology that cannot be resolved."); + } + } while (partitionCountNeeded); + + return repartitionTopicPartitionCounts; + } + + private Integer computePartitionCount(final Map repartitionTopicPartitionCounts, + final Subtopology subtopology) { + Integer partitionCount = null; + // try set the number of partitions for this repartition topic if it is not set yet + // use the maximum of all its source topic partitions as the number of partitions + + // It is possible that there is another internal topic, i.e, + // map().join().join(map()) + for (final TopicInfo repartitionSourceTopic : subtopology.repartitionSourceTopics()) { + Integer numPartitionsCandidate = repartitionTopicPartitionCounts.get(repartitionSourceTopic.name()); + if (numPartitionsCandidate != null && (partitionCount == null || numPartitionsCandidate > partitionCount)) { + partitionCount = numPartitionsCandidate; + } + } + for (final String externalSourceTopic : subtopology.sourceTopics()) { + final OptionalInt actualPartitionCount = topicPartitionCountProvider.apply(externalSourceTopic); + if (actualPartitionCount.isPresent() && (partitionCount == null || actualPartitionCount.getAsInt() > partitionCount)) { + partitionCount = actualPartitionCount.getAsInt(); + } + } + return partitionCount; + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationException.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationException.java new file mode 100644 index 0000000000000..f52c950b77095 --- /dev/null +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationException.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
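The do-while loop in computeRepartitionTopicPartitionCount above is a fix-point iteration: partition counts flow from already-sized topics to unsized repartition topics until nothing changes. A toy version of that propagation under hypothetical topic names (the real code additionally fails with StreamsInvalidTopologyException when an iteration makes no progress, which indicates an unresolvable loop):

import java.util.HashMap;
import java.util.Map;

public class FixPointPartitionCountSketch {
    public static void main(String[] args) {
        // Hypothetical chain: external "input" (4 partitions) -> "repartition-1" -> "repartition-2".
        Map<String, Integer> counts = new HashMap<>(Map.of("input", 4));
        // Each repartition topic inherits its count from its upstream topic.
        Map<String, String> upstreamOf = Map.of("repartition-1", "input", "repartition-2", "repartition-1");

        boolean progress = true;
        while (progress) {
            progress = false;
            for (Map.Entry<String, String> e : upstreamOf.entrySet()) {
                if (!counts.containsKey(e.getKey()) && counts.containsKey(e.getValue())) {
                    counts.put(e.getKey(), counts.get(e.getValue()));
                    progress = true;
                }
            }
        }
        // All three topics end up with 4 partitions.
        System.out.println(counts);
    }
}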
+ */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse; +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse.Status; + +public class TopicConfigurationException extends RuntimeException { + + private final Status status; + + public TopicConfigurationException(StreamsGroupHeartbeatResponse.Status status, String message) { + super(message); + this.status = status; + } + + public Status status() { + return status; + } + + public static TopicConfigurationException incorrectlyPartitionedTopics(String message) { + return new TopicConfigurationException(Status.INCORRECTLY_PARTITIONED_TOPICS, message); + } + + public static TopicConfigurationException missingSourceTopics(String message) { + return new TopicConfigurationException(Status.MISSING_SOURCE_TOPICS, message); + } + + public static TopicConfigurationException missingInternalTopics(String message) { + return new TopicConfigurationException(Status.MISSING_INTERNAL_TOPICS, message); + } +} diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentKey.json index c89ba9ab30c1c..22ea2457667d9 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentKey.json @@ -13,16 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 8, + "type": "coordinator-key", "name": "ConsumerGroupCurrentMemberAssignmentKey", - "validVersions": "8", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "8", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "8", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentValue.json index 3af09b3edc8cf..f94604e946c0b 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupCurrentMemberAssignmentValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 8, + "type": "coordinator-value", "name": "ConsumerGroupCurrentMemberAssignmentValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataKey.json index 2c27192af2e95..868b0bec9ee84 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataKey.json @@ -13,16 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 5, + "type": "coordinator-key", "name": "ConsumerGroupMemberMetadataKey", - "validVersions": "5", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "5", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "5", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataValue.json index 46890a91bfbf7..ab10d12756660 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupMemberMetadataValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 5, + "type": "coordinator-value", "name": "ConsumerGroupMemberMetadataValue", "validVersions": "0", "flexibleVersions": "0+", @@ -33,7 +33,7 @@ { "name": "SubscribedTopicRegex", "versions": "0+", "nullableVersions": "0+", "type": "string", "about": "The subscribed topic regular expression." }, { "name": "RebalanceTimeoutMs", "type": "int32", "versions": "0+", "default": -1, - "about": "The rebalance timeout" }, + "about": "The rebalance timeout." }, { "name": "ServerAssignor", "versions": "0+", "nullableVersions": "0+", "type": "string", "about": "The server assignor to use; or null if not used." }, { "name": "ClassicMemberMetadata", "versions": "0+", "nullableVersions": "0+", "type": "ClassicMemberMetadata", diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataKey.json index a1bfb0d0502f6..d67c850bc0fc1 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataKey.json @@ -13,14 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 3, + "type": "coordinator-key", "name": "ConsumerGroupMetadataKey", - "validVersions": "3", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "3", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataValue.json index 035919eeffa43..c01cabb521ec6 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupMetadataValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 3, + "type": "coordinator-value", "name": "ConsumerGroupMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataKey.json index c6866bd34b732..e4c3dc1babcfa 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataKey.json @@ -13,14 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 4, + "type": "coordinator-key", "name": "ConsumerGroupPartitionMetadataKey", - "validVersions": "4", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "4", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataValue.json index 89be8cfa056b5..413ee101b5b96 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupPartitionMetadataValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 4, + "type": "coordinator-value", "name": "ConsumerGroupPartitionMetadataValue", "validVersions": "0", "flexibleVersions": "0+", @@ -29,7 +29,7 @@ { "name": "NumPartitions", "versions": "0+", "type": "int32", "about": "The number of partitions of the topic." }, { "name": "PartitionMetadata", "versions": "0+", "type": "[]PartitionMetadata", - "about": "Deprecated: this field is not used after 4.0. Partitions mapped to a set of racks. If the rack information is unavailable for all the partitions, an empty list is stored", "fields": [ + "about": "Deprecated: this field is not used after 4.0. Partitions mapped to a set of racks. 
If the rack information is unavailable for all the partitions, an empty list is stored.", "fields": [ { "name": "Partition", "versions": "0+", "type": "int32", "about": "The partition number." }, { "name": "Racks", "versions": "0+", "type": "[]string", diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionKey.json index 3f761b694e4fe..be66dc867eb51 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionKey.json @@ -13,16 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 16, + "type": "coordinator-key", "name": "ConsumerGroupRegularExpressionKey", - "validVersions": "16", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "16", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "RegularExpression", "type": "string", "versions": "16", + { "name": "RegularExpression", "type": "string", "versions": "0", "about": "The regular expression." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionValue.json index ff0d1d624a36d..f55a9b0d892e2 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupRegularExpressionValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 16, + "type": "coordinator-value", "name": "ConsumerGroupRegularExpressionValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberKey.json index 10b94f26e6d98..169efb9097321 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberKey.json @@ -13,16 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 7, + "type": "coordinator-key", "name": "ConsumerGroupTargetAssignmentMemberKey", - "validVersions": "7", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "7", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "7", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." 
} ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberValue.json index e05c28928e878..e96893852a8e4 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMemberValue.json @@ -13,17 +13,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 7, + "type": "coordinator-value", "name": "ConsumerGroupTargetAssignmentMemberValue", "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "TopicPartitions", "versions": "0+", "type": "[]TopicPartition", "about": "The assigned partitions.", "fields": [ - { "name": "TopicId", "versions": "0+", "type": "uuid" }, - { "name": "Partitions", "versions": "0+", "type": "[]int32" } + { "name": "TopicId", "versions": "0+", "type": "uuid", + "about": "The topic id."}, + { "name": "Partitions", "versions": "0+", "type": "[]int32", + "about": "The partition indexes."} ]} ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataKey.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataKey.json index e78d557fb6159..90f4ce0294d52 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataKey.json @@ -13,14 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 6, + "type": "coordinator-key", "name": "ConsumerGroupTargetAssignmentMetadataKey", - "validVersions": "6", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "6", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataValue.json b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataValue.json index 6c73971b3ea1b..939794e1e5c73 100644 --- a/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ConsumerGroupTargetAssignmentMetadataValue.json @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// KIP-848 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 6, + "type": "coordinator-value", "name": "ConsumerGroupTargetAssignmentMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/GroupMetadataKey.json b/group-coordinator/src/main/resources/common/message/GroupMetadataKey.json index fa0c9ff9f6e73..1a468638bd8cc 100644 --- a/group-coordinator/src/main/resources/common/message/GroupMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/GroupMetadataKey.json @@ -14,11 +14,13 @@ // limitations under the License. { - "type": "data", + "apiKey": 2, + "type": "coordinator-key", "name": "GroupMetadataKey", - "validVersions": "2", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "group", "type": "string", "versions": "2" } + { "name": "group", "type": "string", "versions": "0", + "about": "The group id."} ] } diff --git a/group-coordinator/src/main/resources/common/message/GroupMetadataValue.json b/group-coordinator/src/main/resources/common/message/GroupMetadataValue.json index 6045f77812266..4bf87c9df7d0b 100644 --- a/group-coordinator/src/main/resources/common/message/GroupMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/GroupMetadataValue.json @@ -14,7 +14,8 @@ // limitations under the License. { - "type": "data", + "apiKey": 2, + "type": "coordinator-value", "name": "GroupMetadataValue", // Version 4 is the first flexible version. // KIP-915: bumping the version will no longer make this record backward compatible. @@ -22,26 +23,40 @@ "validVersions": "0-4", "flexibleVersions": "4+", "fields": [ - { "name": "protocolType", "versions": "0+", "type": "string"}, - { "name": "generation", "versions": "0+", "type": "int32" }, - { "name": "protocol", "versions": "0+", "type": "string", "nullableVersions": "0+" }, - { "name": "leader", "versions": "0+", "type": "string", "nullableVersions": "0+" }, - { "name": "currentStateTimestamp", "versions": "2+", "type": "int64", "default": -1, "ignorable": true}, - { "name": "members", "versions": "0+", "type": "[]MemberMetadata" } + { "name": "protocolType", "versions": "0+", "type": "string", + "about": "The protocol type."}, + { "name": "generation", "versions": "0+", "type": "int32", + "about": "The generation id."}, + { "name": "protocol", "versions": "0+", "type": "string", "nullableVersions": "0+", + "about": "The protocol name."}, + { "name": "leader", "versions": "0+", "type": "string", "nullableVersions": "0+", + "about": "The leader id."}, + { "name": "currentStateTimestamp", "versions": "2+", "type": "int64", "default": -1, "ignorable": true, + "about": "The timestamp of the last state change."}, + { "name": "members", "versions": "0+", "type": "[]MemberMetadata", + "about": "The group members."} ], "commonStructs": [ { "name": "MemberMetadata", "versions": "0+", "fields": [ - { "name": "memberId", "versions": "0+", "type": "string" }, - { "name": "groupInstanceId", "versions": "3+", "type": "string", "default": "null", "nullableVersions": "3+", "ignorable": true}, - { "name": "clientId", "versions": "0+", "type": "string" }, - { "name": "clientHost", "versions": "0+", "type": "string" }, - { "name": "rebalanceTimeout", "versions": "1+", "type": "int32", "default": -1, "ignorable": true}, - { "name": "sessionTimeout", "versions": "0+", "type": "int32" }, - { "name": "subscription", "versions": "0+", "type": "bytes" }, - { "name": "assignment", "versions": "0+", "type": "bytes" } + { "name": "memberId", "versions": "0+", 
"type": "string", + "about": "The member id."}, + { "name": "groupInstanceId", "versions": "3+", "type": "string", "default": "null", "nullableVersions": "3+", "ignorable": true, + "about": "The group instance id."}, + { "name": "clientId", "versions": "0+", "type": "string", + "about": "The client id."}, + { "name": "clientHost", "versions": "0+", "type": "string", + "about": "The client host."}, + { "name": "rebalanceTimeout", "versions": "1+", "type": "int32", "default": -1, "ignorable": true, + "about": "The rebalance timeout."}, + { "name": "sessionTimeout", "versions": "0+", "type": "int32", + "about": "The session timeout."}, + { "name": "subscription", "versions": "0+", "type": "bytes", + "about": "The subscription."}, + { "name": "assignment", "versions": "0+", "type": "bytes", + "about": "The assignment."} ] } ] diff --git a/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitKey.json b/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitKey.json new file mode 100644 index 0000000000000..b244be13175c6 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitKey.json @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 0, + "type": "coordinator-key", + "name": "LegacyOffsetCommitKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "group", "type": "string", "versions": "0", + "about": "The consumer group id."}, + { "name": "topic", "type": "string", "versions": "0", + "about": "The topic name."}, + { "name": "partition", "type": "int32", "versions": "0", + "about": "The topic partition index."} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitValue.json b/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitValue.json new file mode 100644 index 0000000000000..c81af345c6e67 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/LegacyOffsetCommitValue.json @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 0, + "type": "coordinator-value", + "name": "LegacyOffsetCommitValue", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "offset", "type": "int64", "versions": "0", + "about": "The offset that the consumer wants to store (for this partition)."}, + { "name": "metadata", "type": "string", "versions": "0", + "about": "Any metadata the client wants to keep."}, + { "name": "commitTimestamp", "type": "int64", "versions": "0", + "about": "The time at which the commit was added to the log."} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/OffsetCommitKey.json b/group-coordinator/src/main/resources/common/message/OffsetCommitKey.json index a9d1bc337804b..f5898f6860209 100644 --- a/group-coordinator/src/main/resources/common/message/OffsetCommitKey.json +++ b/group-coordinator/src/main/resources/common/message/OffsetCommitKey.json @@ -14,13 +14,17 @@ // limitations under the License. { - "type": "data", + "apiKey": 1, + "type": "coordinator-key", "name": "OffsetCommitKey", - "validVersions": "0-1", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "group", "type": "string", "versions": "0-1" }, - { "name": "topic", "type": "string", "versions": "0-1" }, - { "name": "partition", "type": "int32", "versions": "0-1" } + { "name": "group", "type": "string", "versions": "0", + "about": "The consumer group id."}, + { "name": "topic", "type": "string", "versions": "0", + "about": "The topic name."}, + { "name": "partition", "type": "int32", "versions": "0", + "about": "The topic partition index."} ] } diff --git a/group-coordinator/src/main/resources/common/message/OffsetCommitValue.json b/group-coordinator/src/main/resources/common/message/OffsetCommitValue.json index 2973c5ee12ab2..8f7d32d544796 100644 --- a/group-coordinator/src/main/resources/common/message/OffsetCommitValue.json +++ b/group-coordinator/src/main/resources/common/message/OffsetCommitValue.json @@ -14,7 +14,8 @@ // limitations under the License. { - "type": "data", + "apiKey": 1, + "type": "coordinator-value", "name": "OffsetCommitValue", // Version 4 is the first flexible version. // KIP-915: bumping the version will no longer make this record backward compatible. 
@@ -22,10 +23,15 @@ "validVersions": "0-4", "flexibleVersions": "4+", "fields": [ - { "name": "offset", "type": "int64", "versions": "0+" }, - { "name": "leaderEpoch", "type": "int32", "versions": "3+", "default": -1, "ignorable": true}, - { "name": "metadata", "type": "string", "versions": "0+" }, - { "name": "commitTimestamp", "type": "int64", "versions": "0+" }, - { "name": "expireTimestamp", "type": "int64", "versions": "1", "default": -1, "ignorable": true} + { "name": "offset", "type": "int64", "versions": "0+", + "about": "The offset that the consumer wants to store (for this partition)."}, + { "name": "leaderEpoch", "type": "int32", "versions": "3+", "default": -1, "ignorable": true, + "about": "The leader epoch of the last consumed record."}, + { "name": "metadata", "type": "string", "versions": "0+", + "about": "Any metadata the client wants to keep."}, + { "name": "commitTimestamp", "type": "int64", "versions": "0+", + "about": "The time at which the commit was added to the log."}, + { "name": "expireTimestamp", "type": "int64", "versions": "1", "default": -1, "ignorable": true, + "about": "The time at which the offset will expire."} ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentKey.json index 1aa74133cfcb2..c88926933f1fa 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentKey.json @@ -15,14 +15,15 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 14, + "type": "coordinator-key", "name": "ShareGroupCurrentMemberAssignmentKey", - "validVersions": "14", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "14", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "14", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentValue.json index 109ae6e870fd4..64959a42cc82a 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupCurrentMemberAssignmentValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 14, + "type": "coordinator-value", "name": "ShareGroupCurrentMemberAssignmentValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataKey.json index cbe1a8dd8718d..e5e0250502be1 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataKey.json @@ -15,14 +15,15 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 10, + "type": "coordinator-key", "name": "ShareGroupMemberMetadataKey", - "validVersions": "10", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "10", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "10", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataValue.json index c5d4c7abd4504..e4cf5e1690074 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupMemberMetadataValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 10, + "type": "coordinator-value", "name": "ShareGroupMemberMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupMetadataKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupMetadataKey.json index 309b67ba31ff7..4c3fe1affb8a4 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupMetadataKey.json @@ -15,12 +15,13 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 11, + "type": "coordinator-key", "name": "ShareGroupMetadataKey", - "validVersions": "11", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "11", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupMetadataValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupMetadataValue.json index 02ca3eacd0454..5ff037a2fcc1f 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupMetadataValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 11, + "type": "coordinator-value", "name": "ShareGroupMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataKey.json index 8a34f5dfc893d..146b9fdcb9103 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataKey.json @@ -15,12 +15,13 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 9, + "type": "coordinator-key", "name": "ShareGroupPartitionMetadataKey", - "validVersions": "9", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "9", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." 
} ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataValue.json index 8191b3a4045de..af63f047126e5 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 9, + "type": "coordinator-value", "name": "ShareGroupPartitionMetadataValue", "validVersions": "0", "flexibleVersions": "0+", @@ -29,7 +30,7 @@ { "name": "NumPartitions", "versions": "0+", "type": "int32", "about": "The number of partitions of the topic." }, { "name": "PartitionMetadata", "versions": "0+", "type": "[]PartitionMetadata", - "about": "Partitions mapped to a set of racks. If the rack information is unavailable for all the partitions, an empty list is stored", "fields": [ + "about": "Partitions mapped to a set of racks. If the rack information is unavailable for all the partitions, an empty list is stored.", "fields": [ { "name": "Partition", "versions": "0+", "type": "int32", "about": "The partition number." }, { "name": "Racks", "versions": "0+", "type": "[]string", diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataKey.json index 6bae1b6d0af91..d24e7bb9d4a49 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataKey.json @@ -15,12 +15,13 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 15, + "type": "coordinator-key", "name": "ShareGroupStatePartitionMetadataKey", - "validVersions": "15", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "15", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataValue.json index 2e797910002d3..18a9df0885fef 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupStatePartitionMetadataValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 15, + "type": "coordinator-value", "name": "ShareGroupStatePartitionMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberKey.json index 80a80112cba75..e8b364d289651 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberKey.json @@ -15,14 +15,15 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 13, + "type": "coordinator-key", "name": "ShareGroupTargetAssignmentMemberKey", - "validVersions": "13", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "13", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "MemberId", "type": "string", "versions": "13", + { "name": "MemberId", "type": "string", "versions": "0", "about": "The member id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberValue.json index 8c29c25876190..f6b52f3c86b99 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMemberValue.json @@ -15,15 +15,18 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 13, + "type": "coordinator-value", "name": "ShareGroupTargetAssignmentMemberValue", "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "TopicPartitions", "versions": "0+", "type": "[]TopicPartition", "about": "The assigned partitions.", "fields": [ - { "name": "TopicId", "versions": "0+", "type": "uuid" }, - { "name": "Partitions", "versions": "0+", "type": "[]int32" } + { "name": "TopicId", "versions": "0+", "type": "uuid", + "about": "The topic id."}, + { "name": "Partitions", "versions": "0+", "type": "[]int32", + "about": "The partition indexes."} ]} ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataKey.json b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataKey.json index ccf394d068ea3..202c57de7d55b 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataKey.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataKey.json @@ -15,12 +15,13 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 12, + "type": "coordinator-key", "name": "ShareGroupTargetAssignmentMetadataKey", - "validVersions": "12", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "12", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." } ] } diff --git a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataValue.json b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataValue.json index 52a38e865c00d..0e0028855fbf5 100644 --- a/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataValue.json +++ b/group-coordinator/src/main/resources/common/message/ShareGroupTargetAssignmentMetadataValue.json @@ -15,7 +15,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. 
{ - "type": "data", + "apiKey": 12, + "type": "coordinator-value", "name": "ShareGroupTargetAssignmentMetadataValue", "validVersions": "0", "flexibleVersions": "0+", diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentKey.json new file mode 100644 index 0000000000000..236d66de03cb5 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentKey.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 22, + "type": "coordinator-key", + "name": "StreamsGroupCurrentMemberAssignmentKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." }, + { "name": "MemberId", "type": "string", "versions": "0", + "about": "The member ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentValue.json new file mode 100644 index 0000000000000..463c1e84e1702 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupCurrentMemberAssignmentValue.json @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 22, + "type": "coordinator-value", + "name": "StreamsGroupCurrentMemberAssignmentValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "MemberEpoch", "versions": "0+", "type": "int32", + "about": "The current member epoch that is expected from the member in the heartbeat request." 
}, + { "name": "PreviousMemberEpoch", "versions": "0+", "type": "int32", + "about": "If the last epoch bump is lost before reaching the member, the member will retry with the previous epoch." }, + { "name": "State", "versions": "0+", "type": "int8", + "about": "The member state. See StreamsGroupMember.MemberState for the possible values." }, + { "name": "ActiveTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned active tasks for this streams client." }, + { "name": "StandbyTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned standby tasks for this streams client." }, + { "name": "WarmupTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned warm-up tasks for this streams client." }, + { "name": "ActiveTasksPendingRevocation", "versions": "0+", "type": "[]TaskIds", + "about": "The active tasks that must be revoked by this member." }, + { "name": "StandbyTasksPendingRevocation", "versions": "0+", "type": "[]TaskIds", + "about": "The standby tasks that must be revoked by this member." }, + { "name": "WarmupTasksPendingRevocation", "versions": "0+", "type": "[]TaskIds", + "about": "The warmup tasks that must be revoked by this member." } + ], + "commonStructs": [ + { "name": "TaskIds", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology ID." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions of the input topics processed by this member." } + ]} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataKey.json new file mode 100644 index 0000000000000..ae1fbc8d1a758 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataKey.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 19, + "type": "coordinator-key", + "name": "StreamsGroupMemberMetadataKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." }, + { "name": "MemberId", "type": "string", "versions": "0", + "about": "The member ID." 
} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataValue.json new file mode 100644 index 0000000000000..1ecc047f17a84 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupMemberMetadataValue.json @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 19, + "type": "coordinator-value", + "name": "StreamsGroupMemberMetadataValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "InstanceId", "versions": "0+", "nullableVersions": "0+", "type": "string", + "about": "The (optional) instance ID for static membership." }, + { "name": "RackId", "versions": "0+", "nullableVersions": "0+", "type": "string", + "about": "The (optional) rack ID." }, + { "name": "ClientId", "versions": "0+", "type": "string", + "about": "The client ID." }, + { "name": "ClientHost", "versions": "0+", "type": "string", + "about": "The client host." }, + { "name": "RebalanceTimeoutMs", "type": "int32", "versions": "0+", "default": -1, + "about": "The rebalance timeout." }, + { "name": "TopologyEpoch", "type": "int32", "versions": "0+", + "about": "The epoch of the topology." }, + { "name": "ProcessId", "type": "string", "versions": "0+", + "about": "Identity of the streams instance that may have multiple consumers." }, + { "name": "UserEndpoint", "type": "Endpoint", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "User-defined endpoint for running interactive queries on this instance." }, + { "name": "ClientTags", "type": "[]KeyValue", "versions": "0+", + "about": "Used for rack-aware assignment algorithm." } + ], + "commonStructs": [ + { "name": "Endpoint", "versions": "0+", "fields": [ + { "name": "Host", "type": "string", "versions": "0+", + "about": "The host of the endpoint." }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "The port of the endpoint." } + ]}, + { "name": "KeyValue", "versions": "0+", + "fields": [ + { "name": "Key", "type": "string", "versions": "0+", + "about": "The key of the config." }, + { "name": "Value", "type": "string", "versions": "0+", + "about": "The value of the config."
} + ] + } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataKey.json new file mode 100644 index 0000000000000..3d583ebb66ed5 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataKey.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 17, + "type": "coordinator-key", + "name": "StreamsGroupMetadataKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataValue.json new file mode 100644 index 0000000000000..2b4d371570aa4 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupMetadataValue.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 17, + "type": "coordinator-value", + "name": "StreamsGroupMetadataValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "Epoch", "versions": "0+", "type": "int32", + "about": "The group epoch." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataKey.json new file mode 100644 index 0000000000000..cb82e930a09c4 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataKey.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 18, + "type": "coordinator-key", + "name": "StreamsGroupPartitionMetadataKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataValue.json new file mode 100644 index 0000000000000..1f5eb8e8dcb24 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupPartitionMetadataValue.json @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 18, + "type": "coordinator-value", + "name": "StreamsGroupPartitionMetadataValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "Topics", "versions": "0+", "type": "[]TopicMetadata", + "about": "The list of topic metadata.", "fields": [ + { "name": "TopicId", "versions": "0+", "type": "uuid", + "about": "The topic ID." }, + { "name": "TopicName", "versions": "0+", "type": "string", + "about": "The topic name." }, + { "name": "NumPartitions", "versions": "0+", "type": "int32", + "about": "The number of partitions of the topic." }, + { "name": "PartitionMetadata", "versions": "0+", "type": "[]PartitionMetadata", + "about": "Partitions mapped to a set of racks. If the rack information is unavailable for all the partitions, an empty list is stored.", "fields": [ + { "name": "Partition", "versions": "0+", "type": "int32", + "about": "The partition number." }, + { "name": "Racks", "versions": "0+", "type": "[]string", + "about": "The set of racks that the partition is mapped to." 
} + ]} + ]} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberKey.json new file mode 100644 index 0000000000000..7563f01faded4 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberKey.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 21, + "type": "coordinator-key", + "name": "StreamsGroupTargetAssignmentMemberKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." }, + { "name": "MemberId", "type": "string", "versions": "0", + "about": "The member ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberValue.json new file mode 100644 index 0000000000000..c96dd608c7fa6 --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMemberValue.json @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 21, + "type": "coordinator-value", + "name": "StreamsGroupTargetAssignmentMemberValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ActiveTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned active tasks for this streams client." }, + { "name": "StandbyTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned standby tasks for this streams client." 
}, + { "name": "WarmupTasks", "versions": "0+", "type": "[]TaskIds", + "about": "Currently assigned warm-up tasks for this streams client." } + ], + "commonStructs": [ + { "name": "TaskIds", "versions": "0+", "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "The subtopology ID." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions of the input topics processed by this member." } + ]} + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataKey.json new file mode 100644 index 0000000000000..22fb861083a6b --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataKey.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 20, + "type": "coordinator-key", + "name": "StreamsGroupTargetAssignmentMetadataKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataValue.json new file mode 100644 index 0000000000000..b9de317cbde6e --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTargetAssignmentMetadataValue.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. 
+{ + "apiKey": 20, + "type": "coordinator-value", + "name": "StreamsGroupTargetAssignmentMetadataValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "AssignmentEpoch", "versions": "0+", "type": "int32", + "about": "The assignment epoch." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyKey.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyKey.json new file mode 100644 index 0000000000000..ac2b8d5932aba --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyKey.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 23, + "type": "coordinator-key", + "name": "StreamsGroupTopologyKey", + "validVersions": "0", + "flexibleVersions": "none", + "fields": [ + { "name": "GroupId", "type": "string", "versions": "0", + "about": "The group ID." } + ] +} diff --git a/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyValue.json b/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyValue.json new file mode 100644 index 0000000000000..26ac1ff66750b --- /dev/null +++ b/group-coordinator/src/main/resources/common/message/StreamsGroupTopologyValue.json @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The streams rebalance protocol is in development. This schema is subject to non-backwards-compatible changes. +{ + "apiKey": 23, + "type": "coordinator-value", + "name": "StreamsGroupTopologyValue", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "Epoch", "type": "int32", "versions": "0+", + "about": "The epoch of the topology." 
}, + { "name": "Subtopologies", "type": "[]Subtopology", "versions": "0+", + "about": "The sub-topologies of the streams application.", + "fields": [ + { "name": "SubtopologyId", "type": "string", "versions": "0+", + "about": "String to uniquely identify the subtopology." }, + { "name": "SourceTopics", "type": "[]string", "versions": "0+", + "about": "The topics the topology reads from." }, + { "name": "SourceTopicRegex", "type": "[]string", "versions": "0+", + "about": "Regular expressions identifying topics the subtopology reads from." }, + { "name": "StateChangelogTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of state changelog topics associated with this subtopology." }, + { "name": "RepartitionSinkTopics", "type": "[]string", "versions": "0+", + "about": "The repartition topics the subtopology writes to." }, + { "name": "RepartitionSourceTopics", "type": "[]TopicInfo", "versions": "0+", + "about": "The set of source topics that are internally created repartition topics." }, + { "name": "CopartitionGroups", "type": "[]CopartitionGroup", "versions": "0+", + "about": "A subset of source topics that must be copartitioned.", + "fields": [ + { "name": "SourceTopics", "type": "[]int16", "versions": "0+", + "about": "The topics the topology reads from. Index into the array on the subtopology level." }, + { "name": "SourceTopicRegex", "type": "[]int16", "versions": "0+", + "about": "Regular expressions identifying topics the subtopology reads from. Index into the array on the subtopology level." }, + { "name": "RepartitionSourceTopics", "type": "[]int16", "versions": "0+", + "about": "The set of source topics that are internally created repartition topics. Index into the array on the subtopology level." } + ] + } + ] + } + ], + "commonStructs": [ + { "name": "TopicConfig", "versions": "0+", "fields": [ + { "name": "key", "type": "string", "versions": "0+", + "about": "The key of the topic-level configuration." }, + { "name": "value", "type": "string", "versions": "0+", + "about": "The value of the topic-level configuration." } + ] + }, + { "name": "TopicInfo", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", + "about": "The name of the topic." }, + { "name": "Partitions", "type": "int32", "versions": "0+", + "about": "The number of partitions in the topic. Can be 0 if no specific number of partitions is enforced. Always 0 for changelog topics." }, + { "name": "ReplicationFactor", "type": "int16", "versions": "0+", + "about": "The replication factor of the topic. Can be 0 if the default replication factor should be used." }, + { "name": "TopicConfigs", "type": "[]TopicConfig", "versions": "0+", + "about": "Topic-level configurations as key-value pairs." 
} + ]} + ] +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/Assertions.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/Assertions.java index dba4f5285365c..85c7705c7400c 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/Assertions.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/Assertions.java @@ -19,10 +19,10 @@ import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.SyncGroupResponseData; +import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.types.SchemaException; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; @@ -30,128 +30,47 @@ import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataValue; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; +import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataValue; import org.apache.kafka.server.common.ApiMessageAndVersion; import org.opentest4j.AssertionFailedError; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.junit.jupiter.api.AssertionFailureBuilder.assertionFailure; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; public class Assertions { - public static <T> void assertUnorderedListEquals( - List<T> expected, - List<T> actual - ) { - assertEquals(new HashSet<>(expected), new HashSet<>(actual)); - } - - public static void assertResponseEquals( - ConsumerGroupHeartbeatResponseData expected, - ConsumerGroupHeartbeatResponseData actual - ) { - if (!responseEquals(expected, actual)) { - assertionFailure() - .expected(expected) - .actual(actual) - .buildAndThrow(); - } - } + private static final BiConsumer<ApiMessage, ApiMessage> API_MESSAGE_DEFAULT_COMPARATOR = org.junit.jupiter.api.Assertions::assertEquals; + private static final Map<Class<?>, BiConsumer<ApiMessage, ApiMessage>> API_MESSAGE_COMPARATORS = Map.of( + // Register request/response comparators. + ConsumerGroupHeartbeatResponseData.class, Assertions::assertConsumerGroupHeartbeatResponse, + ShareGroupHeartbeatResponseData.class, Assertions::assertShareGroupHeartbeatResponse, + SyncGroupResponseData.class, Assertions::assertSyncGroupResponse, + + // Register record comparators.
+ ConsumerGroupCurrentMemberAssignmentValue.class, Assertions::assertConsumerGroupCurrentMemberAssignmentValue, + ConsumerGroupPartitionMetadataValue.class, Assertions::assertConsumerGroupPartitionMetadataValue, + GroupMetadataValue.class, Assertions::assertGroupMetadataValue, + ConsumerGroupTargetAssignmentMemberValue.class, Assertions::assertConsumerGroupTargetAssignmentMemberValue, + ShareGroupPartitionMetadataValue.class, Assertions::assertShareGroupPartitionMetadataValue + ); public static void assertResponseEquals( - ShareGroupHeartbeatResponseData expected, - ShareGroupHeartbeatResponseData actual - ) { - if (!responseEquals(expected, actual)) { - assertionFailure() - .expected(expected) - .actual(actual) - .buildAndThrow(); - } - } - - private static boolean responseEquals( - ConsumerGroupHeartbeatResponseData expected, - ConsumerGroupHeartbeatResponseData actual - ) { - if (expected.throttleTimeMs() != actual.throttleTimeMs()) return false; - if (expected.errorCode() != actual.errorCode()) return false; - if (!Objects.equals(expected.errorMessage(), actual.errorMessage())) return false; - if (!Objects.equals(expected.memberId(), actual.memberId())) return false; - if (expected.memberEpoch() != actual.memberEpoch()) return false; - if (expected.heartbeatIntervalMs() != actual.heartbeatIntervalMs()) return false; - // Unordered comparison of the assignments. - return responseAssignmentEquals(expected.assignment(), actual.assignment()); - } - - private static boolean responseEquals( - ShareGroupHeartbeatResponseData expected, - ShareGroupHeartbeatResponseData actual + ApiMessage expected, + ApiMessage actual ) { - if (expected.throttleTimeMs() != actual.throttleTimeMs()) return false; - if (expected.errorCode() != actual.errorCode()) return false; - if (!Objects.equals(expected.errorMessage(), actual.errorMessage())) return false; - if (!Objects.equals(expected.memberId(), actual.memberId())) return false; - if (expected.memberEpoch() != actual.memberEpoch()) return false; - if (expected.heartbeatIntervalMs() != actual.heartbeatIntervalMs()) return false; - // Unordered comparison of the assignments. 
- return responseAssignmentEquals(expected.assignment(), actual.assignment()); - } - - private static boolean responseAssignmentEquals( - ConsumerGroupHeartbeatResponseData.Assignment expected, - ConsumerGroupHeartbeatResponseData.Assignment actual - ) { - if (expected == actual) return true; - if (expected == null) return false; - if (actual == null) return false; - - return Objects.equals(fromAssignment(expected.topicPartitions()), fromAssignment(actual.topicPartitions())); - } - - private static boolean responseAssignmentEquals( - ShareGroupHeartbeatResponseData.Assignment expected, - ShareGroupHeartbeatResponseData.Assignment actual - ) { - if (expected == actual) return true; - if (expected == null) return false; - if (actual == null) return false; - - return Objects.equals(fromShareGroupAssignment(expected.topicPartitions()), fromShareGroupAssignment(actual.topicPartitions())); - } - - private static Map> fromAssignment( - List assignment - ) { - if (assignment == null) return null; - - Map> assignmentMap = new HashMap<>(); - assignment.forEach(topicPartitions -> - assignmentMap.put(topicPartitions.topicId(), new HashSet<>(topicPartitions.partitions())) - ); - return assignmentMap; - } - - private static Map> fromShareGroupAssignment( - List assignment - ) { - if (assignment == null) return null; - - Map> assignmentMap = new HashMap<>(); - assignment.forEach(topicPartitions -> { - assignmentMap.put(topicPartitions.topicId(), new HashSet<>(topicPartitions.partitions())); - }); - return assignmentMap; + BiConsumer asserter = API_MESSAGE_COMPARATORS + .getOrDefault(expected.getClass(), API_MESSAGE_DEFAULT_COMPARATOR); + asserter.accept(expected, actual); } public static void assertRecordsEquals( @@ -174,6 +93,48 @@ public static void assertRecordsEquals( } } + /** + * Assert that the expected records are equal to the provided records. + * + * @param expectedRecords An ordered list of groupings. Each grouping + * defines a list of records that must be present, + * but they could be in any order. + * @param actualRecords An ordered list of records. + * @throws AssertionFailedError if the expected and the actual records do + * not match. 
+ */ + public static void assertUnorderedRecordsEquals( + List> expectedRecords, + List actualRecords + ) { + try { + int i = 0, j = 0; + while (i < expectedRecords.size()) { + List slice = expectedRecords.get(i); + assertRecordsEquals( + slice + .stream() + .sorted(Comparator.comparing(Object::toString)) + .collect(Collectors.toList()), + actualRecords + .subList(j, j + slice.size()) + .stream() + .sorted(Comparator.comparing(Object::toString)) + .collect(Collectors.toList()) + ); + + j += slice.size(); + i++; + } + assertEquals(j, actualRecords.size()); + } catch (AssertionFailedError e) { + assertionFailure() + .expected(expectedRecords) + .actual(actualRecords) + .buildAndThrow(); + } + } + public static void assertRecordEquals( CoordinatorRecord expected, CoordinatorRecord actual @@ -189,169 +150,219 @@ public static void assertRecordEquals( } } + private static void assertConsumerGroupHeartbeatResponse( + ApiMessage exp, + ApiMessage act + ) { + ConsumerGroupHeartbeatResponseData expected = (ConsumerGroupHeartbeatResponseData) exp.duplicate(); + ConsumerGroupHeartbeatResponseData actual = (ConsumerGroupHeartbeatResponseData) act.duplicate(); + + Consumer normalize = message -> { + if (message.assignment() != null) { + message.assignment().topicPartitions().sort(Comparator.comparing(ConsumerGroupHeartbeatResponseData.TopicPartitions::topicId)); + message.assignment().topicPartitions().forEach(topic -> topic.partitions().sort(Integer::compareTo)); + } + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); + } + + private static void assertShareGroupHeartbeatResponse( + ApiMessage exp, + ApiMessage act + ) { + ShareGroupHeartbeatResponseData expected = (ShareGroupHeartbeatResponseData) exp.duplicate(); + ShareGroupHeartbeatResponseData actual = (ShareGroupHeartbeatResponseData) act.duplicate(); + + Consumer normalize = message -> { + if (message.assignment() != null) { + message.assignment().topicPartitions().sort(Comparator.comparing(ShareGroupHeartbeatResponseData.TopicPartitions::topicId)); + message.assignment().topicPartitions().forEach(topic -> topic.partitions().sort(Integer::compareTo)); + } + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); + } + private static void assertApiMessageAndVersionEquals( ApiMessageAndVersion expected, ApiMessageAndVersion actual ) { if (expected == actual) return; - + assertNotNull(expected); + assertNotNull(actual); assertEquals(expected.version(), actual.version()); + BiConsumer asserter = API_MESSAGE_COMPARATORS + .getOrDefault(expected.message().getClass(), API_MESSAGE_DEFAULT_COMPARATOR); + asserter.accept(expected.message(), actual.message()); + } - if (actual.message() instanceof ConsumerGroupCurrentMemberAssignmentValue) { - // The order of the topics stored in ConsumerGroupCurrentMemberAssignmentValue is not - // always guaranteed. Therefore, we need a special comparator. - ConsumerGroupCurrentMemberAssignmentValue expectedValue = - (ConsumerGroupCurrentMemberAssignmentValue) expected.message(); - ConsumerGroupCurrentMemberAssignmentValue actualValue = - (ConsumerGroupCurrentMemberAssignmentValue) actual.message(); - - assertEquals(expectedValue.memberEpoch(), actualValue.memberEpoch()); - assertEquals(expectedValue.previousMemberEpoch(), actualValue.previousMemberEpoch()); - - // We transform those to Maps before comparing them. 
- assertEquals(fromTopicPartitions(expectedValue.assignedPartitions()), - fromTopicPartitions(actualValue.assignedPartitions())); - assertEquals(fromTopicPartitions(expectedValue.partitionsPendingRevocation()), - fromTopicPartitions(actualValue.partitionsPendingRevocation())); - } else if (actual.message() instanceof ConsumerGroupPartitionMetadataValue) { - // The order of the racks stored in the PartitionMetadata of the ConsumerGroupPartitionMetadataValue - // is not always guaranteed. Therefore, we need a special comparator. - ConsumerGroupPartitionMetadataValue expectedValue = - (ConsumerGroupPartitionMetadataValue) expected.message().duplicate(); - ConsumerGroupPartitionMetadataValue actualValue = - (ConsumerGroupPartitionMetadataValue) actual.message().duplicate(); - - List expectedTopicMetadataList = - expectedValue.topics(); - List actualTopicMetadataList = - actualValue.topics(); - - if (expectedTopicMetadataList.size() != actualTopicMetadataList.size()) { - fail("Topic metadata lists have different sizes"); - } + private static void assertConsumerGroupCurrentMemberAssignmentValue( + ApiMessage exp, + ApiMessage act + ) { + // The order of the topics stored in ConsumerGroupCurrentMemberAssignmentValue is not + // always guaranteed. Therefore, we need a special comparator. + ConsumerGroupCurrentMemberAssignmentValue expected = (ConsumerGroupCurrentMemberAssignmentValue) exp.duplicate(); + ConsumerGroupCurrentMemberAssignmentValue actual = (ConsumerGroupCurrentMemberAssignmentValue) act.duplicate(); - expectedTopicMetadataList.sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.TopicMetadata::topicId)); - actualTopicMetadataList.sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.TopicMetadata::topicId)); - - for (int i = 0; i < expectedTopicMetadataList.size(); i++) { - ConsumerGroupPartitionMetadataValue.TopicMetadata expectedTopicMetadata = - expectedTopicMetadataList.get(i); - ConsumerGroupPartitionMetadataValue.TopicMetadata actualTopicMetadata = - actualTopicMetadataList.get(i); - - assertEquals(expectedTopicMetadata.topicId(), actualTopicMetadata.topicId()); - assertEquals(expectedTopicMetadata.topicName(), actualTopicMetadata.topicName()); - assertEquals(expectedTopicMetadata.numPartitions(), actualTopicMetadata.numPartitions()); - - List expectedPartitionMetadataList = - expectedTopicMetadata.partitionMetadata(); - List actualPartitionMetadataList = - actualTopicMetadata.partitionMetadata(); - - // If the list is empty, rack information wasn't available for any replica of - // the partition and hence, the entry wasn't added to the record. 
- if (expectedPartitionMetadataList.size() != actualPartitionMetadataList.size()) { - fail("Partition metadata lists have different sizes"); - } else if (!expectedPartitionMetadataList.isEmpty() && !actualPartitionMetadataList.isEmpty()) { - for (int j = 0; j < expectedPartitionMetadataList.size(); j++) { - ConsumerGroupPartitionMetadataValue.PartitionMetadata expectedPartitionMetadata = - expectedPartitionMetadataList.get(j); - ConsumerGroupPartitionMetadataValue.PartitionMetadata actualPartitionMetadata = - actualPartitionMetadataList.get(j); - - assertEquals(expectedPartitionMetadata.partition(), actualPartitionMetadata.partition()); - assertUnorderedListEquals(expectedPartitionMetadata.racks(), actualPartitionMetadata.racks()); - } - } - } - } else if (actual.message() instanceof GroupMetadataValue) { - GroupMetadataValue expectedValue = (GroupMetadataValue) expected.message().duplicate(); - GroupMetadataValue actualValue = (GroupMetadataValue) actual.message().duplicate(); - - Comparator comparator = - Comparator.comparing(GroupMetadataValue.MemberMetadata::memberId); - expectedValue.members().sort(comparator); - actualValue.members().sort(comparator); + Consumer> sortTopicsAndPartitions = topicPartitions -> { + topicPartitions.sort(Comparator.comparing(ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions::topicId)); + topicPartitions.forEach(topic -> topic.partitions().sort(Integer::compareTo)); + }; + + Consumer normalize = message -> { + sortTopicsAndPartitions.accept(message.assignedPartitions()); + sortTopicsAndPartitions.accept(message.partitionsPendingRevocation()); + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); + } + + private static void assertConsumerGroupPartitionMetadataValue( + ApiMessage exp, + ApiMessage act + ) { + // The order of the racks stored in the PartitionMetadata of the ConsumerGroupPartitionMetadataValue + // is not always guaranteed. Therefore, we need a special comparator. + ConsumerGroupPartitionMetadataValue expected = (ConsumerGroupPartitionMetadataValue) exp.duplicate(); + ConsumerGroupPartitionMetadataValue actual = (ConsumerGroupPartitionMetadataValue) act.duplicate(); + + Consumer normalize = message -> { + message.topics().sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.TopicMetadata::topicId)); + message.topics().forEach(topic -> { + topic.partitionMetadata().sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.PartitionMetadata::partition)); + topic.partitionMetadata().forEach(partition -> partition.racks().sort(String::compareTo)); + }); + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); + } + + private static void assertShareGroupPartitionMetadataValue( + ApiMessage exp, + ApiMessage act + ) { + // The order of the racks stored in the PartitionMetadata of the ShareGroupPartitionMetadataValue + // is not always guaranteed. Therefore, we need a special comparator. 
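    // For context, a hedged sketch of how per-type asserters such as the ones above are
    // presumably wired up. The actual declarations of API_MESSAGE_COMPARATORS and
    // API_MESSAGE_DEFAULT_COMPARATOR are not shown in this hunk and may differ:
    //
    //     private static final Map<Class<?>, BiConsumer<ApiMessage, ApiMessage>> API_MESSAGE_COMPARATORS = Map.of(
    //         ConsumerGroupHeartbeatResponseData.class, Assertions::assertConsumerGroupHeartbeatResponse,
    //         ShareGroupPartitionMetadataValue.class, Assertions::assertShareGroupPartitionMetadataValue
    //         // further entries omitted
    //     );
    //
    //     private static final BiConsumer<ApiMessage, ApiMessage> API_MESSAGE_DEFAULT_COMPARATOR =
    //         (exp, act) -> assertEquals(exp, act);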
+ ShareGroupPartitionMetadataValue expected = (ShareGroupPartitionMetadataValue) exp.duplicate(); + ShareGroupPartitionMetadataValue actual = (ShareGroupPartitionMetadataValue) act.duplicate(); + + Consumer normalize = message -> { + message.topics().sort(Comparator.comparing(ShareGroupPartitionMetadataValue.TopicMetadata::topicId)); + message.topics().forEach(topic -> { + topic.partitionMetadata().sort(Comparator.comparing(ShareGroupPartitionMetadataValue.PartitionMetadata::partition)); + topic.partitionMetadata().forEach(partition -> partition.racks().sort(String::compareTo)); + }); + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); + } + + private static void assertGroupMetadataValue( + ApiMessage exp, + ApiMessage act + ) { + GroupMetadataValue expected = (GroupMetadataValue) exp.duplicate(); + GroupMetadataValue actual = (GroupMetadataValue) act.duplicate(); + + Consumer normalize = message -> { + message.members().sort(Comparator.comparing(GroupMetadataValue.MemberMetadata::memberId)); try { - Arrays.asList(expectedValue, actualValue).forEach(value -> - value.members().forEach(memberMetadata -> { - // Sort topics and ownedPartitions in Subscription. - ConsumerPartitionAssignor.Subscription subscription = - ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberMetadata.subscription())); - subscription.topics().sort(String::compareTo); - subscription.ownedPartitions().sort( - Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition) - ); - memberMetadata.setSubscription(Utils.toArray(ConsumerProtocol.serializeSubscription( - subscription, - ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.subscription())) - ))); - - // Sort partitions in Assignment. - ConsumerPartitionAssignor.Assignment assignment = - ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(memberMetadata.assignment())); - assignment.partitions().sort( - Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition) - ); - memberMetadata.setAssignment(Utils.toArray(ConsumerProtocol.serializeAssignment( - assignment, - ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.assignment())) - ))); - }) - ); + message.members().forEach(memberMetadata -> { + // Sort topics and ownedPartitions in Subscription. + ConsumerPartitionAssignor.Subscription subscription = + ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberMetadata.subscription())); + subscription.topics().sort(String::compareTo); + subscription.ownedPartitions().sort( + Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition) + ); + memberMetadata.setSubscription(Utils.toArray(ConsumerProtocol.serializeSubscription( + subscription, + ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.subscription())) + ))); + + // Sort partitions in Assignment. 
+ ConsumerPartitionAssignor.Assignment assignment = + ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(memberMetadata.assignment())); + assignment.partitions().sort( + Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition) + ); + memberMetadata.setAssignment(Utils.toArray(ConsumerProtocol.serializeAssignment( + assignment, + ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.assignment())) + ))); + }); } catch (SchemaException ex) { fail("Failed deserialization: " + ex.getMessage()); } - assertEquals(expectedValue, actualValue); - } else if (actual.message() instanceof ConsumerGroupTargetAssignmentMemberValue) { - ConsumerGroupTargetAssignmentMemberValue expectedValue = - (ConsumerGroupTargetAssignmentMemberValue) expected.message().duplicate(); - ConsumerGroupTargetAssignmentMemberValue actualValue = - (ConsumerGroupTargetAssignmentMemberValue) actual.message().duplicate(); - - Comparator comparator = - Comparator.comparing(ConsumerGroupTargetAssignmentMemberValue.TopicPartition::topicId); - expectedValue.topicPartitions().sort(comparator); - actualValue.topicPartitions().sort(comparator); - - assertEquals(expectedValue, actualValue); - } else { - assertEquals(expected.message(), actual.message()); - } + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); } - private static Map> fromTopicPartitions( - List assignment + private static void assertConsumerGroupTargetAssignmentMemberValue( + ApiMessage exp, + ApiMessage act ) { - Map> assignmentMap = new HashMap<>(); - assignment.forEach(topicPartitions -> - assignmentMap.put(topicPartitions.topicId(), new HashSet<>(topicPartitions.partitions())) - ); - return assignmentMap; + ConsumerGroupTargetAssignmentMemberValue expected = (ConsumerGroupTargetAssignmentMemberValue) exp.duplicate(); + ConsumerGroupTargetAssignmentMemberValue actual = (ConsumerGroupTargetAssignmentMemberValue) act.duplicate(); + + Consumer normalize = message -> { + message.topicPartitions().sort(Comparator.comparing(ConsumerGroupTargetAssignmentMemberValue.TopicPartition::topicId)); + message.topicPartitions().forEach(topic -> topic.partitions().sort(Integer::compareTo)); + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); } - public static void assertSyncGroupResponseEquals( - SyncGroupResponseData expected, - SyncGroupResponseData actual + private static void assertSyncGroupResponse( + ApiMessage exp, + ApiMessage act ) { - SyncGroupResponseData expectedDuplicate = expected.duplicate(); - SyncGroupResponseData actualDuplicate = actual.duplicate(); + SyncGroupResponseData expected = (SyncGroupResponseData) exp.duplicate(); + SyncGroupResponseData actual = (SyncGroupResponseData) act.duplicate(); - Arrays.asList(expectedDuplicate, actualDuplicate).forEach(duplicate -> { + Consumer normalize = message -> { try { ConsumerPartitionAssignor.Assignment assignment = - ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(duplicate.assignment())); + ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(message.assignment())); assignment.partitions().sort( Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition) ); - duplicate.setAssignment(Utils.toArray(ConsumerProtocol.serializeAssignment( + message.setAssignment(Utils.toArray(ConsumerProtocol.serializeAssignment( assignment, - ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(duplicate.assignment())) + 
ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(message.assignment())) ))); } catch (SchemaException ex) { fail("Failed deserialization: " + ex.getMessage()); } - }); - assertEquals(expectedDuplicate, actualDuplicate); + }; + + normalize.accept(expected); + normalize.accept(actual); + + assertEquals(expected, actual); } } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupConfigTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupConfigTest.java index fe11f50d2ff43..b774d29bb6ad2 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupConfigTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupConfigTest.java @@ -88,6 +88,10 @@ public void testValidShareAutoOffsetResetValues() { // Check for value "earliest" props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "earliest"); doTestValidProps(props); + + // Check for value "by_duration" + props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:PT10S"); + doTestValidProps(props); } @Test @@ -148,6 +152,18 @@ public void testInvalidProps() { // Check for invalid shareAutoOffsetReset props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "hello"); doTestInvalidProps(props, ConfigException.class); + + // Check for invalid shareAutoOffsetReset, by_duration without duration + props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration"); + doTestInvalidProps(props, ConfigException.class); + + // Check for invalid shareAutoOffsetReset, by_duration with negative duration + props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:-PT10S"); + doTestInvalidProps(props, ConfigException.class); + + // Check for invalid shareAutoOffsetReset, by_duration with invalid duration + props.put(GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, "by_duration:invalid"); + doTestInvalidProps(props, ConfigException.class); } private void doTestInvalidProps(Properties props, Class exceptionClassName) { diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfigTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfigTest.java index 1ec6fe7d68c1a..4956acaf3866b 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfigTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorConfigTest.java @@ -36,7 +36,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -@SuppressWarnings("deprecation") public class GroupCoordinatorConfigTest { private static final List GROUP_COORDINATOR_CONFIG_DEFS = Arrays.asList( GroupCoordinatorConfig.GROUP_COORDINATOR_CONFIG_DEF, diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpersTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpersTest.java index 812ee093c2ca9..a1cd92d83560e 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpersTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpersTest.java @@ -50,13 +50,8 @@ import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; import org.apache.kafka.server.common.ApiMessageAndVersion; 
-import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.EnumSource; -import org.junit.jupiter.params.provider.MethodSource; import java.util.ArrayList; import java.util.Arrays; @@ -70,7 +65,6 @@ import java.util.OptionalInt; import java.util.OptionalLong; import java.util.Set; -import java.util.stream.Stream; import static org.apache.kafka.coordinator.group.Assertions.assertRecordEquals; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkOrderedAssignment; @@ -454,21 +448,8 @@ public void testNewConsumerGroupCurrentAssignmentTombstoneRecord() { )); } - private static Stream metadataToExpectedGroupMetadataValue() { - return Stream.of( - Arguments.arguments(MetadataVersion.IBP_0_10_0_IV0, (short) 0), - Arguments.arguments(MetadataVersion.IBP_1_1_IV0, (short) 1), - Arguments.arguments(MetadataVersion.IBP_2_2_IV0, (short) 2), - Arguments.arguments(MetadataVersion.IBP_3_5_IV0, (short) 3) - ); - } - - @ParameterizedTest - @MethodSource("metadataToExpectedGroupMetadataValue") - public void testNewGroupMetadataRecord( - MetadataVersion metadataVersion, - short expectedGroupMetadataValueVersion - ) { + @Test + public void testNewGroupMetadataRecord() { Time time = new MockTime(); List expectedMembers = new ArrayList<>(); @@ -509,7 +490,7 @@ public void testNewGroupMetadataRecord( .setGeneration(1) .setCurrentStateTimestamp(time.milliseconds()) .setMembers(expectedMembers), - expectedGroupMetadataValueVersion)); + (short) 3)); ClassicGroup group = new ClassicGroup( new LogContext(), @@ -544,8 +525,7 @@ public void testNewGroupMetadataRecord( group.initNextGeneration(); CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newGroupMetadataRecord( group, - assignment, - metadataVersion + assignment ); assertEquals(expectedRecord, groupMetadataRecord); @@ -610,8 +590,7 @@ public void testNewGroupMetadataRecordThrowsWhenNullSubscription() { assertThrows(IllegalStateException.class, () -> GroupCoordinatorRecordHelpers.newGroupMetadataRecord( group, - Collections.emptyMap(), - MetadataVersion.IBP_3_5_IV2 + Collections.emptyMap() )); } @@ -661,17 +640,12 @@ public void testNewGroupMetadataRecordThrowsWhenEmptyAssignment() { assertThrows(IllegalStateException.class, () -> GroupCoordinatorRecordHelpers.newGroupMetadataRecord( group, - Collections.emptyMap(), - MetadataVersion.IBP_3_5_IV2 + Collections.emptyMap() )); } - @ParameterizedTest - @MethodSource("metadataToExpectedGroupMetadataValue") - public void testEmptyGroupMetadataRecord( - MetadataVersion metadataVersion, - short expectedGroupMetadataValueVersion - ) { + @Test + public void testEmptyGroupMetadataRecord() { Time time = new MockTime(); List expectedMembers = Collections.emptyList(); @@ -689,7 +663,7 @@ public void testEmptyGroupMetadataRecord( .setGeneration(0) .setCurrentStateTimestamp(time.milliseconds()) .setMembers(expectedMembers), - expectedGroupMetadataValueVersion)); + (short) 3)); ClassicGroup group = new ClassicGroup( new LogContext(), @@ -700,16 +674,20 @@ public void testEmptyGroupMetadataRecord( group.initNextGeneration(); CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord( - group, - metadataVersion + group ); assertEquals(expectedRecord, groupMetadataRecord); } - @ParameterizedTest - @EnumSource(value = MetadataVersion.class) - public void 
testNewOffsetCommitRecord(MetadataVersion metadataVersion) { + @Test + public void testOffsetCommitValueVersion() { + assertEquals((short) 1, GroupCoordinatorRecordHelpers.offsetCommitValueVersion(true)); + assertEquals((short) 3, GroupCoordinatorRecordHelpers.offsetCommitValueVersion(false)); + } + + @Test + public void testNewOffsetCommitRecord() { OffsetCommitKey key = new OffsetCommitKey() .setGroup("group-id") .setTopic("foo") @@ -727,8 +705,7 @@ public void testNewOffsetCommitRecord(MetadataVersion metadataVersion) { (short) 1), new ApiMessageAndVersion( value, - metadataVersion.offsetCommitValueVersion(false) - ) + GroupCoordinatorRecordHelpers.offsetCommitValueVersion(false)) ); assertEquals(expectedRecord, GroupCoordinatorRecordHelpers.newOffsetCommitRecord( @@ -740,8 +717,7 @@ public void testNewOffsetCommitRecord(MetadataVersion metadataVersion) { OptionalInt.of(10), "metadata", 1234L, - OptionalLong.empty()), - metadataVersion + OptionalLong.empty()) )); value.setLeaderEpoch(-1); @@ -755,14 +731,12 @@ public void testNewOffsetCommitRecord(MetadataVersion metadataVersion) { OptionalInt.empty(), "metadata", 1234L, - OptionalLong.empty()), - metadataVersion + OptionalLong.empty()) )); } - @ParameterizedTest - @EnumSource(value = MetadataVersion.class) - public void testNewOffsetCommitRecordWithExpireTimestamp(MetadataVersion metadataVersion) { + @Test + public void testNewOffsetCommitRecordWithExpireTimestamp() { CoordinatorRecord expectedRecord = new CoordinatorRecord( new ApiMessageAndVersion( new OffsetCommitKey() @@ -790,8 +764,7 @@ public void testNewOffsetCommitRecordWithExpireTimestamp(MetadataVersion metadat OptionalInt.of(10), "metadata", 1234L, - OptionalLong.of(5678L)), - metadataVersion + OptionalLong.of(5678L)) )); } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerdeTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerdeTest.java index d76b7326f7674..319cc9358a2b7 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerdeTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordSerdeTest.java @@ -20,36 +20,9 @@ import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue; import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataKey; import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey; -import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; -import 
org.apache.kafka.coordinator.group.generated.GroupMetadataKey; -import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; -import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupPartitionMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupStatePartitionMetadataValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberValue; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataKey; -import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; import org.apache.kafka.server.common.ApiMessageAndVersion; import org.junit.jupiter.api.Test; @@ -243,22 +216,9 @@ public void testDeserializeWithInvalidValueBytes() { @Test public void testDeserializeAllRecordTypes() { - roundTrip((short) 0, new OffsetCommitKey(), new OffsetCommitValue()); - roundTrip((short) 1, new OffsetCommitKey(), new OffsetCommitValue()); - roundTrip((short) 2, new GroupMetadataKey(), new GroupMetadataValue()); - roundTrip((short) 3, new ConsumerGroupMetadataKey(), new ConsumerGroupMetadataValue()); - roundTrip((short) 4, new ConsumerGroupPartitionMetadataKey(), new ConsumerGroupPartitionMetadataValue()); - roundTrip((short) 5, new ConsumerGroupMemberMetadataKey(), new ConsumerGroupMemberMetadataValue()); - roundTrip((short) 6, new ConsumerGroupTargetAssignmentMetadataKey(), new ConsumerGroupTargetAssignmentMetadataValue()); - roundTrip((short) 7, new ConsumerGroupTargetAssignmentMemberKey(), new ConsumerGroupTargetAssignmentMemberValue()); - roundTrip((short) 8, new ConsumerGroupCurrentMemberAssignmentKey(), new ConsumerGroupCurrentMemberAssignmentValue()); - roundTrip((short) 9, new ShareGroupPartitionMetadataKey(), new ShareGroupPartitionMetadataValue()); - roundTrip((short) 10, new ShareGroupMemberMetadataKey(), new ShareGroupMemberMetadataValue()); - roundTrip((short) 11, new ShareGroupMetadataKey(), new ShareGroupMetadataValue()); - roundTrip((short) 12, new ShareGroupTargetAssignmentMetadataKey(), new ShareGroupTargetAssignmentMetadataValue()); - roundTrip((short) 13, new ShareGroupTargetAssignmentMemberKey(), new ShareGroupTargetAssignmentMemberValue()); - roundTrip((short) 14, new ShareGroupCurrentMemberAssignmentKey(), new ShareGroupCurrentMemberAssignmentValue()); - roundTrip((short) 15, new ShareGroupStatePartitionMetadataKey(), new ShareGroupStatePartitionMetadataValue()); + for (CoordinatorRecordType record : CoordinatorRecordType.values()) { + roundTrip(record.id(), record.newRecordKey(), 
record.newRecordValue()); + } } private void roundTrip( diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorServiceTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorServiceTest.java index bccfa57397c16..1974a796d4a64 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorServiceTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorServiceTest.java @@ -1932,8 +1932,9 @@ public void testCommitTransactionalOffsetsWithInvalidGroupId(String groupId) thr ); } - @Test - public void testCommitTransactionalOffsets() throws ExecutionException, InterruptedException { + @ParameterizedTest + @ValueSource(shorts = {4, 5}) + public void testCommitTransactionalOffsets(Short txnOffsetCommitVersion) throws ExecutionException, InterruptedException { CoordinatorRuntime runtime = mockRuntime(); GroupCoordinatorService service = new GroupCoordinatorService( new LogContext(), @@ -1976,7 +1977,7 @@ public void testCommitTransactionalOffsets() throws ExecutionException, Interrup )).thenReturn(CompletableFuture.completedFuture(response)); CompletableFuture future = service.commitTransactionalOffsets( - requestContext(ApiKeys.TXN_OFFSET_COMMIT), + requestContext(ApiKeys.TXN_OFFSET_COMMIT, txnOffsetCommitVersion), request, BufferSupplier.NO_CACHING ); diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorShardTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorShardTest.java index cb68771c8a5f1..4e449a4b8b17f 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorShardTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorShardTest.java @@ -55,6 +55,8 @@ import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; import org.apache.kafka.coordinator.group.generated.GroupMetadataKey; import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; +import org.apache.kafka.coordinator.group.generated.LegacyOffsetCommitKey; +import org.apache.kafka.coordinator.group.generated.LegacyOffsetCommitValue; import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import org.apache.kafka.coordinator.group.generated.ShareGroupMemberMetadataKey; @@ -337,12 +339,32 @@ public void testReplayOffsetCommit() { metricsShard ); - OffsetCommitKey key = new OffsetCommitKey(); - OffsetCommitValue value = new OffsetCommitValue(); + OffsetCommitKey key = new OffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0); + OffsetCommitValue value = new OffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setExpireTimestamp(6789L) + .setMetadata("Metadata") + .setLeaderEpoch(10); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( - new ApiMessageAndVersion(key, (short) 0), - new ApiMessageAndVersion(value, (short) 0) + new ApiMessageAndVersion( + new LegacyOffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0), + (short) 0 + ), + new ApiMessageAndVersion( + new LegacyOffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setMetadata("Metadata"), + (short) 0 + ) )); coordinator.replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new 
CoordinatorRecord( @@ -353,8 +375,14 @@ public void testReplayOffsetCommit() { verify(offsetMetadataManager, times(1)).replay( 0L, RecordBatch.NO_PRODUCER_ID, - key, - value + new OffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0), + new OffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setMetadata("Metadata") ); verify(offsetMetadataManager, times(1)).replay( @@ -382,12 +410,32 @@ public void testReplayTransactionalOffsetCommit() { metricsShard ); - OffsetCommitKey key = new OffsetCommitKey(); - OffsetCommitValue value = new OffsetCommitValue(); + OffsetCommitKey key = new OffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0); + OffsetCommitValue value = new OffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setExpireTimestamp(6789L) + .setMetadata("Metadata") + .setLeaderEpoch(10); coordinator.replay(0L, 100L, (short) 0, new CoordinatorRecord( - new ApiMessageAndVersion(key, (short) 0), - new ApiMessageAndVersion(value, (short) 0) + new ApiMessageAndVersion( + new LegacyOffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0), + (short) 0 + ), + new ApiMessageAndVersion( + new LegacyOffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setMetadata("Metadata"), + (short) 0 + ) )); coordinator.replay(1L, 101L, (short) 1, new CoordinatorRecord( @@ -398,8 +446,14 @@ public void testReplayTransactionalOffsetCommit() { verify(offsetMetadataManager, times(1)).replay( 0L, 100L, - key, - value + new OffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0), + new OffsetCommitValue() + .setOffset(100L) + .setCommitTimestamp(12345L) + .setMetadata("Metadata") ); verify(offsetMetadataManager, times(1)).replay( @@ -427,10 +481,18 @@ public void testReplayOffsetCommitWithNullValue() { metricsShard ); - OffsetCommitKey key = new OffsetCommitKey(); + OffsetCommitKey key = new OffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( - new ApiMessageAndVersion(key, (short) 0), + new ApiMessageAndVersion( + new LegacyOffsetCommitKey() + .setGroup("goo") + .setTopic("foo") + .setPartition(0), + (short) 0), null )); diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java index 35fc811165856..017efb3e0079a 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java @@ -45,6 +45,8 @@ import org.apache.kafka.common.message.HeartbeatRequestData; import org.apache.kafka.common.message.HeartbeatResponseData; import org.apache.kafka.common.message.JoinGroupRequestData; +import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol; +import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection; import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember; import org.apache.kafka.common.message.LeaveGroupRequestData; @@ -66,11 +68,12 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.common.runtime.CoordinatorResult; 
+import org.apache.kafka.coordinator.common.runtime.MockCoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.MockCoordinatorTimer.ExpiredTimeout; import org.apache.kafka.coordinator.common.runtime.MockCoordinatorTimer.ScheduledTimeout; import org.apache.kafka.coordinator.group.api.assignor.ConsumerGroupPartitionAssignor; import org.apache.kafka.coordinator.group.api.assignor.GroupAssignment; -import org.apache.kafka.coordinator.group.api.assignor.MemberAssignment; +import org.apache.kafka.coordinator.group.api.assignor.GroupSpec; import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignorException; import org.apache.kafka.coordinator.group.classic.ClassicGroup; import org.apache.kafka.coordinator.group.classic.ClassicGroupMember; @@ -91,17 +94,16 @@ import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataProvenance; -import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import org.opentest4j.AssertionFailedError; +import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -114,8 +116,6 @@ import java.util.stream.IntStream; import java.util.stream.Stream; -import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol; -import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection; import static org.apache.kafka.common.protocol.Errors.NOT_COORDINATOR; import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH; @@ -123,7 +123,7 @@ import static org.apache.kafka.coordinator.group.Assertions.assertRecordEquals; import static org.apache.kafka.coordinator.group.Assertions.assertRecordsEquals; import static org.apache.kafka.coordinator.group.Assertions.assertResponseEquals; -import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedListEquals; +import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedRecordsEquals; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkAssignment; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkTopicAssignment; import static org.apache.kafka.coordinator.group.GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG; @@ -169,7 +169,7 @@ public void testConsumerHeartbeatRequestValidation() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); String memberId = Uuid.randomUuid().toString(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); Exception ex; @@ -208,7 +208,7 @@ public void testConsumerHeartbeatRequestValidation() { .setRebalanceTimeoutMs(5000))); assertEquals("TopicPartitions must be empty when (re-)joining.", ex.getMessage()); - // SubscribedTopicNames or SubscribedTopicRegex must be present and non-empty in the first request (epoch == 0). 
+ // SubscribedTopicNames or SubscribedTopicRegex must be present in the first request (epoch == 0). ex = assertThrows(InvalidRequestException.class, () -> context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setMemberId(memberId) @@ -216,7 +216,7 @@ public void testConsumerHeartbeatRequestValidation() { .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setTopicPartitions(Collections.emptyList()))); - assertEquals("SubscribedTopicNames or SubscribedTopicRegex must be set in first request.", ex.getMessage()); + assertEquals("Either SubscribedTopicNames or SubscribedTopicRegex must be non-null when (re-)joining.", ex.getMessage()); // InstanceId must be non-empty if provided in all requests. ex = assertThrows(InvalidRequestException.class, () -> context.consumerGroupHeartbeat( @@ -251,7 +251,7 @@ public void testConsumerHeartbeatRequestValidation() { .setMemberId(memberId) .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); assertEquals("InstanceId can't be null.", ex.getMessage()); @@ -263,7 +263,7 @@ public void testConsumerHeartbeatRegexValidation() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); assignor.prepareGroupAssignment(new GroupAssignment(Collections.emptyMap())); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); // Subscribing with an invalid regular expression fails. @@ -307,7 +307,7 @@ public void testConsumerHeartbeatRegexValidation() { .setMemberId(memberId) .setMemberEpoch(1) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(2, result.response().memberEpoch()); } @@ -316,7 +316,7 @@ public void testConsumerHeartbeatRegexValidation() { public void testMemberIdGeneration() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(MetadataImage.EMPTY) .build(); @@ -332,7 +332,7 @@ public void testMemberIdGeneration() { .setMemberEpoch(0) .setServerAssignor("range") .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()), (short) 0 ); @@ -363,7 +363,7 @@ public void testUnknownGroupId() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); assertThrows(GroupIdNotFoundException.class, () -> @@ -373,7 +373,7 @@ public void testUnknownGroupId() { .setMemberId(memberId) .setMemberEpoch(100) // Epoch must be > 0. 
.setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -384,7 +384,7 @@ public void testUnknownMemberIdJoinsConsumerGroup() { String memberId = Uuid.randomUuid().toString(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .build(); // A first member joins to create the group. @@ -395,7 +395,7 @@ public void testUnknownMemberIdJoinsConsumerGroup() { .setMemberEpoch(0) .setServerAssignor(NoOpPartitionAssignor.NAME) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); // The second member is rejected because the member id is unknown and @@ -407,7 +407,7 @@ public void testUnknownMemberIdJoinsConsumerGroup() { .setMemberId(Uuid.randomUuid().toString()) .setMemberEpoch(1) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -420,7 +420,7 @@ public void testConsumerGroupMemberEpochValidation() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); ConsumerGroupMember member = new ConsumerGroupMember.Builder(memberId) @@ -430,7 +430,7 @@ public void testConsumerGroupMemberEpochValidation() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1, 2, 3))) .build(); @@ -455,7 +455,7 @@ public void testConsumerGroupMemberEpochValidation() { .setMemberId(memberId) .setMemberEpoch(200) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member epoch is smaller than the expected epoch. assertThrows(FencedMemberEpochException.class, () -> @@ -465,7 +465,7 @@ public void testConsumerGroupMemberEpochValidation() { .setMemberId(memberId) .setMemberEpoch(50) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member joins with previous epoch but without providing partitions. assertThrows(FencedMemberEpochException.class, () -> @@ -475,7 +475,7 @@ public void testConsumerGroupMemberEpochValidation() { .setMemberId(memberId) .setMemberEpoch(99) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member joins with previous epoch and has a subset of the owned partitions. This // is accepted as the response with the bumped epoch may have been lost. 
In this @@ -486,10 +486,10 @@ public void testConsumerGroupMemberEpochValidation() { .setMemberId(memberId) .setMemberEpoch(99) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) - .setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions() + .setSubscribedTopicNames(List.of("foo", "bar")) + .setTopicPartitions(List.of(new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(1, 2))))); + .setPartitions(List.of(1, 2))))); assertEquals(100, result.response().memberEpoch()); } @@ -506,7 +506,7 @@ public void testMemberJoinsEmptyConsumerGroup() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -515,7 +515,7 @@ public void testMemberJoinsEmptyConsumerGroup() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) ))) @@ -531,7 +531,7 @@ public void testMemberJoinsEmptyConsumerGroup() { .setMemberEpoch(0) .setServerAssignor("range") .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -540,13 +540,13 @@ public void testMemberJoinsEmptyConsumerGroup() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)), + .setPartitions(List.of(0, 1, 2, 3, 4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Arrays.asList(0, 1, 2)) + .setPartitions(List.of(0, 1, 2)) ))), result.response() ); @@ -558,19 +558,19 @@ public void testMemberJoinsEmptyConsumerGroup() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2))) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() {{ - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - }}), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), 
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 1), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), @@ -596,7 +596,7 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -609,7 +609,7 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) @@ -620,7 +620,7 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) ))) @@ -631,7 +631,7 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(10) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); assertResponseEquals( new ConsumerGroupHeartbeatResponseData() @@ -639,13 +639,13 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)), + .setPartitions(List.of(0, 1, 2, 3, 4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Arrays.asList(0, 1, 2)) + .setPartitions(List.of(0, 1, 2)) ))), result.response() ); @@ -656,21 +656,19 @@ public void testUpdatingSubscriptionTriggersNewTargetAssignment() { .setPreviousMemberEpoch(10) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2))) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 
6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), @@ -698,7 +696,7 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -712,7 +710,7 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -725,7 +723,7 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -740,24 +738,20 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { .withAssignmentEpoch(10)) .build(); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(barTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 2, 3), - mkTopicAssignment(barTopicId, 1) - ))); - put(memberId3, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5), - mkTopicAssignment(barTopicId, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(barTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 2, 3), + mkTopicAssignment(barTopicId, 1) + )), + memberId3, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5), + mkTopicAssignment(barTopicId, 2) + )) + ))); // Member 3 joins the consumer group. 
CoordinatorResult result = context.consumerGroupHeartbeat( @@ -766,7 +760,7 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { .setMemberId(memberId3) .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignor("range") .setTopicPartitions(Collections.emptyList())); @@ -786,32 +780,33 @@ public void testNewJoiningMemberTriggersNewTargetAssignment() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(barTopicId, 0) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( - mkTopicAssignment(fooTopicId, 2, 3), - mkTopicAssignment(barTopicId, 1) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId3, mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5), - mkTopicAssignment(barTopicId, 2) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3) + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(barTopicId, 0) + )), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( + mkTopicAssignment(fooTopicId, 2, 3), + mkTopicAssignment(barTopicId, 1) + )), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId3, mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5), + mkTopicAssignment(barTopicId, 2) + )) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3)) + ), + result.records() ); - - assertRecordsEquals(expectedRecords.subList(0, 2), result.records().subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 5), result.records().subList(2, 5)); - assertRecordsEquals(expectedRecords.subList(5, 7), result.records().subList(5, 7)); } @Test @@ -832,7 +827,7 @@ public void testLeavingMemberBumpsGroupEpoch() { // Consumer group with two members. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -846,7 +841,7 @@ public void testLeavingMemberBumpsGroupEpoch() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -859,7 +854,7 @@ public void testLeavingMemberBumpsGroupEpoch() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) // Use zar only here to ensure that metadata needs to be recomputed. - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -881,7 +876,7 @@ public void testLeavingMemberBumpsGroupEpoch() { .setMemberId(memberId2) .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -891,17 +886,15 @@ public void testLeavingMemberBumpsGroupEpoch() { result.response() ); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), // Subscription metadata is recomputed because zar is no longer there. - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) ); @@ -925,7 +918,7 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { // Consumer group with two static members. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -939,7 +932,7 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -953,7 +946,7 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) // Use zar only here to ensure that metadata needs to be recomputed. - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -968,24 +961,20 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { .withAssignmentEpoch(10)) .build(); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(barTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 2, 3), - mkTopicAssignment(barTopicId, 1) - ))); - put(memberId3, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5), - mkTopicAssignment(barTopicId, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(barTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 2, 3), + mkTopicAssignment(barTopicId, 1) + )), + memberId3, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5), + mkTopicAssignment(barTopicId, 2) + )) + ))); // Member 3 joins the consumer group. 
CoordinatorResult result = context.consumerGroupHeartbeat( @@ -996,7 +985,7 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -1016,32 +1005,33 @@ public void testGroupEpochBumpWhenNewStaticMemberJoins() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(barTopicId, 0) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( - mkTopicAssignment(fooTopicId, 2, 3), - mkTopicAssignment(barTopicId, 1) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId3, mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5), - mkTopicAssignment(barTopicId, 2) - )), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3) + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(barTopicId, 0) + )), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( + mkTopicAssignment(fooTopicId, 2, 3), + mkTopicAssignment(barTopicId, 1) + )), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId3, mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5), + mkTopicAssignment(barTopicId, 2) + )) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3)) + ), + result.records() ); - - assertRecordsEquals(expectedRecords.subList(0, 2), result.records().subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 5), result.records().subList(2, 5)); - assertRecordsEquals(expectedRecords.subList(5, 7), result.records().subList(5, 7)); } @Test @@ -1066,7 +1056,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -1080,7 +1070,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) 
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -1089,7 +1079,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { // Consumer group with two static members. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -1105,12 +1095,10 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))) .withAssignmentEpoch(10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - })) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + ))) .build(); // Member 2 leaves the consumer group. @@ -1120,7 +1108,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setMemberId(memberId2) .setInstanceId(memberId2) .setMemberEpoch(-2) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); // Member epoch of the response would be set to -2. @@ -1148,7 +1136,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -1157,13 +1145,13 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setMemberEpoch(10) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(3, 4, 5)), + .setPartitions(List.of(3, 4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(2)) + .setPartitions(List.of(2)) ))), rejoinResult.response() ); @@ -1176,7 +1164,7 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -1191,14 +1179,14 @@ public void testStaticMemberGetsBackAssignmentUponRejoin() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))) .build(); - List 
expectedRecordsAfterRejoin = Arrays.asList( + List expectedRecordsAfterRejoin = List.of( // The previous member is deleted. GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), @@ -1243,7 +1231,7 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1256,7 +1244,7 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5))) @@ -1264,7 +1252,7 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { // Consumer group with two static members. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -1278,26 +1266,20 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .withAssignment(memberId2, mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5))) .withAssignmentEpoch(10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - } - })) + .withSubscriptionMetadata( + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)) + )) .build(); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1, 2) - ))); - put(member2RejoinId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4, 5), - mkTopicAssignment(barTopicId, 0, 1, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2) + )), + member2RejoinId, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4, 5), + mkTopicAssignment(barTopicId, 0, 1, 2) + )) + ))); // Member 2 leaves the consumer group. CoordinatorResult result = context.consumerGroupHeartbeat( @@ -1332,7 +1314,7 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) // bar is new. + .setSubscribedTopicNames(List.of("foo", "bar")) // bar is new. 
.setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -1341,13 +1323,13 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(3, 4, 5)), + .setPartitions(List.of(3, 4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Arrays.asList(0, 1, 2)) + .setPartitions(List.of(0, 1, 2)) ))), rejoinResult.response() ); @@ -1360,7 +1342,7 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5))) @@ -1374,14 +1356,14 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2))) .build(); - List expectedRecordsAfterRejoin = Arrays.asList( + List expectedRecordsAfterRejoin = List.of( // The previous member is deleted. GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), @@ -1396,12 +1378,10 @@ public void testStaticMemberRejoinsWithNewSubscribedTopics() { // As the new member as a different subscribed topic set, a rebalance is triggered. GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedRejoinedMember), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, member2RejoinId, mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -1437,7 +1417,7 @@ public void testNoGroupEpochBumpWhenStaticMemberTemporarilyLeaves() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -1451,7 +1431,7 @@ public void testNoGroupEpochBumpWhenStaticMemberTemporarilyLeaves() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) // Use zar only here to ensure that metadata needs to be recomputed. 
- .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -1460,7 +1440,7 @@ public void testNoGroupEpochBumpWhenStaticMemberTemporarilyLeaves() { // Consumer group with two static members. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -1486,7 +1466,7 @@ public void testNoGroupEpochBumpWhenStaticMemberTemporarilyLeaves() { .setInstanceId(memberId2) .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); // member epoch of the response would be set to -2 @@ -1524,7 +1504,7 @@ public void testLeavingStaticMemberBumpsGroupEpoch() { // Consumer group with two static members. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -1539,7 +1519,7 @@ public void testLeavingStaticMemberBumpsGroupEpoch() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -1553,7 +1533,7 @@ public void testLeavingStaticMemberBumpsGroupEpoch() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) // Use zar only here to ensure that metadata needs to be recomputed. - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -1576,7 +1556,7 @@ public void testLeavingStaticMemberBumpsGroupEpoch() { .setMemberId(memberId2) .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -1586,17 +1566,15 @@ public void testLeavingStaticMemberBumpsGroupEpoch() { result.response() ); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), // Subscription metadata is recomputed because zar is no longer there. 
- GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) ); @@ -1617,7 +1595,7 @@ public void testShouldThrownUnreleasedInstanceIdExceptionWhenNewMemberJoinsWithI // Consumer group with one static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -1630,7 +1608,7 @@ public void testShouldThrownUnreleasedInstanceIdExceptionWhenNewMemberJoinsWithI .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1649,7 +1627,7 @@ public void testShouldThrownUnreleasedInstanceIdExceptionWhenNewMemberJoinsWithI .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -1667,7 +1645,7 @@ public void testShouldThrownUnknownMemberIdExceptionWhenUnknownStaticMemberJoins // Consumer group with one static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .build()) @@ -1679,7 +1657,7 @@ public void testShouldThrownUnknownMemberIdExceptionWhenUnknownStaticMemberJoins .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1698,7 +1676,7 @@ public void testShouldThrownUnknownMemberIdExceptionWhenUnknownStaticMemberJoins .setMemberEpoch(10) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -1715,7 +1693,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen // Consumer group with one static member. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .build()) @@ -1727,7 +1705,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1744,7 +1722,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen .setInstanceId(memberId1) .setMemberEpoch(11) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -1757,7 +1735,7 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); ConsumerGroupMember member = new ConsumerGroupMember.Builder(memberId) @@ -1768,7 +1746,7 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1, 2, 3))) .build(); @@ -1794,7 +1772,7 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { .setInstanceId(memberId) .setMemberEpoch(200) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member epoch is smaller than the expected epoch. assertThrows(FencedMemberEpochException.class, () -> @@ -1805,7 +1783,7 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { .setInstanceId(memberId) .setMemberEpoch(50) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member joins with previous epoch but without providing partitions. assertThrows(FencedMemberEpochException.class, () -> @@ -1816,7 +1794,7 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { .setInstanceId(memberId) .setMemberEpoch(99) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); // Member joins with previous epoch and has a subset of the owned partitions. This // is accepted as the response with the bumped epoch may have been lost. 
In this @@ -1828,10 +1806,10 @@ public void testConsumerGroupMemberEpochValidationForStaticMember() { .setInstanceId(memberId) .setMemberEpoch(99) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) - .setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions() + .setSubscribedTopicNames(List.of("foo", "bar")) + .setTopicPartitions(List.of(new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(1, 2))))); + .setPartitions(List.of(1, 2))))); assertEquals(100, result.response().memberEpoch()); } @@ -1848,7 +1826,7 @@ public void testShouldThrowUnknownMemberIdExceptionWhenUnknownStaticMemberLeaves // Consumer group with one static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .build()) @@ -1860,7 +1838,7 @@ public void testShouldThrowUnknownMemberIdExceptionWhenUnknownStaticMemberLeaves .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1877,7 +1855,7 @@ public void testShouldThrowUnknownMemberIdExceptionWhenUnknownStaticMemberLeaves .setInstanceId("unknown-" + memberId1) .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -1894,7 +1872,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen // Consumer group with one static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .build()) @@ -1906,7 +1884,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -1923,7 +1901,7 @@ public void testShouldThrowFencedInstanceIdExceptionWhenStaticMemberWithDifferen .setInstanceId(memberId1) .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -1938,7 +1916,7 @@ public void testConsumerGroupHeartbeatFullResponse() { // Create a context with an empty consumer group. 
MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addRacks() @@ -1947,13 +1925,7 @@ public void testConsumerGroupHeartbeatFullResponse() { // Prepare new assignment for the group. assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1) - ))); - } - } + Map.of(memberId, new MemberAssignmentImpl(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1)))) )); CoordinatorResult result; @@ -1965,7 +1937,7 @@ public void testConsumerGroupHeartbeatFullResponse() { .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignor("range") .setTopicPartitions(Collections.emptyList())); @@ -1975,10 +1947,10 @@ public void testConsumerGroupHeartbeatFullResponse() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1))))), + .setPartitions(List.of(0, 1))))), result.response() ); @@ -1998,14 +1970,39 @@ public void testConsumerGroupHeartbeatFullResponse() { ); // A full response should be sent back when the member sends - // a full request again. + // a full request again with topic names set. + result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId) + .setMemberEpoch(result.response().memberEpoch()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicNames(List.of("foo", "bar")) + .setServerAssignor("range") + .setTopicPartitions(Collections.emptyList())); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId) + .setMemberEpoch(1) + .setHeartbeatIntervalMs(5000) + .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List.of( + new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooTopicId) + .setPartitions(List.of(0, 1))))), + result.response() + ); + + // A full response should be sent back when the member sends + // a full request again with regex set. 
result = context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(result.response().memberEpoch()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicRegex("foo.*") .setServerAssignor("range") .setTopicPartitions(Collections.emptyList())); @@ -2015,10 +2012,10 @@ public void testConsumerGroupHeartbeatFullResponse() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1))))), + .setPartitions(List.of(0, 1))))), result.response() ); } @@ -2039,7 +2036,7 @@ public void testReconciliationProcess() { // Create a context with one consumer group containing two members. MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -2053,7 +2050,7 @@ public void testReconciliationProcess() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -2066,7 +2063,7 @@ public void testReconciliationProcess() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -2082,24 +2079,20 @@ public void testReconciliationProcess() { .build(); // Prepare new assignment for the group. 
- assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(barTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 2, 3), - mkTopicAssignment(barTopicId, 2) - ))); - put(memberId3, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5), - mkTopicAssignment(barTopicId, 1) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(barTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 2, 3), + mkTopicAssignment(barTopicId, 2) + )), + memberId3, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5), + mkTopicAssignment(barTopicId, 1) + )) + ))); CoordinatorResult result; @@ -2118,7 +2111,7 @@ public void testReconciliationProcess() { .setMemberId(memberId3) .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignor("range") .setTopicPartitions(Collections.emptyList())); @@ -2159,18 +2152,18 @@ public void testReconciliationProcess() { .setMemberEpoch(10) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1)), + .setPartitions(List.of(0, 1)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(0)) + .setPartitions(List.of(0)) ))), result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.UNREVOKED_PARTITIONS) .setMemberEpoch(10) @@ -2202,18 +2195,18 @@ public void testReconciliationProcess() { .setMemberEpoch(10) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Collections.singletonList(3)), + .setPartitions(List.of(3)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(2)) + .setPartitions(List.of(2)) ))), result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2) .setState(MemberState.UNREVOKED_PARTITIONS) .setMemberEpoch(10) @@ -2245,7 +2238,7 @@ public void testReconciliationProcess() { result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3) .setState(MemberState.UNRELEASED_PARTITIONS) .setMemberEpoch(11) @@ -2264,13 +2257,13 @@ public void testReconciliationProcess() { .setGroupId(groupId) .setMemberId(memberId1) .setMemberEpoch(10) - .setTopicPartitions(Arrays.asList( + 
.setTopicPartitions(List.of( new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1)), + .setPartitions(List.of(0, 1)), new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(0)) + .setPartitions(List.of(0)) ))); assertResponseEquals( @@ -2281,7 +2274,7 @@ public void testReconciliationProcess() { result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.STABLE) .setMemberEpoch(11) @@ -2327,14 +2320,14 @@ public void testReconciliationProcess() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(1))))), + .setPartitions(List.of(1))))), result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3) .setState(MemberState.UNRELEASED_PARTITIONS) .setMemberEpoch(11) @@ -2374,13 +2367,13 @@ public void testReconciliationProcess() { .setGroupId(groupId) .setMemberId(memberId2) .setMemberEpoch(10) - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Collections.singletonList(3)), + .setPartitions(List.of(3)), new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(2)) + .setPartitions(List.of(2)) ))); assertResponseEquals( @@ -2389,18 +2382,18 @@ public void testReconciliationProcess() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(2, 3)), + .setPartitions(List.of(2, 3)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(2)) + .setPartitions(List.of(2)) ))), result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2) .setState(MemberState.STABLE) .setMemberEpoch(11) @@ -2421,10 +2414,10 @@ public void testReconciliationProcess() { .setGroupId(groupId) .setMemberId(memberId3) .setMemberEpoch(11) - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(1))))); + .setPartitions(List.of(1))))); assertResponseEquals( new ConsumerGroupHeartbeatResponseData() @@ -2432,17 +2425,17 @@ public void testReconciliationProcess() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() 
.setTopicId(fooTopicId) - .setPartitions(Arrays.asList(4, 5)), + .setPartitions(List.of(4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Collections.singletonList(1))))), + .setPartitions(List.of(1))))), result.response() ); - assertRecordsEquals(Collections.singletonList( + assertRecordsEquals(List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3) .setState(MemberState.STABLE) .setMemberEpoch(11) @@ -2474,12 +2467,12 @@ public void testNewMemberIsRejectedWithMaximumMembersIsReached() { // Create a context with one consumer group containing two members. MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) .build()) - .withConsumerGroupMaxSize(2) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MAX_SIZE_CONFIG, 2) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.STABLE) @@ -2488,7 +2481,7 @@ public void testNewMemberIsRejectedWithMaximumMembersIsReached() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), @@ -2501,7 +2494,7 @@ public void testNewMemberIsRejectedWithMaximumMembersIsReached() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), @@ -2524,7 +2517,7 @@ public void testNewMemberIsRejectedWithMaximumMembersIsReached() { .setMemberEpoch(0) .setServerAssignor("range") .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -2537,7 +2530,7 @@ public void testConsumerGroupStates() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)) .build(); @@ -2545,7 +2538,7 @@ public void testConsumerGroupStates() { context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.STABLE) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .build())); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)); @@ -2592,7 +2585,7 @@ public void testPartitionAssignorExceptionOnRegularHeartbeat() { when(assignor.assign(any(), 
any())).thenThrow(new PartitionAssignorException("Assignment failed.")); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -2609,7 +2602,7 @@ public void testPartitionAssignorExceptionOnRegularHeartbeat() { .setMemberId(memberId1) .setMemberEpoch(0) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignor("range") .setTopicPartitions(Collections.emptyList()))); } @@ -2626,8 +2619,7 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { // Create a context with one consumer group containing one member. MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) - .withConsumerGroupMetadataRefreshIntervalMs(5 * 60 * 1000) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -2640,7 +2632,7 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -2648,13 +2640,11 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { .withAssignment(memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) .withAssignmentEpoch(10) - .withSubscriptionMetadata(new HashMap() { - { - // foo only has 3 partitions stored in the metadata but foo has - // 6 partitions the metadata image. - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 3)); - } - })) + .withSubscriptionMetadata( + // foo only has 3 partitions stored in the metadata but foo has + // 6 partitions the metadata image. + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 3)) + )) .build(); // The metadata refresh flag should be true. @@ -2664,7 +2654,7 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { // Prepare the assignment result. 
assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -2683,10 +2673,10 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)) + .setPartitions(List.of(0, 1, 2, 3, 4, 5)) ))), result.response() ); @@ -2697,18 +2687,16 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { .setPreviousMemberEpoch(10) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - } - }), + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)) + ), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) @@ -2721,7 +2709,7 @@ public void testSubscriptionMetadataRefreshedAfterGroupIsLoaded() { // Check next refresh time. assertFalse(consumerGroup.hasMetadataExpired(context.time.milliseconds())); - assertEquals(context.time.milliseconds() + 5 * 60 * 1000, consumerGroup.metadataRefreshDeadline().deadlineMs); + assertEquals(context.time.milliseconds() + Integer.MAX_VALUE, consumerGroup.metadataRefreshDeadline().deadlineMs); assertEquals(11, consumerGroup.metadataRefreshDeadline().epoch); } @@ -2737,8 +2725,7 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { // Create a context with one consumer group containing one member. 
MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) - .withConsumerGroupMetadataRefreshIntervalMs(5 * 60 * 1000) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -2751,7 +2738,7 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -2759,13 +2746,11 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { .withAssignment(memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) .withAssignmentEpoch(10) - .withSubscriptionMetadata(new HashMap() { - { - // foo only has 3 partitions stored in the metadata but foo has - // 6 partitions the metadata image. - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 3)); - } - })) + .withSubscriptionMetadata( + // foo only has 3 partitions stored in the metadata but foo has + // 6 partitions the metadata image. + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 3)) + )) .build(); // The metadata refresh flag should be true. @@ -2775,7 +2760,7 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { // Prepare the assignment result. assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -2789,7 +2774,7 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { // The metadata refresh flag is set to a future time. assertFalse(consumerGroup.hasMetadataExpired(context.time.milliseconds())); - assertEquals(context.time.milliseconds() + 5 * 60 * 1000, consumerGroup.metadataRefreshDeadline().deadlineMs); + assertEquals(context.time.milliseconds() + Integer.MAX_VALUE, consumerGroup.metadataRefreshDeadline().deadlineMs); assertEquals(11, consumerGroup.metadataRefreshDeadline().epoch); // Rollback the uncommitted changes. 
This does not rollback the metadata flag @@ -2812,10 +2797,10 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { .setMemberEpoch(11) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)) + .setPartitions(List.of(0, 1, 2, 3, 4, 5)) ))), result.response() ); @@ -2826,18 +2811,16 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { .setPreviousMemberEpoch(10) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - } - }), + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)) + ), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) @@ -2850,7 +2833,7 @@ public void testSubscriptionMetadataRefreshedAgainAfterWriteFailure() { // Check next refresh time. assertFalse(consumerGroup.hasMetadataExpired(context.time.milliseconds())); - assertEquals(context.time.milliseconds() + 5 * 60 * 1000, consumerGroup.metadataRefreshDeadline().deadlineMs); + assertEquals(context.time.milliseconds() + Integer.MAX_VALUE, consumerGroup.metadataRefreshDeadline().deadlineMs); assertEquals(11, consumerGroup.metadataRefreshDeadline().epoch); } @@ -2861,7 +2844,7 @@ public void testGroupIdsByTopics() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); assertEquals(Collections.emptySet(), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2871,7 +2854,7 @@ public void testGroupIdsByTopics() { // M1 in group 1 subscribes to foo and bar. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId1, new ConsumerGroupMember.Builder("group1-m1") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .build())); assertEquals(Set.of(groupId1), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2881,7 +2864,7 @@ public void testGroupIdsByTopics() { // M1 in group 2 subscribes to foo, bar and zar. 
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId2, new ConsumerGroupMember.Builder("group2-m1") - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .build())); assertEquals(Set.of(groupId1, groupId2), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2891,7 +2874,7 @@ public void testGroupIdsByTopics() { // M2 in group 1 subscribes to bar and zar. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId1, new ConsumerGroupMember.Builder("group1-m2") - .setSubscribedTopicNames(Arrays.asList("bar", "zar")) + .setSubscribedTopicNames(List.of("bar", "zar")) .build())); assertEquals(Set.of(groupId1, groupId2), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2901,7 +2884,7 @@ public void testGroupIdsByTopics() { // M2 in group 2 subscribes to foo and bar. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId2, new ConsumerGroupMember.Builder("group2-m2") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .build())); assertEquals(Set.of(groupId1, groupId2), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2929,7 +2912,7 @@ public void testGroupIdsByTopics() { // M2 in group 2 subscribes to foo. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId2, new ConsumerGroupMember.Builder("group2-m2") - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build())); assertEquals(Set.of(groupId2), context.groupMetadataManager.groupsSubscribedToTopic("foo")); @@ -2960,7 +2943,7 @@ public void testGroupIdsByTopics() { @Test public void testOnNewMetadataImageWithEmptyDelta() { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new MockPartitionAssignor("range"))) .build(); MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); @@ -2973,41 +2956,40 @@ public void testOnNewMetadataImageWithEmptyDelta() { @Test public void testOnNewMetadataImage() { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .build(); // M1 in group 1 subscribes to a and b. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group1", new ConsumerGroupMember.Builder("group1-m1") - .setSubscribedTopicNames(Arrays.asList("a", "b")) + .setSubscribedTopicNames(List.of("a", "b")) .build())); // M1 in group 2 subscribes to b and c. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group2", new ConsumerGroupMember.Builder("group2-m1") - .setSubscribedTopicNames(Arrays.asList("b", "c")) + .setSubscribedTopicNames(List.of("b", "c")) .build())); // M1 in group 3 subscribes to d. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group3", new ConsumerGroupMember.Builder("group3-m1") - .setSubscribedTopicNames(Collections.singletonList("d")) + .setSubscribedTopicNames(List.of("d")) .build())); // M1 in group 4 subscribes to e. 
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group4", new ConsumerGroupMember.Builder("group4-m1") - .setSubscribedTopicNames(Collections.singletonList("e")) + .setSubscribedTopicNames(List.of("e")) .build())); // M1 in group 5 subscribes to f. context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group5", new ConsumerGroupMember.Builder("group5-m1") - .setSubscribedTopicNames(Collections.singletonList("f")) + .setSubscribedTopicNames(List.of("f")) .build())); // Ensures that all refresh flags are set to the future. - Arrays.asList("group1", "group2", "group3", "group4", "group5").forEach(groupId -> { + List.of("group1", "group2", "group3", "group4", "group5").forEach(groupId -> { ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId); group.setMetadataRefreshDeadline(context.time.milliseconds() + 5000L, 0); assertFalse(group.hasMetadataExpired(context.time.milliseconds())); @@ -3044,12 +3026,12 @@ public void testOnNewMetadataImage() { context.groupMetadataManager.onNewMetadataImage(image, delta); // Verify the groups. - Arrays.asList("group1", "group2", "group3", "group4").forEach(groupId -> { + List.of("group1", "group2", "group3", "group4").forEach(groupId -> { ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId); assertTrue(group.hasMetadataExpired(context.time.milliseconds())); }); - Collections.singletonList("group5").forEach(groupId -> { + List.of("group5").forEach(groupId -> { ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId); assertFalse(group.hasMetadataExpired(context.time.milliseconds())); }); @@ -3069,7 +3051,7 @@ public void testSessionTimeoutLifecycle() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -3077,7 +3059,7 @@ public void testSessionTimeoutLifecycle() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -3090,7 +3072,7 @@ public void testSessionTimeoutLifecycle() { .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(1, result.response().memberEpoch()); @@ -3144,7 +3126,7 @@ public void testSessionTimeoutExpiration() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -3152,7 +3134,7 @@ public void testSessionTimeoutExpiration() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( 
mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -3165,7 +3147,7 @@ public void testSessionTimeoutExpiration() { .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(1, result.response().memberEpoch()); @@ -3177,10 +3159,10 @@ public void testSessionTimeoutExpiration() { // Verify the expired timeout. assertEquals( - Collections.singletonList(new ExpiredTimeout( + List.of(new ExpiredTimeout( groupSessionTimeoutKey(groupId, memberId), new CoordinatorResult<>( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId), @@ -3208,7 +3190,7 @@ public void testSessionTimeoutExpirationStaticMember() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -3216,7 +3198,7 @@ public void testSessionTimeoutExpirationStaticMember() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -3230,7 +3212,7 @@ public void testSessionTimeoutExpirationStaticMember() { .setInstanceId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(1, result.response().memberEpoch()); @@ -3245,7 +3227,7 @@ public void testSessionTimeoutExpirationStaticMember() { .setInstanceId(memberId) .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(-2, result.response().memberEpoch()); @@ -3258,10 +3240,10 @@ public void testSessionTimeoutExpirationStaticMember() { // Verify the expired timeout. 
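The session and rebalance timeout tests around this point all follow one pattern: a deadline is registered under a key such as groupSessionTimeoutKey(groupId, memberId), the mock clock is advanced with context.sleep(...), and the returned ExpiredTimeout entries (with their tombstone records) are asserted. A hypothetical, simplified timer along those lines; the names are illustrative and this is not the test context's actual timer:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

final class FakeTimer {
    private long nowMs = 0L;
    private final Map<String, Long> deadlines = new HashMap<>();

    void schedule(String key, long delayMs) {
        // e.g. schedule(groupSessionTimeoutKey(groupId, memberId), 45000)
        deadlines.put(key, nowMs + delayMs);
    }

    List<String> sleep(long ms) {
        nowMs += ms;
        List<String> expired = new ArrayList<>();
        Iterator<Map.Entry<String, Long>> it = deadlines.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Long> e = it.next();
            if (e.getValue() <= nowMs) {
                expired.add(e.getKey());   // each deadline fires at most once
                it.remove();
            }
        }
        return expired;
    }
}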
assertEquals( - Collections.singletonList(new ExpiredTimeout( + List.of(new ExpiredTimeout( groupSessionTimeoutKey(groupId, memberId), new CoordinatorResult<>( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId), @@ -3290,22 +3272,16 @@ public void testRebalanceTimeoutLifecycle() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 3) .addRacks() .build()) .build(); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of(memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2) + ))))); // Member 1 joins the group. CoordinatorResult result = @@ -3315,7 +3291,7 @@ public void testRebalanceTimeoutLifecycle() { .setMemberId(memberId1) .setMemberEpoch(0) .setRebalanceTimeoutMs(180000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -3324,10 +3300,10 @@ public void testRebalanceTimeoutLifecycle() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2))))), + .setPartitions(List.of(0, 1, 2))))), result.response() ); @@ -3337,18 +3313,14 @@ public void testRebalanceTimeoutLifecycle() { ); // Prepare next assignment. - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 2) + )) + ))); // Member 2 joins the group. 
result = context.consumerGroupHeartbeat( @@ -3357,7 +3329,7 @@ public void testRebalanceTimeoutLifecycle() { .setMemberId(memberId2) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -3382,7 +3354,7 @@ public void testRebalanceTimeoutLifecycle() { .setMemberId(memberId1) .setMemberEpoch(1) .setRebalanceTimeoutMs(12000) - .setSubscribedTopicNames(Collections.singletonList("foo"))); + .setSubscribedTopicNames(List.of("foo"))); assertResponseEquals( new ConsumerGroupHeartbeatResponseData() @@ -3390,10 +3362,10 @@ public void testRebalanceTimeoutLifecycle() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1))))), + .setPartitions(List.of(0, 1))))), result.response() ); @@ -3413,9 +3385,9 @@ public void testRebalanceTimeoutLifecycle() { .setGroupId(groupId) .setMemberId(memberId1) .setMemberEpoch(1) - .setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions() + .setTopicPartitions(List.of(new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1))))); + .setPartitions(List.of(0, 1))))); assertResponseEquals( new ConsumerGroupHeartbeatResponseData() @@ -3445,7 +3417,7 @@ public void testRebalanceTimeoutExpiration() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 3) .addRacks() @@ -3453,13 +3425,7 @@ public void testRebalanceTimeoutExpiration() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1, 2) - ))); - } - } + Map.of(memberId1, new MemberAssignmentImpl(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2)))) )); // Member 1 joins the group. @@ -3470,7 +3436,7 @@ public void testRebalanceTimeoutExpiration() { .setMemberId(memberId1) .setMemberEpoch(0) .setRebalanceTimeoutMs(10000) // Use timeout smaller than session timeout. - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -3479,10 +3445,10 @@ public void testRebalanceTimeoutExpiration() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2))))), + .setPartitions(List.of(0, 1, 2))))), result.response() ); @@ -3492,18 +3458,14 @@ public void testRebalanceTimeoutExpiration() { ); // Prepare next assignment. 
- assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 2) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 2) + )) + ))); // Member 2 joins the group. result = context.consumerGroupHeartbeat( @@ -3512,7 +3474,7 @@ public void testRebalanceTimeoutExpiration() { .setMemberId(memberId2) .setMemberEpoch(0) .setRebalanceTimeoutMs(10000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertResponseEquals( @@ -3543,10 +3505,10 @@ public void testRebalanceTimeoutExpiration() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Collections.singletonList( + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1))))), + .setPartitions(List.of(0, 1))))), result.response() ); @@ -3555,10 +3517,10 @@ public void testRebalanceTimeoutExpiration() { // Verify the expired timeout. assertEquals( - Collections.singletonList(new ExpiredTimeout( + List.of(new ExpiredTimeout( consumerGroupRebalanceTimeoutKey(groupId, memberId1), new CoordinatorResult<>( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), @@ -3582,7 +3544,6 @@ public void testOnLoaded() { String barTopicName = "bar"; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -3594,7 +3555,7 @@ public void testOnLoaded() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -3607,7 +3568,7 @@ public void testOnLoaded() { .setPreviousMemberEpoch(10) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .build()) .withAssignment("foo-1", mkAssignment( @@ -3684,7 +3645,7 @@ public void testGenerateRecordsOnNewClassicGroup() throws Exception { ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group, MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group)), joinResult.records ); } @@ -3717,7 +3678,7 @@ public void 
testReplayGroupMetadataRecords(boolean useDefaultRebalanceTimeout) { .build(); byte[] subscription = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"))).array(); + List.of("foo"))).array(); List members = new ArrayList<>(); List expectedMembers = new ArrayList<>(); JoinGroupRequestProtocolCollection expectedProtocols = new JoinGroupRequestProtocolCollection(0); @@ -3757,8 +3718,7 @@ public void testReplayGroupMetadataRecords(boolean useDefaultRebalanceTimeout) { .setLeader("member-0") .setProtocolType("consumer") .setProtocol("range") - .setCurrentStateTimestamp(context.time.milliseconds()), - MetadataVersion.latestTesting()); + .setCurrentStateTimestamp(context.time.milliseconds())); context.replay(groupMetadataRecord); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); @@ -3789,11 +3749,11 @@ public void testReplayGroupMetadataRecords(boolean useDefaultRebalanceTimeout) { @Test public void testOnLoadedExceedGroupMaxSizeTriggersRebalance() { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(1) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, 1) .build(); byte[] subscription = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"))).array(); + List.of("foo"))).array(); List members = new ArrayList<>(); IntStream.range(0, 2).forEach(i -> members.add( @@ -3815,8 +3775,7 @@ public void testOnLoadedExceedGroupMaxSizeTriggersRebalance() { .setLeader("member-0") .setProtocolType("consumer") .setProtocol("range") - .setCurrentStateTimestamp(context.time.milliseconds()), - MetadataVersion.latestTesting()); + .setCurrentStateTimestamp(context.time.milliseconds())); context.replay(groupMetadataRecord); context.groupMetadataManager.onLoaded(); @@ -3832,7 +3791,7 @@ public void testOnLoadedSchedulesClassicGroupMemberHeartbeats() { .build(); byte[] subscription = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"))).array(); + List.of("foo"))).array(); List members = new ArrayList<>(); IntStream.range(0, 2).forEach(i -> members.add( @@ -3855,8 +3814,7 @@ public void testOnLoadedSchedulesClassicGroupMemberHeartbeats() { .setLeader("member-0") .setProtocolType("consumer") .setProtocol("range") - .setCurrentStateTimestamp(context.time.milliseconds()), - MetadataVersion.latestTesting()); + .setCurrentStateTimestamp(context.time.milliseconds())); context.replay(groupMetadataRecord); context.groupMetadataManager.onLoaded(); @@ -3873,7 +3831,7 @@ public void testOnLoadedSchedulesClassicGroupMemberHeartbeats() { @Test public void testJoinGroupShouldReceiveErrorIfGroupOverMaxSize() throws Exception { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(10) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, 10) .build(); context.createClassicGroup("group-id"); @@ -3901,8 +3859,8 @@ public void testDynamicMembersJoinGroupWithMaxSizeAndRequiredKnownMember() { boolean requiredKnownMemberId = true; int groupMaxSize = 10; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(groupMaxSize) - .withClassicGroupInitialRebalanceDelayMs(50) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize) + 
.withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 50) .build(); ClassicGroup group = context.createClassicGroup("group-id"); @@ -3960,8 +3918,8 @@ public void testDynamicMembersJoinGroupWithMaxSizeAndNotRequiredKnownMember() { boolean requiredKnownMemberId = false; int groupMaxSize = 10; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(groupMaxSize) - .withClassicGroupInitialRebalanceDelayMs(50) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize) + .withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 50) .build(); ClassicGroup group = context.createClassicGroup("group-id"); @@ -4013,8 +3971,8 @@ public void testStaticMembersJoinGroupWithMaxSize() { .collect(Collectors.toList()); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(groupMaxSize) - .withClassicGroupInitialRebalanceDelayMs(50) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize) + .withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 50) .build(); ClassicGroup group = context.createClassicGroup("group-id"); @@ -4063,8 +4021,8 @@ public void testDynamicMembersCanRejoinGroupWithMaxSizeWhileRebalancing() { boolean requiredKnownMemberId = true; int groupMaxSize = 10; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(groupMaxSize) - .withClassicGroupInitialRebalanceDelayMs(50) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize) + .withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 50) .build(); ClassicGroup group = context.createClassicGroup("group-id"); @@ -4126,8 +4084,8 @@ public void testLastJoiningMembersAreKickedOutWhenRejoiningGroupWithMaxSize() { int groupMaxSize = 10; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(groupMaxSize) - .withClassicGroupInitialRebalanceDelayMs(50) + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize) + .withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 50) .build(); // Create a group and add members that exceed the group max size. @@ -4729,7 +4687,7 @@ public void testHeartbeatExpirationShouldRemovePendingMember() throws Exception public void testHeartbeatExpirationShouldRemoveMember() throws Exception { // Set initial rebalance delay to simulate a long running rebalance. 
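These hunks also retire the bespoke builder setters (withClassicGroupMaxSize, withClassicGroupInitialRebalanceDelayMs, withConsumerGroupAssignors, withConsumerGroupMigrationPolicy) in favour of a single withConfig(key, value) call keyed by GroupCoordinatorConfig constants. A hypothetical sketch of that builder shape, with illustrative names only, not the actual GroupMetadataManagerTestContext code:

import java.util.HashMap;
import java.util.Map;

final class TestContextBuilder {
    private final Map<String, Object> configOverrides = new HashMap<>();

    TestContextBuilder withConfig(String key, Object value) {
        configOverrides.put(key, value);    // e.g. "group.max.size" -> 10
        return this;
    }

    Map<String, Object> build(Map<String, Object> defaults) {
        Map<String, Object> effective = new HashMap<>(defaults);
        effective.putAll(configOverrides);  // overrides win over the defaults
        return effective;
    }
}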
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupInitialRebalanceDelayMs(10 * 60 * 1000) + .withConfig(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, 10 * 60 * 1000) .build(); ClassicGroup group = context.createClassicGroup("group-id"); @@ -4754,7 +4712,7 @@ public void testHeartbeatExpirationShouldRemoveMember() throws Exception { timeouts.forEach(timeout -> { assertEquals(classicGroupHeartbeatKey("group-id", memberId), timeout.key); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), timeout.result.records() ); }); @@ -5234,7 +5192,7 @@ public void testReplaceStaticMemberInStableStateNoError( ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), joinResult.records ); assertFalse(joinResult.joinFuture.isDone()); @@ -5257,7 +5215,7 @@ public void testReplaceStaticMemberInStableStateNoError( if (supportSkippingAssignment) { expectedResponse - .setMembers(Collections.singletonList( + .setMembers(List.of( new JoinGroupResponseData.JoinGroupResponseMember() .setMemberId(newMemberId) .setGroupInstanceId("group-instance-id") @@ -5349,7 +5307,7 @@ public void testReplaceStaticMemberInStableStateErrors() throws Exception { protocols.add(new JoinGroupRequestProtocol() .setName("roundrobin") .setMetadata(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("bar"))).array())); + List.of("bar"))).array())); GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin( request @@ -5361,7 +5319,7 @@ public void testReplaceStaticMemberInStableStateErrors() throws Exception { ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), joinResult.records ); assertFalse(joinResult.joinFuture.isDone()); @@ -5437,7 +5395,7 @@ public void testReplaceStaticMemberInStableStateSucceeds( supportSkippingAssignment); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), joinResult.records ); assertFalse(joinResult.joinFuture.isDone()); @@ -5566,7 +5524,7 @@ public void testNewMemberTimeoutCompletion() throws Exception { // Member should be removed as heartbeat expires. The group is now empty. 
List> timeouts = context.sleep(5000); - List expectedRecords = Collections.singletonList(GroupMetadataManagerTestContext.newGroupMetadataRecord( + List expectedRecords = List.of(GroupMetadataManagerTestContext.newGroupMetadataRecord( group.groupId(), new GroupMetadataValue() .setMembers(Collections.emptyList()) @@ -5574,8 +5532,7 @@ public void testNewMemberTimeoutCompletion() throws Exception { .setLeader(null) .setProtocolType("consumer") .setProtocol(null) - .setCurrentStateTimestamp(context.time.milliseconds()), - MetadataVersion.latestTesting()) + .setCurrentStateTimestamp(context.time.milliseconds())) ); assertEquals(1, timeouts.size()); @@ -6005,7 +5962,7 @@ public void testStaticMemberRejoinWithLeaderIdAndUnknownMemberId( ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), joinResult.records ); // Simulate a successful write to the log. @@ -6109,7 +6066,7 @@ public void testStaticMemberRejoinWithLeaderIdAndKnownMemberId() throws Exceptio joinResponse, group, COMPLETING_REBALANCE, - Collections.singleton("leader-instance-id") + Set.of("leader-instance-id") ); } @@ -6318,7 +6275,7 @@ public void testStaticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSel ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), followerJoinResult.records ); // Simulate a failed write to the log. @@ -6375,7 +6332,7 @@ public void testStaticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSel leaderSyncResult.appendFuture.complete(null); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), leaderSyncResult.records ); @@ -6425,7 +6382,7 @@ public void testStaticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSel ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), followerJoinResult.records ); @@ -6606,7 +6563,7 @@ public void testStaticMemberRejoinWithKnownLeaderIdToTriggerRebalanceAndFollower followerJoinResult.joinFuture.get(), group, COMPLETING_REBALANCE, - Collections.singleton("follower-instance-id") + Set.of("follower-instance-id") ); } @@ -6637,7 +6594,7 @@ public void testStaticMemberRejoinAsFollowerWithUnknownMemberId() throws Excepti ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), followerJoinResult.records ); // Simulate a successful write to log. 
@@ -6844,7 +6801,7 @@ public void testGetDifferentStaticMemberIdAfterEachRejoin() throws Exception { ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), leaderJoinResult.records ); // Simulate a successful write to log. @@ -7624,8 +7581,8 @@ public void testSyncGroupLeaderAfterFollower() throws Exception { )); assertEquals( - Collections.singletonList( - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, updatedAssignment, MetadataVersion.latestTesting())), + List.of( + GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, updatedAssignment)), syncResult.records ); @@ -8110,7 +8067,7 @@ public void testRebalanceCompletesBeforeMemberJoins() throws Exception { assertThrows(IllegalGenerationException.class, () -> context.sendClassicGroupHeartbeat(firstMemberHeartbeatRequest)); // Now session timeout the unjoined (first) member. Still keeping the new member. - List expectedErrors = Arrays.asList(Errors.NONE, Errors.NONE, Errors.REBALANCE_IN_PROGRESS); + List expectedErrors = List.of(Errors.NONE, Errors.NONE, Errors.REBALANCE_IN_PROGRESS); for (Errors expectedError : expectedErrors) { GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2000)); HeartbeatResponseData heartbeatResponse = context.sendClassicGroupHeartbeat( @@ -8284,7 +8241,7 @@ public void testRebalanceTimesOutWhenSyncRequestIsNotReceived() throws Exception ExpiredTimeout timeout = timeouts.get(0); assertEquals(classicGroupSyncKey("group-id"), timeout.key); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), timeout.result.records() ); @@ -8440,7 +8397,7 @@ public void testRebalanceDoesNotTimeOutWhenAllSyncAreReceived() throws Exception if (response.memberId().equals(leaderId)) { assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), syncResult.records ); @@ -8521,7 +8478,7 @@ public void testListGroups() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withShareGroupAssignor(assignor) .withConsumerGroup(new ConsumerGroupBuilder(consumerGroupId, 10)) .build(); @@ -8535,14 +8492,13 @@ public void testListGroups() { .setLeader(null) .setProtocolType("classic") .setProtocol("range") - .setCurrentStateTimestamp(context.time.milliseconds()), - MetadataVersion.latestTesting())); + .setCurrentStateTimestamp(context.time.milliseconds()))); // Create one share group record. 
context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(shareGroupId, 6)); context.commit(); ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(consumerGroupId, new ConsumerGroupMember.Builder(memberId1) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .build())); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(consumerGroupId, 11)); @@ -8574,7 +8530,7 @@ public void testListGroups() { // List group with case-insensitive ‘empty’. actualAllGroupMap = - context.sendListGroups(Collections.singletonList("empty"), Collections.emptyList()) + context.sendListGroups(List.of("empty"), Collections.emptyList()) .stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); assertEquals(expectAllGroupMap, actualAllGroupMap); @@ -8582,7 +8538,7 @@ public void testListGroups() { context.commit(); // Test list group response to check assigning state in the consumer group. - actualAllGroupMap = context.sendListGroups(Collections.singletonList("assigning"), Collections.emptyList()).stream() + actualAllGroupMap = context.sendListGroups(List.of("assigning"), Collections.emptyList()).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( @@ -8596,7 +8552,7 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); // Test list group response with group state filter and no group type filter. - actualAllGroupMap = context.sendListGroups(Collections.singletonList("Empty"), Collections.emptyList()).stream() + actualAllGroupMap = context.sendListGroups(List.of("Empty"), Collections.emptyList()).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( new ListGroupsResponseData.ListedGroup() @@ -8614,7 +8570,7 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); // Test list group response with no group state filter and with group type filter. - actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList(Group.GroupType.CLASSIC.toString())).stream() + actualAllGroupMap = context.sendListGroups(Collections.emptyList(), List.of(Group.GroupType.CLASSIC.toString())).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( new ListGroupsResponseData.ListedGroup() @@ -8627,7 +8583,7 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); // Test list group response with no group state filter and with group type filter in a different case. 
- actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Consumer")).stream() + actualAllGroupMap = context.sendListGroups(Collections.emptyList(), List.of("Consumer")).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( new ListGroupsResponseData.ListedGroup() @@ -8639,7 +8595,7 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); - actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Share")).stream() + actualAllGroupMap = context.sendListGroups(Collections.emptyList(), List.of("Share")).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( new ListGroupsResponseData.ListedGroup() @@ -8651,7 +8607,7 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); - actualAllGroupMap = context.sendListGroups(Arrays.asList("empty", "Assigning"), Collections.emptyList()).stream() + actualAllGroupMap = context.sendListGroups(List.of("empty", "Assigning"), Collections.emptyList()).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Stream.of( new ListGroupsResponseData.ListedGroup() @@ -8674,14 +8630,14 @@ public void testListGroups() { assertEquals(expectAllGroupMap, actualAllGroupMap); // Test list group response with no group state filter and with invalid group type filter . - actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Invalid")).stream() + actualAllGroupMap = context.sendListGroups(Collections.emptyList(), List.of("Invalid")).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Collections.emptyMap(); assertEquals(expectAllGroupMap, actualAllGroupMap); // Test list group response with invalid group state filter and with no group type filter . 
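The testListGroups assertions exercise state filters in mixed case ("empty", "Empty", "Assigning") and type filters such as "Consumer" and "Share", with unknown values matching nothing and an empty filter list matching everything. A generic sketch of that case-insensitive matching, assuming the filters are simply normalized before comparison; this is an illustration, not the coordinator's implementation:

import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;

final class GroupListFilter {
    static boolean matchesState(String groupState, Set<String> stateFilters) {
        if (stateFilters.isEmpty()) {
            return true;                    // no filter means every state matches
        }
        Set<String> normalized = stateFilters.stream()
            .map(s -> s.toLowerCase(Locale.ROOT))
            .collect(Collectors.toSet());
        return normalized.contains(groupState.toLowerCase(Locale.ROOT));
    }
}
// matchesState("Empty", Set.of("empty")) and matchesState("Empty", Set.of("Empty")) are both
// true, while an unrecognized filter such as "Invalid" matches no group.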
- actualAllGroupMap = context.sendListGroups(Collections.singletonList("Invalid"), Collections.emptyList()).stream() + actualAllGroupMap = context.sendListGroups(List.of("Invalid"), Collections.emptyList()).stream() .collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity())); expectAllGroupMap = Collections.emptyMap(); @@ -8690,23 +8646,23 @@ public void testListGroups() { @Test public void testConsumerGroupDescribeNoErrors() { - List consumerGroupIds = Arrays.asList("group-id-1", "group-id-2"); + List consumerGroupIds = List.of("group-id-1", "group-id-2"); int epoch = 10; String memberId = "member-id"; String topicName = "topicName"; ConsumerGroupMember.Builder memberBuilder = new ConsumerGroupMember.Builder(memberId) - .setSubscribedTopicNames(Collections.singletonList(topicName)) + .setSubscribedTopicNames(List.of(topicName)) .setServerAssignorName("assignorName"); MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withConsumerGroup(new ConsumerGroupBuilder(consumerGroupIds.get(0), epoch)) .withConsumerGroup(new ConsumerGroupBuilder(consumerGroupIds.get(1), epoch) .withMember(memberBuilder.build())) .build(); - List expected = Arrays.asList( + List expected = List.of( new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupEpoch(epoch) .setGroupId(consumerGroupIds.get(0)) @@ -8715,7 +8671,7 @@ public void testConsumerGroupDescribeNoErrors() { new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupEpoch(epoch) .setGroupId(consumerGroupIds.get(1)) - .setMembers(Collections.singletonList( + .setMembers(List.of( memberBuilder.build().asConsumerGroupDescribeMember( new Assignment(Collections.emptyMap()), new MetadataImageBuilder().build().topics() @@ -8735,14 +8691,15 @@ public void testConsumerGroupDescribeWithErrors() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .build(); - List actual = context.sendConsumerGroupDescribe(Collections.singletonList(groupId)); + List actual = context.sendConsumerGroupDescribe(List.of(groupId)); ConsumerGroupDescribeResponseData.DescribedGroup describedGroup = new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) - .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()); - List expected = Collections.singletonList( + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group " + groupId + " not found."); + List expected = List.of( describedGroup ); @@ -8763,17 +8720,16 @@ public void testConsumerGroupDescribeBeforeAndAfterCommittingOffset() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(metadataImage) .build(); ConsumerGroupMember.Builder memberBuilder1 = new ConsumerGroupMember.Builder(memberId1) - .setSubscribedTopicNames(Collections.singletonList(topicName)); + 
.setSubscribedTopicNames(List.of(topicName)); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(consumerGroupId, memberBuilder1.build())); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(consumerGroupId, epoch + 1)); - Map> assignmentMap = new HashMap<>(); - assignmentMap.put(topicId, Collections.emptySet()); + Map> assignmentMap = Map.of(topicId, Collections.emptySet()); ConsumerGroupMember.Builder memberBuilder2 = new ConsumerGroupMember.Builder(memberId2); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(consumerGroupId, memberBuilder2.build())); @@ -8781,11 +8737,12 @@ public void testConsumerGroupDescribeBeforeAndAfterCommittingOffset() { context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(consumerGroupId, memberBuilder2.build())); context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(consumerGroupId, epoch + 2)); - List actual = context.groupMetadataManager.consumerGroupDescribe(Collections.singletonList(consumerGroupId), context.lastCommittedOffset); + List actual = context.groupMetadataManager.consumerGroupDescribe(List.of(consumerGroupId), context.lastCommittedOffset); ConsumerGroupDescribeResponseData.DescribedGroup describedGroup = new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId(consumerGroupId) - .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()); - List expected = Collections.singletonList( + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group " + consumerGroupId + " not found."); + List expected = List.of( describedGroup ); assertEquals(expected, actual); @@ -8793,17 +8750,17 @@ public void testConsumerGroupDescribeBeforeAndAfterCommittingOffset() { // Commit the offset and test again context.commit(); - actual = context.groupMetadataManager.consumerGroupDescribe(Collections.singletonList(consumerGroupId), context.lastCommittedOffset); + actual = context.groupMetadataManager.consumerGroupDescribe(List.of(consumerGroupId), context.lastCommittedOffset); describedGroup = new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId(consumerGroupId) - .setMembers(Arrays.asList( + .setMembers(List.of( memberBuilder1.build().asConsumerGroupDescribeMember(new Assignment(Collections.emptyMap()), metadataImage.topics()), memberBuilder2.build().asConsumerGroupDescribeMember(new Assignment(assignmentMap), metadataImage.topics()) )) .setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString()) .setAssignorName("range") .setGroupEpoch(epoch + 2); - expected = Collections.singletonList( + expected = List.of( describedGroup ); assertEquals(expected, actual); @@ -8822,26 +8779,25 @@ public void testDescribeGroupStable() { .setAssignment(new byte[]{0}) .setSubscription(new byte[]{0, 1, 2}); GroupMetadataValue groupMetadataValue = new GroupMetadataValue() - .setMembers(Collections.singletonList(memberMetadata)) + .setMembers(List.of(memberMetadata)) .setProtocolType("consumer") .setProtocol("range") .setCurrentStateTimestamp(context.time.milliseconds()); context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord( "group-id", - groupMetadataValue, - MetadataVersion.latestTesting() + groupMetadataValue )); context.verifyDescribeGroupsReturnsDeadGroup("group-id"); context.commit(); - List expectedDescribedGroups = Collections.singletonList( + List expectedDescribedGroups = List.of( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id") .setGroupState(STABLE.toString()) 
.setProtocolType(groupMetadataValue.protocolType()) .setProtocolData(groupMetadataValue.protocol()) - .setMembers(Collections.singletonList( + .setMembers(List.of( new DescribeGroupsResponseData.DescribedGroupMember() .setMemberId(memberMetadata.memberId()) .setGroupInstanceId(memberMetadata.groupInstanceId()) @@ -8853,7 +8809,7 @@ public void testDescribeGroupStable() { ); List describedGroups = - context.describeGroups(Collections.singletonList("group-id")); + context.describeGroups(List.of("group-id")); assertEquals(expectedDescribedGroups, describedGroups); } @@ -8871,15 +8827,14 @@ public void testDescribeGroupRebalancing() throws Exception { .setAssignment(new byte[]{0}) .setSubscription(new byte[]{0, 1, 2}); GroupMetadataValue groupMetadataValue = new GroupMetadataValue() - .setMembers(Collections.singletonList(memberMetadata)) + .setMembers(List.of(memberMetadata)) .setProtocolType("consumer") .setProtocol("range") .setCurrentStateTimestamp(context.time.milliseconds()); context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord( "group-id", - groupMetadataValue, - MetadataVersion.latestTesting() + groupMetadataValue )); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); context.groupMetadataManager.prepareRebalance(group, "trigger rebalance"); @@ -8887,13 +8842,13 @@ public void testDescribeGroupRebalancing() throws Exception { context.verifyDescribeGroupsReturnsDeadGroup("group-id"); context.commit(); - List expectedDescribedGroups = Collections.singletonList( + List expectedDescribedGroups = List.of( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id") .setGroupState(PREPARING_REBALANCE.toString()) .setProtocolType(groupMetadataValue.protocolType()) .setProtocolData("") - .setMembers(Collections.singletonList( + .setMembers(List.of( new DescribeGroupsResponseData.DescribedGroupMember() .setMemberId(memberMetadata.memberId()) .setGroupInstanceId(memberMetadata.groupInstanceId()) @@ -8904,7 +8859,7 @@ public void testDescribeGroupRebalancing() throws Exception { ); List describedGroups = - context.describeGroups(Collections.singletonList("group-id")); + context.describeGroups(List.of("group-id")); assertEquals(expectedDescribedGroups, describedGroups); } @@ -8916,6 +8871,13 @@ public void testDescribeGroupsGroupIdNotFoundException() { context.verifyDescribeGroupsReturnsDeadGroup("group-id"); } + @Test + public void testDescribeGroupsBeforeV6GroupIdNotFoundException() { + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .build(); + context.verifyDescribeGroupsBeforeV6ReturnsDeadGroup("group-id"); + } + @Test public void testGroupStuckInRebalanceTimeoutDueToNonjoinedStaticMember() throws Exception { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() @@ -8967,7 +8929,7 @@ public void testGroupStuckInRebalanceTimeoutDueToNonjoinedStaticMember() throws CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId(rebalanceResult.followerId) .setGroupInstanceId("follower-instance-id") @@ -8975,7 +8937,7 @@ public void testGroupStuckInRebalanceTimeoutDueToNonjoinedStaticMember() throws ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() 
.setMemberId(rebalanceResult.followerId) .setGroupInstanceId("follower-instance-id"))); @@ -9002,14 +8964,14 @@ public void testPendingMembersLeaveGroup() throws Exception { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId(pendingJoinResponse.memberId()) )) ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(pendingJoinResponse.memberId()))); @@ -9043,7 +9005,7 @@ public void testLeaveGroupUnknownGroup() { assertThrows(UnknownMemberIdException.class, () -> context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("unknown-group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId("member-id") )) @@ -9067,14 +9029,14 @@ public void testLeaveGroupUnknownMemberIdExistingGroup() throws Exception { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId("unknown-member-id") )) ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId("unknown-member-id") @@ -9094,7 +9056,7 @@ public void testLeaveDeadGroup() { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId("member-id") )) @@ -9125,20 +9087,20 @@ public void testValidLeaveGroup() throws Exception { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId(joinResponse.memberId()) )) ); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), leaveResult.records() ); // Simulate a successful write to the log. 
leaveResult.appendFuture().complete(null); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(joinResponse.memberId()))); @@ -9168,7 +9130,7 @@ public void testLeaveGroupWithFencedInstanceId() throws Exception { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setGroupInstanceId("group-instance-id") .setMemberId("other-member-id") // invalid member id @@ -9176,7 +9138,7 @@ public void testLeaveGroupWithFencedInstanceId() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("group-instance-id") .setMemberId("other-member-id") @@ -9207,7 +9169,7 @@ public void testLeaveGroupStaticMemberWithUnknownMemberId() throws Exception { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setGroupInstanceId("group-instance-id") .setMemberId(UNKNOWN_MEMBER_ID) @@ -9215,7 +9177,7 @@ public void testLeaveGroupStaticMemberWithUnknownMemberId() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("group-instance-id"))); @@ -9236,7 +9198,7 @@ public void testStaticMembersValidBatchLeaveGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setGroupInstanceId("leader-instance-id"), new MemberIdentity() @@ -9246,7 +9208,7 @@ public void testStaticMembersValidBatchLeaveGroup() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("leader-instance-id"), new LeaveGroupResponseData.MemberResponse() @@ -9269,7 +9231,7 @@ public void testStaticMembersLeaveUnknownGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("invalid-group-id") // Invalid group id .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setGroupInstanceId("leader-instance-id"), new MemberIdentity() @@ -9293,7 +9255,7 @@ public void testStaticMembersFencedInstanceBatchLeaveGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setGroupInstanceId("leader-instance-id"), new MemberIdentity() @@ -9304,7 +9266,7 @@ public void testStaticMembersFencedInstanceBatchLeaveGroup() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("leader-instance-id"), new LeaveGroupResponseData.MemberResponse() @@ -9329,7 +9291,7 @@ public void testStaticMembersUnknownInstanceBatchLeaveGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setGroupInstanceId("unknown-instance-id"), // Unknown 
instance id new MemberIdentity() @@ -9339,7 +9301,7 @@ public void testStaticMembersUnknownInstanceBatchLeaveGroup() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("unknown-instance-id") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), @@ -9361,7 +9323,7 @@ public void testPendingMemberBatchLeaveGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setGroupInstanceId("unknown-instance-id"), // Unknown instance id new MemberIdentity() @@ -9371,7 +9333,7 @@ public void testPendingMemberBatchLeaveGroup() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId("unknown-instance-id") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), @@ -9393,7 +9355,7 @@ public void testJoinedMemberPendingMemberBatchLeaveGroup() throws Exception { new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setMemberId(pendingMemberGroupResult.leaderId), new MemberIdentity() @@ -9405,7 +9367,7 @@ public void testJoinedMemberPendingMemberBatchLeaveGroup() throws Exception { ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(pendingMemberGroupResult.leaderId), @@ -9430,7 +9392,7 @@ public void testJoinedMemberPendingMemberBatchLeaveGroupWithUnknownMember() thro new LeaveGroupRequestData() .setGroupId("group-id") .setMembers( - Arrays.asList( + List.of( new MemberIdentity() .setMemberId(pendingMemberGroupResult.leaderId), new MemberIdentity() @@ -9444,7 +9406,7 @@ public void testJoinedMemberPendingMemberBatchLeaveGroupWithUnknownMember() thro ); LeaveGroupResponseData expectedResponse = new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(pendingMemberGroupResult.leaderId), @@ -9468,7 +9430,7 @@ public void testClassicGroupDelete() { .build(); context.createClassicGroup("group-id"); - List expectedRecords = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")); + List expectedRecords = List.of(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")); List records = new ArrayList<>(); context.groupMetadataManager.createGroupTombstoneRecords("group-id", records); assertEquals(expectedRecords, records); @@ -9480,7 +9442,7 @@ public void testClassicGroupMaybeDelete() { .build(); ClassicGroup group = context.createClassicGroup("group-id"); - List expectedRecords = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")); + List expectedRecords = List.of(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")); List records = new ArrayList<>(); context.groupMetadataManager.maybeDeleteGroup("group-id", records); assertEquals(expectedRecords, records); @@ -9502,7 +9464,7 @@ public void testConsumerGroupDelete() { .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( 
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId) @@ -9519,7 +9481,7 @@ public void testConsumerGroupMaybeDelete() { .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId) @@ -9558,7 +9520,7 @@ public void testConsumerGroupRebalanceSensor() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -9567,7 +9529,7 @@ public void testConsumerGroupRebalanceSensor() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) ))) @@ -9580,7 +9542,7 @@ public void testConsumerGroupRebalanceSensor() { .setMemberEpoch(0) .setServerAssignor("range") .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); verify(context.metrics).record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); @@ -9601,7 +9563,7 @@ public void testOnClassicGroupStateTransitionOnLoading() { // Even if there are more group metadata records loaded than tombstone records, the last replayed record // (tombstone in this test) is the latest state of the group. Hence, the overall metric count should be 0. 
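The comment in testOnClassicGroupStateTransitionOnLoading states the invariant the metric assertion relies on: however many group metadata records are replayed for the same group id, only the last replayed record (a tombstone here) determines the group's state, so the classic-group count ends at zero. A simplified, hypothetical sketch of that bookkeeping, illustrative only:

import java.util.HashMap;
import java.util.Map;

final class ClassicGroupGauge {
    private final Map<String, Boolean> liveGroups = new HashMap<>(); // groupId -> alive

    void replayMetadataRecord(String groupId) {
        liveGroups.put(groupId, true);   // replaying the same id repeatedly is idempotent
    }

    void replayTombstone(String groupId) {
        liveGroups.remove(groupId);      // the last replayed record wins: the group is gone
    }

    long count() {
        return liveGroups.size();
    }
}
// Five metadata records followed by four tombstones for "group-id" leave count() == 0,
// because the final record for that id is a tombstone.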
IntStream.range(0, 5).forEach(__ -> - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, Collections.emptyMap(), MetadataVersion.LATEST_PRODUCTION)) + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, Collections.emptyMap())) ); IntStream.range(0, 4).forEach(__ -> context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")) @@ -9660,7 +9622,7 @@ public void testConsumerGroupHeartbeatWithNonEmptyClassicGroup() { String classicGroupId = "classic-group-id"; String memberId = Uuid.randomUuid().toString(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .build(); ClassicGroup classicGroup = new ClassicGroup( new LogContext(), @@ -9668,7 +9630,7 @@ public void testConsumerGroupHeartbeatWithNonEmptyClassicGroup() { EMPTY, context.time ); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment(), MetadataVersion.latestTesting())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment())); context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false).transitionTo(PREPARING_REBALANCE); assertThrows(GroupIdNotFoundException.class, () -> @@ -9679,7 +9641,7 @@ public void testConsumerGroupHeartbeatWithNonEmptyClassicGroup() { .setMemberEpoch(0) .setServerAssignor(NoOpPartitionAssignor.NAME) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -9688,7 +9650,7 @@ public void testConsumerGroupHeartbeatWithEmptyClassicGroup() { String classicGroupId = "classic-group-id"; String memberId = Uuid.randomUuid().toString(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .build(); ClassicGroup classicGroup = new ClassicGroup( new LogContext(), @@ -9696,7 +9658,7 @@ public void testConsumerGroupHeartbeatWithEmptyClassicGroup() { EMPTY, context.time ); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment(), MetadataVersion.latestTesting())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment())); CoordinatorResult result = context.consumerGroupHeartbeat( new ConsumerGroupHeartbeatRequestData() @@ -9705,7 +9667,7 @@ public void testConsumerGroupHeartbeatWithEmptyClassicGroup() { .setMemberEpoch(0) .setServerAssignor(NoOpPartitionAssignor.NAME) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); ConsumerGroupMember expectedMember = new ConsumerGroupMember.Builder(memberId) @@ -9715,14 +9677,14 @@ public void testConsumerGroupHeartbeatWithEmptyClassicGroup() { .setRebalanceTimeoutMs(5000) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", 
"bar")) .setServerAssignorName(NoOpPartitionAssignor.NAME) .setAssignedPartitions(Collections.emptyMap()) .build(); assertEquals(Errors.NONE.code(), result.response().errorCode()); assertEquals( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(classicGroupId), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(classicGroupId, expectedMember), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(classicGroupId, 1), @@ -9752,7 +9714,7 @@ public void testClassicGroupJoinWithEmptyConsumerGroup() throws Exception { .build(); GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request, true); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(consumerGroupId), GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(consumerGroupId), GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(consumerGroupId) @@ -9777,18 +9739,14 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { String barTopicName = "bar"; MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); MetadataImage metadataImage = new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 1) @@ -9797,8 +9755,8 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { .build(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(metadataImage) .build(); @@ -9806,18 +9764,18 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0) ) )))) ); - Map assignments = Collections.singletonMap( + Map assignments = Map.of( memberId1, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0) )))) @@ -9844,7 +9802,7 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(STABLE); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, 
assignments, metadataImage.features().metadataVersion())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments)); context.commit(); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); @@ -9855,7 +9813,7 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { .setMemberId(memberId2) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setTopicPartitions(Collections.emptyList())); ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) @@ -9863,7 +9821,7 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { .setPreviousMemberEpoch(0) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -9882,12 +9840,12 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setServerAssignorName("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(5000) .setAssignedPartitions(Collections.emptyMap()) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // The existing classic group tombstone. GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), @@ -9902,12 +9860,10 @@ public void testConsumerGroupHeartbeatWithStableClassicGroup() { GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2), // The subscription metadata hasn't been updated during the conversion, so a new one is computed. - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 1)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 1), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )), // Newly joining member 2 bumps the group epoch. A new target assignment is computed. 
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 1), @@ -9941,21 +9897,17 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw String barTopicName = "bar"; MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - put(memberId3, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 1) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )), + memberId3, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 1) + )) + ))); MetadataImage metadataImage = new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) @@ -9964,8 +9916,8 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw .build(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(metadataImage) .build(); @@ -9973,9 +9925,9 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw protocols1.add(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1) ) @@ -9986,29 +9938,21 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw protocols2.add(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(barTopicName, 0)) + List.of(new TopicPartition(barTopicName, 0)) )))) ); - Map assignments = new HashMap() { - { - put( - memberId1, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( - new TopicPartition(fooTopicName, 0), - new TopicPartition(fooTopicName, 1) - )))) - ); - put( - memberId2, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Collections.singletonList( - new TopicPartition(barTopicName, 0) - )))) - ); - } - }; + Map assignments = Map.of( + memberId1, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(fooTopicName, 1) + )))), + memberId2, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(barTopicName, 0) + )))) + ); 
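(Aside, not part of the patch.) The hunks above consistently swap Arrays.asList, Collections.singletonList/singletonMap, and anonymous double-brace HashMap initializers for the List.of/Map.of factories. A minimal standalone Java sketch of the behavioral differences that matter when reviewing such conversions; the class and variable names here are illustrative only:

import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class CollectionFactoryDemo {
    public static void main(String[] args) {
        // Arrays.asList returns a fixed-size, array-backed view: set() works, add() throws.
        List<String> legacy = Arrays.asList("foo", "bar");
        legacy.set(0, "baz");                 // allowed

        // List.of returns a fully immutable list and rejects nulls outright.
        List<String> modern = List.of("foo", "bar");
        // modern.set(0, "qux");              // would throw UnsupportedOperationException
        // List.of("foo", null);              // would throw NullPointerException

        // Map.of avoids the anonymous HashMap subclass created by the double-brace idiom,
        // but it forbids null keys/values and duplicate keys, and its iteration order is unspecified.
        Map<String, Integer> partitionCounts = Map.of("foo", 1, "bar", 1);

        System.out.println(legacy + " " + modern + " " + partitionCounts);
    }
}

The null-hostility is the main behavioral change to watch for: any test value that could legitimately be null has to stay on the older constructors.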
// Construct a stable group with two members. ClassicGroup group = context.createClassicGroup(groupId); @@ -10044,7 +9988,7 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(STABLE); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments)); context.commit(); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); @@ -10067,7 +10011,7 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw .setMemberId(memberId3) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setTopicPartitions(Collections.emptyList())); ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) @@ -10075,7 +10019,7 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw .setPreviousMemberEpoch(0) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -10091,7 +10035,7 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw .setPreviousMemberEpoch(0) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -10109,12 +10053,12 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setServerAssignorName("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(5000) .setAssignedPartitions(Collections.emptyMap()) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // The existing classic group tombstone. GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), @@ -10135,12 +10079,10 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3), // The subscription metadata hasn't been updated during the conversion, so a new one is computed. - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )), // Newly joining member 3 bumps the group epoch. A new target assignment is computed. 
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 1), @@ -10165,54 +10107,164 @@ public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throw assertEquals(group, context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false)); } - @Test - public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { + /** + * Supplies the {@link Arguments} to {@link #testConsumerGroupHeartbeatWithCustomAssignorClassicGroup(ByteBuffer, boolean)}. + */ + private static Stream testConsumerGroupHeartbeatWithCustomAssignorClassicGroupSource() { + return Stream.of( + Arguments.of(null, true), + Arguments.of(ByteBuffer.allocate(0), true), + Arguments.of(ByteBuffer.allocate(1), false) + ); + } + + @ParameterizedTest + @MethodSource("testConsumerGroupHeartbeatWithCustomAssignorClassicGroupSource") + public void testConsumerGroupHeartbeatWithCustomAssignorClassicGroup(ByteBuffer userData, boolean expectUpgrade) { String groupId = "group-id"; - String memberId = "member-id"; - String instanceId = "instance-id"; + String memberId1 = "member-id-1"; + String memberId2 = "member-id-2"; Uuid fooTopicId = Uuid.randomUuid(); String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); MetadataImage metadataImage = new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 1) + .addTopic(barTopicId, barTopicName, 1) .addRacks() .build(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(metadataImage) .build(); JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1); protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() - .setName(NoOpPartitionAssignor.NAME) + .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList(fooTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(barTopicName, 0) + ) )))) ); - Map assignments = Collections.singletonMap( - memberId, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment( - Collections.singletonList(new TopicPartition(fooTopicName, 0)) - ))) + Map assignments = Map.of( + memberId1, + Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(barTopicName, 0) + ), userData))) ); - // Create a stable classic group with a static member. + // Create a stable classic group with member 1. 
ClassicGroup group = context.createClassicGroup(groupId); - group.setProtocolName(Optional.of(NoOpPartitionAssignor.NAME)); + group.setProtocolName(Optional.of("range")); group.add( new ClassicGroupMember( - memberId, - Optional.of(instanceId), - DEFAULT_CLIENT_ID, - DEFAULT_CLIENT_ADDRESS.toString(), - 10000, - 5000, - "consumer", + memberId1, + Optional.empty(), + "client-id", + "client-host", + 10000, + 5000, + "consumer", + protocols, + assignments.get(memberId1) + ) + ); + + group.transitionTo(PREPARING_REBALANCE); + group.transitionTo(COMPLETING_REBALANCE); + group.transitionTo(STABLE); + + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments)); + context.commit(); + group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); + + // A new member 2 with new protocol joins the classic group, triggering the upgrade. + ConsumerGroupHeartbeatRequestData consumerGroupHeartbeatRequestData = + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2) + .setRebalanceTimeoutMs(5000) + .setServerAssignor("range") + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) + .setTopicPartitions(Collections.emptyList()); + + if (expectUpgrade) { + context.consumerGroupHeartbeat(consumerGroupHeartbeatRequestData); + } else { + Exception ex = assertThrows(GroupIdNotFoundException.class, () -> context.consumerGroupHeartbeat(consumerGroupHeartbeatRequestData)); + assertEquals( + "Cannot upgrade classic group group-id to consumer group because an unsupported custom assignor is in use. " + + "Please refer to the documentation or switch to a default assignor before re-attempting the upgrade.", ex.getMessage()); + } + } + + @Test + public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { + String groupId = "group-id"; + String memberId = "member-id"; + String instanceId = "instance-id"; + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + + MetadataImage metadataImage = new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 1) + .addRacks() + .build(); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) + .withMetadataImage(metadataImage) + .build(); + + JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1); + protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() + .setName(NoOpPartitionAssignor.NAME) + .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( + List.of(fooTopicName), + null, + List.of(new TopicPartition(fooTopicName, 0)) + )))) + ); + + Map assignments = Map.of( + memberId, + Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment( + List.of(new TopicPartition(fooTopicName, 0)) + ))) + ); + + // Create a stable classic group with a static member. 
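(Aside, not part of the patch.) The new custom-assignor upgrade test above is written as a JUnit 5 @ParameterizedTest whose @MethodSource supplies Stream<Arguments> rows, one per invocation. A self-contained sketch of that pattern under the same null/empty/non-empty userData split; the class, method, and decision logic below are hypothetical stand-ins, not the coordinator's actual upgrade check:

import java.nio.ByteBuffer;
import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

class UserDataUpgradeSketchTest {
    // Each Arguments.of(...) row becomes one invocation of the test method below.
    private static Stream<Arguments> userDataSource() {
        return Stream.of(
            Arguments.of(null, true),                    // no userData: upgrade allowed
            Arguments.of(ByteBuffer.allocate(0), true),  // empty userData: upgrade allowed
            Arguments.of(ByteBuffer.allocate(1), false)  // non-empty userData: upgrade rejected
        );
    }

    @ParameterizedTest
    @MethodSource("userDataSource")
    void testUpgradeDecision(ByteBuffer userData, boolean expectUpgrade) {
        // Hypothetical predicate mirroring the parameterization: only absent or empty userData is upgradable.
        boolean upgradable = userData == null || !userData.hasRemaining();
        assertEquals(expectUpgrade, upgradable);
    }
}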
+ ClassicGroup group = context.createClassicGroup(groupId); + group.setProtocolName(Optional.of(NoOpPartitionAssignor.NAME)); + group.add( + new ClassicGroupMember( + memberId, + Optional.of(instanceId), + DEFAULT_CLIENT_ID, + DEFAULT_CLIENT_ADDRESS.toString(), + 10000, + 5000, + "consumer", protocols, assignments.get(memberId) ) @@ -10222,7 +10274,7 @@ public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(STABLE); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments)); context.commit(); // The static member rejoins with new protocol after a restart, triggering the upgrade. @@ -10234,7 +10286,7 @@ public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { .setInstanceId(instanceId) .setRebalanceTimeoutMs(5000) .setServerAssignor(NoOpPartitionAssignor.NAME) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setTopicPartitions(Collections.emptyList()), ApiKeys.CONSUMER_GROUP_HEARTBEAT.latestVersion() ); @@ -10245,7 +10297,7 @@ public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -10284,7 +10336,7 @@ public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { .setClassicMemberMetadata(null) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // The existing classic group tombstone. GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), @@ -10325,6 +10377,127 @@ public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { context.assertSessionTimeout(groupId, newMemberId, 45000); } + @Test + public void testConsumerGroupHeartbeatToClassicGroupWithEmptyAssignmentMember() throws ExecutionException, InterruptedException { + String groupId = "group-id"; + String memberId2 = "member-id-2"; + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MetadataImage metadataImage = new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 1) + .addTopic(barTopicId, barTopicName, 1) + .addRacks() + .build(); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) + .withMetadataImage(metadataImage) + .build(); + + JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1); + protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() + .setName("range") + .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( + List.of(fooTopicName, barTopicName) + )))) + ); + + // Member 1 joins, creating a new classic group. 
+ GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin( + new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() + .withGroupId(groupId) + .withMemberId(UNKNOWN_MEMBER_ID) + .withProtocols(protocols) + .withSessionTimeoutMs(5000) + .withRebalanceTimeoutMs(10000) + .build() + ); + + // Triggering completion of the rebalance. + // Member 1 has never synced so its assignment is empty. + context.sleep(3000 + 1); + String memberId1 = joinResult.joinFuture.get().memberId(); + ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); + assertTrue(group.isInState(COMPLETING_REBALANCE)); + + // A new member 2 with new protocol joins the classic group, triggering the upgrade. + CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2) + .setRebalanceTimeoutMs(5000) + .setServerAssignor(NoOpPartitionAssignor.NAME) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) + .setTopicPartitions(Collections.emptyList())); + + ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) + .setMemberEpoch(1) + .setPreviousMemberEpoch(1) + .setState(MemberState.STABLE) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) + .setRebalanceTimeoutMs(10000) + .setClassicMemberMetadata( + new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSessionTimeoutMs(5000) + .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols)) + ) + .setAssignedPartitions(Collections.emptyMap()) + .build(); + + ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2) + .setMemberEpoch(2) + .setPreviousMemberEpoch(0) + .setState(MemberState.STABLE) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setServerAssignorName(NoOpPartitionAssignor.NAME) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) + .setRebalanceTimeoutMs(5000) + .setAssignedPartitions(Collections.emptyMap()) + .build(); + + List expectedRecords = List.of( + // The existing classic group tombstone. + GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), + + // Create the new consumer group with member 1. + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, expectedMember1.assignedPartitions()), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1), + + // Member 2 joins the new consumer group. + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2), + + // The subscription metadata hasn't been updated during the conversion, so a new one is computed. + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 1), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )), + + // Newly joining member 2 bumps the group epoch. A new target assignment is computed. 
+ GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 2), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, Collections.emptyMap()), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 2), + + // Member 2 has no pending revoking partition or pending release partition. + // Bump its member epoch and transition to STABLE. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2) + ); + + assertRecordsEquals(expectedRecords, result.records()); + + context.assertSessionTimeout(groupId, memberId1, expectedMember1.classicProtocolSessionTimeout().get()); + context.assertSessionTimeout(groupId, memberId2, 45000); + } + @Test public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { String groupId = "group-id"; @@ -10337,13 +10510,13 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { Uuid barTopicId = Uuid.randomUuid(); String barTopicName = "bar"; - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -10360,7 +10533,7 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName(NoOpPartitionAssignor.NAME) .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -10379,7 +10552,7 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName(NoOpPartitionAssignor.NAME) .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -10390,8 +10563,8 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { // Consumer group with two static members. // Member 1 uses the classic protocol and member 2 uses the consumer protocol. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -10409,12 +10582,10 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { .withAssignmentEpoch(10)) .build(); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + ))); context.commit(); @@ -10460,7 +10631,7 @@ public void testConsumerGroupHeartbeatFromExistingClassicStaticMember() { .setClassicMemberMetadata(null) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // Remove the existing static member 1 because the rejoining member replaces it. GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), @@ -10495,21 +10666,17 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro String barTopicName = "bar"; MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - put(memberId3, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 1) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )), + memberId3, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 1) + )) + ))); MetadataImage metadataImage = new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) @@ -10518,8 +10685,8 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro .build(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(metadataImage) .build(); @@ -10527,9 +10694,9 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro protocols1.add(new 
JoinGroupRequestData.JoinGroupRequestProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1) ) @@ -10540,29 +10707,21 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro protocols2.add(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(barTopicName, 0)) + List.of(new TopicPartition(barTopicName, 0)) )))) ); - Map assignments = new HashMap() { - { - put( - memberId1, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( - new TopicPartition(fooTopicName, 0), - new TopicPartition(fooTopicName, 1) - )))) - ); - put( - memberId2, - Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Collections.singletonList( - new TopicPartition(barTopicName, 0) - )))) - ); - } - }; + Map assignments = Map.of( + memberId1, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(fooTopicName, 1) + )))), + memberId2, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(barTopicName, 0) + )))) + ); // Construct a stable group with two members. ClassicGroup group = context.createClassicGroup(groupId); @@ -10598,7 +10757,7 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(STABLE); - context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments)); context.commit(); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); @@ -10639,7 +10798,7 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro .setMemberId(memberId3) .setRebalanceTimeoutMs(5000) .setServerAssignor("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setTopicPartitions(Collections.emptyList())); ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) @@ -10647,7 +10806,7 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro .setPreviousMemberEpoch(1) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -10663,7 +10822,7 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro .setPreviousMemberEpoch(1) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) 
.setRebalanceTimeoutMs(10000) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -10681,12 +10840,12 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setServerAssignorName("range") - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(5000) .setAssignedPartitions(Collections.emptyMap()) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // The existing classic group tombstone. GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), @@ -10707,12 +10866,10 @@ public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() thro GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3), // The subscription metadata hasn't been updated during the conversion, so a new one is computed. - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )), // Newly joining member 3 bumps the group epoch. A new target assignment is computed. GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 2), @@ -10851,13 +11008,13 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -10873,7 +11030,7 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -10891,7 +11048,7 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -10902,8 +11059,8 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { // Consumer group with two members. // Member 1 uses the classic protocol and member 2 uses the consumer protocol. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -10921,12 +11078,10 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { .withAssignmentEpoch(10)) .build(); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + ))); context.commit(); ConsumerGroup consumerGroup = context.groupMetadataManager.consumerGroup(groupId); @@ -10938,18 +11093,18 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { .setMemberId(memberId2) .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); - byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1) )))); - Map assignments = Collections.singletonMap(memberId1, assignment); + Map assignments = Map.of(memberId1, assignment); ClassicGroup expectedClassicGroup = new ClassicGroup( new LogContext(), @@ -10976,28 +11131,28 @@ public void testLastConsumerProtocolMemberLeavingConsumerGroup() { ) ); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), - - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), - - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId), - - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()) + assertUnorderedRecordsEquals( + List.of( + List.of( + 
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments)) + ), + result.records() ); - assertUnorderedListEquals(expectedRecords.subList(0, 2), result.records().subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 4), result.records().subList(2, 4)); - assertRecordEquals(expectedRecords.get(4), result.records().get(4)); - assertUnorderedListEquals(expectedRecords.subList(5, 7), result.records().subList(5, 7)); - assertRecordsEquals(expectedRecords.subList(7, 10), result.records().subList(7, 10)); - verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null); // The new classic member 1 has a heartbeat timeout. @@ -11035,13 +11190,13 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -11057,7 +11212,7 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -11075,7 +11230,7 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -11086,8 +11241,8 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { // Consumer group with two members. // Member 1 uses the classic protocol and member 2 uses the consumer protocol. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -11105,12 +11260,10 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { .withAssignmentEpoch(10)) .build(); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + ))); context.commit(); @@ -11120,7 +11273,7 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { .setGroupId(groupId) .setMemberId(memberId2) .setMemberEpoch(10) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList())); // Verify that there is a session timeout. @@ -11131,14 +11284,14 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { ExpiredTimeout timeout = context.sleep(45000 + 1).get(0); assertEquals(groupSessionTimeoutKey(groupId, memberId2), timeout.key); - byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1) )))); - Map assignments = Collections.singletonMap(memberId1, assignment); + Map assignments = Map.of(memberId1, assignment); ClassicGroup expectedClassicGroup = new ClassicGroup( new LogContext(), @@ -11164,28 +11317,29 @@ public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() { assignment ) ); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), - - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), - - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId), - 
GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()) + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments)) + ), + timeout.result.records() ); - assertUnorderedListEquals(expectedRecords.subList(0, 2), timeout.result.records().subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 4), timeout.result.records().subList(2, 4)); - assertRecordEquals(expectedRecords.get(4), timeout.result.records().get(4)); - assertUnorderedListEquals(expectedRecords.subList(5, 7), timeout.result.records().subList(5, 7)); - assertRecordsEquals(expectedRecords.subList(7, 10), timeout.result.records().subList(7, 10)); - verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null); // The new classic member 1 has a heartbeat timeout. @@ -11219,13 +11373,13 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -11241,7 +11395,7 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(30000) .setClassicMemberMetadata( @@ -11259,7 +11413,7 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(30000) .setAssignedPartitions(mkAssignment( @@ -11270,8 +11424,8 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { // Consumer group with two members. 
// Member 1 uses the classic protocol and member 2 uses the consumer protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -11290,30 +11444,24 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { .withAssignmentEpoch(10)) .build(); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3), + zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1) + ))); context.commit(); // Prepare the new assignment. - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1, 2), - mkTopicAssignment(barTopicId, 0, 1) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4, 5) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2), + mkTopicAssignment(barTopicId, 0, 1) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4, 5) + )) + ))); // Member 2 heartbeats with a different subscribedTopicNames. The assignor computes a new assignment // where member 2 will need to revoke topic partition bar-2 thus transitions to the REVOKING state. 
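Note on the refactor pattern above: most of the churn in these hunks is mechanical, replacing Arrays.asList, Collections.singletonList/singletonMap and double-brace HashMap initializers with the Java 9+ List.of/Map.of factories. The snippet below is a minimal, self-contained sketch of the behavioural difference between the two idioms; it is not part of the patch, and the keys and values are illustrative only.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class CollectionFactoryExample {
    public static void main(String[] args) {
        // Old idiom from the removed lines: the double-brace form creates an anonymous
        // HashMap subclass; the resulting map is mutable and accepts nulls.
        Map<String, Integer> legacy = new HashMap<String, Integer>() {
            {
                put("foo", 6);
                put("bar", 3);
            }
        };
        List<String> legacyList = Arrays.asList("foo", "bar"); // fixed-size, but set(i, x) works

        // Java 9+ factories from the added lines: unmodifiable, reject nulls, and
        // Map.of throws IllegalArgumentException if the same key is passed twice.
        Map<String, Integer> current = Map.of("foo", 6, "bar", 3);
        List<String> currentList = List.of("foo", "bar");

        // Equality is by contents, so swapping the idiom does not change the assertions.
        System.out.println(legacy.equals(current));         // true
        System.out.println(legacyList.equals(currentList)); // true
    }
}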
@@ -11322,14 +11470,14 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { .setGroupId(groupId) .setMemberId(memberId2) .setMemberEpoch(10) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) - .setTopicPartitions(Arrays.asList( + .setSubscribedTopicNames(List.of("foo", "bar")) + .setTopicPartitions(List.of( new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(3, 4, 5)), + .setPartitions(List.of(3, 4, 5)), new ConsumerGroupHeartbeatRequestData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Arrays.asList(2)) + .setPartitions(List.of(2)) )) ); @@ -11341,14 +11489,14 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { ExpiredTimeout timeout = context.sleep(30000 + 1).get(0); assertEquals(consumerGroupRebalanceTimeoutKey(groupId, memberId2), timeout.key); - byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1) )))); - Map assignments = Collections.singletonMap(memberId1, assignment); + Map assignments = Map.of(memberId1, assignment); ClassicGroup expectedClassicGroup = new ClassicGroup( new LogContext(), @@ -11375,28 +11523,28 @@ public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() { ) ); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), - - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), - - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId), - - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()) + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2) + ), + 
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments)) + ), + timeout.result.records() ); - assertUnorderedListEquals(expectedRecords.subList(0, 2), timeout.result.records().subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 4), timeout.result.records().subList(2, 4)); - assertRecordEquals(expectedRecords.get(4), timeout.result.records().get(4)); - assertUnorderedListEquals(expectedRecords.subList(5, 7), timeout.result.records().subList(5, 7)); - assertRecordsEquals(expectedRecords.subList(7, 10), timeout.result.records().subList(7, 10)); - verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.RECONCILING, null); // The new classic member 1 has a heartbeat timeout. @@ -11429,13 +11577,13 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - List protocols1 = Collections.singletonList( + List protocols1 = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -11451,7 +11599,7 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -11470,7 +11618,7 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -11480,8 +11628,8 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( // Consumer group with two members. // Member 1 uses the classic protocol and static member 2 uses the consumer protocol. 
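The replacement assertion above hands assertUnorderedRecordsEquals a list of record groups instead of a flat expected list plus hand-maintained subList indexes: within each group the order of records is irrelevant, while the groups themselves must appear in sequence. The helper itself comes from this patch's test utilities; the snippet below is only a hedged sketch of that grouping contract in plain JUnit 5, not the project's implementation.

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.HashSet;
import java.util.List;

final class UnorderedGroupsAssert {

    // Sketch of the grouping semantics implied by the call sites above. Assumes the
    // elements have meaningful equals/hashCode and that a group holds no duplicates,
    // which is a simplification of the real helper.
    static <T> void assertUnorderedGroupsEquals(List<List<T>> expectedGroups, List<T> actual) {
        int expectedTotal = expectedGroups.stream().mapToInt(List::size).sum();
        assertEquals(expectedTotal, actual.size(), "unexpected number of records");

        int offset = 0;
        for (List<T> group : expectedGroups) {
            // Compare the next group.size() actual elements against this group, ignoring order.
            List<T> slice = actual.subList(offset, offset + group.size());
            assertEquals(new HashSet<>(group), new HashSet<>(slice),
                "records differ in the group starting at index " + offset);
            offset += group.size();
        }
    }
}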
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 2) @@ -11499,12 +11647,10 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( .build(); context.groupMetadataManager.consumerGroup(groupId).setMetadataRefreshDeadline(Long.MAX_VALUE, 10); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 2)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 2) + ))); context.commit(); // A new member using classic protocol with the same instance id joins, scheduling the downgrade. @@ -11529,27 +11675,28 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSessionTimeoutMs(joinRequest.sessionTimeoutMs()) - .setSupportedProtocols(Collections.singletonList(new ConsumerGroupMemberMetadataValue.ClassicProtocol() + .setSupportedProtocols(List.of(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList(fooTopicName))))))) + List.of(fooTopicName))))))) ).build(); - byte[] assignment1 = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + byte[] assignment1 = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1) )))); - byte[] assignment2 = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList( + byte[] assignment2 = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( new TopicPartition(fooTopicName, 3), new TopicPartition(fooTopicName, 4), new TopicPartition(fooTopicName, 5) )))); - Map assignments = new HashMap<>(); - assignments.put(memberId1, assignment1); - assignments.put(newMemberId2, assignment2); + Map assignments = Map.of( + memberId1, assignment1, + newMemberId2, assignment2 + ); ClassicGroup expectedClassicGroup = new ClassicGroup( new LogContext(), @@ -11589,54 +11736,49 @@ public void testLastStaticConsumerProtocolMemberReplacedByClassicProtocolMember( ) ); - List expectedRecords = Arrays.asList( - // Remove the existing member 2 that uses the consumer protocol. 
- GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, oldMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, oldMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, oldMemberId2), - - // Create the new member 2 that uses the consumer protocol. - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedNewConsumerMember2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId2, expectedNewConsumerMember2.assignedPartitions()), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedNewConsumerMember2), - - // Update the new member 2 to the member that uses classic protocol. - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedNewClassicMember2), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedNewClassicMember2), - - // Remove member 1, member 2 and the consumer group. - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId), - - // Create the classic group. - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()) - ); - - assertEquals(expectedRecords.size(), result.records.size()); - assertRecordsEquals(expectedRecords.subList(0, 8), result.records.subList(0, 8)); - assertUnorderedListEquals(expectedRecords.subList(8, 10), result.records.subList(8, 10)); - assertUnorderedListEquals(expectedRecords.subList(10, 12), result.records.subList(10, 12)); - assertRecordEquals(expectedRecords.get(12), result.records.get(12)); - assertUnorderedListEquals(expectedRecords.subList(13, 15), result.records.subList(13, 15)); - assertRecordsEquals(expectedRecords.subList(15, 17), result.records.subList(15, 17)); - - // Leader can be either member 1 or member 2. - try { - assertRecordEquals(expectedRecords.get(17), result.records.get(17)); - } catch (AssertionFailedError e) { - expectedClassicGroup.setLeaderId(Optional.of(newMemberId2)); - assertRecordEquals( - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()), - result.records.get(9) - ); - } + // The leader of the classic group is not deterministic. + String leader = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false).leaderOrNull(); + assertTrue(Set.of(memberId1, newMemberId2).contains(leader)); + expectedClassicGroup.setLeaderId(Optional.of(leader)); + + assertUnorderedRecordsEquals( + List.of( + // Remove the existing member 2 that uses the consumer protocol. 
+ List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, oldMemberId2)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, oldMemberId2)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, oldMemberId2)), + + // Create the new member 2 that uses the consumer protocol. + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedNewConsumerMember2)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId2, expectedNewConsumerMember2.assignedPartitions())), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedNewConsumerMember2)), + + // Update the new member 2 to the member that uses classic protocol. + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedNewClassicMember2)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedNewClassicMember2)), + + // Remove member 1, member 2 and the consumer group. + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, newMemberId2) + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, newMemberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, newMemberId2) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), + + // Create the classic group. 
+ List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments)) + ), + result.records + ); verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null); @@ -11662,7 +11804,7 @@ public void testJoiningConsumerGroupThrowsExceptionIfGroupOverMaxSize() { .setMemberEpoch(10) .setPreviousMemberEpoch(10) .build())) - .withConsumerGroupMaxSize(1) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MAX_SIZE_CONFIG, 1) .build(); JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() @@ -11724,18 +11866,16 @@ public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception { String memberId = Uuid.randomUuid().toString(); MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - } - }) + .withSubscriptionMetadata( + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)) + ) .withMember(new ConsumerGroupMember.Builder(memberId) .setState(MemberState.STABLE) .setMemberEpoch(10) @@ -11752,7 +11892,7 @@ public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception { .withGroupId(groupId) .withMemberId(UNKNOWN_MEMBER_ID) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), Collections.emptyList(), version)) .build(); @@ -11771,18 +11911,14 @@ public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception { String newMemberId = firstJoinResult.joinFuture.get().memberId(); assertNotEquals("", newMemberId); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0) - ))); - put(newMemberId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + newMemberId, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); JoinGroupRequestData secondRequest = new JoinGroupRequestData() .setGroupId(request.groupId()) @@ -11805,7 +11941,7 @@ public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception { .setState(MemberState.STABLE) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(500) .setAssignedPartitions(assignor.targetPartitions(newMemberId)) .setClassicMemberMetadata( @@ -11815,25 +11951,25 @@ public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception { ) .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember), - 
GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), - - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, assignor.targetPartitions(memberId)), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId, assignor.targetPartitions(newMemberId)), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), - - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember) + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + ))), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), + + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, assignor.targetPartitions(memberId)), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId, assignor.targetPartitions(newMemberId)) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), + + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember)) + ), + secondJoinResult.records ); - assertRecordsEquals(expectedRecords.subList(0, 3), secondJoinResult.records.subList(0, 3)); - assertUnorderedListEquals(expectedRecords.subList(3, 5), secondJoinResult.records.subList(3, 5)); - assertRecordsEquals(expectedRecords.subList(5, 7), secondJoinResult.records.subList(5, 7)); secondJoinResult.appendFuture.complete(null); assertTrue(secondJoinResult.joinFuture.isDone()); @@ -11860,31 +11996,25 @@ public void testJoiningConsumerGroupFailingToPersistRecords() throws Exception { String newMemberId = Uuid.randomUuid().toString(); MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0) - ))); - put(newMemberId, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 1) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + newMemberId, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 1) + )) + ))); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - } - }) + .withSubscriptionMetadata( + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)) 
+ ) .withMember(new ConsumerGroupMember.Builder(memberId) .setState(MemberState.STABLE) .setMemberEpoch(10) @@ -11902,7 +12032,7 @@ public void testJoiningConsumerGroupFailingToPersistRecords() throws Exception { .withGroupId(groupId) .withMemberId(newMemberId) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Collections.singletonList(fooTopicName), + List.of(fooTopicName), Collections.emptyList())) .build(); @@ -11929,18 +12059,16 @@ public void testJoiningConsumerGroupWithNewStaticMember() throws Exception { String instanceId = "instance-id"; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - } - }) + .withSubscriptionMetadata( + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)) + ) .withMember(new ConsumerGroupMember.Builder(memberId) .setState(MemberState.STABLE) .setMemberEpoch(10) @@ -11958,7 +12086,7 @@ public void testJoiningConsumerGroupWithNewStaticMember() throws Exception { .withMemberId(UNKNOWN_MEMBER_ID) .withGroupInstanceId(instanceId) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), Collections.emptyList())) .build(); @@ -11976,7 +12104,7 @@ public void testJoiningConsumerGroupWithNewStaticMember() throws Exception { .setState(MemberState.STABLE) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setRebalanceTimeoutMs(500) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() @@ -11985,14 +12113,12 @@ public void testJoiningConsumerGroupWithNewStaticMember() throws Exception { ) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId, Collections.emptyMap()), @@ -12025,23 +12151,22 @@ public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Excep String memberId = Uuid.randomUuid().toString(); String instanceId = "instance-id"; GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor())) + 
.withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DISABLED.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new NoOpPartitionAssignor())) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - } - }) + .withSubscriptionMetadata( + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)) + ) .withMember(new ConsumerGroupMember.Builder(memberId) .setInstanceId(instanceId) .setState(MemberState.STABLE) .setMemberEpoch(10) .setPreviousMemberEpoch(10) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setRebalanceTimeoutMs(500) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1))) @@ -12057,7 +12182,7 @@ public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Excep .withMemberId(UNKNOWN_MEMBER_ID) .withGroupInstanceId(instanceId) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Collections.singletonList(fooTopicName), + List.of(fooTopicName), Collections.emptyList())) .build(); @@ -12077,7 +12202,7 @@ public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Excep .setPreviousMemberEpoch(0) .setInstanceId(instanceId) .setState(MemberState.STABLE) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1))) .setRebalanceTimeoutMs(500) @@ -12090,7 +12215,7 @@ public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Excep .setState(MemberState.STABLE) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1))) .setRebalanceTimeoutMs(500) @@ -12100,7 +12225,7 @@ public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Excep .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(request.protocols()))) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // Remove the old static member. 
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId), @@ -12145,7 +12270,7 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) @@ -12153,13 +12278,11 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1)); - } - }) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1), + zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1) + )) .withMember(new ConsumerGroupMember.Builder(memberId1) .setInstanceId(instanceId) .setState(MemberState.STABLE) @@ -12168,7 +12291,7 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))) @@ -12177,8 +12300,8 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .setSessionTimeoutMs(5000) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) ) )) ) @@ -12190,7 +12313,7 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build()) @@ -12204,27 +12327,23 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId); group.setMetadataRefreshDeadline(Long.MAX_VALUE, 11); - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0), - mkTopicAssignment(zarTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0), - mkTopicAssignment(fooTopicId, 1) - ))); - } - } 
- )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0), + mkTopicAssignment(zarTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0), + mkTopicAssignment(fooTopicId, 1) + )) + ))); // Member 1 rejoins with a new subscription list. JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() .withGroupId(groupId) .withMemberId(memberId1) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), + List.of(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())) .build(); GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request); @@ -12237,7 +12356,7 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setState(MemberState.STABLE) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName, zarTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))) @@ -12246,28 +12365,30 @@ public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() .setSessionTimeoutMs(request.sessionTimeoutMs()) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), + List.of(fooTopicName, barTopicName, zarTopicName), Collections.emptyList() ) )) ) .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( - mkTopicAssignment(fooTopicId, 0), - mkTopicAssignment(zarTopicId, 0))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( - mkTopicAssignment(barTopicId, 0), - mkTopicAssignment(fooTopicId, 1))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember) + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0), + mkTopicAssignment(zarTopicId, 0))), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0), + mkTopicAssignment(fooTopicId, 1))) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember)) + ), + joinResult.records ); - assertRecordsEquals(expectedRecords.subList(0, 2), joinResult.records.subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 4), joinResult.records.subList(2, 
4)); - assertRecordsEquals(expectedRecords.subList(4, 6), joinResult.records.subList(4, 6)); joinResult.appendFuture.complete(null); assertEquals( @@ -12293,8 +12414,8 @@ public void testStaticMemberJoiningConsumerGroupWithUnknownInstanceId() throws E JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) ); // Set up a ConsumerGroup with no static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() @@ -12332,8 +12453,8 @@ public void testStaticMemberJoiningConsumerGroupWithUnmatchedMemberId() throws E JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)) ); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) @@ -12375,7 +12496,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) @@ -12383,12 +12504,10 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )) .withMember(new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.STABLE) .setMemberEpoch(10) @@ -12396,7 +12515,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))) @@ -12405,8 +12524,8 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .setSessionTimeoutMs(5000) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0), 
new TopicPartition(barTopicName, 0)) ) )) ) @@ -12418,7 +12537,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build()) @@ -12434,19 +12553,15 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E // Prepare the new target assignment. // Member 1 will need to revoke bar-0, and member 2 will need to revoke foo-1. - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(zarTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(zarTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); // Member 1 rejoins with a new subscription list and an empty owned // partition, and transitions to UNRELEASED_PARTITIONS. @@ -12455,7 +12570,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .withMemberId(memberId1) .withSessionTimeoutMs(5000) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), + List.of(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())) .build(); GroupMetadataManagerTestContext.JoinResult joinResult1 = context.sendClassicGroupJoin(request); @@ -12467,7 +12582,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setState(MemberState.UNRELEASED_PARTITIONS) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName, zarTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))) @@ -12476,37 +12591,36 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .setSessionTimeoutMs(request.sessionTimeoutMs()) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), + List.of(fooTopicName, barTopicName, zarTopicName), Collections.emptyList() ) )) ) .build(); - List expectedRecords1 = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1)); - } - }), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, 
expectedMember1)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1), + zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1) + ))), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(zarTopicId, 0))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( - mkTopicAssignment(barTopicId, 0))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(zarTopicId, 0))), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0))) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1) + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1)) + ), + joinResult1.records ); - assertEquals(expectedRecords1.size(), joinResult1.records.size()); - assertRecordsEquals(expectedRecords1.subList(0, 3), joinResult1.records.subList(0, 3)); - assertUnorderedListEquals(expectedRecords1.subList(3, 5), joinResult1.records.subList(3, 5)); - assertRecordsEquals(expectedRecords1.subList(5, 7), joinResult1.records.subList(5, 7)); assertEquals(expectedMember1.state(), group.getOrMaybeCreateMember(memberId1, false).state()); @@ -12530,7 +12644,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E joinResponse1.generationId(), joinResponse1.protocolName(), joinResponse1.protocolType(), - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0) ) @@ -12568,7 +12682,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E .build(); assertRecordsEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2)), joinResult2.records ); assertEquals(expectedMember2.state(), group.getOrMaybeCreateMember(memberId1, false).state()); @@ -12594,7 +12708,7 @@ public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws E joinResponse2.generationId(), joinResponse2.protocolName(), joinResponse2.protocolType(), - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(zarTopicName, 0) @@ -12617,7 +12731,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) 
.addTopic(barTopicId, barTopicName, 1) @@ -12625,12 +12739,10 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .addRacks() .build()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withSubscriptionMetadata(new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - }) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + )) .withMember(new ConsumerGroupMember.Builder(memberId1) .setState(MemberState.STABLE) .setMemberEpoch(10) @@ -12638,7 +12750,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))) @@ -12647,8 +12759,8 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setSessionTimeoutMs(5000) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)) ) )) ) @@ -12660,7 +12772,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setRebalanceTimeoutMs(500) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 1))) .build()) @@ -12676,19 +12788,15 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th // Prepare the new target assignment. // Member 1 will need to revoke bar-0, and member 2 will need to revoke foo-1. - assignor.prepareGroupAssignment(new GroupAssignment( - new HashMap() { - { - put(memberId1, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(zarTopicId, 0) - ))); - put(memberId2, new MemberAssignmentImpl(mkAssignment( - mkTopicAssignment(barTopicId, 0) - ))); - } - } - )); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(zarTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); // Member 1 rejoins with a new subscription list and transitions to UNREVOKED_PARTITIONS. 
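For context on the prepareGroupAssignment(new GroupAssignment(Map.of(...))) calls rewritten in these hunks: the mock assignor is primed with the exact target assignment before the join or heartbeat under test is processed, which makes the coordinator's output records predictable and assertable. The sketch below only mirrors the shape of that call with hypothetical stand-in types; the real classes (MockPartitionAssignor, GroupAssignment, MemberAssignmentImpl, mkAssignment, mkTopicAssignment) live in the group-coordinator test utilities and are not reproduced here.

import java.util.List;
import java.util.Map;

class AssignorPrimingExample {

    // Hypothetical stand-ins, not the Kafka test harness types.
    record MemberAssignment(Map<String, List<Integer>> partitionsByTopic) { }

    record GroupAssignment(Map<String, MemberAssignment> members) { }

    static class MockAssignor {
        private GroupAssignment prepared;

        // Prime the mock with the assignment the next rebalance should produce.
        void prepareGroupAssignment(GroupAssignment assignment) {
            this.prepared = assignment;
        }

        GroupAssignment assign() {
            return prepared;
        }
    }

    public static void main(String[] args) {
        MockAssignor assignor = new MockAssignor();
        assignor.prepareGroupAssignment(new GroupAssignment(Map.of(
            "member-1", new MemberAssignment(Map.of("foo", List.of(0, 1, 2), "bar", List.of(0, 1))),
            "member-2", new MemberAssignment(Map.of("foo", List.of(3, 4, 5)))
        )));
        System.out.println(assignor.assign().members().keySet());
    }
}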
JoinGroupRequestData request1 = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() @@ -12696,8 +12804,8 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .withMemberId(memberId1) .withSessionTimeoutMs(5000) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))) .build(); GroupMetadataManagerTestContext.JoinResult joinResult1 = context.sendClassicGroupJoin(request1); @@ -12708,7 +12816,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setState(MemberState.UNREVOKED_PARTITIONS) - .setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName, zarTopicName)) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0))) .setPartitionsPendingRevocation(mkAssignment( @@ -12718,37 +12826,36 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setSessionTimeoutMs(request1.sessionTimeoutMs()) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)) ) )) ) .build(); - List expectedRecords1 = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1)); - } - }), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), + assertUnorderedRecordsEquals( + List.of( + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1), + zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1) + ))), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1), - mkTopicAssignment(zarTopicId, 0))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( - mkTopicAssignment(barTopicId, 0))), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1), + mkTopicAssignment(zarTopicId, 0))), + 
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0))) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11)), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1) + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1)) + ), + joinResult1.records ); - assertEquals(expectedRecords1.size(), joinResult1.records.size()); - assertRecordsEquals(expectedRecords1.subList(0, 3), joinResult1.records.subList(0, 3)); - assertUnorderedListEquals(expectedRecords1.subList(3, 5), joinResult1.records.subList(3, 5)); - assertRecordsEquals(expectedRecords1.subList(5, 7), joinResult1.records.subList(5, 7)); assertEquals(expectedMember1.state(), group.getOrMaybeCreateMember(memberId1, false).state()); @@ -12772,7 +12879,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th joinResponse1.generationId(), joinResponse1.protocolName(), joinResponse1.protocolType(), - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(new TopicPartition(fooTopicName, 0)) ); // Member 1 heartbeats to be notified to rejoin. @@ -12793,8 +12900,8 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .withMemberId(memberId1) .withSessionTimeoutMs(5000) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 0)))) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0)))) .build(); GroupMetadataManagerTestContext.JoinResult joinResult2 = context.sendClassicGroupJoin(request2); @@ -12810,15 +12917,15 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setSessionTimeoutMs(request2.sessionTimeoutMs()) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0)) ) )) ) .build(); assertRecordsEquals( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2) ), @@ -12847,7 +12954,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th joinResponse2.generationId(), joinResponse2.protocolName(), joinResponse2.protocolType(), - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0) ) @@ -12880,8 +12987,8 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .withMemberId(memberId1) .withSessionTimeoutMs(5000) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)))) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)))) .build(); GroupMetadataManagerTestContext.JoinResult joinResult3 = context.sendClassicGroupJoin(request3); @@ 
-12896,15 +13003,15 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th .setSessionTimeoutMs(request3.sessionTimeoutMs()) .setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection( GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName, zarTopicName), - Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)) + List.of(fooTopicName, barTopicName, zarTopicName), + List.of(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)) ) )) ) .build(); assertRecordsEquals( - Arrays.asList( + List.of( GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember3), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember3) ), @@ -12933,7 +13040,7 @@ public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() th joinResponse3.generationId(), joinResponse3.protocolName(), joinResponse3.protocolType(), - Arrays.asList( + List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(zarTopicName, 0) @@ -12953,7 +13060,7 @@ public void testClassicGroupSyncToConsumerGroupWithAllConsumerProtocolVersions() String barTopicName = "bar"; for (short version = ConsumerProtocolAssignment.LOWEST_SUPPORTED_VERSION; version <= ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION; version++) { - List topicPartitions = Arrays.asList( + List topicPartitions = List.of( new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), @@ -12961,12 +13068,12 @@ public void testClassicGroupSyncToConsumerGroupWithAllConsumerProtocolVersions() new TopicPartition(barTopicName, 1) ); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, topicPartitions ), @@ -12978,7 +13085,7 @@ public void testClassicGroupSyncToConsumerGroupWithAllConsumerProtocolVersions() .setState(MemberState.STABLE) .setMemberEpoch(10) .setPreviousMemberEpoch(9) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setClassicMemberMetadata( new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSessionTimeoutMs(5000) @@ -12992,7 +13099,7 @@ public void testClassicGroupSyncToConsumerGroupWithAllConsumerProtocolVersions() .setState(MemberState.STABLE) .setMemberEpoch(10) .setPreviousMemberEpoch(9) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))) @@ -13001,8 +13108,7 @@ public void testClassicGroupSyncToConsumerGroupWithAllConsumerProtocolVersions() // Consumer group with two members. // Member 1 uses the classic protocol and member 2 uses the consumer protocol. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addTopic(barTopicId, barTopicName, 3) @@ -13039,8 +13145,7 @@ public void testClassicGroupSyncToConsumerGroupWithUnknownMemberId() throws Exce // Consumer group with a member that doesn't use the classic protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .build())) @@ -13089,8 +13194,7 @@ public void testClassicGroupSyncToConsumerGroupWithFencedInstanceId() throws Exc // Consumer group with a static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .setInstanceId(instanceId) @@ -13114,12 +13218,12 @@ public void testClassicGroupSyncToConsumerGroupWithInconsistentGroupProtocol() t String groupId = "group-id"; String memberId = Uuid.randomUuid().toString(); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13128,8 +13232,7 @@ public void testClassicGroupSyncToConsumerGroupWithInconsistentGroupProtocol() t // Consumer group with a member using the classic protocol. 
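The hunks above replace the dedicated test-context setters (withConsumerGroupMigrationPolicy, withConsumerGroupAssignors) with a single withConfig(key, value) call keyed by GroupCoordinatorConfig constants. A minimal sketch of that builder shape, assuming a hypothetical ConfigKeyedBuilderSketch class and illustrative config key strings rather than the real Kafka classes:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the test context builder; only the withConfig(...) shape is the point.
final class ConfigKeyedBuilderSketch {
    private final Map<String, Object> configs = new HashMap<>();

    // One generic entry point instead of a dedicated setter per setting.
    ConfigKeyedBuilderSketch withConfig(String key, Object value) {
        configs.put(key, value);
        return this;
    }

    Map<String, Object> build() {
        return Map.copyOf(configs); // immutable snapshot handed to the component under test
    }

    public static void main(String[] args) {
        Map<String, Object> configs = new ConfigKeyedBuilderSketch()
            .withConfig("consumer.group.migration.policy", "downgrade") // illustrative key names, not the real constants
            .withConfig("group.max.size", 1)
            .build();
        System.out.println(configs);
    }
}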
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .setClassicMemberMetadata( @@ -13179,12 +13282,12 @@ public void testClassicGroupSyncToConsumerGroupWithIllegalGeneration() throws Ex String groupId = "group-id"; String memberId = Uuid.randomUuid().toString(); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13193,8 +13296,7 @@ public void testClassicGroupSyncToConsumerGroupWithIllegalGeneration() throws Ex // Consumer group with a member using the classic protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .setClassicMemberMetadata( @@ -13222,12 +13324,12 @@ public void testClassicGroupSyncToConsumerGroupRebalanceInProgress() throws Exce String groupId = "group-id"; String memberId = Uuid.randomUuid().toString(); - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13237,8 +13339,7 @@ public void testClassicGroupSyncToConsumerGroupRebalanceInProgress() throws Exce // Consumer group with a member using the classic protocol. // The group epoch is greater than the member epoch. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 11) .withMember(new ConsumerGroupMember.Builder(memberId) .setRebalanceTimeoutMs(10000) @@ -13269,12 +13370,12 @@ public void testClassicGroupHeartbeatToConsumerGroupMaintainsSession() throws Ex String memberId = Uuid.randomUuid().toString(); int sessionTimeout = 5000; - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13283,16 +13384,15 @@ public void testClassicGroupHeartbeatToConsumerGroupMaintainsSession() throws Ex // Consumer group with a member using the classic protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) - .withMember(new ConsumerGroupMember.Builder(memberId) - .setClassicMemberMetadata( - new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() - .setSessionTimeoutMs(sessionTimeout) - .setSupportedProtocols(protocols) - ) - .setMemberEpoch(10) - .build())) + .withMember(new ConsumerGroupMember.Builder(memberId) + .setClassicMemberMetadata( + new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSessionTimeoutMs(sessionTimeout) + .setSupportedProtocols(protocols) + ) + .setMemberEpoch(10) + .build())) .build(); // Heartbeat to schedule the session timeout. 
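Most of the mechanical churn in these tests is the switch from Arrays.asList and Collections.singletonList to List.of. A JDK-only sketch (illustrative class name) of the behavioural differences that matter when making that substitution in test fixtures:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ListFactorySketch {
    public static void main(String[] args) {
        List<String> single = Collections.singletonList("foo"); // immutable, tolerates a null element
        List<String> fixed  = Arrays.asList("foo", "bar");      // fixed-size view over an array, set() is allowed
        List<String> modern = List.of("foo", "bar");            // fully immutable, rejects nulls

        fixed.set(0, "baz");                                     // fine: element replacement is permitted
        try {
            modern.set(0, "baz");                                // throws: List.of lists are unmodifiable
        } catch (UnsupportedOperationException expected) {
            System.out.println("List.of is unmodifiable");
        }

        List<String> nullable = Collections.singletonList(null); // fine
        try {
            List.of((String) null);                              // throws: List.of rejects null elements eagerly
        } catch (NullPointerException expected) {
            System.out.println("List.of rejects nulls");
        }

        System.out.println(single + " " + fixed + " " + nullable);
    }
}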
@@ -13329,12 +13429,12 @@ public void testClassicGroupHeartbeatToConsumerGroupRebalanceInProgress() throws int sessionTimeout = 5000; int rebalanceTimeout = 10000; - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13379,7 +13479,6 @@ public void testClassicGroupHeartbeatToConsumerGroupRebalanceInProgress() throws .build(); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(member1) .withMember(member2) @@ -13387,7 +13486,7 @@ public void testClassicGroupHeartbeatToConsumerGroupRebalanceInProgress() throws .withAssignment(memberId3, mkAssignment(mkTopicAssignment(barTopicId, 0, 1, 2)))) .build(); - Arrays.asList(memberId1, memberId2, memberId3).forEach(memberId -> { + List.of(memberId1, memberId2, memberId3).forEach(memberId -> { CoordinatorResult heartbeatResult = context.sendClassicGroupHeartbeat( new HeartbeatRequestData() .setGroupId(groupId) @@ -13504,12 +13603,12 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenSessionTimeout( String memberId = Uuid.randomUuid().toString(); int sessionTimeout = 5000; - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13518,7 +13617,6 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenSessionTimeout( // Consumer group with a member using the classic protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .setClassicMemberMetadata( @@ -13546,7 +13644,7 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenSessionTimeout( ExpiredTimeout timeout = timeouts.get(0); assertEquals(groupSessionTimeoutKey(groupId, memberId), timeout.key); assertRecordsEquals( - Arrays.asList( + List.of( // The member is removed. 
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId), @@ -13565,12 +13663,12 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenJoinTimeout() { String memberId = Uuid.randomUuid().toString(); int rebalanceTimeout = 500; - List protocols = Collections.singletonList( + List protocols = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo"), + List.of("foo"), null, Collections.emptyList() ) @@ -13579,7 +13677,6 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenJoinTimeout() { // Consumer group with a member using the classic protocol whose member epoch is smaller than the group epoch. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .setRebalanceTimeoutMs(rebalanceTimeout) @@ -13612,7 +13709,7 @@ public void testConsumerGroupMemberUsingClassicProtocolFencedWhenJoinTimeout() { ExpiredTimeout timeout = timeouts.get(0); assertEquals(consumerGroupJoinKey(groupId, memberId), timeout.key); assertRecordsEquals( - Arrays.asList( + List.of( // The member is removed. GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId), @@ -13639,22 +13736,22 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { Uuid barTopicId = Uuid.randomUuid(); String barTopicName = "bar"; - List protocol1 = Collections.singletonList( + List protocol1 = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(new TopicPartition(fooTopicName, 0)) )))) ); - List protocol2 = Collections.singletonList( + List protocol2 = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(fooTopicName, 1)) + List.of(new TopicPartition(fooTopicName, 1)) )))) ); @@ -13664,7 +13761,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -13681,7 +13778,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .setPreviousMemberEpoch(8) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + 
.setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -13698,7 +13795,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 0))) @@ -13709,7 +13806,6 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { // Static member 2 uses the classic protocol. // Static member 3 uses the consumer protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) @@ -13725,12 +13821,10 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .withAssignmentEpoch(10)) .build(); context.groupMetadataManager.consumerGroup(groupId).setMetadataRefreshDeadline(Long.MAX_VALUE, 10); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + ))); // Member 1 joins to schedule the sync timeout and the heartbeat timeout. context.sendClassicGroupJoin( @@ -13740,8 +13834,8 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .withRebalanceTimeoutMs(member1.rebalanceTimeoutMs()) .withSessionTimeoutMs(member1.classicMemberMetadata().get().sessionTimeoutMs()) .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol( - Arrays.asList(fooTopicName, barTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 0)))) + List.of(fooTopicName, barTopicName), + List.of(new TopicPartition(fooTopicName, 0)))) .build() ).appendFuture.complete(null); context.assertSyncTimeout(groupId, memberId1, member1.rebalanceTimeoutMs()); @@ -13757,15 +13851,17 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { context.assertJoinTimeout(groupId, memberId2, member2.rebalanceTimeoutMs()); context.assertSessionTimeout(groupId, memberId2, member2.classicMemberMetadata().get().sessionTimeoutMs()); - // Member 1 and member 2 leave the group. + // Member 1, member 2 and member 3 leave the group. CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Arrays.asList( + .setMembers(List.of( // Valid member id. new MemberIdentity() - .setMemberId(memberId1), + .setMemberId(memberId1) + .setGroupInstanceId(null), new MemberIdentity() + .setMemberId(UNKNOWN_MEMBER_ID) .setGroupInstanceId(instanceId2), // Member that doesn't use the classic protocol. new MemberIdentity() @@ -13773,8 +13869,10 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .setGroupInstanceId(instanceId3), // Unknown member id. 
new MemberIdentity() - .setMemberId("unknown-member-id"), + .setMemberId("unknown-member-id") + .setGroupInstanceId(null), new MemberIdentity() + .setMemberId(UNKNOWN_MEMBER_ID) .setGroupInstanceId("unknown-instance-id"), // Fenced instance id. new MemberIdentity() @@ -13785,17 +13883,16 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { assertEquals( new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(memberId1), new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(instanceId2) - .setMemberId(memberId2), + .setMemberId(UNKNOWN_MEMBER_ID), new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(instanceId3) - .setMemberId(memberId3) - .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), + .setMemberId(memberId3), new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId("unknown-member-id") @@ -13804,26 +13901,34 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroup() { .setGroupInstanceId("unknown-instance-id") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), new LeaveGroupResponseData.MemberResponse() - .setGroupInstanceId(instanceId3) .setMemberId("unknown-member-id") + .setGroupInstanceId(instanceId3) .setErrorCode(Errors.FENCED_INSTANCE_ID.code()) )), leaveResult.response() ); - List expectedRecords = Arrays.asList( - // Remove member 1 - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - // Remove member 2. - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + List> expectedRecords = List.of( + List.of( + // Remove member 1 + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + // Remove member 2. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + // Remove member 3. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId3) + ), + // Update subscription metadata. + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Collections.emptyMap())), // Bump the group epoch. 
- GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)) ); - assertEquals(expectedRecords, leaveResult.records()); + assertUnorderedRecordsEquals(expectedRecords, leaveResult.records()); context.assertNoSessionTimeout(groupId, memberId1); context.assertNoSyncTimeout(groupId, memberId1); @@ -13842,13 +13947,13 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu Uuid barTopicId = Uuid.randomUuid(); String barTopicName = "bar"; - List protocol = Collections.singletonList( + List protocol = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Arrays.asList(fooTopicName, barTopicName), + List.of(fooTopicName, barTopicName), null, - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(new TopicPartition(fooTopicName, 0)) )))) ); @@ -13858,7 +13963,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -13874,7 +13979,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 0))) @@ -13883,7 +13988,6 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu // Consumer group with two members. // Member 1 uses the classic protocol and member 2 uses the consumer protocol. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 2) .addTopic(barTopicId, barTopicName, 1) @@ -13897,18 +14001,16 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu .withAssignmentEpoch(10)) .build(); context.groupMetadataManager.consumerGroup(groupId).setMetadataRefreshDeadline(Long.MAX_VALUE, 10); - context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1)); - } - })); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 1) + ))); // Member 1 leaves the group. 
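The expected records in the batch leave-group test are now expressed as a List of groups and checked with assertUnorderedRecordsEquals: group order is fixed, but ordering within each group is not. A simplified sketch of that comparison, with the record type reduced to String and the helper name chosen for illustration:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class UnorderedGroupAssertSketch {
    static void assertUnorderedGroupsEquals(List<List<String>> expectedGroups, List<String> actual) {
        int offset = 0;
        for (List<String> group : expectedGroups) {
            if (offset + group.size() > actual.size()) {
                throw new AssertionError("Too few records: expected at least " + (offset + group.size())
                    + " but got " + actual.size());
            }
            // Compare each group as a multiset so ordering inside the group does not matter.
            List<String> slice = actual.subList(offset, offset + group.size());
            if (!multiset(group).equals(multiset(slice))) {
                throw new AssertionError("Mismatch in group starting at index " + offset
                    + ": expected " + group + " but got " + slice);
            }
            offset += group.size();
        }
        if (offset != actual.size()) {
            throw new AssertionError("Unexpected trailing records: " + actual.subList(offset, actual.size()));
        }
    }

    private static Map<String, Integer> multiset(List<String> items) {
        Map<String, Integer> counts = new HashMap<>();
        items.forEach(item -> counts.merge(item, 1, Integer::sum));
        return counts;
    }

    public static void main(String[] args) {
        // Member tombstones may be written in either order, but the epoch bump must come last.
        List<List<String>> expected = List.of(
            List.of("tombstone-member-1", "tombstone-member-2"),
            List.of("epoch-11"));
        List<String> actual = new ArrayList<>(List.of("tombstone-member-2", "tombstone-member-1", "epoch-11"));
        assertUnorderedGroupsEquals(expected, actual);
        System.out.println("groups matched");
    }
}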
CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId(memberId1) )) @@ -13916,7 +14018,7 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu assertEquals( new LeaveGroupResponseData() - .setMembers(Collections.singletonList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId(memberId1) @@ -13924,17 +14026,15 @@ public void testConsumerGroupMemberUsingClassicProtocolBatchLeaveGroupUpdatingSu leaveResult.response() ); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( // Remove member 1 GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), // Update the subscription metadata. - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)); - } - }), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, + Map.of(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2)) + ), // Bump the group epoch. GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) ); @@ -13946,9 +14046,8 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { String groupId = "group-id"; String memberId = Uuid.randomUuid().toString(); - // Consumer group without member using the classic protocol. + // Consumer group. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range"))) .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) .withMember(new ConsumerGroupMember.Builder(memberId) .build())) @@ -13958,24 +14057,18 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { CoordinatorResult leaveResult = context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId("group-id") - .setMembers(Arrays.asList( - new MemberIdentity() - .setMemberId("unknown-member-id"), + .setMembers(List.of( new MemberIdentity() - .setMemberId(memberId) + .setMemberId("unknown-member-id") )) ); assertEquals( new LeaveGroupResponseData() - .setMembers(Arrays.asList( + .setMembers(List.of( new LeaveGroupResponseData.MemberResponse() .setGroupInstanceId(null) .setMemberId("unknown-member-id") - .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()), - new LeaveGroupResponseData.MemberResponse() - .setGroupInstanceId(null) - .setMemberId(memberId) .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) )), leaveResult.response() @@ -13985,43 +14078,288 @@ public void testClassicGroupLeaveToConsumerGroupWithoutValidLeaveGroupMember() { } @Test - public void testNoConversionWhenSizeExceedsClassicMaxGroupSize() throws Exception { + public void testLastConsumerProtocolMemberLeavingConsumerGroupByAdminApi() { String groupId = "group-id"; - String nonClassicMemberId = "1"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + String memberId3 = Uuid.randomUuid().toString(); + String memberId4 = Uuid.randomUuid().toString(); + String instanceId2 = "instance-id-2"; + String instanceId4 = "instance-id-4"; + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); - List protocols = Collections.singletonList( + List protocol1 = List.of( new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") - .setMetadata(new byte[0]) + .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( + List.of(fooTopicName, barTopicName), + null, + List.of(new TopicPartition(fooTopicName, 0)) + )))) + ); + List protocol2 = List.of( + new ConsumerGroupMemberMetadataValue.ClassicProtocol() + .setName("range") + .setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( + List.of(fooTopicName, barTopicName), + null, + List.of(new TopicPartition(fooTopicName, 1)) + )))) ); - ConsumerGroupMember member = new ConsumerGroupMember.Builder(nonClassicMemberId).build(); - ConsumerGroupMember classicMember1 = new ConsumerGroupMember.Builder("2") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicNames(List.of("foo", "bar")) + .setServerAssignorName("range") + .setRebalanceTimeoutMs(45000) + .setClassicMemberMetadata( + new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSessionTimeoutMs(5000) + .setSupportedProtocols(protocol1) + ) + 
.setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0))) .build(); - ConsumerGroupMember classicMember2 = new ConsumerGroupMember.Builder("3") - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2) + .setInstanceId(instanceId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicNames(List.of("foo", "bar")) + .setServerAssignorName("range") + .setRebalanceTimeoutMs(45000) + .setClassicMemberMetadata( + new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSessionTimeoutMs(5000) + .setSupportedProtocols(protocol2) + ) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1))) + .build(); + ConsumerGroupMember member3 = new ConsumerGroupMember.Builder(memberId3) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicNames(List.of("foo", "bar")) + .setServerAssignorName("range") + .setRebalanceTimeoutMs(45000) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 0))) + .build(); + ConsumerGroupMember member4 = new ConsumerGroupMember.Builder(memberId4) + .setInstanceId(instanceId4) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicNames(List.of("foo", "bar")) + .setServerAssignorName("range") + .setRebalanceTimeoutMs(45000) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 1))) .build(); + // Consumer group with four members. + // Dynamic member 1 uses the classic protocol. + // Static member 2 uses the classic protocol. + // Dynamic member 3 uses the consumer protocol. + // Static member 4 uses the consumer protocol. 
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withClassicGroupMaxSize(1) - .withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE) - .withConsumerGroup( - new ConsumerGroupBuilder(groupId, 10) - .withMember(member) - .withMember(classicMember1) - .withMember(classicMember2) - ) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 2) + .addTopic(barTopicId, barTopicName, 2) + .addRacks() + .build()) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(member1) + .withMember(member2) + .withMember(member3) + .withMember(member4) + .withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0))) + .withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 1))) + .withAssignment(memberId3, mkAssignment(mkTopicAssignment(barTopicId, 0))) + .withAssignment(memberId4, mkAssignment(mkTopicAssignment(barTopicId, 1))) + .withAssignmentEpoch(10)) .build(); - assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); + context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 2) + ))); - context.consumerGroupHeartbeat( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(nonClassicMemberId) - .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) - .setRebalanceTimeoutMs(5000) + context.commit(); + ConsumerGroup consumerGroup = context.groupMetadataManager.consumerGroup(groupId); + + // Member 2, member 3 and member 4 leave the group, triggering the downgrade. + CoordinatorResult leaveResult = context.sendClassicGroupLeave( + new LeaveGroupRequestData() + .setGroupId("group-id") + .setMembers(List.of( + // Static classic member 2. + new MemberIdentity() + .setMemberId(memberId2) + .setGroupInstanceId(null), + // Dynamic consumer member 3. + new MemberIdentity() + .setMemberId(memberId3) + .setGroupInstanceId(null), + // Static consumer member 4, by group instance id. 
+ new MemberIdentity() + .setMemberId(UNKNOWN_MEMBER_ID) + .setGroupInstanceId(instanceId4) + )) + ); + + assertEquals( + new LeaveGroupResponseData() + .setMembers(List.of( + new LeaveGroupResponseData.MemberResponse() + .setGroupInstanceId(null) + .setMemberId(memberId2), + new LeaveGroupResponseData.MemberResponse() + .setGroupInstanceId(null) + .setMemberId(memberId3), + new LeaveGroupResponseData.MemberResponse() + .setGroupInstanceId(instanceId4) + .setMemberId(UNKNOWN_MEMBER_ID) + )), + leaveResult.response() + ); + + + byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(fooTopicName, 0) + )))); + Map assignments = Map.of(memberId1, assignment); + + ClassicGroup expectedClassicGroup = new ClassicGroup( + new LogContext(), + groupId, + STABLE, + context.time, + 10, + Optional.of(ConsumerProtocol.PROTOCOL_TYPE), + Optional.of("range"), + Optional.of(memberId1), + Optional.of(context.time.milliseconds()) + ); + expectedClassicGroup.add( + new ClassicGroupMember( + memberId1, + Optional.ofNullable(member1.instanceId()), + member1.clientId(), + member1.clientHost(), + member1.rebalanceTimeoutMs(), + member1.classicProtocolSessionTimeout().get(), + ConsumerProtocol.PROTOCOL_TYPE, + member1.supportedJoinGroupRequestProtocols(), + assignment + ) + ); + + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId4) + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId4) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId)), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId4) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments)) + ), + leaveResult.records() + ); + + verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null); + + // The new classic member 1 has a heartbeat timeout. + ScheduledTimeout heartbeatTimeout = context.timer.timeout( + classicGroupHeartbeatKey(groupId, memberId1) + ); + assertNotNull(heartbeatTimeout); + // The new rebalance has a groupJoin timeout. 
+ ScheduledTimeout groupJoinTimeout = context.timer.timeout( + classicGroupJoinKey(groupId) + ); + assertNotNull(groupJoinTimeout); + + // A new rebalance is triggered. + ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); + assertTrue(classicGroup.isInState(PREPARING_REBALANCE)); + + // Simulate a failed write to the log. + context.rollback(); + + // The group is reverted back to the consumer group. + assertEquals(consumerGroup, context.groupMetadataManager.consumerGroup(groupId)); + } + + @Test + public void testNoConversionWhenSizeExceedsClassicMaxGroupSize() throws Exception { + String groupId = "group-id"; + String nonClassicMemberId = "1"; + + List protocols = List.of( + new ConsumerGroupMemberMetadataValue.ClassicProtocol() + .setName("range") + .setMetadata(new byte[0]) + ); + + ConsumerGroupMember member = new ConsumerGroupMember.Builder(nonClassicMemberId).build(); + ConsumerGroupMember classicMember1 = new ConsumerGroupMember.Builder("2") + .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + .build(); + ConsumerGroupMember classicMember2 = new ConsumerGroupMember.Builder("3") + .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSupportedProtocols(protocols)) + .build(); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, 1) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.DOWNGRADE.toString()) + .withConsumerGroup( + new ConsumerGroupBuilder(groupId, 10) + .withMember(member) + .withMember(classicMember1) + .withMember(classicMember2) + ) + .build(); + + assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); + + context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(nonClassicMemberId) + .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) + .setRebalanceTimeoutMs(5000) ); assertEquals(Group.GroupType.CONSUMER, context.groupMetadataManager.group(groupId).type()); @@ -14089,7 +14427,7 @@ public void testShareGroupDescribeRequest() { assertEquals(0, groups.size()); // Group id not found - groups = context.sendShareGroupDescribe(Collections.singletonList("unknown-group")); + groups = context.sendShareGroupDescribe(List.of("unknown-group")); assertEquals(1, groups.size()); assertEquals(Errors.GROUP_ID_NOT_FOUND.code(), groups.get(0).errorCode()); } @@ -14105,7 +14443,7 @@ public void testShareGroupDescribeNoErrors() { Collections.emptyMap() )); - List groupIds = Arrays.asList("group-id-1", "group-id-2"); + List groupIds = List.of("group-id-1", "group-id-2"); context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupIds.get(0), 100)); context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupIds.get(1), 15)); @@ -14114,14 +14452,14 @@ public void testShareGroupDescribeNoErrors() { .setGroupId(groupIds.get(1)) .setMemberId(Uuid.randomUuid().toString()) .setMemberEpoch(0) - .setSubscribedTopicNames(Collections.singletonList("foo"))); + .setSubscribedTopicNames(List.of("foo"))); // Verify that a member id was generated for the new member. 
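The downgrade test above leans on the test context's staged-write semantics: context.commit() makes replayed state durable, while context.rollback() simulates a failed log write and reverts to the last committed state. A small, self-contained sketch of that commit/rollback idea (not the actual coordinator implementation):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

final class SnapshottableStateSketch {
    private final Map<String, String> committed = new HashMap<>();
    private final Deque<Map<String, String>> pending = new ArrayDeque<>();

    void stage(Map<String, String> update) {
        pending.addLast(new HashMap<>(update)); // state changes are staged until the write is confirmed
    }

    void commit() {
        while (!pending.isEmpty()) {
            committed.putAll(pending.removeFirst());
        }
    }

    void rollback() {
        pending.clear(); // a failed write discards everything staged since the last commit
    }

    Map<String, String> view() {
        Map<String, String> merged = new HashMap<>(committed);
        pending.forEach(merged::putAll); // reads see staged state until it is rolled back
        return merged;
    }

    public static void main(String[] args) {
        SnapshottableStateSketch state = new SnapshottableStateSketch();
        state.stage(Map.of("group-type", "consumer"));
        state.commit();

        state.stage(Map.of("group-type", "classic")); // downgrade staged, not yet written
        System.out.println(state.view());             // {group-type=classic}
        state.rollback();                             // simulated failed write
        System.out.println(state.view());             // {group-type=consumer}
    }
}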
String memberId = result.response().memberId(); assertNotNull(memberId); context.commit(); - List expected = Arrays.asList( + List expected = List.of( new ShareGroupDescribeResponseData.DescribedGroup() .setGroupEpoch(100) .setGroupId(groupIds.get(0)) @@ -14131,12 +14469,12 @@ public void testShareGroupDescribeNoErrors() { .setGroupEpoch(16) .setAssignmentEpoch(16) .setGroupId(groupIds.get(1)) - .setMembers(Collections.singletonList( + .setMembers(List.of( new ShareGroupMember.Builder(memberId) .setMemberEpoch(16) .setClientId("client") .setClientHost("localhost/127.0.0.1") - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build() .asShareGroupDescribeMember( new MetadataImageBuilder().build().topics() @@ -14168,7 +14506,7 @@ public void testShareGroupMemberIdGeneration() { .setGroupId("group-foo") .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); assertEquals( memberId, @@ -14205,7 +14543,7 @@ public void testShareGroupUnknownGroupId() { .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(100) // Epoch must be > 0. - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); } @Test @@ -14223,7 +14561,7 @@ public void testShareGroupUnknownMemberIdJoins() { .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); // The second member is rejected because the member id is unknown and // the member epoch is not zero. @@ -14233,7 +14571,7 @@ public void testShareGroupUnknownMemberIdJoins() { .setGroupId(groupId) .setMemberId(Uuid.randomUuid().toString()) .setMemberEpoch(1) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); } @Test @@ -14257,7 +14595,7 @@ public void testShareGroupMemberJoinsEmptyGroupWithAssignments() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) ))) @@ -14271,7 +14609,7 @@ public void testShareGroupMemberJoinsEmptyGroupWithAssignments() { .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); assertResponseEquals( new ShareGroupHeartbeatResponseData() @@ -14279,13 +14617,13 @@ public void testShareGroupMemberJoinsEmptyGroupWithAssignments() { .setMemberEpoch(1) .setHeartbeatIntervalMs(5000) .setAssignment(new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(Arrays.asList( + .setTopicPartitions(List.of( new ShareGroupHeartbeatResponseData.TopicPartitions() .setTopicId(fooTopicId) - .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)), + .setPartitions(List.of(0, 1, 2, 3, 4, 5)), new ShareGroupHeartbeatResponseData.TopicPartitions() .setTopicId(barTopicId) - .setPartitions(Arrays.asList(0, 1, 2)) + .setPartitions(List.of(0, 1, 2)) ))), result.response() ); @@ -14295,21 +14633,19 @@ public void testShareGroupMemberJoinsEmptyGroupWithAssignments() { .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) .setMemberEpoch(1) .setPreviousMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + 
.setSubscribedTopicNames(List.of("foo", "bar")) .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) )) .build(); - List expectedRecords = Arrays.asList( + List expectedRecords = List.of( GroupCoordinatorRecordHelpers.newShareGroupMemberSubscriptionRecord(groupId, expectedMember), - GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), + GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupId, 1), GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentRecord(groupId, memberId, mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5), @@ -14347,34 +14683,34 @@ public void testShareGroupLeavingMemberBumpsGroupEpoch() { .build()) .withShareGroup(new ShareGroupBuilder(groupId, 10) .withMember(new ShareGroupMember.Builder(memberId1) - .setState(MemberState.STABLE) - .setMemberEpoch(10) - .setPreviousMemberEpoch(9) - .setClientId("client") - .setClientHost("localhost/127.0.0.1") - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) - .setAssignedPartitions(mkAssignment( - mkTopicAssignment(fooTopicId, 0, 1, 2), - mkTopicAssignment(barTopicId, 0, 1))) - .build()) - .withMember(new ShareGroupMember.Builder(memberId2) - .setState(MemberState.STABLE) - .setMemberEpoch(10) - .setPreviousMemberEpoch(9) - .setClientId("client") - .setClientHost("localhost/127.0.0.1") - // Use zar only here to ensure that metadata needs to be recomputed. - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) - .setAssignedPartitions(mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4, 5), - mkTopicAssignment(barTopicId, 2))) - .build()) - .withAssignment(memberId1, mkAssignment( + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId("client") + .setClientHost("localhost/127.0.0.1") + .setSubscribedTopicNames(List.of("foo", "bar")) + .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))) - .withAssignment(memberId2, mkAssignment( + .build()) + .withMember(new ShareGroupMember.Builder(memberId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(9) + .setClientId("client") + .setClientHost("localhost/127.0.0.1") + // Use zar only here to ensure that metadata needs to be recomputed. 
+ .setSubscribedTopicNames(List.of("foo", "bar", "zar")) + .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))) + .build()) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2), + mkTopicAssignment(barTopicId, 0, 1))) + .withAssignment(memberId2, mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4, 5), + mkTopicAssignment(barTopicId, 2))) .withAssignmentEpoch(10)) .build(); @@ -14384,7 +14720,7 @@ public void testShareGroupLeavingMemberBumpsGroupEpoch() { .setGroupId(groupId) .setMemberId(memberId2) .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); assertResponseEquals( new ShareGroupHeartbeatResponseData() @@ -14393,18 +14729,16 @@ public void testShareGroupLeavingMemberBumpsGroupEpoch() { result.response() ); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newShareGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentTombstoneRecord(groupId, memberId2), - GroupCoordinatorRecordHelpers.newShareGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), - // Subscription metadata is recomputed because zar is no longer there. - GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataRecord(groupId, new HashMap() { - { - put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6)); - put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)); - } - }), - GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupId, 11) + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newShareGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newShareGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + // Subscription metadata is recomputed because zar is no longer there. + GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataRecord(groupId, Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )), + GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupId, 11) ); assertRecordsEquals(expectedRecords, result.records()); @@ -14422,7 +14756,7 @@ public void testShareGroupNewMemberIsRejectedWithMaximumMembersIsReached() { MockPartitionAssignor assignor = new MockPartitionAssignor("share"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .withShareGroupAssignor(assignor) - .withShareGroupMaxSize(1) + .withConfig(GroupCoordinatorConfig.SHARE_GROUP_MAX_SIZE_CONFIG, 1) .build(); assignor.prepareGroupAssignment(new GroupAssignment( @@ -14437,7 +14771,7 @@ public void testShareGroupNewMemberIsRejectedWithMaximumMembersIsReached() { .setGroupId(groupId) .setMemberId(memberId1) .setMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar"))); + .setSubscribedTopicNames(List.of("foo", "bar"))); assertEquals(101, result.response().memberEpoch()); // Member 2 joins the group. 
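Several hunks here also drop the anonymous double-brace HashMap initialiser in favour of Map.of when building expected subscription metadata. A JDK-only sketch of what that swap changes:

import java.util.HashMap;
import java.util.Map;

public class MapInitialisationSketch {
    public static void main(String[] args) {
        // Old style: anonymous HashMap subclass with an instance initialiser block.
        // It generates an extra class and, in inner contexts, captures the enclosing instance.
        Map<String, Integer> doubleBrace = new HashMap<>() {
            {
                put("foo", 2);
                put("bar", 1);
            }
        };

        // New style: immutable, no extra class, rejects null keys/values and duplicate keys.
        Map<String, Integer> literal = Map.of(
            "foo", 2,
            "bar", 1
        );

        // equals() follows the Map contract, so assertions comparing the two forms still pass.
        System.out.println(doubleBrace.equals(literal)); // true
    }
}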
@@ -14446,7 +14780,7 @@ public void testShareGroupNewMemberIsRejectedWithMaximumMembersIsReached() { .setGroupId(groupId) .setMemberId(memberId2) .setMemberEpoch(0) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); } @Test @@ -14456,10 +14790,10 @@ public void testShareGroupDelete() { .withShareGroup(new ShareGroupBuilder(groupId, 10)) .build(); - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentEpochTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newShareGroupEpochTombstoneRecord(groupId) + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newShareGroupTargetAssignmentEpochTombstoneRecord(groupId), + GroupCoordinatorRecordHelpers.newShareGroupSubscriptionMetadataTombstoneRecord(groupId), + GroupCoordinatorRecordHelpers.newShareGroupEpochTombstoneRecord(groupId) ); List records = new ArrayList<>(); context.groupMetadataManager.createGroupTombstoneRecords("share-group-id", records); @@ -14485,7 +14819,7 @@ public void testShareGroupStates() { context.replay(GroupCoordinatorRecordHelpers.newShareGroupMemberSubscriptionRecord(groupId, new ShareGroupMember.Builder(memberId1) .setState(MemberState.STABLE) - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .build())); context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(groupId, 11)); @@ -14527,7 +14861,7 @@ public void testConsumerGroupDynamicConfigs() { MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .addRacks() @@ -14535,7 +14869,7 @@ public void testConsumerGroupDynamicConfigs() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -14548,7 +14882,7 @@ public void testConsumerGroupDynamicConfigs() { .setMemberId(memberId) .setMemberEpoch(0) .setRebalanceTimeoutMs(90000) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .setTopicPartitions(Collections.emptyList())); assertEquals(1, result.response().memberEpoch()); @@ -14622,7 +14956,7 @@ public void testShareGroupDynamicConfigs() { .build(); assignor.prepareGroupAssignment(new GroupAssignment( - Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment( + Map.of(memberId, new MemberAssignmentImpl(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5) ))) )); @@ -14634,7 +14968,7 @@ public void testShareGroupDynamicConfigs() { .setGroupId(groupId) .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(Collections.singletonList("foo"))); + .setSubscribedTopicNames(List.of("foo"))); assertEquals(1, result.response().memberEpoch()); // Verify heartbeat interval @@ -14698,7 +15032,7 @@ public void testReplayConsumerGroupMemberMetadata() { .setClientHost("clienthost") .setServerAssignorName("range") .setRackId("rackid") - 
.setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build(); // The group and the member are created if they do not exist. @@ -14749,7 +15083,7 @@ public void testReplayConsumerGroupPartitionMetadata() { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .build(); - Map metadata = Collections.singletonMap( + Map metadata = Map.of( "bar", new TopicMetadata(Uuid.randomUuid(), "bar", 10) ); @@ -14867,7 +15201,7 @@ public void testConsumerGroupHeartbeatOnShareGroup() { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build()) .withAssignment(memberId, mkAssignment()) .withAssignmentEpoch(1)) @@ -14880,7 +15214,7 @@ public void testConsumerGroupHeartbeatOnShareGroup() { .setMemberEpoch(0) .setServerAssignor("range") .setRebalanceTimeoutMs(5000) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setTopicPartitions(Collections.emptyList()))); } @@ -14900,7 +15234,7 @@ public void testClassicGroupJoinOnShareGroup() throws Exception { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build()) .withAssignment(memberId, mkAssignment()) .withAssignmentEpoch(1)) @@ -14934,7 +15268,7 @@ public void testClassicGroupSyncOnShareGroup() throws Exception { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build()) .withAssignment(memberId, mkAssignment()) .withAssignmentEpoch(1)) @@ -14969,7 +15303,7 @@ public void testClassicGroupLeaveOnShareGroup() throws Exception { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build()) .withAssignment(memberId, mkAssignment()) .withAssignmentEpoch(1)) @@ -14978,7 +15312,7 @@ public void testClassicGroupLeaveOnShareGroup() throws Exception { assertThrows(UnknownMemberIdException.class, () -> context.sendClassicGroupLeave( new LeaveGroupRequestData() .setGroupId(groupId) - .setMembers(Collections.singletonList( + .setMembers(List.of( new MemberIdentity() .setMemberId(memberId))))); } @@ -14999,19 +15333,20 @@ public void testConsumerGroupDescribeOnShareGroup() { .setPreviousMemberEpoch(0) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Collections.singletonList("foo")) + .setSubscribedTopicNames(List.of("foo")) .build()) .withAssignment(memberId, mkAssignment()) .withAssignmentEpoch(1)) .build(); - List expected = Collections.singletonList( + List expected = List.of( new ConsumerGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group " + groupId + " is not a consumer group.") ); - List actual = context.sendConsumerGroupDescribe(Collections.singletonList(groupId)); + List actual = context.sendConsumerGroupDescribe(List.of(groupId)); assertEquals(expected, actual); } @@ -15028,7 +15363,7 @@ public void testShareGroupHeartbeatOnConsumerGroup() { 
// Consumer group with one static member. GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withMetadataImage(new MetadataImageBuilder() .addTopic(fooTopicId, fooTopicName, 6) .build()) @@ -15040,7 +15375,7 @@ public void testShareGroupHeartbeatOnConsumerGroup() { .setPreviousMemberEpoch(9) .setClientId(DEFAULT_CLIENT_ID) .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setSubscribedTopicNames(List.of("foo", "bar")) .setServerAssignorName("range") .setAssignedPartitions(mkAssignment( mkTopicAssignment(fooTopicId, 0, 1, 2))) @@ -15056,7 +15391,7 @@ public void testShareGroupHeartbeatOnConsumerGroup() { .setGroupId(groupId) .setMemberId(Uuid.randomUuid().toString()) .setMemberEpoch(1) - .setSubscribedTopicNames(Arrays.asList("foo", "bar")))); + .setSubscribedTopicNames(List.of("foo", "bar")))); } @Test @@ -15067,23 +15402,24 @@ public void testShareGroupDescribeOnConsumerGroup() { int epoch = 10; String topicName = "topicName"; ConsumerGroupMember.Builder memberBuilder = new ConsumerGroupMember.Builder(memberId) - .setSubscribedTopicNames(Collections.singletonList(topicName)) + .setSubscribedTopicNames(List.of(topicName)) .setServerAssignorName("assignorName"); MockPartitionAssignor assignor = new MockPartitionAssignor("range"); GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() - .withConsumerGroupAssignors(Collections.singletonList(assignor)) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) .withConsumerGroup(new ConsumerGroupBuilder(groupId, epoch) .withMember(memberBuilder.build())) .build(); - List expected = Collections.singletonList( + List expected = List.of( new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group " + groupId + " is not a share group.") ); - List actual = context.sendShareGroupDescribe(Collections.singletonList(groupId)); + List actual = context.sendShareGroupDescribe(List.of(groupId)); assertEquals(expected, actual); } @@ -15106,7 +15442,7 @@ public void testReplayConsumerGroupRegularExpression() { assertEquals( Optional.of(resolvedRegularExpression), - context.groupMetadataManager.consumerGroup("foo").regularExpression("abc*") + context.groupMetadataManager.consumerGroup("foo").resolvedRegularExpression("abc*") ); } @@ -15140,7 +15476,871 @@ public void testReplayConsumerGroupRegularExpressionTombstone() { assertEquals( Optional.empty(), - context.groupMetadataManager.consumerGroup("foo").regularExpression("abc*") + context.groupMetadataManager.consumerGroup("foo").resolvedRegularExpression("abc*") + ); + } + + @Test + public void testConsumerGroupMemberPicksUpExistingResolvedRegularExpression() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + + ConsumerGroupPartitionAssignor assignor = mock(ConsumerGroupPartitionAssignor.class); + when(assignor.name()).thenReturn("range"); + when(assignor.assign(any(), any())).thenAnswer(answer -> { + GroupSpec spec = answer.getArgument(0); + + List.of(memberId1, memberId2).forEach(memberId -> + assertEquals( + Collections.singleton(fooTopicId), + 
spec.memberSubscription(memberId).subscribedTopicIds(), + String.format("Member %s has unexpected subscribed topic ids", memberId) + ) + ); + + return new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 1) + )) + )); + }); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 2) + .build()) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1))) + .build()) + .withResolvedRegularExpression("foo*", new ResolvedRegularExpression( + Collections.singleton(fooTopicName), + 100L, + 12345L)) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1))) + .withAssignmentEpoch(10)) + .build(); + + CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(10000) + .setSubscribedTopicRegex("foo*") + .setTopicPartitions(Collections.emptyList())); + + assertEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId2) + .setMemberEpoch(11) + .setHeartbeatIntervalMs(5000) + .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), + result.response() + ); + } + + @Test + public void testConsumerGroupMemberJoinsWithNewRegex() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .build(12345L)) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicNames(List.of("foo")) + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .build()) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .withAssignmentEpoch(10)) + .build(); + + // Member 2 joins the consumer group with a new regular expression. 
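// The heartbeat below is expected to persist member 2's subscription and current assignment right away,
// while the resolution of "foo*" is deferred to an asynchronous task keyed groupId + "-regex"; the resolved
// regex and the epoch bump to 11 are only written once that task runs (see the processTasks() call further down).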
+ CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignor("range") + .setTopicPartitions(Collections.emptyList())); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId2) + .setMemberEpoch(10) + .setHeartbeatIntervalMs(5000) + .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), + result.response() + ); + + ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(0) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .build(); + + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember2), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember2) + ); + + assertRecordsEquals(expectedRecords, result.records()); + + // Execute pending tasks. + List> tasks = context.processTasks(); + assertEquals( + List.of( + new MockCoordinatorExecutor.ExecutorResult<>( + groupId + "-regex", + new CoordinatorResult<>(List.of( + // The resolution of the new regex is persisted. + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + "foo*", + new ResolvedRegularExpression( + Set.of("foo"), + 12345L, + context.time.milliseconds() + ) + ), + // The group epoch is bumped. + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) + )) + ) + ), + tasks + ); + } + + @Test + public void testConsumerGroupMemberJoinsWithUpdatedRegex() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .build(12345L)) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .build()) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .withAssignmentEpoch(10)) + .build(); + + // Member 1 updates its new regular expression. 
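// Updating the regex of an existing member is expected to tombstone the previously resolved "foo*" expression
// in the same heartbeat and to schedule the asynchronous resolution of "foo*|bar*"; the refreshed subscription
// metadata and the epoch bump to 11 should only come out of the deferred task processed below.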
+ CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(10) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*|bar*") + .setServerAssignor("range") + .setTopicPartitions(Collections.emptyList())); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId1) + .setMemberEpoch(10) + .setHeartbeatIntervalMs(5000) + .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List.of( + new ConsumerGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(fooTopicId) + .setPartitions(List.of(0, 1, 2, 3, 4, 5)) + )) + ), + result.response() + ); + + ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(0) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*|bar*") + .setServerAssignorName("range") + .build(); + + List expectedRecords = List.of( + // The member subscription is updated. + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), + // The previous regular expression is deleted. + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*") + ); + + assertRecordsEquals(expectedRecords, result.records()); + + // Execute pending tasks. + List> tasks = context.processTasks(); + assertEquals(1, tasks.size()); + + MockCoordinatorExecutor.ExecutorResult task = tasks.get(0); + assertEquals(groupId + "-regex", task.key); + assertRecordsEquals( + List.of( + // The resolution of the new regex is persisted. + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + "foo*|bar*", + new ResolvedRegularExpression( + Set.of("foo", "bar"), + 12345L, + context.time.milliseconds() + ) + ), + // The updated subscription metadata. + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord( + groupId, + Map.of( + "foo", new TopicMetadata(fooTopicId, fooTopicName, 6), + "bar", new TopicMetadata(barTopicId, barTopicName, 3) + ) + ), + // The group epoch is bumped. + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) + ), + task.result.records() + ); + } + + @Test + public void testConsumerGroupMemberJoinsWithRegexAndUpdatesItBeforeResolutionCompleted() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Collections.emptyMap())); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .build(12345L)) + .build(); + + // Member 1 joins. 
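// The initial join should write the member subscription, the epoch bump, an empty target assignment and the
// current member assignment, and schedule the groupId + "-regex" resolution task; the member then changes its
// regex before that task runs, which should turn the pending resolution of "foo*" into a no-op.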
+ CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(0) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignor("range") + .setTopicPartitions(Collections.emptyList())); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId1) + .setMemberEpoch(1) + .setHeartbeatIntervalMs(5000) + .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), + result.response() + ); + + ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(1) + .setPreviousMemberEpoch(0) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .build(); + + List expectedRecords = List.of( + // The member subscription is created. + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), + // The group epoch is bumped. + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 1), + // The target assignment is created. + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId1, Collections.emptyMap()), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 1), + // The member current state is created. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember1) + ); + + assertRecordsEquals(expectedRecords, result.records()); + + // The task is scheduled. + assertTrue(context.executor.isScheduled(groupId + "-regex")); + + // The member updates its regex before the resolution of the previous one completes. + result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(1) + .setSubscribedTopicRegex("foo*|bar*")); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId1) + .setMemberEpoch(1) + .setHeartbeatIntervalMs(5000), + result.response() + ); + + expectedMember1 = new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(1) + .setPreviousMemberEpoch(0) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*|bar*") + .setServerAssignorName("range") + .build(); + + expectedRecords = List.of( + // The member subscription is updated. + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember1), + // The previous regex is deleted. + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*") + ); + + assertRecordsEquals(expectedRecords, result.records()); + + // The task is still scheduled. + assertTrue(context.executor.isScheduled(groupId + "-regex")); + assertEquals(1, context.executor.size()); + + // Execute the pending tasks. + List> tasks = context.processTasks(); + assertEquals(1, tasks.size()); + + // The pending task was a no-op. + MockCoordinatorExecutor.ExecutorResult task = tasks.get(0); + assertEquals(groupId + "-regex", task.key); + assertRecordsEquals(Collections.emptyList(), task.result.records()); + + // The member heartbeats again. It triggers a new resolution. 
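// The earlier task resolved a regex that no longer has a subscriber, so this heartbeat is expected to
// reschedule the resolution for "foo*|bar*"; executing it should persist the resolved topics, the updated
// subscription metadata, and bump the group epoch to 2.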
+ result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(1) + .setSubscribedTopicRegex("foo*|bar*")); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId1) + .setMemberEpoch(1) + .setHeartbeatIntervalMs(5000), + result.response() + ); + + assertTrue(context.executor.isScheduled(groupId + "-regex")); + assertEquals(1, context.executor.size()); + + // Execute pending tasks. + tasks = context.processTasks(); + assertEquals(1, tasks.size()); + + task = tasks.get(0); + assertEquals(groupId + "-regex", task.key); + assertRecordsEquals( + List.of( + // The resolution of the new regex is persisted. + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + "foo*|bar*", + new ResolvedRegularExpression( + Set.of("foo", "bar"), + 12345L, + context.time.milliseconds() + ) + ), + // The updated subscription metadata. + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord( + groupId, + Map.of( + "foo", new TopicMetadata(fooTopicId, fooTopicName, 6), + "bar", new TopicMetadata(barTopicId, barTopicName, 3) + ) + ), + // The group epoch is bumped. + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 2) + ), + task.result.records() + ); + } + + @Test + public void testConsumerGroupMemberJoinRefreshesExpiredRegexes() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + Uuid foooTopicId = Uuid.randomUuid(); + String foooTopicName = "fooo"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Collections.emptyMap())); + + MetadataImage image = new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .build(1L); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(image) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .build()) + .withMember(new ConsumerGroupMember.Builder(memberId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("bar*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(barTopicId, 0, 1, 2))) + .build()) + .withResolvedRegularExpression("foo*", new ResolvedRegularExpression( + Set.of(fooTopicName), 0L, 0L)) + .withResolvedRegularExpression("bar*", new ResolvedRegularExpression( + Set.of(barTopicName), 0L, 0L)) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 
3))) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .withAssignment(memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0, 1, 2))) + .withAssignmentEpoch(10)) + .build(); + + // Update metadata image. + MetadataImage newImage = new MetadataImageBuilder(image) + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .addTopic(foooTopicId, foooTopicName, 1) + .build(2L); + + context.groupMetadataManager.onNewMetadataImage( + newImage, + new MetadataDelta(newImage) + ); + + // A member heartbeats. + context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(10)); + + // The task is NOT scheduled. + assertFalse(context.executor.isScheduled(groupId + "-regex")); + + // Advance past the batching interval. + context.sleep(11000L); + + // A member heartbeats. + context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(10)); + + // The task is scheduled. + assertTrue(context.executor.isScheduled(groupId + "-regex")); + + // Execute the pending tasks. + List> tasks = context.processTasks(); + assertEquals(1, tasks.size()); + + // Execute pending tasks. + MockCoordinatorExecutor.ExecutorResult task = tasks.get(0); + assertEquals(groupId + "-regex", task.key); + + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + "foo*", + new ResolvedRegularExpression( + Set.of(fooTopicName, foooTopicName), + 2L, + context.time.milliseconds() + ) + ), + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord( + groupId, + "bar*", + new ResolvedRegularExpression( + Set.of(barTopicName), + 2L, + context.time.milliseconds() + ) + ) + ), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord( + groupId, + Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3), + foooTopicName, new TopicMetadata(foooTopicId, foooTopicName, 1) + ) + )), + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)) + ), + task.result.records() + ); + } + + @Test + public void testResolvedRegularExpressionsRemovedWhenMembersLeaveOrFenced() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Collections.emptyMap())); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, Collections.singletonList(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .build(1L)) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") 
+ .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .build()) + .withMember(new ConsumerGroupMember.Builder(memberId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("bar*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(barTopicId, 0, 1, 2))) + .build()) + .withResolvedRegularExpression("foo*", new ResolvedRegularExpression( + Set.of(fooTopicName), 0L, 0L)) + .withResolvedRegularExpression("bar*", new ResolvedRegularExpression( + Set.of(barTopicName), 0L, 0L)) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3))) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .withAssignment(memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0, 1, 2))) + .withAssignmentEpoch(10)) + .build(); + + // Setup the timers. + context.onLoaded(); + + // Member 1 leaves the group. + CoordinatorResult result = context.consumerGroupHeartbeat( + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId1) + .setMemberEpoch(-1)); + + assertResponseEquals( + new ConsumerGroupHeartbeatResponseData() + .setMemberId(memberId1) + .setMemberEpoch(-1), + result.response() + ); + + List expectedRecords = List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*"), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, + Map.of(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3)) + ), + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11) + ); + + assertRecordsEquals(expectedRecords, result.records()); + + // Member 2 is fenced due to reaching the session timeout. + context.assertSessionTimeout(groupId, memberId2, 45000); + List> timeouts = context.sleep(45000 + 1); + + // Verify the expired timeout. 
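// Member 2 is the last subscriber to "bar*", so its session expiry should also tombstone that resolved
// regular expression, leave the subscription metadata empty, and bump the group epoch to 12.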
+ assertEquals( + Collections.singletonList(new ExpiredTimeout( + groupSessionTimeoutKey(groupId, memberId2), + new CoordinatorResult<>( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "bar*"), + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Collections.emptyMap()), + GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 12) + ) + ) + )), + timeouts + ); + } + + @Test + public void testResolvedRegularExpressionsRemovedWhenConsumerMembersRemovedByAdminApi() { + String groupId = "fooup"; + String memberId1 = Uuid.randomUuid().toString(); + String memberId2 = Uuid.randomUuid().toString(); + String memberId3 = Uuid.randomUuid().toString(); + String memberId4 = Uuid.randomUuid().toString(); + + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Collections.emptyMap())); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, Collections.singletonList(assignor)) + .withMetadataImage(new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 6) + .addTopic(barTopicId, barTopicName, 3) + .build(1L)) + .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10) + .withMember(new ConsumerGroupMember.Builder(memberId1) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setInstanceId(memberId1) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2))) + .build()) + .withMember(new ConsumerGroupMember.Builder(memberId2) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setInstanceId(memberId2) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("foo*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4, 5))) + .build()) + .withMember(new ConsumerGroupMember.Builder(memberId3) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setInstanceId(memberId3) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("bar*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + mkTopicAssignment(barTopicId, 0, 1))) + .build()) + .withMember(new ConsumerGroupMember.Builder(memberId4) + .setState(MemberState.STABLE) + .setMemberEpoch(10) + .setPreviousMemberEpoch(10) + .setInstanceId(memberId4) + .setClientId(DEFAULT_CLIENT_ID) + .setClientHost(DEFAULT_CLIENT_ADDRESS.toString()) + .setRebalanceTimeoutMs(5000) + .setSubscribedTopicRegex("bar*") + .setServerAssignorName("range") + .setAssignedPartitions(mkAssignment( + 
mkTopicAssignment(barTopicId, 2))) + .build()) + .withResolvedRegularExpression("foo*", new ResolvedRegularExpression( + Set.of(fooTopicName), 0L, 0L)) + .withResolvedRegularExpression("bar*", new ResolvedRegularExpression( + Set.of(barTopicName), 0L, 0L)) + .withSubscriptionMetadata(Map.of( + fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6), + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + )) + .withAssignment(memberId1, mkAssignment( + mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5))) + .withAssignment(memberId2, mkAssignment( + mkTopicAssignment(barTopicId, 0, 1, 2))) + .withAssignmentEpoch(10)) + .build(); + + // Remove members. + CoordinatorResult result = context.sendClassicGroupLeave( + new LeaveGroupRequestData() + .setGroupId(groupId) + .setMembers(List.of( + new MemberIdentity() + .setMemberId(memberId1) + .setGroupInstanceId(null), + new MemberIdentity() + .setMemberId(memberId2) + .setGroupInstanceId(memberId2), + new MemberIdentity() + .setMemberId(UNKNOWN_MEMBER_ID) + .setGroupInstanceId(memberId3) + )) + ); + + assertEquals( + new LeaveGroupResponseData() + .setMembers(List.of( + new LeaveGroupResponseData.MemberResponse() + .setMemberId(memberId1) + .setGroupInstanceId(null), + new LeaveGroupResponseData.MemberResponse() + .setMemberId(memberId2) + .setGroupInstanceId(memberId2), + new LeaveGroupResponseData.MemberResponse() + .setMemberId(UNKNOWN_MEMBER_ID) + .setGroupInstanceId(memberId3) + )), + result.response() + ); + + assertUnorderedRecordsEquals( + List.of( + List.of( + // Remove member 1. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), + // Remove member 2. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2), + // Remove member 3. + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId3), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId3) + ), + // Remove regex. + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone(groupId, "foo*")), + // Updated subscription metadata. + List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, Map.of( + barTopicName, new TopicMetadata(barTopicId, barTopicName, 3) + ))), + // Bumped epoch. 
+ List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)) + ), + result.records() ); } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTestContext.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTestContext.java index f752fa8254427..276a48dca9e40 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTestContext.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTestContext.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; @@ -49,8 +50,10 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.common.runtime.CoordinatorResult; +import org.apache.kafka.coordinator.common.runtime.MockCoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.MockCoordinatorTimer; import org.apache.kafka.coordinator.group.api.assignor.ConsumerGroupPartitionAssignor; import org.apache.kafka.coordinator.group.api.assignor.ShareGroupPartitionAssignor; @@ -69,6 +72,7 @@ import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey; import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.group.generated.GroupMetadataKey; import org.apache.kafka.coordinator.group.generated.GroupMetadataValue; import org.apache.kafka.coordinator.group.generated.ShareGroupCurrentMemberAssignmentKey; @@ -91,13 +95,13 @@ import org.apache.kafka.coordinator.group.modern.share.ShareGroupBuilder; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.timeline.SnapshotRegistry; import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -109,7 +113,7 @@ import java.util.stream.IntStream; import static org.apache.kafka.common.requests.JoinGroupRequest.UNKNOWN_MEMBER_ID; -import static org.apache.kafka.coordinator.group.Assertions.assertSyncGroupResponseEquals; +import static org.apache.kafka.coordinator.group.Assertions.assertResponseEquals; import static org.apache.kafka.coordinator.group.GroupConfigManagerTest.createConfigManager; import static org.apache.kafka.coordinator.group.GroupMetadataManager.EMPTY_RESULT; import static org.apache.kafka.coordinator.group.GroupMetadataManager.classicGroupHeartbeatKey; @@ -135,6 +139,45 @@ public class GroupMetadataManagerTestContext { static final String 
DEFAULT_CLIENT_ID = "client"; static final InetAddress DEFAULT_CLIENT_ADDRESS = InetAddress.getLoopbackAddress(); + private static class GroupCoordinatorConfigContext extends GroupCoordinatorConfig { + GroupCoordinatorConfigContext(AbstractConfig config) { + super(config); + } + + public static GroupCoordinatorConfig fromProps( + Map props + ) { + return new GroupCoordinatorConfigContext( + new AbstractConfig( + Utils.mergeConfigs(List.of( + GroupCoordinatorConfig.GROUP_COORDINATOR_CONFIG_DEF, + GroupCoordinatorConfig.NEW_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.OFFSET_MANAGEMENT_CONFIG_DEF, + GroupCoordinatorConfig.CONSUMER_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.SHARE_GROUP_CONFIG_DEF + )), + props + ) + ); + } + + @Override + @SuppressWarnings("unchecked") + protected List consumerGroupAssignors( + AbstractConfig config + ) { + // In unit tests, it is pretty convenient to have the ability to pass instantiated + // assignors. Hence, we check if the provided assignors are already instantiated. + // Otherwise, we use the regular method. + List classes = config.getList(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG); + if (classes.stream().allMatch(o -> o instanceof ConsumerGroupPartitionAssignor)) { + return Collections.unmodifiableList((List) classes); + } + + return super.consumerGroupAssignors(config); + } + } + public static void assertNoOrEmptyResult(List> timeouts) { assertTrue(timeouts.size() <= 1); timeouts.forEach(timeout -> assertEquals(EMPTY_RESULT, timeout.result)); @@ -147,7 +190,7 @@ public static JoinGroupRequestData.JoinGroupRequestProtocolCollection toProtocol protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocolNames[i]) .setMetadata(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( - Collections.singletonList(topicNames.get(i % topicNames.size())))).array()) + List.of(topicNames.get(i % topicNames.size())))).array()) ); } return protocols; @@ -183,8 +226,7 @@ public static JoinGroupRequestData.JoinGroupRequestProtocolCollection toConsumer public static CoordinatorRecord newGroupMetadataRecord( String groupId, - GroupMetadataValue value, - MetadataVersion metadataVersion + GroupMetadataValue value ) { return new CoordinatorRecord( new ApiMessageAndVersion( @@ -194,7 +236,7 @@ public static CoordinatorRecord newGroupMetadataRecord( ), new ApiMessageAndVersion( value, - metadataVersion.groupMetadataValueVersion() + (short) 3 ) ); } @@ -398,35 +440,26 @@ SyncGroupRequestData build() { } public static class Builder { - private final MockTime time = new MockTime(); + private final MockTime time = new MockTime(0, 0, 0); private final MockCoordinatorTimer timer = new MockCoordinatorTimer<>(time); + private final MockCoordinatorExecutor executor = new MockCoordinatorExecutor<>(); private final LogContext logContext = new LogContext(); private final SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext); private MetadataImage metadataImage; private GroupConfigManager groupConfigManager; - private List consumerGroupAssignors = Collections.singletonList(new MockPartitionAssignor("range")); private final List consumerGroupBuilders = new ArrayList<>(); - private int consumerGroupMaxSize = Integer.MAX_VALUE; - private int consumerGroupMetadataRefreshIntervalMs = Integer.MAX_VALUE; - private int classicGroupMaxSize = Integer.MAX_VALUE; - private int classicGroupInitialRebalanceDelayMs = 3000; - private final int classicGroupNewMemberJoinTimeoutMs = 5 * 60 * 1000; - private int 
classicGroupMinSessionTimeoutMs = 10; - private int classicGroupMaxSessionTimeoutMs = 10 * 60 * 1000; private final GroupCoordinatorMetricsShard metrics = mock(GroupCoordinatorMetricsShard.class); - private ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy = ConsumerGroupMigrationPolicy.DISABLED; - // Share group configs private ShareGroupPartitionAssignor shareGroupAssignor = new MockPartitionAssignor("share"); private final List shareGroupBuilders = new ArrayList<>(); - private int shareGroupMaxSize = Integer.MAX_VALUE; + private final Map config = new HashMap<>(); - public Builder withMetadataImage(MetadataImage metadataImage) { - this.metadataImage = metadataImage; + public Builder withConfig(String key, Object value) { + config.put(key, value); return this; } - public Builder withConsumerGroupAssignors(List assignors) { - this.consumerGroupAssignors = assignors; + public Builder withMetadataImage(MetadataImage metadataImage) { + this.metadataImage = metadataImage; return this; } @@ -435,41 +468,6 @@ public Builder withConsumerGroup(ConsumerGroupBuilder builder) { return this; } - public Builder withConsumerGroupMaxSize(int consumerGroupMaxSize) { - this.consumerGroupMaxSize = consumerGroupMaxSize; - return this; - } - - public Builder withConsumerGroupMetadataRefreshIntervalMs(int consumerGroupMetadataRefreshIntervalMs) { - this.consumerGroupMetadataRefreshIntervalMs = consumerGroupMetadataRefreshIntervalMs; - return this; - } - - public Builder withClassicGroupMaxSize(int classicGroupMaxSize) { - this.classicGroupMaxSize = classicGroupMaxSize; - return this; - } - - public Builder withClassicGroupInitialRebalanceDelayMs(int classicGroupInitialRebalanceDelayMs) { - this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs; - return this; - } - - public Builder withClassicGroupMinSessionTimeoutMs(int classicGroupMinSessionTimeoutMs) { - this.classicGroupMinSessionTimeoutMs = classicGroupMinSessionTimeoutMs; - return this; - } - - public Builder withClassicGroupMaxSessionTimeoutMs(int classicGroupMaxSessionTimeoutMs) { - this.classicGroupMaxSessionTimeoutMs = classicGroupMaxSessionTimeoutMs; - return this; - } - - public Builder withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy) { - this.consumerGroupMigrationPolicy = consumerGroupMigrationPolicy; - return this; - } - public Builder withShareGroup(ShareGroupBuilder builder) { this.shareGroupBuilders.add(builder); return this; @@ -480,46 +478,37 @@ public Builder withShareGroupAssignor(ShareGroupPartitionAssignor shareGroupAssi return this; } - public Builder withShareGroupMaxSize(int shareGroupMaxSize) { - this.shareGroupMaxSize = shareGroupMaxSize; - return this; - } - public GroupMetadataManagerTestContext build() { if (metadataImage == null) metadataImage = MetadataImage.EMPTY; - if (consumerGroupAssignors == null) consumerGroupAssignors = Collections.emptyList(); if (groupConfigManager == null) groupConfigManager = createConfigManager(); + config.putIfAbsent( + GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, + List.of(new MockPartitionAssignor("range")) + ); + + GroupCoordinatorConfig groupCoordinatorConfig = GroupCoordinatorConfigContext.fromProps(config); + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext( time, timer, + executor, snapshotRegistry, metrics, + groupCoordinatorConfig, new GroupMetadataManager.Builder() .withSnapshotRegistry(snapshotRegistry) .withLogContext(logContext) .withTime(time) .withTimer(timer) + 
.withExecutor(executor) + .withConfig(groupCoordinatorConfig) .withMetadataImage(metadataImage) - .withConsumerGroupHeartbeatInterval(5000) - .withConsumerGroupSessionTimeout(45000) - .withConsumerGroupMaxSize(consumerGroupMaxSize) - .withConsumerGroupAssignors(consumerGroupAssignors) - .withConsumerGroupMetadataRefreshIntervalMs(consumerGroupMetadataRefreshIntervalMs) - .withClassicGroupMaxSize(classicGroupMaxSize) - .withClassicGroupMinSessionTimeoutMs(classicGroupMinSessionTimeoutMs) - .withClassicGroupMaxSessionTimeoutMs(classicGroupMaxSessionTimeoutMs) - .withClassicGroupInitialRebalanceDelayMs(classicGroupInitialRebalanceDelayMs) - .withClassicGroupNewMemberJoinTimeoutMs(classicGroupNewMemberJoinTimeoutMs) .withGroupCoordinatorMetricsShard(metrics) - .withConsumerGroupMigrationPolicy(consumerGroupMigrationPolicy) .withShareGroupAssignor(shareGroupAssignor) - .withShareGroupMaxSize(shareGroupMaxSize) .withGroupConfigManager(groupConfigManager) .build(), - groupConfigManager, - classicGroupInitialRebalanceDelayMs, - classicGroupNewMemberJoinTimeoutMs + groupConfigManager ); consumerGroupBuilders.forEach(builder -> builder.build(metadataImage.topics()).forEach(context::replay)); @@ -533,6 +522,7 @@ public GroupMetadataManagerTestContext build() { final MockTime time; final MockCoordinatorTimer timer; + final MockCoordinatorExecutor executor; final SnapshotRegistry snapshotRegistry; final GroupCoordinatorMetricsShard metrics; final GroupMetadataManager groupMetadataManager; @@ -546,21 +536,22 @@ public GroupMetadataManagerTestContext build() { public GroupMetadataManagerTestContext( MockTime time, MockCoordinatorTimer timer, + MockCoordinatorExecutor executor, SnapshotRegistry snapshotRegistry, GroupCoordinatorMetricsShard metrics, + GroupCoordinatorConfig config, GroupMetadataManager groupMetadataManager, - GroupConfigManager groupConfigManager, - int classicGroupInitialRebalanceDelayMs, - int classicGroupNewMemberJoinTimeoutMs + GroupConfigManager groupConfigManager ) { this.time = time; this.timer = timer; + this.executor = executor; this.snapshotRegistry = snapshotRegistry; this.metrics = metrics; this.groupMetadataManager = groupMetadataManager; this.groupConfigManager = groupConfigManager; - this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs; - this.classicGroupNewMemberJoinTimeoutMs = classicGroupNewMemberJoinTimeoutMs; + this.classicGroupInitialRebalanceDelayMs = config.classicGroupInitialRebalanceDelayMs(); + this.classicGroupNewMemberJoinTimeoutMs = config.classicGroupNewMemberJoinTimeoutMs(); snapshotRegistry.idempotentCreateSnapshot(lastWrittenOffset); } @@ -677,6 +668,16 @@ public List> sleep( return timeouts; } + public List> processTasks() { + List> results = executor.poll(); + results.forEach(taskResult -> { + if (taskResult.result.replayRecords()) { + taskResult.result.records().forEach(this::replay); + } + }); + return results; + } + public void assertSessionTimeout( String groupId, String memberId, @@ -847,7 +848,7 @@ public JoinGroupResponseData joinClassicGroupAsDynamicMemberAndCompleteRebalance .build()); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), syncResult.records ); // Simulate a successful write to the log. 
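// The test builder now derives its tunables from a GroupCoordinatorConfig assembled from raw properties, so
// tests pass settings through withConfig(name, value) instead of the removed withXxx setters; already
// instantiated assignors are accepted because GroupCoordinatorConfigContext overrides consumerGroupAssignors.
// A minimal sketch of the intended usage (illustrative only, not part of the patch):
//
//   GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
//       .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(new MockPartitionAssignor("range")))
//       .withMetadataImage(MetadataImage.EMPTY)
//       .build();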
@@ -1054,8 +1055,8 @@ public RebalanceResult staticMembersJoinAndRebalance( SyncGroupRequestData.SyncGroupRequestAssignment::memberId, SyncGroupRequestData.SyncGroupRequestAssignment::assignment )); assertEquals( - Collections.singletonList( - GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment, MetadataVersion.latestTesting())), + List.of( + GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment)), leaderSyncResult.records ); @@ -1115,7 +1116,7 @@ public PendingMemberGroupResult setupGroupWithPendingMember(ClassicGroup group) // Now the group is stable, with the one member that joined above assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), syncResult.records ); // Simulate a successful write to log. @@ -1153,7 +1154,7 @@ public PendingMemberGroupResult setupGroupWithPendingMember(ClassicGroup group) syncResult = sendClassicGroupSync(syncRequest.setGenerationId(nextGenerationId)); assertEquals( - Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), + List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())), syncResult.records ); // Simulate a successful write to log. @@ -1208,7 +1209,7 @@ public void verifySessionExpiration(ClassicGroup group, int timeoutMs) { // Member should be removed as session expires. List> timeouts = sleep(timeoutMs); - List expectedRecords = Collections.singletonList(newGroupMetadataRecord( + List expectedRecords = List.of(newGroupMetadataRecord( group.groupId(), new GroupMetadataValue() .setMembers(Collections.emptyList()) @@ -1216,8 +1217,7 @@ public void verifySessionExpiration(ClassicGroup group, int timeoutMs) { .setLeader(null) .setProtocolType("consumer") .setProtocol(null) - .setCurrentStateTimestamp(time.milliseconds()), - MetadataVersion.latestTesting() + .setCurrentStateTimestamp(time.milliseconds()) )); @@ -1268,7 +1268,43 @@ public List sendConsumerGroupD } public List describeGroups(List groupIds) { - return groupMetadataManager.describeGroups(groupIds, lastCommittedOffset); + RequestContext context = new RequestContext( + new RequestHeader( + ApiKeys.DESCRIBE_GROUPS, + ApiKeys.DESCRIBE_GROUPS.latestVersion(), + DEFAULT_CLIENT_ID, + 0 + ), + "1", + DEFAULT_CLIENT_ADDRESS, + KafkaPrincipal.ANONYMOUS, + ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), + SecurityProtocol.PLAINTEXT, + ClientInformation.EMPTY, + false + ); + + return groupMetadataManager.describeGroups(context, groupIds, lastCommittedOffset); + } + + public List describeGroups(List groupIds, short apiVersion) { + RequestContext context = new RequestContext( + new RequestHeader( + ApiKeys.DESCRIBE_GROUPS, + apiVersion, + DEFAULT_CLIENT_ID, + 0 + ), + "1", + DEFAULT_CLIENT_ADDRESS, + KafkaPrincipal.ANONYMOUS, + ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), + SecurityProtocol.PLAINTEXT, + ClientInformation.EMPTY, + false + ); + + return groupMetadataManager.describeGroups(context, groupIds, lastCommittedOffset); } public List sendShareGroupDescribe(List groupIds) { @@ -1382,7 +1418,22 @@ public CoordinatorResult sendClassicG public void verifyDescribeGroupsReturnsDeadGroup(String groupId) { List describedGroups = - describeGroups(Collections.singletonList(groupId)); + 
describeGroups(List.of(groupId)); + + assertEquals( + List.of(new DescribeGroupsResponseData.DescribedGroup() + .setGroupId(groupId) + .setGroupState(DEAD.toString()) + .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code()) + .setErrorMessage("Group " + groupId + " not found.") + ), + describedGroups + ); + } + + public void verifyDescribeGroupsBeforeV6ReturnsDeadGroup(String groupId) { + List describedGroups = + describeGroups(Collections.singletonList(groupId), (short) 5); assertEquals( Collections.singletonList(new DescribeGroupsResponseData.DescribedGroup() @@ -1416,7 +1467,7 @@ public void verifyClassicGroupSyncToConsumerGroup( // Simulate a successful write to log. syncResult.appendFuture.complete(null); - assertSyncGroupResponseEquals( + assertResponseEquals( new SyncGroupResponseData() .setProtocolType(protocolType) .setProtocolName(protocolName) @@ -1467,99 +1518,99 @@ public void replay( throw new IllegalStateException("Received a null key in " + record); } - switch (key.version()) { - case GroupMetadataKey.HIGHEST_SUPPORTED_VERSION: + switch (CoordinatorRecordType.fromId(key.version())) { + case GROUP_METADATA: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) messageOrNull(value) ); break; - case ConsumerGroupMemberMetadataKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_MEMBER_METADATA: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) messageOrNull(value) ); break; - case ConsumerGroupMetadataKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_METADATA: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) messageOrNull(value) ); break; - case ConsumerGroupPartitionMetadataKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_PARTITION_METADATA: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) messageOrNull(value) ); break; - case ConsumerGroupTargetAssignmentMemberKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_TARGET_ASSIGNMENT_MEMBER: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) messageOrNull(value) ); break; - case ConsumerGroupTargetAssignmentMetadataKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_TARGET_ASSIGNMENT_METADATA: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) messageOrNull(value) ); break; - case ConsumerGroupCurrentMemberAssignmentKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_CURRENT_MEMBER_ASSIGNMENT: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) messageOrNull(value) ); break; - case ShareGroupMemberMetadataKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_MEMBER_METADATA: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) messageOrNull(value) ); break; - case ShareGroupMetadataKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_METADATA: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) messageOrNull(value) ); break; - case ShareGroupPartitionMetadataKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_PARTITION_METADATA: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) messageOrNull(value) ); break; - case 
ShareGroupTargetAssignmentMemberKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_TARGET_ASSIGNMENT_MEMBER: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) messageOrNull(value) ); break; - case ShareGroupTargetAssignmentMetadataKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_TARGET_ASSIGNMENT_METADATA: groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) messageOrNull(value) ); break; - case ShareGroupCurrentMemberAssignmentKey.HIGHEST_SUPPORTED_VERSION: + case SHARE_GROUP_CURRENT_MEMBER_ASSIGNMENT: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) messageOrNull(value) ); break; - case ConsumerGroupRegularExpressionKey.HIGHEST_SUPPORTED_VERSION: + case CONSUMER_GROUP_REGULAR_EXPRESSION: groupMetadataManager.replay( (ConsumerGroupRegularExpressionKey) key.message(), (ConsumerGroupRegularExpressionValue) messageOrNull(value) @@ -1575,6 +1626,10 @@ public void replay( snapshotRegistry.idempotentCreateSnapshot(lastWrittenOffset); } + void onLoaded() { + groupMetadataManager.onLoaded(); + } + void onUnloaded() { groupMetadataManager.onUnloaded(); } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java index 995f1ee74a50b..23a01a6024176 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java @@ -27,7 +27,15 @@ import java.util.Arrays; public class MetadataImageBuilder { - private MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); + private final MetadataDelta delta; + + public MetadataImageBuilder() { + this(MetadataImage.EMPTY); + } + + public MetadataImageBuilder(MetadataImage image) { + this.delta = new MetadataDelta(image); + } public MetadataImageBuilder addTopic( Uuid topicId, @@ -61,6 +69,10 @@ public MetadataImageBuilder addRacks() { } public MetadataImage build() { - return delta.apply(MetadataProvenance.EMPTY); + return build(0); + } + + public MetadataImage build(long version) { + return delta.apply(new MetadataProvenance(version, 0, 0L, true)); } } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetAndMetadataTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetAndMetadataTest.java index f252caa2d5ef0..e6be1f27883e8 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetAndMetadataTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetAndMetadataTest.java @@ -87,8 +87,7 @@ public void testFromRequest() { .setPartitionIndex(0) .setCommittedOffset(100L) .setCommittedLeaderEpoch(-1) - .setCommittedMetadata(null) - .setCommitTimestamp(-1L); + .setCommittedMetadata(null); assertEquals( new OffsetAndMetadata( @@ -106,15 +105,14 @@ public void testFromRequest() { partition .setCommittedLeaderEpoch(10) - .setCommittedMetadata("hello") - .setCommitTimestamp(1234L); + .setCommittedMetadata("hello"); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.of(10), "hello", - 1234L, + time.milliseconds(), OptionalLong.empty() ), OffsetAndMetadata.fromRequest( partition, @@ -128,7 +126,7 @@ public void testFromRequest() { 100L, OptionalInt.of(10), 
"hello", - 1234L, + time.milliseconds(), OptionalLong.of(5678L) ), OffsetAndMetadata.fromRequest( partition, diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java index fa6a95e741038..e440104be738d 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/OffsetMetadataManagerTest.java @@ -49,12 +49,13 @@ import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.common.runtime.CoordinatorResult; +import org.apache.kafka.coordinator.common.runtime.MockCoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.MockCoordinatorTimer; -import org.apache.kafka.coordinator.group.assignor.RangeAssignor; import org.apache.kafka.coordinator.group.classic.ClassicGroup; import org.apache.kafka.coordinator.group.classic.ClassicGroupMember; import org.apache.kafka.coordinator.group.classic.ClassicGroupState; import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue; +import org.apache.kafka.coordinator.group.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.group.generated.OffsetCommitKey; import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard; @@ -62,7 +63,6 @@ import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.timeline.SnapshotRegistry; import org.junit.jupiter.api.Test; @@ -100,13 +100,14 @@ static class OffsetMetadataManagerTestContext { public static class Builder { private final MockTime time = new MockTime(); private final MockCoordinatorTimer timer = new MockCoordinatorTimer<>(time); + private final MockCoordinatorExecutor executor = new MockCoordinatorExecutor<>(); private final LogContext logContext = new LogContext(); private final SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext); + private final GroupCoordinatorMetricsShard metrics = mock(GroupCoordinatorMetricsShard.class); + private final GroupConfigManager configManager = mock(GroupConfigManager.class); private GroupMetadataManager groupMetadataManager = null; private MetadataImage metadataImage = null; private GroupCoordinatorConfig config = null; - private GroupCoordinatorMetricsShard metrics = mock(GroupCoordinatorMetricsShard.class); - private GroupConfigManager configManager = mock(GroupConfigManager.class); Builder withOffsetMetadataMaxSize(int offsetMetadataMaxSize) { config = GroupCoordinatorConfigTest.createGroupCoordinatorConfig(offsetMetadataMaxSize, 60000L, 24 * 60); @@ -133,12 +134,13 @@ OffsetMetadataManagerTestContext build() { groupMetadataManager = new GroupMetadataManager.Builder() .withTime(time) .withTimer(timer) + .withExecutor(executor) .withSnapshotRegistry(snapshotRegistry) .withLogContext(logContext) .withMetadataImage(metadataImage) - .withConsumerGroupAssignors(Collections.singletonList(new RangeAssignor())) .withGroupCoordinatorMetricsShard(metrics) .withGroupConfigManager(configManager) + 
.withConfig(GroupCoordinatorConfig.fromProps(Collections.emptyMap())) .build(); } @@ -447,8 +449,7 @@ public void commitOffset( "metadata", commitTimestamp, OptionalLong.empty() - ), - MetadataVersion.latestTesting() + ) )); } @@ -494,8 +495,8 @@ private void replay( throw new IllegalStateException("Received a null key in " + record); } - switch (key.version()) { - case OffsetCommitKey.HIGHEST_SUPPORTED_VERSION: + switch (CoordinatorRecordType.fromId(key.version())) { + case OFFSET_COMMIT: offsetMetadataManager.replay( lastWrittenOffset, producerId, @@ -905,8 +906,7 @@ public void testGenericGroupOffsetCommitWithRetentionTime() { "", context.time.milliseconds(), OptionalLong.of(context.time.milliseconds() + 1234L) - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1013,8 +1013,7 @@ public void testSimpleGroupOffsetCommit() { "", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1073,8 +1072,7 @@ public void testSimpleGroupOffsetCommitWithInstanceId() { "", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1237,8 +1235,7 @@ public void testConsumerGroupOffsetCommitFromAdminClient() { "", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1275,7 +1272,6 @@ public void testConsumerGroupOffsetCommit() { .setCommittedOffset(100L) .setCommittedLeaderEpoch(10) .setCommittedMetadata("metadata") - .setCommitTimestamp(context.time.milliseconds()) )) )) ); @@ -1305,8 +1301,7 @@ public void testConsumerGroupOffsetCommit() { "metadata", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1344,14 +1339,12 @@ public void testConsumerGroupOffsetCommitWithOffsetMetadataTooLarge() { .setPartitionIndex(0) .setCommittedOffset(100L) .setCommittedLeaderEpoch(10) - .setCommittedMetadata("toolarge") - .setCommitTimestamp(context.time.milliseconds()), + .setCommittedMetadata("toolarge"), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) .setCommittedOffset(100L) .setCommittedLeaderEpoch(10) .setCommittedMetadata("small") - .setCommitTimestamp(context.time.milliseconds()) )) )) ); @@ -1384,8 +1377,7 @@ public void testConsumerGroupOffsetCommitWithOffsetMetadataTooLarge() { "small", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1451,8 +1443,7 @@ public void testConsumerGroupTransactionalOffsetCommit() { "metadata", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -1609,8 +1600,7 @@ public void testGenericGroupTransactionalOffsetCommit() { "metadata", context.time.milliseconds(), OptionalLong.empty() - ), - MetadataImage.EMPTY.features().metadataVersion() + ) )), result.records() ); @@ -3137,8 +3127,7 @@ private void verifyReplay( groupId, topic, partition, - offsetAndMetadata, - MetadataImage.EMPTY.features().metadataVersion() + offsetAndMetadata )); assertEquals(offsetAndMetadata, context.offsetMetadataManager.offset( @@ -3160,8 +3149,7 @@ private void verifyTransactionalReplay( groupId, topic, partition, - offsetAndMetadata, - MetadataImage.EMPTY.features().metadataVersion() + offsetAndMetadata )); 
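// Note: the offset-commit record helpers here are invoked with just the group, topic,
// partition and OffsetAndMetadata; the value's record version presumably no longer
// depends on the cluster MetadataVersion, so no MetadataVersion argument is threaded through.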
assertEquals(offsetAndMetadata, context.offsetMetadataManager.pendingTransactionalOffset( diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategyTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategyTest.java new file mode 100644 index 0000000000000..b0523a5fb9fe0 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/ShareGroupAutoOffsetResetStrategyTest.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.requests.ListOffsetsRequest; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.time.Instant; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ShareGroupAutoOffsetResetStrategyTest { + + @Test + public void testFromString() { + assertEquals(ShareGroupAutoOffsetResetStrategy.EARLIEST, ShareGroupAutoOffsetResetStrategy.fromString("earliest")); + assertEquals(ShareGroupAutoOffsetResetStrategy.LATEST, ShareGroupAutoOffsetResetStrategy.fromString("latest")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("invalid")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("by_duration:invalid")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("by_duration:-PT1H")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("by_duration:")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("by_duration")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("LATEST")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("EARLIEST")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("NONE")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString("")); + assertThrows(IllegalArgumentException.class, () -> ShareGroupAutoOffsetResetStrategy.fromString(null)); + + ShareGroupAutoOffsetResetStrategy strategy = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H"); + assertEquals("by_duration", strategy.name()); + } + + @Test + 
public void testValidator() { + ShareGroupAutoOffsetResetStrategy.Validator validator = new ShareGroupAutoOffsetResetStrategy.Validator(); + assertDoesNotThrow(() -> validator.ensureValid("test", "earliest")); + assertDoesNotThrow(() -> validator.ensureValid("test", "latest")); + assertDoesNotThrow(() -> validator.ensureValid("test", "by_duration:PT1H")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "invalid")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:invalid")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:-PT1H")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "LATEST")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "EARLIEST")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "NONE")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", "")); + assertThrows(ConfigException.class, () -> validator.ensureValid("test", null)); + } + + @Test + public void testEqualsAndHashCode() { + ShareGroupAutoOffsetResetStrategy earliest1 = ShareGroupAutoOffsetResetStrategy.fromString("earliest"); + ShareGroupAutoOffsetResetStrategy earliest2 = ShareGroupAutoOffsetResetStrategy.fromString("earliest"); + ShareGroupAutoOffsetResetStrategy latest1 = ShareGroupAutoOffsetResetStrategy.fromString("latest"); + + ShareGroupAutoOffsetResetStrategy duration1 = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:P2D"); + ShareGroupAutoOffsetResetStrategy duration2 = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:P2D"); + + assertEquals(earliest1, earliest2); + assertNotEquals(earliest1, latest1); + assertEquals(earliest1.hashCode(), earliest2.hashCode()); + assertNotEquals(earliest1.hashCode(), latest1.hashCode()); + + assertNotEquals(latest1, duration2); + assertEquals(duration1, duration2); + } + + @Test + public void testTimestamp() { + ShareGroupAutoOffsetResetStrategy earliest1 = ShareGroupAutoOffsetResetStrategy.fromString("earliest"); + ShareGroupAutoOffsetResetStrategy earliest2 = ShareGroupAutoOffsetResetStrategy.fromString("earliest"); + assertEquals(ListOffsetsRequest.EARLIEST_TIMESTAMP, earliest1.timestamp()); + assertEquals(earliest1, earliest2); + + ShareGroupAutoOffsetResetStrategy latest1 = ShareGroupAutoOffsetResetStrategy.fromString("latest"); + ShareGroupAutoOffsetResetStrategy latest2 = ShareGroupAutoOffsetResetStrategy.fromString("latest"); + assertEquals(ListOffsetsRequest.LATEST_TIMESTAMP, latest1.timestamp()); + assertEquals(latest1, latest2); + + ShareGroupAutoOffsetResetStrategy byDuration1 = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H"); + Long timestamp = byDuration1.timestamp(); + assertTrue(timestamp <= Instant.now().toEpochMilli() - Duration.ofHours(1).toMillis()); + + ShareGroupAutoOffsetResetStrategy byDuration2 = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT1H"); + ShareGroupAutoOffsetResetStrategy byDuration3 = ShareGroupAutoOffsetResetStrategy.fromString("by_duration:PT2H"); + + assertEquals(byDuration1, byDuration2); + assertNotEquals(byDuration1, byDuration3); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupMemberTest.java 
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupMemberTest.java index c26b329c06c37..0ccf3abdf40e3 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupMemberTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupMemberTest.java @@ -23,8 +23,8 @@ import org.junit.jupiter.api.Test; -import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -194,7 +194,7 @@ public void testVoteRaisesOnNoSupportedProtocols() { ); assertThrows(IllegalArgumentException.class, () -> - member.vote(Collections.singleton("unknown")) + member.vote(Set.of("unknown")) ); } @@ -290,7 +290,7 @@ public void testDescribeNoMetadata() { @Test public void testDescribe() { - JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(Collections.singletonList( + JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(List.of( new JoinGroupRequestProtocol() .setName("range") .setMetadata(new byte[]{0}) diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java index 0a321ae0349d4..93c382dc4d375 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java @@ -57,7 +57,6 @@ import org.junit.jupiter.params.ParameterizedTest; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -403,7 +402,7 @@ public void testSubscribedTopics() { .setName("range") .setMetadata(ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription( - Collections.singletonList("foo") + List.of("foo") )).array())); ClassicGroupMember member = new ClassicGroupMember( @@ -422,7 +421,7 @@ public void testSubscribedTopics() { group.initNextGeneration(); - Set expectedTopics = new HashSet<>(Collections.singleton("foo")); + Set expectedTopics = new HashSet<>(Set.of("foo")); assertEquals(expectedTopics, group.subscribedTopics().get()); group.transitionTo(PREPARING_REBALANCE); @@ -851,7 +850,7 @@ public void testCanAddAndRemovePendingSyncMember() { group.add(member); assertTrue(group.addPendingSyncMember(memberId)); - assertEquals(Collections.singleton(memberId), group.allPendingSyncMembers()); + assertEquals(Set.of(memberId), group.allPendingSyncMembers()); group.removePendingSyncMember(memberId); assertEquals(Collections.emptySet(), group.allPendingSyncMembers()); } @@ -876,7 +875,7 @@ public void testRemovalFromPendingSyncWhenMemberIsRemoved() { group.add(member); assertTrue(group.addPendingSyncMember(memberId)); - assertEquals(Collections.singleton(memberId), group.allPendingSyncMembers()); + assertEquals(Set.of(memberId), group.allPendingSyncMembers()); group.remove(memberId); assertEquals(Collections.emptySet(), group.allPendingSyncMembers()); } @@ -902,7 +901,7 @@ public void testNewGenerationClearsPendingSyncMembers() { group.add(member); group.transitionTo(PREPARING_REBALANCE); assertTrue(group.addPendingSyncMember(memberId)); - assertEquals(Collections.singleton(memberId), group.allPendingSyncMembers()); + assertEquals(Set.of(memberId), 
group.allPendingSyncMembers()); group.initNextGeneration(); assertEquals(Collections.emptySet(), group.allPendingSyncMembers()); } @@ -1010,7 +1009,7 @@ public void testValidateOffsetCommit(short version) { 100, 100, "consumer", - new JoinGroupRequestProtocolCollection(Collections.singletonList( + new JoinGroupRequestProtocolCollection(List.of( new JoinGroupRequestProtocol() .setName("roundrobin") .setMetadata(new byte[0])).iterator()) @@ -1144,7 +1143,7 @@ public void testOffsetExpirationCondition() { protocols.add(new JoinGroupRequestProtocol() .setName("range") .setMetadata(ConsumerProtocol.serializeSubscription( - new ConsumerPartitionAssignor.Subscription(Collections.singletonList("topic"))).array())); + new ConsumerPartitionAssignor.Subscription(List.of("topic"))).array())); ClassicGroupMember memberWithNonConsumerProtocol = new ClassicGroupMember( "memberWithNonConsumerProtocol", @@ -1214,7 +1213,7 @@ public void testIsSubscribedToTopic() { protocols.add(new JoinGroupRequestProtocol() .setName("range") .setMetadata(ConsumerProtocol.serializeSubscription( - new ConsumerPartitionAssignor.Subscription(Collections.singletonList("topic"))).array())); + new ConsumerPartitionAssignor.Subscription(List.of("topic"))).array())); ClassicGroupMember memberWithNonConsumerProtocol = new ClassicGroupMember( "memberWithNonConsumerProtocol", @@ -1261,7 +1260,7 @@ public void testIsSubscribedToTopic() { group.transitionTo(PREPARING_REBALANCE); group.initNextGeneration(); assertTrue(group.isInState(COMPLETING_REBALANCE)); - assertEquals(Optional.of(Collections.singleton("topic")), group.computeSubscribedTopics()); + assertEquals(Optional.of(Set.of("topic")), group.computeSubscribedTopics()); assertTrue(group.usesConsumerGroupProtocol()); assertTrue(group.isSubscribedToTopic("topic")); } @@ -1269,22 +1268,22 @@ public void testIsSubscribedToTopic() { @Test public void testIsInStates() { ClassicGroup group = new ClassicGroup(new LogContext(), "groupId", EMPTY, Time.SYSTEM); - assertTrue(group.isInStates(Collections.singleton("empty"), 0)); + assertTrue(group.isInStates(Set.of("empty"), 0)); group.transitionTo(PREPARING_REBALANCE); - assertTrue(group.isInStates(Collections.singleton("preparingrebalance"), 0)); - assertFalse(group.isInStates(Collections.singleton("PreparingRebalance"), 0)); + assertTrue(group.isInStates(Set.of("preparingrebalance"), 0)); + assertFalse(group.isInStates(Set.of("PreparingRebalance"), 0)); group.transitionTo(COMPLETING_REBALANCE); - assertTrue(group.isInStates(new HashSet<>(Collections.singletonList("completingrebalance")), 0)); + assertTrue(group.isInStates(new HashSet<>(List.of("completingrebalance")), 0)); group.transitionTo(STABLE); - assertTrue(group.isInStates(Collections.singleton("stable"), 0)); - assertFalse(group.isInStates(Collections.singleton("empty"), 0)); + assertTrue(group.isInStates(Set.of("stable"), 0)); + assertFalse(group.isInStates(Set.of("empty"), 0)); group.transitionTo(DEAD); - assertTrue(group.isInStates(new HashSet<>(Arrays.asList("dead", " ")), 0)); + assertTrue(group.isInStates(new HashSet<>(List.of("dead", " ")), 0)); } @Test @@ -1397,15 +1396,15 @@ public void testFromConsumerGroupWithJoiningMember() { mkTopicAssignment(fooTopicId, 1) ))); - List protocols1 = Collections.singletonList(createClassicProtocol( + List protocols1 = List.of(createClassicProtocol( "range", - Collections.singletonList(fooTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(fooTopicName), + List.of(new TopicPartition(fooTopicName, 0)) )); 
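// protocols2 mirrors protocols1 above: a single classic "range" protocol whose serialized
// subscription covers the foo topic and, presumably, owns one of its partitions.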
- List protocols2 = Collections.singletonList(createClassicProtocol( + List protocols2 = List.of(createClassicProtocol( "range", - Collections.singletonList(fooTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 1)) + List.of(fooTopicName), + List.of(new TopicPartition(fooTopicName, 1)) )); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1) @@ -1414,7 +1413,7 @@ public void testFromConsumerGroupWithJoiningMember() { .setPreviousMemberEpoch(9) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -1433,7 +1432,7 @@ public void testFromConsumerGroupWithJoiningMember() { .setPreviousMemberEpoch(9) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -1446,7 +1445,7 @@ public void testFromConsumerGroupWithJoiningMember() { .setPreviousMemberEpoch(0) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -1459,7 +1458,7 @@ public void testFromConsumerGroupWithJoiningMember() { ClassicGroup classicGroup = ClassicGroup.fromConsumerGroup( consumerGroup, - memberId2, + Collections.emptySet(), newMember2, logContext, time, @@ -1486,13 +1485,13 @@ public void testFromConsumerGroupWithJoiningMember() { member1.rebalanceTimeoutMs(), member1.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, - new JoinGroupRequestData.JoinGroupRequestProtocolCollection(Collections.singletonList( + new JoinGroupRequestData.JoinGroupRequestProtocolCollection(List.of( new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocols1.get(0).name()) .setMetadata(protocols1.get(0).metadata()) ).iterator()), Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment( - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(new TopicPartition(fooTopicName, 0)) ))) ) ); @@ -1505,13 +1504,13 @@ public void testFromConsumerGroupWithJoiningMember() { newMember2.rebalanceTimeoutMs(), newMember2.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, - new JoinGroupRequestData.JoinGroupRequestProtocolCollection(Collections.singletonList( + new JoinGroupRequestData.JoinGroupRequestProtocolCollection(List.of( new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocols2.get(0).name()) .setMetadata(protocols2.get(0).metadata()) ).iterator()), Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment( - Collections.singletonList(new TopicPartition(fooTopicName, 1)) + List.of(new TopicPartition(fooTopicName, 1)) ))) ) ); @@ -1549,10 +1548,10 @@ public void testFromConsumerGroupWithoutJoiningMember() { mkTopicAssignment(fooTopicId, 1) ))); - List protocols1 = Collections.singletonList(createClassicProtocol( + List protocols1 = List.of(createClassicProtocol( "range", - Collections.singletonList(fooTopicName), - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(fooTopicName), + List.of(new 
TopicPartition(fooTopicName, 0)) )); ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1) @@ -1561,7 +1560,7 @@ public void testFromConsumerGroupWithoutJoiningMember() { .setPreviousMemberEpoch(9) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setClassicMemberMetadata( @@ -1580,7 +1579,7 @@ public void testFromConsumerGroupWithoutJoiningMember() { .setPreviousMemberEpoch(9) .setClientId("client-id") .setClientHost("client-host") - .setSubscribedTopicNames(Collections.singletonList(fooTopicName)) + .setSubscribedTopicNames(List.of(fooTopicName)) .setServerAssignorName("range") .setRebalanceTimeoutMs(45000) .setAssignedPartitions(mkAssignment( @@ -1590,7 +1589,7 @@ public void testFromConsumerGroupWithoutJoiningMember() { ClassicGroup classicGroup = ClassicGroup.fromConsumerGroup( consumerGroup, - memberId2, + Set.of(member2), null, logContext, time, @@ -1617,13 +1616,13 @@ public void testFromConsumerGroupWithoutJoiningMember() { member1.rebalanceTimeoutMs(), member1.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, - new JoinGroupRequestData.JoinGroupRequestProtocolCollection(Collections.singletonList( + new JoinGroupRequestData.JoinGroupRequestProtocolCollection(List.of( new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocols1.get(0).name()) .setMetadata(protocols1.get(0).metadata()) ).iterator()), Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment( - Collections.singletonList(new TopicPartition(fooTopicName, 0)) + List.of(new TopicPartition(fooTopicName, 0)) ))) ) ); diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java index db6fae506ff3f..d04aa5338736d 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java @@ -16,8 +16,8 @@ */ package org.apache.kafka.coordinator.group.metrics; +import org.apache.kafka.common.GroupState; import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.ShareGroupState; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.metrics.Metrics; @@ -116,19 +116,19 @@ public void testMetricNames() { GroupCoordinatorMetrics.METRICS_GROUP, "The number of share groups in empty state.", "protocol", Group.GroupType.SHARE.toString(), - "state", ShareGroupState.EMPTY.toString()), + "state", GroupState.EMPTY.toString()), metrics.metricName( "group-count", GroupCoordinatorMetrics.METRICS_GROUP, "The number of share groups in stable state.", "protocol", Group.GroupType.SHARE.toString(), - "state", ShareGroupState.STABLE.toString()), + "state", GroupState.STABLE.toString()), metrics.metricName( "group-count", GroupCoordinatorMetrics.METRICS_GROUP, "The number of share groups in dead state.", "protocol", Group.GroupType.SHARE.toString(), - "state", ShareGroupState.DEAD.toString()) + "state", GroupState.DEAD.toString()) )); try { diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilderTest.java 
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilderTest.java index 3f2aaa34f0d02..c267195eeae9d 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilderTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TargetAssignmentBuilderTest.java @@ -21,10 +21,10 @@ import org.apache.kafka.coordinator.group.MetadataImageBuilder; import org.apache.kafka.coordinator.group.api.assignor.GroupAssignment; import org.apache.kafka.coordinator.group.api.assignor.MemberAssignment; -import org.apache.kafka.coordinator.group.api.assignor.MemberSubscription; import org.apache.kafka.coordinator.group.api.assignor.PartitionAssignor; import org.apache.kafka.coordinator.group.api.assignor.SubscriptionType; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; +import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; import org.apache.kafka.image.TopicsImage; import org.junit.jupiter.api.Test; @@ -37,13 +37,13 @@ import java.util.Optional; import java.util.Set; -import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedListEquals; +import static org.apache.kafka.coordinator.group.Assertions.assertRecordsEquals; +import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedRecordsEquals; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkAssignment; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkTopicAssignment; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord; import static org.apache.kafka.coordinator.group.GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord; import static org.apache.kafka.coordinator.group.api.assignor.SubscriptionType.HOMOGENEOUS; -import static org.apache.kafka.coordinator.group.modern.TargetAssignmentBuilder.createMemberSubscriptionAndAssignment; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -63,6 +63,7 @@ public static class TargetAssignmentBuilderTestContext { private final Map targetAssignment = new HashMap<>(); private final Map memberAssignments = new HashMap<>(); private final Map staticMembers = new HashMap<>(); + private final Map resolvedRegularExpressions = new HashMap<>(); private MetadataImageBuilder topicsImageBuilder = new MetadataImageBuilder(); public TargetAssignmentBuilderTestContext( @@ -78,17 +79,37 @@ public void addGroupMember( List subscriptions, Map> targetPartitions ) { - addGroupMember(memberId, null, subscriptions, targetPartitions); + addGroupMember(memberId, null, subscriptions, "", targetPartitions); } - private void addGroupMember( + public void addGroupMember( + String memberId, + List subscriptions, + String subscribedRegex, + Map> targetPartitions + ) { + addGroupMember(memberId, null, subscriptions, subscribedRegex, targetPartitions); + } + + public void addGroupMember( String memberId, String instanceId, List subscriptions, Map> targetPartitions + ) { + addGroupMember(memberId, instanceId, subscriptions, "", targetPartitions); + } + + public void addGroupMember( + String memberId, + String instanceId, + List subscriptions, + String subscribedRegex, + Map> targetPartitions ) { ConsumerGroupMember.Builder memberBuilder = new ConsumerGroupMember.Builder(memberId) - .setSubscribedTopicNames(subscriptions); + 
.setSubscribedTopicNames(subscriptions) + .setSubscribedTopicRegex(subscribedRegex); if (instanceId != null) { memberBuilder.setInstanceId(instanceId); @@ -158,6 +179,45 @@ public void prepareMemberAssignment( memberAssignments.put(memberId, new MemberAssignmentImpl(assignment)); } + public void addResolvedRegularExpression( + String regex, + ResolvedRegularExpression resolvedRegularExpression + ) { + resolvedRegularExpressions.put(regex, resolvedRegularExpression); + } + + private MemberSubscriptionAndAssignmentImpl newMemberSubscriptionAndAssignment( + ConsumerGroupMember member, + Assignment memberAssignment, + TopicIds.TopicResolver topicResolver + ) { + Set subscriptions = member.subscribedTopicNames(); + + // Check whether the member is also subscribed to a regular expression. If it is, + // create the union of the two subscriptions. + String subscribedTopicRegex = member.subscribedTopicRegex(); + if (subscribedTopicRegex != null && !subscribedTopicRegex.isEmpty()) { + ResolvedRegularExpression resolvedRegularExpression = resolvedRegularExpressions.get(subscribedTopicRegex); + if (resolvedRegularExpression != null) { + if (subscriptions.isEmpty()) { + subscriptions = resolvedRegularExpression.topics; + } else if (!resolvedRegularExpression.topics.isEmpty()) { + // We only use a UnionSet when the member uses both type of subscriptions. The + // protocol allows it. However, the Apache Kafka Consumer does not support it. + // Other clients such as librdkafka may support it. + subscriptions = new UnionSet<>(subscriptions, resolvedRegularExpression.topics); + } + } + } + + return new MemberSubscriptionAndAssignmentImpl( + Optional.ofNullable(member.rackId()), + Optional.ofNullable(member.instanceId()), + new TopicIds(subscriptions, topicResolver), + memberAssignment + ); + } + public TargetAssignmentBuilder.TargetAssignmentResult build() { TopicsImage topicsImage = topicsImageBuilder.build().topics(); TopicIds.TopicResolver topicResolver = new TopicIds.CachedTopicResolver(topicsImage); @@ -166,7 +226,7 @@ public TargetAssignmentBuilder.TargetAssignmentResult build() { // All the existing members are prepared. members.forEach((memberId, member) -> - memberSubscriptions.put(memberId, createMemberSubscriptionAndAssignment( + memberSubscriptions.put(memberId, newMemberSubscriptionAndAssignment( member, targetAssignment.getOrDefault(memberId, Assignment.EMPTY), topicResolver @@ -189,7 +249,7 @@ public TargetAssignmentBuilder.TargetAssignmentResult build() { } } - memberSubscriptions.put(memberId, createMemberSubscriptionAndAssignment( + memberSubscriptions.put(memberId, newMemberSubscriptionAndAssignment( updatedMemberOrNull, assignment, topicResolver @@ -223,15 +283,16 @@ public TargetAssignmentBuilder.TargetAssignmentResult build() { .thenReturn(new GroupAssignment(memberAssignments)); // Create and populate the assignment builder. 
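// The consumer-specific nested builder (ConsumerTargetAssignmentBuilder) is exercised below;
// share groups presumably use their own TargetAssignmentBuilder variant with the same fluent setters.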
- TargetAssignmentBuilder builder = - new TargetAssignmentBuilder(groupId, groupEpoch, assignor) - .withMembers(members) - .withStaticMembers(staticMembers) - .withSubscriptionMetadata(subscriptionMetadata) - .withSubscriptionType(subscriptionType) - .withTargetAssignment(targetAssignment) - .withInvertedTargetAssignment(invertedTargetAssignment) - .withTopicsImage(topicsImage); + TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder builder = + new TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder(groupId, groupEpoch, assignor) + .withMembers(members) + .withStaticMembers(staticMembers) + .withSubscriptionMetadata(subscriptionMetadata) + .withSubscriptionType(subscriptionType) + .withTargetAssignment(targetAssignment) + .withInvertedTargetAssignment(invertedTargetAssignment) + .withTopicsImage(topicsImage) + .withResolvedRegularExpressions(resolvedRegularExpressions); // Add the updated members or delete the deleted members. updatedMembers.forEach((memberId, updatedMemberOrNull) -> { @@ -254,42 +315,6 @@ public TargetAssignmentBuilder.TargetAssignmentResult build() { } } - @Test - public void testCreateMemberSubscriptionSpecImpl() { - Uuid fooTopicId = Uuid.randomUuid(); - Uuid barTopicId = Uuid.randomUuid(); - TopicsImage topicsImage = new MetadataImageBuilder() - .addTopic(fooTopicId, "foo", 5) - .addTopic(barTopicId, "bar", 5) - .build() - .topics(); - TopicIds.TopicResolver topicResolver = new TopicIds.DefaultTopicResolver(topicsImage); - - ConsumerGroupMember member = new ConsumerGroupMember.Builder("member-id") - .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) - .setRackId("rackId") - .setInstanceId("instanceId") - .build(); - - Assignment assignment = new Assignment(mkAssignment( - mkTopicAssignment(fooTopicId, 1, 2, 3), - mkTopicAssignment(barTopicId, 1, 2, 3) - )); - - MemberSubscription subscriptionSpec = createMemberSubscriptionAndAssignment( - member, - assignment, - topicResolver - ); - - assertEquals(new MemberSubscriptionAndAssignmentImpl( - Optional.of("rackId"), - Optional.of("instanceId"), - new TopicIds(Set.of("bar", "foo", "zar"), topicsImage), - assignment - ), subscriptionSpec); - } - @Test public void testEmpty() { TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext( @@ -387,23 +412,27 @@ public void testAssignmentSwapped() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(3, result.records().size()); - - assertUnorderedListEquals(Arrays.asList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5, 6), - mkTopicAssignment(barTopicId, 4, 5, 6) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( - mkTopicAssignment(fooTopicId, 1, 2, 3), - mkTopicAssignment(barTopicId, 1, 2, 3) - )) - ), result.records().subList(0, 2)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(2)); + assertUnorderedRecordsEquals( + List.of( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5, 6), + mkTopicAssignment(barTopicId, 4, 5, 6) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2, 3), + mkTopicAssignment(barTopicId, 1, 2, 3) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); 
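// assertUnorderedRecordsEquals appears to compare the records group by group: the per-member
// target assignment records above may be written in any order, while the epoch record is
// expected last. expectedAssignment below captures the resulting assignment keyed by member id.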
expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment( @@ -457,27 +486,31 @@ public void testNewMember() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(4, result.records().size()); - - assertUnorderedListEquals(Arrays.asList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( - mkTopicAssignment(fooTopicId, 1, 2), - mkTopicAssignment(barTopicId, 1, 2) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4), - mkTopicAssignment(barTopicId, 3, 4) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( - mkTopicAssignment(fooTopicId, 5, 6), - mkTopicAssignment(barTopicId, 5, 6) - )) - ), result.records().subList(0, 3)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(3)); + assertUnorderedRecordsEquals( + List.of( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2), + mkTopicAssignment(barTopicId, 1, 2) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4), + mkTopicAssignment(barTopicId, 3, 4) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6), + mkTopicAssignment(barTopicId, 5, 6) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( @@ -544,27 +577,31 @@ public void testUpdateMember() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(4, result.records().size()); - - assertUnorderedListEquals(Arrays.asList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( - mkTopicAssignment(fooTopicId, 1, 2), - mkTopicAssignment(barTopicId, 1, 2) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4), - mkTopicAssignment(barTopicId, 3, 4) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( - mkTopicAssignment(fooTopicId, 5, 6), - mkTopicAssignment(barTopicId, 5, 6) - )) - ), result.records().subList(0, 3)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(3)); + assertUnorderedRecordsEquals( + List.of( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2), + mkTopicAssignment(barTopicId, 1, 2) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4), + mkTopicAssignment(barTopicId, 3, 4) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6), + mkTopicAssignment(barTopicId, 5, 6) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( @@ -625,24 +662,28 @@ public void testPartialAssignmentUpdate() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(3, result.records().size()); - - // Member 1 has no record because its 
assignment did not change. - assertUnorderedListEquals(Arrays.asList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( - mkTopicAssignment(fooTopicId, 3, 4, 5), - mkTopicAssignment(barTopicId, 3, 4, 5) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( - mkTopicAssignment(fooTopicId, 6), - mkTopicAssignment(barTopicId, 6) - )) - ), result.records().subList(0, 2)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(2)); + assertUnorderedRecordsEquals( + List.of( + List.of( + // Member 1 has no record because its assignment did not change. + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4, 5), + mkTopicAssignment(barTopicId, 3, 4, 5) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( + mkTopicAssignment(fooTopicId, 6), + mkTopicAssignment(barTopicId, 6) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( @@ -700,23 +741,27 @@ public void testDeleteMember() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(3, result.records().size()); - - assertUnorderedListEquals(Arrays.asList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( - mkTopicAssignment(fooTopicId, 1, 2, 3), - mkTopicAssignment(barTopicId, 1, 2, 3) - )), - newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( - mkTopicAssignment(fooTopicId, 4, 5, 6), - mkTopicAssignment(barTopicId, 4, 5, 6) - )) - ), result.records().subList(0, 2)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(2)); + assertUnorderedRecordsEquals( + List.of( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2, 3), + mkTopicAssignment(barTopicId, 1, 2, 3) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 4, 5, 6), + mkTopicAssignment(barTopicId, 4, 5, 6) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( @@ -779,19 +824,19 @@ public void testReplaceStaticMember() { TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); - assertEquals(2, result.records().size()); - - assertUnorderedListEquals(Collections.singletonList( - newConsumerGroupTargetAssignmentRecord("my-group", "member-3-a", mkAssignment( - mkTopicAssignment(fooTopicId, 5, 6), - mkTopicAssignment(barTopicId, 5, 6) - )) - ), result.records().subList(0, 1)); - - assertEquals(newConsumerGroupTargetAssignmentEpochRecord( - "my-group", - 20 - ), result.records().get(1)); + assertRecordsEquals( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-3-a", mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6), + mkTopicAssignment(barTopicId, 5, 6) + )), + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ), + result.records() + ); Map expectedAssignment = new HashMap<>(); expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( @@ -810,4 +855,84 @@ public void 
testReplaceStaticMember() { assertEquals(expectedAssignment, result.targetAssignment()); } + + @Test + public void testRegularExpressions() { + TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext( + "my-group", + 20 + ); + + Uuid fooTopicId = context.addTopicMetadata("foo", 6); + Uuid barTopicId = context.addTopicMetadata("bar", 6); + + context.addGroupMember("member-1", Arrays.asList("bar", "zar"), "foo*", mkAssignment()); + + context.addGroupMember("member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment()); + + context.addGroupMember("member-3", Collections.emptyList(), "foo*", mkAssignment()); + + context.addResolvedRegularExpression("foo*", new ResolvedRegularExpression( + Collections.singleton("foo"), + 10L, + 12345L + )); + + context.prepareMemberAssignment("member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2), + mkTopicAssignment(barTopicId, 1, 2, 3) + )); + + context.prepareMemberAssignment("member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4), + mkTopicAssignment(barTopicId, 4, 5, 6) + )); + + context.prepareMemberAssignment("member-3", mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6) + )); + + TargetAssignmentBuilder.TargetAssignmentResult result = context.build(); + + assertUnorderedRecordsEquals( + List.of( + List.of( + newConsumerGroupTargetAssignmentRecord("my-group", "member-1", mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2), + mkTopicAssignment(barTopicId, 1, 2, 3) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-2", mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4), + mkTopicAssignment(barTopicId, 4, 5, 6) + )), + newConsumerGroupTargetAssignmentRecord("my-group", "member-3", mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6) + )) + ), + List.of( + newConsumerGroupTargetAssignmentEpochRecord( + "my-group", + 20 + ) + ) + ), + result.records() + ); + + Map expectedAssignment = new HashMap<>(); + expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 1, 2), + mkTopicAssignment(barTopicId, 1, 2, 3) + ))); + expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 3, 4), + mkTopicAssignment(barTopicId, 4, 5, 6) + ))); + + expectedAssignment.put("member-3", new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 5, 6) + ))); + + assertEquals(expectedAssignment, result.targetAssignment()); + } } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/UnionSetTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/UnionSetTest.java new file mode 100644 index 0000000000000..2653c7385f184 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/UnionSetTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.modern; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.IntStream; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class UnionSetTest { + @Test + public void testSetsCannotBeNull() { + assertThrows(NullPointerException.class, () -> new UnionSet(Collections.emptySet(), null)); + assertThrows(NullPointerException.class, () -> new UnionSet(null, Collections.emptySet())); + } + + @Test + public void testUnion() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + List result = new ArrayList<>(union); + result.sort(Integer::compareTo); + + assertEquals(List.of(1, 2, 3, 4, 5), result); + } + + @Test + public void testSize() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + assertEquals(5, union.size()); + } + + @Test + public void testIsEmpty() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + assertFalse(union.isEmpty()); + + union = new UnionSet<>( + Set.of(1, 2, 3), + Collections.emptySet() + ); + + assertFalse(union.isEmpty()); + + union = new UnionSet<>( + Collections.emptySet(), + Set.of(2, 3, 4, 5) + ); + + assertFalse(union.isEmpty()); + + union = new UnionSet<>( + Collections.emptySet(), + Collections.emptySet() + ); + assertTrue(union.isEmpty()); + } + + @Test + public void testContains() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + IntStream.range(1, 6).forEach(item -> assertTrue(union.contains(item))); + + assertFalse(union.contains(0)); + assertFalse(union.contains(6)); + } + + @Test + public void testToArray() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + Object[] expected = {1, 2, 3, 4, 5}; + Object[] actual = union.toArray(); + Arrays.sort(actual); + assertArrayEquals(expected, actual); + } + + @Test + public void testToArrayWithArrayParameter() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + Integer[] input = new Integer[5]; + Integer[] expected = {1, 2, 3, 4, 5}; + union.toArray(input); + Arrays.sort(input); + assertArrayEquals(expected, input); + } + + @Test + public void testEquals() { + UnionSet union = new UnionSet<>( + Set.of(1, 2, 3), + Set.of(2, 3, 4, 5) + ); + + assertEquals(Set.of(1, 2, 3, 4, 5), union); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupBuilder.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupBuilder.java index 4c044323d063c..800073f42bc9f 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupBuilder.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupBuilder.java @@ -37,6 +37,7 @@ public class ConsumerGroupBuilder { private final Map members = new HashMap<>(); private final Map assignments = new HashMap<>(); private Map 
subscriptionMetadata; + private final Map resolvedRegularExpressions = new HashMap<>(); public ConsumerGroupBuilder(String groupId, int groupEpoch) { this.groupId = groupId; @@ -49,6 +50,14 @@ public ConsumerGroupBuilder withMember(ConsumerGroupMember member) { return this; } + public ConsumerGroupBuilder withResolvedRegularExpression( + String regex, + ResolvedRegularExpression resolvedRegularExpression + ) { + this.resolvedRegularExpressions.put(regex, resolvedRegularExpression); + return this; + } + public ConsumerGroupBuilder withSubscriptionMetadata(Map subscriptionMetadata) { this.subscriptionMetadata = subscriptionMetadata; return this; @@ -72,6 +81,11 @@ public List build(TopicsImage topicsImage) { records.add(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, member)) ); + // Add resolved regular expressions. + resolvedRegularExpressions.forEach((regex, resolvedRegularExpression) -> + records.add(GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionRecord(groupId, regex, resolvedRegularExpression)) + ); + // Add subscription metadata. if (subscriptionMetadata == null) { subscriptionMetadata = new HashMap<>(); diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMemberTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMemberTest.java index 96122e79de3c4..658e6c23260db 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMemberTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupMemberTest.java @@ -26,6 +26,8 @@ import org.apache.kafka.image.MetadataImage; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import java.util.ArrayList; import java.util.Arrays; @@ -246,8 +248,9 @@ public void testUpdateWithConsumerGroupCurrentMemberAssignmentValue() { assertEquals(mkAssignment(mkTopicAssignment(topicId2, 3, 4, 5)), member.partitionsPendingRevocation()); } - @Test - public void testAsConsumerGroupDescribeMember() { + @ParameterizedTest(name = "{displayName}.withClassicMemberMetadata={0}") + @ValueSource(booleans = {true, false}) + public void testAsConsumerGroupDescribeMember(boolean withClassicMemberMetadata) { Uuid topicId1 = Uuid.randomUuid(); Uuid topicId2 = Uuid.randomUuid(); Uuid topicId3 = Uuid.randomUuid(); @@ -287,6 +290,8 @@ public void testAsConsumerGroupDescribeMember() { .setClientHost(clientHost) .setSubscribedTopicNames(subscribedTopicNames) .setSubscribedTopicRegex(subscribedTopicRegex) + .setClassicMemberMetadata(withClassicMemberMetadata ? new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSupportedProtocols(toClassicProtocolCollection("range")) : null) .build(); ConsumerGroupDescribeResponseData.Member actual = member.asConsumerGroupDescribeMember(targetAssignment, metadataImage.topics()); @@ -315,7 +320,8 @@ public void testAsConsumerGroupDescribeMember() { .setTopicName("topic4") .setPartitions(new ArrayList<>(item.getValue())) ).collect(Collectors.toList())) - ); + ) + .setMemberType(withClassicMemberMetadata ? 
(byte) 0 : (byte) 1); assertEquals(expected, actual); } @@ -344,7 +350,8 @@ public void testAsConsumerGroupDescribeWithTopicNameNotFound() { ConsumerGroupDescribeResponseData.Member expected = new ConsumerGroupDescribeResponseData.Member() .setMemberId(memberId.toString()) - .setSubscribedTopicRegex(""); + .setSubscribedTopicRegex("") + .setMemberType((byte) 1); ConsumerGroupDescribeResponseData.Member actual = member.asConsumerGroupDescribeMember(null, new MetadataImageBuilder() .addTopic(Uuid.randomUuid(), "foo", 3) diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java index 39140289ea471..c2e091aa3548b 100644 --- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java @@ -46,12 +46,14 @@ import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard; import org.apache.kafka.coordinator.group.modern.Assignment; import org.apache.kafka.coordinator.group.modern.MemberState; +import org.apache.kafka.coordinator.group.modern.SubscriptionCount; import org.apache.kafka.coordinator.group.modern.TopicMetadata; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.timeline.SnapshotRegistry; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import java.util.ArrayList; import java.util.Arrays; @@ -66,9 +68,7 @@ import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; -import static org.apache.kafka.coordinator.group.Assertions.assertRecordEquals; -import static org.apache.kafka.coordinator.group.Assertions.assertRecordsEquals; -import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedListEquals; +import static org.apache.kafka.coordinator.group.Assertions.assertUnorderedRecordsEquals; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkAssignment; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkTopicAssignment; import static org.apache.kafka.coordinator.group.api.assignor.SubscriptionType.HETEROGENEOUS; @@ -1037,7 +1037,7 @@ public void testMetadataRefreshDeadline() { } @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + @ApiKeyVersionsSource(apiKey = ApiKeys.TXN_OFFSET_COMMIT) public void testValidateTransactionalOffsetCommit(short version) { boolean isTransactional = true; ConsumerGroup group = createConsumerGroup("group-foo"); @@ -1063,6 +1063,9 @@ public void testValidateTransactionalOffsetCommit(short version) { // This should succeed. group.validateOffsetCommit("member-id", "", 0, isTransactional, version); + + // This should succeed. + group.validateOffsetCommit("", null, -1, isTransactional, version); } @ParameterizedTest @@ -1093,6 +1096,8 @@ public void testValidateOffsetCommit(short version) { // A call from the admin client should fail as the group is not empty. assertThrows(UnknownMemberIdException.class, () -> group.validateOffsetCommit("", "", -1, isTransactional, version)); + assertThrows(UnknownMemberIdException.class, () -> + group.validateOffsetCommit("", null, -1, isTransactional, version)); // The member epoch is stale. 
if (version >= 9) { @@ -1278,9 +1283,11 @@ public void testAsDescribedGroup() { new ConsumerGroupDescribeResponseData.Member() .setMemberId("member1") .setSubscribedTopicNames(Collections.singletonList("foo")) - .setSubscribedTopicRegex(""), + .setSubscribedTopicRegex("") + .setMemberType((byte) 1), new ConsumerGroupDescribeResponseData.Member().setMemberId("member2") .setSubscribedTopicRegex("") + .setMemberType((byte) 1) )); ConsumerGroupDescribeResponseData.DescribedGroup actual = group.asDescribedGroup(1, "", new MetadataImageBuilder().build().topics()); @@ -1429,8 +1436,8 @@ public void testNumClassicProtocolMembers() { .build(); consumerGroup.updateMember(member2); assertEquals(1, consumerGroup.numClassicProtocolMembers()); - assertFalse(consumerGroup.allMembersUseClassicProtocolExcept("member-1")); - assertTrue(consumerGroup.allMembersUseClassicProtocolExcept("member-2")); + assertFalse(consumerGroup.allMembersUseClassicProtocolExcept(member1)); + assertTrue(consumerGroup.allMembersUseClassicProtocolExcept(member2)); // The group has member 2 (using the consumer protocol) and member 3 (using the consumer protocol). consumerGroup.removeMember(member1.memberId()); @@ -1438,7 +1445,7 @@ public void testNumClassicProtocolMembers() { .build(); consumerGroup.updateMember(member3); assertEquals(0, consumerGroup.numClassicProtocolMembers()); - assertFalse(consumerGroup.allMembersUseClassicProtocolExcept("member-2")); + assertFalse(consumerGroup.allMembersUseClassicProtocolExcept(member2)); // The group has member 2 (using the classic protocol). consumerGroup.removeMember(member2.memberId()); @@ -1450,50 +1457,64 @@ public void testNumClassicProtocolMembers() { assertEquals(1, consumerGroup.numClassicProtocolMembers()); } - @Test - public void testCreateGroupTombstoneRecordsWithReplacedMember() { - String groupId = "group"; - String memberId1 = "member-1"; - String memberId2 = "member-2"; - String newMemberId2 = "new-member-2"; - - ConsumerGroup consumerGroup = createConsumerGroup(groupId); + @ParameterizedTest + @CsvSource({ + "5, 5, 0, 0, false", // remove no consumer protocol members + "5, 5, 0, 4, false", // remove 4 out of 5 consumer protocol members + "5, 5, 1, 4, false", // remove 4 out of 5 consumer protocol members and 1 classic protocol member + "5, 5, 0, 5, true", // remove 5 out of 5 consumer protocol members + "5, 5, 1, 5, true", // remove 5 out of 5 consumer protocol members and 1 classic protocol member + "5, 5, 5, 5, true", // an empty consumer group is considered to have only classic protocol members + "5, 0, 0, 0, true", // a consumer group with only classic protocol members, which should not happen + "5, 0, 1, 0, true", // a consumer group with only classic protocol members, which should not happen + }) + public void testAllMembersUseClassicProtocolExcept( + int numClassicProtocolMembers, + int numConsumerProtocolMembers, + int numRemovedClassicProtocolMembers, + int numRemovedConsumerProtocolMembers, + boolean expectedResult + ) { + ConsumerGroup consumerGroup = createConsumerGroup("foo"); List protocols = new ArrayList<>(); protocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(new byte[0])); - ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1) - .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() - .setSupportedProtocols(protocols)) - .build(); - consumerGroup.updateMember(member1); + List classicProtocolMembers = new ArrayList<>(); + List consumerProtocolMembers = new 
ArrayList<>(); + + // Add classic and consumer protocol members to the group + for (int i = 0; i < numClassicProtocolMembers; i++) { + ConsumerGroupMember member = new ConsumerGroupMember.Builder("classic-member-" + i) + .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() + .setSupportedProtocols(protocols)) + .build(); + classicProtocolMembers.add(member); + consumerGroup.updateMember(member); + } - ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2) - .setInstanceId("instance-id-2") - .build(); - consumerGroup.updateMember(member2); + for (int i = 0; i < numConsumerProtocolMembers; i++) { + ConsumerGroupMember member = new ConsumerGroupMember.Builder("consumer-member-" + i) + .build(); + consumerProtocolMembers.add(member); + consumerGroup.updateMember(member); + } - List records = new ArrayList<>(); - consumerGroup.createGroupTombstoneRecordsWithReplacedMember(records, memberId2, newMemberId2); - - List expectedRecords = Arrays.asList( - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId1), - GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, newMemberId2), - GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId), - GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId) - ); - assertEquals(expectedRecords.size(), records.size()); - assertUnorderedListEquals(expectedRecords.subList(0, 2), records.subList(0, 2)); - assertUnorderedListEquals(expectedRecords.subList(2, 4), records.subList(2, 4)); - assertRecordEquals(expectedRecords.get(4), records.get(4)); - assertUnorderedListEquals(expectedRecords.subList(5, 7), records.subList(5, 7)); - assertRecordsEquals(expectedRecords.subList(7, 9), records.subList(7, 9)); + assertEquals(numClassicProtocolMembers, consumerGroup.numClassicProtocolMembers()); + + // Test allMembersUseClassicProtocolExcept + Set removedMembers = new HashSet<>(); + + for (int i = 0; i < numRemovedClassicProtocolMembers; i++) { + removedMembers.add(classicProtocolMembers.get(i)); + } + + for (int i = 0; i < numRemovedConsumerProtocolMembers; i++) { + removedMembers.add(consumerProtocolMembers.get(i)); + } + + assertEquals(expectedResult, consumerGroup.allMembersUseClassicProtocolExcept(removedMembers)); } @Test @@ -1615,26 +1636,39 @@ public void testSubscribedRegularExpressionCount() { ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") .setSubscribedTopicRegex("regex1") .build(); + ConsumerGroupMember member4 = new ConsumerGroupMember.Builder("member4") + .build(); // Assert the initial state. + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 1. 
consumerGroup.updateMember(member1); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 2. consumerGroup.updateMember(member2); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Add member 3. consumerGroup.updateMember(member3); + assertEquals(0, consumerGroup.numSubscribedMembers("")); + assertEquals(2, consumerGroup.numSubscribedMembers("regex1")); + assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); + assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); + + // Add member 4. + consumerGroup.updateMember(member4); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(2, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); @@ -1644,24 +1678,28 @@ public void testSubscribedRegularExpressionCount() { .setSubscribedTopicRegex("regex2") .build(); consumerGroup.updateMember(member3); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(1, consumerGroup.numSubscribedMembers("regex1")); assertEquals(2, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 1. consumerGroup.removeMember(member1.memberId()); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(2, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 2. consumerGroup.removeMember(member2.memberId()); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(1, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); // Remove member 3. consumerGroup.removeMember(member3.memberId()); + assertEquals(0, consumerGroup.numSubscribedMembers("")); assertEquals(0, consumerGroup.numSubscribedMembers("regex1")); assertEquals(0, consumerGroup.numSubscribedMembers("regex2")); assertEquals(0, consumerGroup.numSubscribedMembers("regex3")); @@ -1684,9 +1722,9 @@ public void testUpdateAndRemoveRegularExpression() { // Verify initial state. 
assertEquals( Map.of( - "foo", 2, - "bar", 2, - "zar", 1 + "foo", new SubscriptionCount(2, 0), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); @@ -1703,9 +1741,9 @@ public void testUpdateAndRemoveRegularExpression() { assertEquals( Map.of( - "foo", 3, - "bar", 3, - "zar", 1 + "foo", new SubscriptionCount(2, 1), + "bar", new SubscriptionCount(2, 1), + "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); @@ -1722,10 +1760,10 @@ public void testUpdateAndRemoveRegularExpression() { assertEquals( Map.of( - "foo", 3, - "bar", 3, - "zar", 1, - "foobar", 1 + "foo", new SubscriptionCount(2, 1), + "bar", new SubscriptionCount(2, 1), + "zar", new SubscriptionCount(1, 0), + "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); @@ -1742,10 +1780,10 @@ public void testUpdateAndRemoveRegularExpression() { assertEquals( Map.of( - "foo", 3, - "bar", 2, - "zar", 1, - "foobar", 1 + "foo", new SubscriptionCount(2, 1), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0), + "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); @@ -1755,10 +1793,10 @@ public void testUpdateAndRemoveRegularExpression() { assertEquals( Map.of( - "foo", 2, - "bar", 2, - "zar", 1, - "foobar", 1 + "foo", new SubscriptionCount(2, 0), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0), + "foobar", new SubscriptionCount(0, 1) ), consumerGroup.subscribedTopicNames() ); @@ -1768,11 +1806,498 @@ public void testUpdateAndRemoveRegularExpression() { assertEquals( Map.of( - "foo", 2, - "bar", 2, - "zar", 1 + "foo", new SubscriptionCount(2, 0), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.subscribedTopicNames() + ); + } + + @Test + public void testComputeSubscribedTopicNamesWithoutDeletedMembers() { + ConsumerGroup consumerGroup = createConsumerGroup("foo"); + + ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") + .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .build(); + consumerGroup.updateMember(member1); + + ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") + .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .build(); + consumerGroup.updateMember(member2); + + ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") + .setSubscribedTopicRegex("foo*") + .build(); + consumerGroup.updateMember(member3); + + ConsumerGroupMember member4 = new ConsumerGroupMember.Builder("member4") + .setSubscribedTopicRegex("foo*") + .build(); + consumerGroup.updateMember(member4); + + ConsumerGroupMember member5 = new ConsumerGroupMember.Builder("member5") + .setSubscribedTopicRegex("bar*") + .build(); + consumerGroup.updateMember(member5); + + ConsumerGroupMember member6 = new ConsumerGroupMember.Builder("member6") + .setSubscribedTopicRegex("bar*") + .build(); + consumerGroup.updateMember(member6); + + consumerGroup.updateResolvedRegularExpression( + "foo*", + new ResolvedRegularExpression( + Set.of("foo", "fooo"), + 10L, + 12345L + ) + ); + + consumerGroup.updateResolvedRegularExpression( + "bar*", + new ResolvedRegularExpression( + Set.of("bar", "barr"), + 10L, + 12345L + ) + ); + + // Verify initial state. 
+ assertEquals( + Map.of( + "foo", new SubscriptionCount(2, 1), + "fooo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(2, 1), + "barr", new SubscriptionCount(0, 1), + "zar", new SubscriptionCount(1, 0) ), consumerGroup.subscribedTopicNames() ); + + // Compute with removed members and regexes. + assertEquals( + Map.of( + "foo", new SubscriptionCount(1, 0), + "bar", new SubscriptionCount(1, 1), + "barr", new SubscriptionCount(0, 1), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.computeSubscribedTopicNamesWithoutDeletedMembers( + Set.of(member2, member3, member4, member5), + Set.of("foo*") + ) + ); + } + + @Test + public void testComputeSubscribedTopicNames() { + ConsumerGroup consumerGroup = createConsumerGroup("foo"); + + ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") + .setSubscribedTopicNames(List.of("foo", "bar", "zar")) + .build(); + consumerGroup.updateMember(member1); + + ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") + .setSubscribedTopicNames(List.of("foo", "bar")) + .build(); + consumerGroup.updateMember(member2); + + ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") + .setSubscribedTopicNames(List.of("foo")) + .setSubscribedTopicRegex("foo*") + .build(); + consumerGroup.updateMember(member3); + + consumerGroup.updateResolvedRegularExpression( + "foo*", + new ResolvedRegularExpression( + Set.of("foo", "fooo"), + 10L, + 12345L + ) + ); + + // Verify initial state. + assertEquals( + Map.of( + "foo", new SubscriptionCount(3, 1), + "fooo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.subscribedTopicNames() + ); + + // Compute subscribed topic names without changing anything. + assertEquals( + Map.of( + "foo", new SubscriptionCount(3, 1), + "fooo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.computeSubscribedTopicNames(member3, member3) + ); + + // Compute subscribed topic names with removing the regex. + assertEquals( + Map.of( + "foo", new SubscriptionCount(3, 0), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.computeSubscribedTopicNames( + member3, + new ConsumerGroupMember.Builder(member3) + .setSubscribedTopicRegex("") + .build() + ) + ); + + // Compute subscribed topic names with removing the names. + assertEquals( + Map.of( + "foo", new SubscriptionCount(2, 1), + "fooo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.computeSubscribedTopicNames( + member3, + new ConsumerGroupMember.Builder(member3) + .setSubscribedTopicNames(Collections.emptyList()) + .build() + ) + ); + + // Compute subscribed topic names with removing both. 
+ assertEquals( + Map.of( + "foo", new SubscriptionCount(2, 0), + "bar", new SubscriptionCount(2, 0), + "zar", new SubscriptionCount(1, 0) + ), + consumerGroup.computeSubscribedTopicNames( + member3, + new ConsumerGroupMember.Builder(member3) + .setSubscribedTopicNames(Collections.emptyList()) + .setSubscribedTopicRegex("") + .build() + ) + ); + } + + @Test + public void testCreateGroupTombstoneRecords() { + ConsumerGroup consumerGroup = createConsumerGroup("foo"); + consumerGroup.setGroupEpoch(10); + + ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") + .setMemberEpoch(10) + .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member1); + + ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") + .setMemberEpoch(10) + .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member2); + + ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") + .setMemberEpoch(10) + .setSubscribedTopicRegex("foo*") + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member3); + + consumerGroup.updateResolvedRegularExpression( + "foo*", + new ResolvedRegularExpression( + Set.of("foo", "fooo"), + 10L, + 12345L + ) + ); + + consumerGroup.updateTargetAssignment("member1", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + consumerGroup.updateTargetAssignment("member2", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + consumerGroup.updateTargetAssignment("member3", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + List records = new ArrayList<>(); + consumerGroup.createGroupTombstoneRecords(records); + + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member3") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member3") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("foo") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member3") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone("foo", "foo*") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord("foo") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("foo") + ) + ), + records + ); + } + + @Test + public void testCreateGroupTombstoneRecordsWithReplacedMember() { + ConsumerGroup consumerGroup = 
createConsumerGroup("foo"); + consumerGroup.setGroupEpoch(10); + + ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1") + .setMemberEpoch(10) + .setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member1); + + ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2") + .setMemberEpoch(10) + .setSubscribedTopicNames(Arrays.asList("foo", "bar")) + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member2); + + ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3") + .setMemberEpoch(10) + .setSubscribedTopicRegex("foo*") + .setAssignedPartitions(mkAssignment(mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2))) + .build(); + consumerGroup.updateMember(member3); + + consumerGroup.updateResolvedRegularExpression( + "foo*", + new ResolvedRegularExpression( + Set.of("foo", "fooo"), + 10L, + 12345L + ) + ); + + consumerGroup.updateTargetAssignment("member1", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + consumerGroup.updateTargetAssignment("member2", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + consumerGroup.updateTargetAssignment("member3", new Assignment(mkAssignment( + mkTopicAssignment(Uuid.randomUuid(), 0, 1, 2)) + )); + + List records = new ArrayList<>(); + consumerGroup.createGroupTombstoneRecordsWithReplacedMember(records, "member3", "member4"); + + assertUnorderedRecordsEquals( + List.of( + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord("foo", "member4") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord("foo", "member4") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("foo") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member1"), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member2"), + GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord("foo", "member4") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupRegularExpressionTombstone("foo", "foo*") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord("foo") + ), + List.of( + GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("foo") + ) + ), + records + ); + } + + @Test + public void testSubscriptionType() { + assertEquals( + HOMOGENEOUS, + ConsumerGroup.subscriptionType( + Collections.emptyMap(), + Collections.emptyMap(), + 0 + ) + ); + + assertEquals( + HOMOGENEOUS, + ConsumerGroup.subscriptionType( + Collections.emptyMap(), + Map.of("foo", new SubscriptionCount(5, 0)), + 5 + ) + ); + + assertEquals( + HETEROGENEOUS, + ConsumerGroup.subscriptionType( + Collections.emptyMap(), + Map.of( + "foo", new SubscriptionCount(4, 0), + "bar", new SubscriptionCount(1, 0) + ), + 5 
+ ) + ); + + assertEquals( + HOMOGENEOUS, + ConsumerGroup.subscriptionType( + Map.of("foo*", 5), + Map.of("foo", new SubscriptionCount(0, 1)), + 5 + ) + ); + + assertEquals( + HOMOGENEOUS, + ConsumerGroup.subscriptionType( + Map.of("foo*", 5), + Map.of( + "foo", new SubscriptionCount(0, 1), + "food", new SubscriptionCount(0, 1)), + 5 + ) + ); + + assertEquals( + HETEROGENEOUS, + ConsumerGroup.subscriptionType( + Map.of("foo*", 5), + Map.of("foo", new SubscriptionCount(1, 1)), + 5 + ) + ); + + assertEquals( + HETEROGENEOUS, + ConsumerGroup.subscriptionType( + Map.of("foo*", 5), + Map.of( + "foo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(1, 0) + ), + 5 + ) + ); + + assertEquals( + HETEROGENEOUS, + ConsumerGroup.subscriptionType( + Map.of("foo*", 4, "bar*", 1), + Map.of( + "foo", new SubscriptionCount(0, 1), + "bar", new SubscriptionCount(0, 1)), + 5 + ) + ); + } + + @Test + public void testComputeSubscribedRegularExpressions() { + ConsumerGroup consumerGroup = createConsumerGroup("foo"); + consumerGroup.setGroupEpoch(10); + + consumerGroup.updateMember(new ConsumerGroupMember.Builder("m1") + .setSubscribedTopicRegex("foo*") + .build()); + + consumerGroup.updateMember(new ConsumerGroupMember.Builder("m2") + .setSubscribedTopicRegex("foo*") + .build()); + + assertEquals( + Map.of("foo*", 3), + consumerGroup.computeSubscribedRegularExpressions( + null, + new ConsumerGroupMember.Builder("m3") + .setSubscribedTopicRegex("foo*") + .build() + ) + ); + + assertEquals( + Map.of("foo*", 1), + consumerGroup.computeSubscribedRegularExpressions( + new ConsumerGroupMember.Builder("m2") + .setSubscribedTopicRegex("foo*") + .build(), + null + ) + ); + + assertEquals( + Map.of("foo*", 2, "bar*", 1), + consumerGroup.computeSubscribedRegularExpressions( + null, + new ConsumerGroupMember.Builder("m4") + .setSubscribedTopicRegex("bar*") + .build() + ) + ); + + assertEquals( + Map.of("foo*", 1, "bar*", 1), + consumerGroup.computeSubscribedRegularExpressions( + new ConsumerGroupMember.Builder("m2") + .setSubscribedTopicRegex("foo*") + .build(), + new ConsumerGroupMember.Builder("m2") + .setSubscribedTopicRegex("bar*") + .build() + ) + ); } } diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/AssignmentTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/AssignmentTest.java new file mode 100644 index 0000000000000..7c0baf273648e --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/AssignmentTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
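The ConsumerGroupTest changes above switch the per-topic subscriber bookkeeping from a plain int to a SubscriptionCount pair, so each topic tracks how many members subscribe to it by explicit name versus through a resolved regular expression. A minimal sketch of that idea, assuming hypothetical byNames/byRegex field names (the diff only shows a two-argument constructor), not the actual Kafka implementation:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    public class SubscriptionCountSketch {
        // Assumed shape; the real class in the diff only exposes a two-int constructor.
        record SubscriptionCount(int byNames, int byRegex) {
            SubscriptionCount addName() { return new SubscriptionCount(byNames + 1, byRegex); }
            SubscriptionCount addRegex() { return new SubscriptionCount(byNames, byRegex + 1); }
        }

        public static void main(String[] args) {
            Map<String, SubscriptionCount> counts = new HashMap<>();
            // Two members subscribe to "foo" by explicit name ...
            for (int i = 0; i < 2; i++) {
                counts.merge("foo", new SubscriptionCount(1, 0), (old, ignored) -> old.addName());
            }
            // ... and one regex "foo*" resolves to {"foo", "fooo"}.
            for (String topic : Set.of("foo", "fooo")) {
                counts.merge(topic, new SubscriptionCount(0, 1), (old, ignored) -> old.addRegex());
            }
            // Mirrors assertions such as "foo" -> SubscriptionCount(2, 1) and "fooo" -> SubscriptionCount(0, 1).
            System.out.println(counts);
        }
    }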
+ */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.coordinator.group.generated.StreamsGroupTargetAssignmentMemberValue; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.kafka.coordinator.group.streams.TaskAssignmentTestUtil.mkTasks; +import static org.apache.kafka.coordinator.group.streams.TaskAssignmentTestUtil.mkTasksPerSubtopology; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class AssignmentTest { + + static final String SUBTOPOLOGY_1 = "subtopology1"; + static final String SUBTOPOLOGY_2 = "subtopology2"; + static final String SUBTOPOLOGY_3 = "subtopology3"; + + @Test + public void testTasksCannotBeNull() { + assertThrows(NullPointerException.class, () -> new Assignment(null, Collections.emptyMap(), Collections.emptyMap())); + assertThrows(NullPointerException.class, () -> new Assignment(Collections.emptyMap(), null, Collections.emptyMap())); + assertThrows(NullPointerException.class, () -> new Assignment(Collections.emptyMap(), Collections.emptyMap(), null)); + } + + @Test + public void testReturnUnmodifiableTaskAssignments() { + Map> activeTasks = mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_1, 1, 2, 3) + ); + Map> standbyTasks = mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_2, 9, 8, 7) + ); + Map> warmupTasks = mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_3, 4, 5, 6) + ); + Assignment assignment = new Assignment(activeTasks, standbyTasks, warmupTasks); + + assertEquals(activeTasks, assignment.activeTasks()); + assertThrows(UnsupportedOperationException.class, () -> assignment.activeTasks().put("not allowed", Collections.emptySet())); + assertEquals(standbyTasks, assignment.standbyTasks()); + assertThrows(UnsupportedOperationException.class, () -> assignment.standbyTasks().put("not allowed", Collections.emptySet())); + assertEquals(warmupTasks, assignment.warmupTasks()); + assertThrows(UnsupportedOperationException.class, () -> assignment.warmupTasks().put("not allowed", Collections.emptySet())); + } + + @Test + public void testFromTargetAssignmentRecord() { + List activeTasks = new ArrayList<>(); + activeTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_1) + .setPartitions(Arrays.asList(1, 2, 3))); + activeTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_2) + .setPartitions(Arrays.asList(4, 5, 6))); + List standbyTasks = new ArrayList<>(); + standbyTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_1) + .setPartitions(Arrays.asList(7, 8, 9))); + standbyTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_2) + .setPartitions(Arrays.asList(1, 2, 3))); + List warmupTasks = new ArrayList<>(); + warmupTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_1) + .setPartitions(Arrays.asList(4, 5, 6))); + warmupTasks.add(new StreamsGroupTargetAssignmentMemberValue.TaskIds() + .setSubtopologyId(SUBTOPOLOGY_2) + .setPartitions(Arrays.asList(7, 8, 9))); + + StreamsGroupTargetAssignmentMemberValue record = new StreamsGroupTargetAssignmentMemberValue() + .setActiveTasks(activeTasks) + .setStandbyTasks(standbyTasks) + .setWarmupTasks(warmupTasks); + + Assignment assignment = 
Assignment.fromRecord(record); + + assertEquals( + mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_1, 1, 2, 3), + mkTasks(SUBTOPOLOGY_2, 4, 5, 6) + ), + assignment.activeTasks() + ); + assertEquals( + mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_1, 7, 8, 9), + mkTasks(SUBTOPOLOGY_2, 1, 2, 3) + ), + assignment.standbyTasks() + ); + assertEquals( + mkTasksPerSubtopology( + mkTasks(SUBTOPOLOGY_1, 4, 5, 6), + mkTasks(SUBTOPOLOGY_2, 7, 8, 9) + ), + assignment.warmupTasks() + ); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMemberTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMemberTest.java new file mode 100644 index 0000000000000..8c6d3d9088aba --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsGroupMemberTest.java @@ -0,0 +1,429 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; +import org.apache.kafka.coordinator.group.generated.StreamsGroupCurrentMemberAssignmentValue; +import org.apache.kafka.coordinator.group.generated.StreamsGroupCurrentMemberAssignmentValue.TaskIds; +import org.apache.kafka.coordinator.group.generated.StreamsGroupMemberMetadataValue; +import org.apache.kafka.coordinator.group.generated.StreamsGroupMemberMetadataValue.KeyValue; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.apache.kafka.coordinator.group.streams.TaskAssignmentTestUtil.mkTasks; +import static org.apache.kafka.coordinator.group.streams.TaskAssignmentTestUtil.mkTasksPerSubtopology; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class StreamsGroupMemberTest { + + private static final String MEMBER_ID = "member-id"; + private static final int MEMBER_EPOCH = 10; + private static final int PREVIOUS_MEMBER_EPOCH = 9; + private static final MemberState STATE = MemberState.UNRELEASED_TASKS; + private static final String INSTANCE_ID = "instance-id"; + private static final String RACK_ID = "rack-id"; + private static final int REBALANCE_TIMEOUT = 5000; + private static final String CLIENT_ID = "client-id"; + private static final String 
HOSTNAME = "hostname"; + private static final int TOPOLOGY_EPOCH = 3; + private static final String PROCESS_ID = "process-id"; + private static final String SUBTOPOLOGY1 = "subtopology1"; + private static final String SUBTOPOLOGY2 = "subtopology2"; + private static final String SUBTOPOLOGY3 = "subtopology3"; + private static final StreamsGroupMemberMetadataValue.Endpoint USER_ENDPOINT = + new StreamsGroupMemberMetadataValue.Endpoint().setHost("host").setPort(9090); + private static final String CLIENT_TAG_KEY = "client"; + private static final String CLIENT_TAG_VALUE = "tag"; + private static final Map CLIENT_TAGS = mkMap(mkEntry(CLIENT_TAG_KEY, CLIENT_TAG_VALUE)); + private static final List TASKS1 = List.of(1, 2, 3); + private static final List TASKS2 = List.of(4, 5, 6); + private static final List TASKS3 = List.of(7, 8); + private static final List TASKS4 = List.of(3, 2, 1); + private static final List TASKS5 = List.of(6, 5, 4); + private static final List TASKS6 = List.of(9, 7); + private static final Map> ASSIGNED_ACTIVE_TASKS = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY1, TASKS1.toArray(Integer[]::new))); + private static final Map> ASSIGNED_STANDBY_TASKS = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY2, TASKS2.toArray(Integer[]::new))); + private static final Map> ASSIGNED_WARMUP_TASKS = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY1, TASKS3.toArray(Integer[]::new))); + private static final Map> ACTIVE_TASKS_PENDING_REVOCATION = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY2, TASKS4.toArray(Integer[]::new))); + private static final Map> STANDBY_TASKS_PENDING_REVOCATION = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY1, TASKS5.toArray(Integer[]::new))); + private static final Map> WARMUP_TASKS_PENDING_REVOCATION = mkTasksPerSubtopology(mkTasks(SUBTOPOLOGY2, TASKS6.toArray(Integer[]::new))); + + @Test + public void testBuilderWithMemberIdIsNull() { + final Exception exception = assertThrows( + NullPointerException.class, + () -> new StreamsGroupMember.Builder((String) null).build() + ); + assertEquals("memberId cannot be null", exception.getMessage()); + } + + @Test + public void testBuilderWithMemberIsNull() { + final Exception exception = assertThrows( + NullPointerException.class, + () -> new StreamsGroupMember.Builder((StreamsGroupMember) null).build() + ); + assertEquals("member cannot be null", exception.getMessage()); + } + + @Test + public void testBuilderWithDefaults() { + StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID).build(); + + assertEquals(MEMBER_ID, member.memberId()); + assertNull(member.memberEpoch()); + assertNull(member.previousMemberEpoch()); + assertNull(member.state()); + assertNull(member.instanceId()); + assertNull(member.rackId()); + assertNull(member.rebalanceTimeoutMs()); + assertNull(member.clientId()); + assertNull(member.clientHost()); + assertNull(member.topologyEpoch()); + assertNull(member.processId()); + assertNull(member.userEndpoint()); + assertNull(member.clientTags()); + assertNull(member.assignedActiveTasks()); + assertNull(member.assignedStandbyTasks()); + assertNull(member.assignedWarmupTasks()); + assertNull(member.activeTasksPendingRevocation()); + assertNull(member.standbyTasksPendingRevocation()); + assertNull(member.warmupTasksPendingRevocation()); + } + + @Test + public void testBuilderNewMember() { + StreamsGroupMember member = createStreamsGroupMember(); + + assertEquals(MEMBER_ID, member.memberId()); + assertEquals(MEMBER_EPOCH, member.memberEpoch()); + assertEquals(PREVIOUS_MEMBER_EPOCH, member.previousMemberEpoch()); + 
assertEquals(STATE, member.state()); + assertEquals(Optional.of(INSTANCE_ID), member.instanceId()); + assertEquals(Optional.of(RACK_ID), member.rackId()); + assertEquals(CLIENT_ID, member.clientId()); + assertEquals(HOSTNAME, member.clientHost()); + assertEquals(TOPOLOGY_EPOCH, member.topologyEpoch()); + assertEquals(PROCESS_ID, member.processId()); + assertEquals(Optional.of(USER_ENDPOINT), member.userEndpoint()); + assertEquals(CLIENT_TAGS, member.clientTags()); + assertEquals(ASSIGNED_ACTIVE_TASKS, member.assignedActiveTasks()); + assertEquals(ASSIGNED_STANDBY_TASKS, member.assignedStandbyTasks()); + assertEquals(ASSIGNED_WARMUP_TASKS, member.assignedWarmupTasks()); + assertEquals(ACTIVE_TASKS_PENDING_REVOCATION, member.activeTasksPendingRevocation()); + assertEquals(STANDBY_TASKS_PENDING_REVOCATION, member.standbyTasksPendingRevocation()); + assertEquals(WARMUP_TASKS_PENDING_REVOCATION, member.warmupTasksPendingRevocation()); + } + + @Test + public void testBuilderUpdateWithStreamsGroupMemberMetadataValue() { + StreamsGroupMemberMetadataValue record = new StreamsGroupMemberMetadataValue() + .setClientId(CLIENT_ID) + .setClientHost(HOSTNAME) + .setInstanceId(INSTANCE_ID) + .setRackId(RACK_ID) + .setRebalanceTimeoutMs(REBALANCE_TIMEOUT) + .setTopologyEpoch(TOPOLOGY_EPOCH) + .setProcessId(PROCESS_ID) + .setUserEndpoint(USER_ENDPOINT) + .setClientTags(CLIENT_TAGS.entrySet().stream() + .map(e -> new KeyValue().setKey(e.getKey()).setValue(e.getValue())) + .collect(Collectors.toList())); + + StreamsGroupMember member = new StreamsGroupMember.Builder("member-id") + .updateWith(record) + .build(); + + assertEquals(record.clientId(), member.clientId()); + assertEquals(record.clientHost(), member.clientHost()); + assertEquals(Optional.of(record.instanceId()), member.instanceId()); + assertEquals(Optional.of(record.rackId()), member.rackId()); + assertEquals(record.rebalanceTimeoutMs(), member.rebalanceTimeoutMs()); + assertEquals(record.topologyEpoch(), member.topologyEpoch()); + assertEquals(record.processId(), member.processId()); + assertEquals(Optional.of(record.userEndpoint()), member.userEndpoint()); + assertEquals( + record.clientTags().stream().collect(Collectors.toMap(KeyValue::key, KeyValue::value)), + member.clientTags() + ); + assertEquals(MEMBER_ID, member.memberId()); + assertNull(member.memberEpoch()); + assertNull(member.previousMemberEpoch()); + assertNull(member.state()); + assertNull(member.assignedActiveTasks()); + assertNull(member.assignedStandbyTasks()); + assertNull(member.assignedWarmupTasks()); + assertNull(member.activeTasksPendingRevocation()); + assertNull(member.standbyTasksPendingRevocation()); + assertNull(member.warmupTasksPendingRevocation()); + } + + @Test + public void testBuilderUpdateWithConsumerGroupCurrentMemberAssignmentValue() { + StreamsGroupCurrentMemberAssignmentValue record = new StreamsGroupCurrentMemberAssignmentValue() + .setMemberEpoch(MEMBER_EPOCH) + .setPreviousMemberEpoch(PREVIOUS_MEMBER_EPOCH) + .setState(STATE.value()) + .setActiveTasks(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY1).setPartitions(TASKS1))) + .setStandbyTasks(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY2).setPartitions(TASKS2))) + .setWarmupTasks(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY1).setPartitions(TASKS3))) + .setActiveTasksPendingRevocation(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY2).setPartitions(TASKS4))) + .setStandbyTasksPendingRevocation(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY1).setPartitions(TASKS5))) + 
.setWarmupTasksPendingRevocation(List.of(new TaskIds().setSubtopologyId(SUBTOPOLOGY2).setPartitions(TASKS6))); + + StreamsGroupMember member = new StreamsGroupMember.Builder(MEMBER_ID) + .updateWith(record) + .build(); + + assertEquals(MEMBER_ID, member.memberId()); + assertEquals(record.memberEpoch(), member.memberEpoch()); + assertEquals(record.previousMemberEpoch(), member.previousMemberEpoch()); + assertEquals(MemberState.fromValue(record.state()), member.state()); + assertEquals(ASSIGNED_ACTIVE_TASKS, member.assignedActiveTasks()); + assertEquals(ASSIGNED_STANDBY_TASKS, member.assignedStandbyTasks()); + assertEquals(ASSIGNED_WARMUP_TASKS, member.assignedWarmupTasks()); + assertEquals(ACTIVE_TASKS_PENDING_REVOCATION, member.activeTasksPendingRevocation()); + assertEquals(STANDBY_TASKS_PENDING_REVOCATION, member.standbyTasksPendingRevocation()); + assertEquals(WARMUP_TASKS_PENDING_REVOCATION, member.warmupTasksPendingRevocation()); + assertNull(member.instanceId()); + assertNull(member.rackId()); + assertNull(member.rebalanceTimeoutMs()); + assertNull(member.clientId()); + assertNull(member.clientHost()); + assertNull(member.topologyEpoch()); + assertNull(member.processId()); + assertNull(member.userEndpoint()); + assertNull(member.clientTags()); + } + + @Test + public void testBuilderMaybeUpdateMember() { + final StreamsGroupMember member = createStreamsGroupMember(); + + // This is a no-op. + StreamsGroupMember updatedMember = new StreamsGroupMember.Builder(member) + .maybeUpdateRackId(Optional.empty()) + .maybeUpdateInstanceId(Optional.empty()) + .maybeUpdateRebalanceTimeoutMs(OptionalInt.empty()) + .maybeUpdateProcessId(Optional.empty()) + .maybeUpdateTopologyEpoch(OptionalInt.empty()) + .maybeUpdateUserEndpoint(Optional.empty()) + .maybeUpdateClientTags(Optional.empty()) + .build(); + + assertEquals(member, updatedMember); + + final String newRackId = "new" + member.rackId(); + final String newInstanceId = "new" + member.instanceId(); + final Integer newRebalanceTimeout = member.rebalanceTimeoutMs() + 1000; + final String newProcessId = "new" + member.processId(); + final Integer newTopologyEpoch = member.topologyEpoch() + 1; + final StreamsGroupMemberMetadataValue.Endpoint newUserEndpoint = + new StreamsGroupMemberMetadataValue.Endpoint().setHost(member.userEndpoint().get().host() + "2").setPort(9090); + final Map newClientTags = new HashMap<>(member.clientTags()); + newClientTags.put("client2", "tag2"); + + updatedMember = new StreamsGroupMember.Builder(member) + .maybeUpdateRackId(Optional.of(newRackId)) + .maybeUpdateInstanceId(Optional.of(newInstanceId)) + .maybeUpdateRebalanceTimeoutMs(OptionalInt.of(6000)) + .maybeUpdateProcessId(Optional.of(newProcessId)) + .maybeUpdateTopologyEpoch(OptionalInt.of(newTopologyEpoch)) + .maybeUpdateUserEndpoint(Optional.of(newUserEndpoint)) + .maybeUpdateClientTags(Optional.of(newClientTags)) + .build(); + + assertEquals(Optional.of(newRackId), updatedMember.rackId()); + assertEquals(Optional.of(newInstanceId), updatedMember.instanceId()); + assertEquals(newRebalanceTimeout, updatedMember.rebalanceTimeoutMs()); + assertEquals(newProcessId, updatedMember.processId()); + assertEquals(newTopologyEpoch, updatedMember.topologyEpoch()); + assertEquals(Optional.of(newUserEndpoint), updatedMember.userEndpoint()); + assertEquals(newClientTags, updatedMember.clientTags()); + assertEquals(member.memberId(), updatedMember.memberId()); + assertEquals(member.memberEpoch(), updatedMember.memberEpoch()); + assertEquals(member.previousMemberEpoch(), 
updatedMember.previousMemberEpoch()); + assertEquals(member.state(), updatedMember.state()); + assertEquals(member.clientId(), updatedMember.clientId()); + assertEquals(member.clientHost(), updatedMember.clientHost()); + assertEquals(member.assignedActiveTasks(), updatedMember.assignedActiveTasks()); + assertEquals(member.assignedStandbyTasks(), updatedMember.assignedStandbyTasks()); + assertEquals(member.assignedWarmupTasks(), updatedMember.assignedWarmupTasks()); + assertEquals(member.activeTasksPendingRevocation(), updatedMember.activeTasksPendingRevocation()); + assertEquals(member.standbyTasksPendingRevocation(), updatedMember.standbyTasksPendingRevocation()); + assertEquals(member.warmupTasksPendingRevocation(), updatedMember.warmupTasksPendingRevocation()); + } + + @Test + public void testBuilderUpdateMemberEpoch() { + final StreamsGroupMember member = createStreamsGroupMember(); + + final int newMemberEpoch = member.memberEpoch() + 1; + final StreamsGroupMember updatedMember = new StreamsGroupMember.Builder(member) + .updateMemberEpoch(newMemberEpoch) + .build(); + + assertEquals(member.memberId(), updatedMember.memberId()); + assertEquals(newMemberEpoch, updatedMember.memberEpoch()); + // The previous member epoch becomes the old current member epoch. + assertEquals(member.memberEpoch(), updatedMember.previousMemberEpoch()); + assertEquals(member.state(), updatedMember.state()); + assertEquals(member.instanceId(), updatedMember.instanceId()); + assertEquals(member.rackId(), updatedMember.rackId()); + assertEquals(member.rebalanceTimeoutMs(), updatedMember.rebalanceTimeoutMs()); + assertEquals(member.clientId(), updatedMember.clientId()); + assertEquals(member.clientHost(), updatedMember.clientHost()); + assertEquals(member.topologyEpoch(), updatedMember.topologyEpoch()); + assertEquals(member.processId(), updatedMember.processId()); + assertEquals(member.userEndpoint(), updatedMember.userEndpoint()); + assertEquals(member.clientTags(), updatedMember.clientTags()); + assertEquals(member.assignedActiveTasks(), updatedMember.assignedActiveTasks()); + assertEquals(member.assignedStandbyTasks(), updatedMember.assignedStandbyTasks()); + assertEquals(member.assignedWarmupTasks(), updatedMember.assignedWarmupTasks()); + assertEquals(member.activeTasksPendingRevocation(), updatedMember.activeTasksPendingRevocation()); + assertEquals(member.standbyTasksPendingRevocation(), updatedMember.standbyTasksPendingRevocation()); + assertEquals(member.warmupTasksPendingRevocation(), updatedMember.warmupTasksPendingRevocation()); + } + + @Test + public void testReturnUnmodifiableFields() { + final StreamsGroupMember member = createStreamsGroupMember(); + + assertThrows(UnsupportedOperationException.class, () -> member.clientTags().put("not allowed", "")); + assertThrows(UnsupportedOperationException.class, () -> member.assignedActiveTasks().put("not allowed", Collections.emptySet())); + assertThrows(UnsupportedOperationException.class, () -> member.assignedStandbyTasks().put("not allowed", Collections.emptySet())); + assertThrows(UnsupportedOperationException.class, () -> member.assignedWarmupTasks().put("not allowed", Collections.emptySet())); + assertThrows(UnsupportedOperationException.class, () -> member.activeTasksPendingRevocation().put("not allowed", Collections.emptySet())); + assertThrows(UnsupportedOperationException.class, () -> member.standbyTasksPendingRevocation().put("not allowed", Collections.emptySet())); + assertThrows(UnsupportedOperationException.class, () -> 
member.warmupTasksPendingRevocation().put("not allowed", Collections.emptySet())); + } + + @Test + public void testAsStreamsGroupDescribeMember() { + final StreamsGroupMember member = createStreamsGroupMember(); + List assignedTasks1 = Arrays.asList(10, 11, 12); + List assignedTasks2 = Arrays.asList(13, 14, 15); + List assignedTasks3 = Arrays.asList(16, 17, 18); + Assignment targetAssignment = new Assignment( + mkMap(mkEntry(SUBTOPOLOGY1, new HashSet<>(assignedTasks3))), + mkMap(mkEntry(SUBTOPOLOGY2, new HashSet<>(assignedTasks2))), + mkMap(mkEntry(SUBTOPOLOGY3, new HashSet<>(assignedTasks1))) + ); + + StreamsGroupDescribeResponseData.Member actual = member.asStreamsGroupDescribeMember(targetAssignment); + StreamsGroupDescribeResponseData.Member expected = new StreamsGroupDescribeResponseData.Member() + .setMemberId(MEMBER_ID) + .setMemberEpoch(MEMBER_EPOCH) + .setClientId(CLIENT_ID) + .setInstanceId(INSTANCE_ID) + .setRackId(RACK_ID) + .setClientHost(HOSTNAME) + .setProcessId(PROCESS_ID) + .setTopologyEpoch(TOPOLOGY_EPOCH) + .setClientTags(List.of( + new StreamsGroupDescribeResponseData.KeyValue().setKey(CLIENT_TAG_KEY).setValue(CLIENT_TAG_VALUE)) + ) + .setAssignment( + new StreamsGroupDescribeResponseData.Assignment() + .setActiveTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY1) + .setPartitions(TASKS1)) + ) + .setStandbyTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY2) + .setPartitions(TASKS2)) + ) + .setWarmupTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY1) + .setPartitions(TASKS3)) + ) + ) + .setTargetAssignment( + new StreamsGroupDescribeResponseData.Assignment() + .setActiveTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY1) + .setPartitions(assignedTasks3)) + ) + .setStandbyTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY2) + .setPartitions(assignedTasks2)) + ) + .setWarmupTasks(List.of( + new StreamsGroupDescribeResponseData.TaskIds() + .setSubtopologyId(SUBTOPOLOGY3) + .setPartitions(assignedTasks1)) + ) + ) + .setUserEndpoint(new StreamsGroupDescribeResponseData.Endpoint() + .setHost(USER_ENDPOINT.host()) + .setPort(USER_ENDPOINT.port()) + ); + + assertEquals(expected, actual); + } + + @Test + public void testAsStreamsGroupDescribeWithTargetAssignmentNull() { + final StreamsGroupMember member = createStreamsGroupMember(); + StreamsGroupDescribeResponseData.Member streamsGroupDescribeMember = member.asStreamsGroupDescribeMember(null); + + assertEquals(new StreamsGroupDescribeResponseData.Assignment(), streamsGroupDescribeMember.targetAssignment()); + } + + private StreamsGroupMember createStreamsGroupMember() { + return new StreamsGroupMember.Builder(MEMBER_ID) + .setMemberEpoch(MEMBER_EPOCH) + .setPreviousMemberEpoch(PREVIOUS_MEMBER_EPOCH) + .setState(STATE) + .setInstanceId(INSTANCE_ID) + .setRackId(RACK_ID) + .setRebalanceTimeoutMs(REBALANCE_TIMEOUT) + .setClientId(CLIENT_ID) + .setClientHost(HOSTNAME) + .setTopologyEpoch(TOPOLOGY_EPOCH) + .setProcessId(PROCESS_ID) + .setUserEndpoint(USER_ENDPOINT) + .setClientTags(CLIENT_TAGS) + .setAssignedActiveTasks(ASSIGNED_ACTIVE_TASKS) + .setAssignedStandbyTasks(ASSIGNED_STANDBY_TASKS) + .setAssignedWarmupTasks(ASSIGNED_WARMUP_TASKS) + .setActiveTasksPendingRevocation(ACTIVE_TASKS_PENDING_REVOCATION) + .setStandbyTasksPendingRevocation(STANDBY_TASKS_PENDING_REVOCATION) + 
.setWarmupTasksPendingRevocation(WARMUP_TASKS_PENDING_REVOCATION) + .build(); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsTopologyTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsTopologyTest.java new file mode 100644 index 0000000000000..89c785d633e3c --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/StreamsTopologyTest.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class StreamsTopologyTest { + + private static final String SUBTOPOLOGY_ID_1 = "subtopology-1"; + private static final String SUBTOPOLOGY_ID_2 = "subtopology-2"; + private static final String SOURCE_TOPIC_1 = "source-topic-1"; + private static final String SOURCE_TOPIC_2 = "source-topic-2"; + private static final String SOURCE_TOPIC_3 = "source-topic-3"; + private static final String REPARTITION_TOPIC_1 = "repartition-topic-1"; + private static final String REPARTITION_TOPIC_2 = "repartition-topic-2"; + private static final String REPARTITION_TOPIC_3 = "repartition-topic-3"; + private static final String CHANGELOG_TOPIC_1 = "changelog-1"; + private static final String CHANGELOG_TOPIC_2 = "changelog-2"; + private static final String CHANGELOG_TOPIC_3 = "changelog-3"; + + @Test + public void subtopologiesMapShouldNotBeNull() { + final Exception exception = assertThrows(NullPointerException.class, () -> new StreamsTopology(1, null)); + assertEquals("Subtopologies cannot be null.", exception.getMessage()); + } + + @Test + public void topologyEpochShouldNotBeNegative() { + Map subtopologies = mkMap( + mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()) + ); + final Exception exception = assertThrows(IllegalArgumentException.class, () -> new StreamsTopology(-1, subtopologies)); + assertEquals("Topology epoch must be non-negative.", exception.getMessage()); + } + + @Test + public void subtopologiesMapShouldBeImmutable() { + Map subtopologies = mkMap( + 
mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()) + ); + assertThrows( + UnsupportedOperationException.class, + () -> new StreamsTopology(1, subtopologies).subtopologies().put("subtopology-2", mkSubtopology2()) + ); + } + + @Test + public void requiredTopicsShouldBeCorrect() { + Map subtopologies = mkMap( + mkEntry(SUBTOPOLOGY_ID_1, mkSubtopology1()), + mkEntry(SUBTOPOLOGY_ID_2, mkSubtopology2()) + ); + StreamsTopology topology = new StreamsTopology(1, subtopologies); + Set expectedTopics = Set.of( + SOURCE_TOPIC_1, SOURCE_TOPIC_2, SOURCE_TOPIC_3, + REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, REPARTITION_TOPIC_3, + CHANGELOG_TOPIC_1, CHANGELOG_TOPIC_2, CHANGELOG_TOPIC_3 + ); + + assertEquals(expectedTopics, topology.requiredTopics()); + } + + @Test + public void fromRecordShouldCreateCorrectTopology() { + StreamsGroupTopologyValue record = new StreamsGroupTopologyValue() + .setEpoch(1) + .setSubtopologies(Arrays.asList(mkSubtopology1(), mkSubtopology2())); + StreamsTopology topology = StreamsTopology.fromRecord(record); + assertEquals(1, topology.topologyEpoch()); + assertEquals(2, topology.subtopologies().size()); + assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_1)); + assertEquals(mkSubtopology1(), topology.subtopologies().get(SUBTOPOLOGY_ID_1)); + assertTrue(topology.subtopologies().containsKey(SUBTOPOLOGY_ID_2)); + assertEquals(mkSubtopology2(), topology.subtopologies().get(SUBTOPOLOGY_ID_2)); + } + + private Subtopology mkSubtopology1() { + return new Subtopology() + .setSubtopologyId(SUBTOPOLOGY_ID_1) + .setSourceTopics(List.of( + SOURCE_TOPIC_1, + SOURCE_TOPIC_2, + REPARTITION_TOPIC_1, + REPARTITION_TOPIC_2 + )) + .setRepartitionSourceTopics(List.of( + new TopicInfo().setName(REPARTITION_TOPIC_1), + new TopicInfo().setName(REPARTITION_TOPIC_2) + )) + .setRepartitionSinkTopics(List.of( + REPARTITION_TOPIC_3 + )) + .setStateChangelogTopics(List.of( + new TopicInfo().setName(CHANGELOG_TOPIC_1), + new TopicInfo().setName(CHANGELOG_TOPIC_2) + )) + .setCopartitionGroups(List.of( + new StreamsGroupTopologyValue.CopartitionGroup() + .setRepartitionSourceTopics(List.of((short) 0)) + .setSourceTopics(List.of((short) 0)), + new StreamsGroupTopologyValue.CopartitionGroup() + .setRepartitionSourceTopics(List.of((short) 1)) + .setSourceTopics(List.of((short) 1)) + )); + } + + private Subtopology mkSubtopology2() { + return new Subtopology() + .setSubtopologyId(SUBTOPOLOGY_ID_2) + .setSourceTopics(List.of( + SOURCE_TOPIC_3, + REPARTITION_TOPIC_3 + )) + .setRepartitionSourceTopics(List.of( + new TopicInfo().setName(REPARTITION_TOPIC_3) + )) + .setStateChangelogTopics(List.of( + new TopicInfo().setName(CHANGELOG_TOPIC_3) + )); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/TaskAssignmentTestUtil.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/TaskAssignmentTestUtil.java new file mode 100644 index 0000000000000..47668ec84c0f0 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/TaskAssignmentTestUtil.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams; + +import java.util.AbstractMap; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +public class TaskAssignmentTestUtil { + + public static Assignment mkAssignment(final Map<String, Set<Integer>> activeTasks, + final Map<String, Set<Integer>> standbyTasks, + final Map<String, Set<Integer>> warmupTasks) { + return new Assignment( + Collections.unmodifiableMap(Objects.requireNonNull(activeTasks)), + Collections.unmodifiableMap(Objects.requireNonNull(standbyTasks)), + Collections.unmodifiableMap(Objects.requireNonNull(warmupTasks)) + ); + } + + public static Map.Entry<String, Set<Integer>> mkTasks(String subtopologyId, + Integer... tasks) { + return new AbstractMap.SimpleEntry<>( + subtopologyId, + new HashSet<>(List.of(tasks)) + ); + } + + @SafeVarargs + public static Map<String, Set<Integer>> mkTasksPerSubtopology(Map.Entry<String, Set<Integer>>... entries) { + Map<String, Set<Integer>> assignment = new HashMap<>(); + for (Map.Entry<String, Set<Integer>> entry : entries) { + assignment.put(entry.getKey(), entry.getValue()); + } + return assignment; + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImplTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImplTest.java new file mode 100644 index 0000000000000..5deccb9717f17 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/GroupSpecImplTest.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
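A short sketch of how the helpers above are meant to compose in tests, assuming static imports of mkAssignment, mkTasksPerSubtopology and mkTasks; the subtopology ids and task numbers are illustrative:

    // Sketch: an Assignment with two active subtopologies, one standby set and no warm-up tasks
    Assignment assignment = mkAssignment(
        mkTasksPerSubtopology(mkTasks("subtopology-1", 0, 1), mkTasks("subtopology-2", 2)), // active tasks
        mkTasksPerSubtopology(mkTasks("subtopology-1", 2)),                                 // standby tasks
        mkTasksPerSubtopology()                                                             // warm-up tasks
    );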
+ */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; + + +public class GroupSpecImplTest { + + private Map members; + private GroupSpecImpl groupSpec; + + @BeforeEach + void setUp() { + members = new HashMap<>(); + + members.put("test-member", new AssignmentMemberSpec( + Optional.of("test-instance"), + Optional.of("test-rack"), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + )); + + groupSpec = new GroupSpecImpl( + members, + new HashMap<>() + ); + } + + @Test + void testMembers() { + assertEquals(members, groupSpec.members()); + } + +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java new file mode 100644 index 0000000000000..25dada072df13 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
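The spec tested above is the input side of the task assignor API; a hedged sketch of the end-to-end call, using only constructors and methods that appear in this and the following test file (the member id, process id and the topologyDescriber variable are placeholders, and MockAssignor is the mock assignor exercised in MockAssignorTest below):

    // Sketch: feeding a GroupSpecImpl into a task assignor
    AssignmentMemberSpec member = new AssignmentMemberSpec(
        Optional.empty(), Optional.empty(),                                      // instance id, rack id
        Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(),  // the first of these maps holds previously assigned active tasks (see the stickiness test below); the others are left empty here
        "process-1",
        Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); // remaining map-typed parameters left empty, as in these tests

    GroupSpecImpl spec = new GroupSpecImpl(Map.of("member-1", member), new HashMap<>());
    GroupAssignment result = new MockAssignor().assign(spec, topologyDescriber);  // topologyDescriber: any TopologyDescriber, e.g. the record defined in MockAssignorTest below
    result.members().get("member-1").activeTasks();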
+ */ +package org.apache.kafka.coordinator.group.streams.assignor; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +public class MockAssignorTest { + + private final MockAssignor assignor = new MockAssignor(); + + @Test + public void testZeroMembers() { + + TaskAssignorException ex = assertThrows(TaskAssignorException.class, () -> assignor.assign( + new GroupSpecImpl( + Collections.emptyMap(), + new HashMap<>() + ), + new TopologyDescriberImpl(5, Collections.singletonList("test-subtopology")) + )); + + assertEquals("No member available to assign task 0 of subtopology test-subtopology", ex.getMessage()); + } + + @Test + public void testDoubleAssignment() { + + final AssignmentMemberSpec memberSpec1 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + Collections.singletonMap("test-subtopology", new HashSet<>(List.of(0))), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + final AssignmentMemberSpec memberSpec2 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + Collections.singletonMap("test-subtopology", new HashSet<>(List.of(0))), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + TaskAssignorException ex = assertThrows(TaskAssignorException.class, () -> assignor.assign( + new GroupSpecImpl( + Map.of("member1", memberSpec1, "member2", memberSpec2), + new HashMap<>() + ), + new TopologyDescriberImpl(5, Collections.singletonList("test-subtopology")) + )); + + assertEquals("Task 0 of subtopology test-subtopology is assigned to multiple members", ex.getMessage()); + } + + @Test + public void testBasicScenario() { + + final GroupAssignment result = assignor.assign( + new GroupSpecImpl( + Collections.emptyMap(), + new HashMap<>() + ), + new TopologyDescriberImpl(5, Collections.emptyList()) + ); + + assertEquals(0, result.members().size()); + } + + + @Test + public void testSingleMember() { + + final AssignmentMemberSpec memberSpec = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + final GroupAssignment result = assignor.assign( + new GroupSpecImpl( + Collections.singletonMap("test_member", memberSpec), + new HashMap<>() + ), + new TopologyDescriberImpl(4, List.of("test-subtopology")) + ); + + assertEquals(1, result.members().size()); + final MemberAssignment testMember = result.members().get("test_member"); + assertNotNull(testMember); + assertEquals(mkMap( + mkEntry("test-subtopology", Set.of(0, 1, 2, 3)) + ), testMember.activeTasks()); + } + + + @Test + public void testTwoMembersTwoSubtopologies() { + + final AssignmentMemberSpec memberSpec1 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + 
Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + final AssignmentMemberSpec memberSpec2 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + final GroupAssignment result = assignor.assign( + new GroupSpecImpl( + mkMap(mkEntry("test_member1", memberSpec1), mkEntry("test_member2", memberSpec2)), + new HashMap<>() + ), + new TopologyDescriberImpl(4, List.of("test-subtopology1", "test-subtopology2")) + ); + + final Map> expected1 = mkMap( + mkEntry("test-subtopology1", Set.of(1, 3)), + mkEntry("test-subtopology2", Set.of(1, 3)) + ); + final Map> expected2 = mkMap( + mkEntry("test-subtopology1", Set.of(0, 2)), + mkEntry("test-subtopology2", Set.of(0, 2)) + ); + + assertEquals(2, result.members().size()); + final MemberAssignment testMember1 = result.members().get("test_member1"); + final MemberAssignment testMember2 = result.members().get("test_member2"); + assertNotNull(testMember1); + assertNotNull(testMember2); + assertTrue(expected1.equals(testMember1.activeTasks()) || expected2.equals(testMember1.activeTasks())); + assertTrue(expected1.equals(testMember2.activeTasks()) || expected2.equals(testMember2.activeTasks())); + } + + @Test + public void testTwoMembersTwoSubtopologiesStickiness() { + + final AssignmentMemberSpec memberSpec1 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + mkMap( + mkEntry("test-subtopology1", new HashSet<>(List.of(0, 2, 3))), + mkEntry("test-subtopology2", new HashSet<>(List.of(0))) + ), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + + final AssignmentMemberSpec memberSpec2 = new AssignmentMemberSpec( + Optional.empty(), + Optional.empty(), + mkMap( + mkEntry("test-subtopology1", new HashSet<>(List.of(1))), + mkEntry("test-subtopology2", new HashSet<>(List.of(3))) + ), + Collections.emptyMap(), + Collections.emptyMap(), + "test-process", + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() + ); + final GroupAssignment result = assignor.assign( + new GroupSpecImpl( + mkMap(mkEntry("test_member1", memberSpec1), mkEntry("test_member2", memberSpec2)), + new HashMap<>() + ), + new TopologyDescriberImpl(4, List.of("test-subtopology1", "test-subtopology2")) + ); + + assertEquals(2, result.members().size()); + final MemberAssignment testMember1 = result.members().get("test_member1"); + final MemberAssignment testMember2 = result.members().get("test_member2"); + assertNotNull(testMember1); + assertNotNull(testMember2); + assertEquals(mkMap( + mkEntry("test-subtopology1", Set.of(0, 2, 3)), + mkEntry("test-subtopology2", Set.of(0)) + ), testMember1.activeTasks()); + assertEquals(mkMap( + mkEntry("test-subtopology1", Set.of(1)), + mkEntry("test-subtopology2", Set.of(1, 2, 3)) + ), testMember2.activeTasks()); + } + + private record TopologyDescriberImpl(int numPartitions, List subtopologies) implements TopologyDescriber { + + @Override + public List subtopologies() { + return subtopologies; + } + + @Override + public int numTasks(String subtopologyId) { + return numPartitions; + } + + @Override + public boolean isStateful(String subtopologyId) { + return false; + } + + } + +} diff --git 
a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopicsTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopicsTest.java new file mode 100644 index 0000000000000..ab7aec1dce2fe --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ChangelogTopicsTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicConfig; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.OptionalInt; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ChangelogTopicsTest { + + private static final LogContext LOG_CONTEXT = new LogContext(); + private static final String SOURCE_TOPIC_NAME = "source"; + private static final String SINK_TOPIC_NAME = "sink"; + private static final String REPARTITION_TOPIC_NAME = "repartition"; + private static final String CHANGELOG_TOPIC_NAME1 = "changelog1"; + private static final TopicConfig TOPIC_CONFIG = new TopicConfig().setKey("config1").setValue("val1"); + private static final TopicInfo REPARTITION_TOPIC_INFO = new TopicInfo() + .setName(REPARTITION_TOPIC_NAME) + .setTopicConfigs(List.of(TOPIC_CONFIG)); + private static final Subtopology SUBTOPOLOGY_NO_SOURCE = new Subtopology() + .setSubtopologyId("SUBTOPOLOGY_NO_SOURCE") + .setSourceTopics(Collections.emptyList()) + .setRepartitionSinkTopics(Collections.singletonList(SINK_TOPIC_NAME)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_INFO)) + .setStateChangelogTopics(Collections.emptyList()); + private static final Subtopology SUBTOPOLOGY_STATELESS = new Subtopology() + .setSubtopologyId("SUBTOPOLOGY_STATELESS") + .setSourceTopics(Collections.singletonList(SOURCE_TOPIC_NAME)) + .setRepartitionSinkTopics(Collections.singletonList(SINK_TOPIC_NAME)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_INFO)) + .setStateChangelogTopics(Collections.emptyList()); + private static final TopicInfo SOURCE_CHANGELOG_TOPIC_CONFIG = new TopicInfo() + .setName(SOURCE_TOPIC_NAME) + .setTopicConfigs(List.of(TOPIC_CONFIG)); + private static final 
Subtopology SUBTOPOLOGY_SOURCE_CHANGELOG = new Subtopology() + .setSubtopologyId("SUBTOPOLOGY_SOURCE_CHANGELOG") + .setSourceTopics(Collections.singletonList(SOURCE_TOPIC_NAME)) + .setRepartitionSinkTopics(Collections.singletonList(SINK_TOPIC_NAME)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_INFO)) + .setStateChangelogTopics(List.of(SOURCE_CHANGELOG_TOPIC_CONFIG)); + private static final TopicInfo CHANGELOG_TOPIC_CONFIG = new TopicInfo() + .setName(CHANGELOG_TOPIC_NAME1) + .setTopicConfigs(List.of(TOPIC_CONFIG)); + private static final Subtopology SUBTOPOLOGY_STATEFUL = new Subtopology() + .setSubtopologyId("SUBTOPOLOGY_STATEFUL") + .setSourceTopics(Collections.singletonList(SOURCE_TOPIC_NAME)) + .setRepartitionSinkTopics(Collections.singletonList(SINK_TOPIC_NAME)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_INFO)) + .setStateChangelogTopics(List.of(CHANGELOG_TOPIC_CONFIG)); + private static final Subtopology SUBTOPOLOGY_BOTH = new Subtopology() + .setSubtopologyId("SUBTOPOLOGY_BOTH") + .setSourceTopics(Collections.singletonList(SOURCE_TOPIC_NAME)) + .setRepartitionSinkTopics(Collections.singletonList(SINK_TOPIC_NAME)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_INFO)) + .setStateChangelogTopics(List.of(SOURCE_CHANGELOG_TOPIC_CONFIG, CHANGELOG_TOPIC_CONFIG)); + + private static OptionalInt topicPartitionProvider(String s) { + return OptionalInt.of(3); + } + + @Test + public void shouldFailIfNoSourceTopics() { + final List subtopologies = List.of(SUBTOPOLOGY_NO_SOURCE); + + final ChangelogTopics changelogTopics = + new ChangelogTopics(LOG_CONTEXT, subtopologies, ChangelogTopicsTest::topicPartitionProvider); + StreamsInvalidTopologyException e = assertThrows(StreamsInvalidTopologyException.class, changelogTopics::setup); + + assertTrue(e.getMessage().contains("No source topics found for subtopology")); + } + + @Test + public void shouldNotContainChangelogsForStatelessTasks() { + final List subtopologies = List.of(SUBTOPOLOGY_STATELESS); + + final ChangelogTopics changelogTopics = + new ChangelogTopics(LOG_CONTEXT, subtopologies, ChangelogTopicsTest::topicPartitionProvider); + Map setup = changelogTopics.setup(); + + assertEquals(Collections.emptyMap(), setup); + } + + @Test + public void shouldContainNonSourceBasedChangelogs() { + final List subtopologies = List.of(SUBTOPOLOGY_STATEFUL); + + final ChangelogTopics changelogTopics = + new ChangelogTopics(LOG_CONTEXT, subtopologies, ChangelogTopicsTest::topicPartitionProvider); + Map setup = changelogTopics.setup(); + + assertEquals(Map.of(CHANGELOG_TOPIC_CONFIG.name(), 3), setup); + } + + @Test + public void shouldNotContainSourceBasedChangelogs() { + final List subtopologies = List.of(SUBTOPOLOGY_SOURCE_CHANGELOG); + + final ChangelogTopics changelogTopics = + new ChangelogTopics(LOG_CONTEXT, subtopologies, ChangelogTopicsTest::topicPartitionProvider); + Map setup = changelogTopics.setup(); + + assertEquals(Collections.emptyMap(), setup); + } + + @Test + public void shouldContainBothTypesOfPreExistingChangelogs() { + final List subtopologies = List.of(SUBTOPOLOGY_BOTH); + + final ChangelogTopics changelogTopics = + new ChangelogTopics(LOG_CONTEXT, subtopologies, ChangelogTopicsTest::topicPartitionProvider); + Map setup = changelogTopics.setup(); + + assertEquals(Map.of(CHANGELOG_TOPIC_CONFIG.name(), 3), setup); + } +} diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopicTest.java 
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopicTest.java new file mode 100644 index 0000000000000..e1db0f048ac93 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredInternalTopicTest.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class ConfiguredInternalTopicTest { + + @Test + public void testConstructorWithNullName() { + assertThrows(NullPointerException.class, + () -> new ConfiguredInternalTopic(null, 1, Optional.empty(), Collections.emptyMap())); + } + + @Test + public void testConstructorWithInvalidName() { + assertThrows(InvalidTopicException.class, + () -> new ConfiguredInternalTopic("invalid topic name", 1, Optional.empty(), Collections.emptyMap())); + } + + @Test + public void testConstructorWithNullTopicConfigs() { + assertThrows(NullPointerException.class, + () -> new ConfiguredInternalTopic("test-topic", 1, Optional.empty(), null)); + } + + @Test + public void testConstructorWithZeroPartitions() { + assertThrows(IllegalArgumentException.class, + () -> new ConfiguredInternalTopic("test-topic", 0, Optional.empty(), Collections.emptyMap())); + } + + @Test + public void testAsStreamsGroupDescribeTopicInfo() { + String topicName = "test-topic"; + Map topicConfigs = new HashMap<>(); + topicConfigs.put("retention.ms", "1000"); + int numberOfPartitions = 3; + Optional replicationFactor = Optional.of((short) 2); + ConfiguredInternalTopic configuredInternalTopic = new ConfiguredInternalTopic( + topicName, numberOfPartitions, replicationFactor, topicConfigs); + + StreamsGroupDescribeResponseData.TopicInfo topicInfo = configuredInternalTopic.asStreamsGroupDescribeTopicInfo(); + + assertEquals(topicName, topicInfo.name()); + assertEquals(numberOfPartitions, topicInfo.partitions()); + assertEquals(replicationFactor.orElse((short) 0).shortValue(), topicInfo.replicationFactor()); + assertEquals(1, topicInfo.topicConfigs().size()); + assertEquals("1000", topicInfo.topicConfigs().get(0).value()); + } +} \ No newline at end of file diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java 
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java new file mode 100644 index 0000000000000..d30716c25f7d8 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ConfiguredSubtopologyTest { + + @Test + public void testConstructorWithNullSourceTopics() { + assertThrows(NullPointerException.class, + () -> new ConfiguredSubtopology( + null, + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptyMap() + ) + ); + } + + @Test + public void testConstructorWithNullRepartitionSourceTopics() { + assertThrows(NullPointerException.class, + () -> new ConfiguredSubtopology( + Collections.emptySet(), + null, + Collections.emptySet(), + Collections.emptyMap() + ) + ); + } + + @Test + public void testConstructorWithNullRepartitionSinkTopics() { + assertThrows(NullPointerException.class, + () -> new ConfiguredSubtopology( + Collections.emptySet(), + Collections.emptyMap(), + null, + Collections.emptyMap() + ) + ); + } + + @Test + public void testConstructorWithNullStateChangelogTopics() { + assertThrows(NullPointerException.class, + () -> new ConfiguredSubtopology( + Collections.emptySet(), + Collections.emptyMap(), + Collections.emptySet(), + null + ) + ); + } + + @Test + public void testAsStreamsGroupDescribeSubtopology() { + String subtopologyId = "subtopology1"; + Set sourceTopics = new HashSet<>(Set.of("sourceTopic1", "sourceTopic2")); + Set repartitionSinkTopics = new HashSet<>(Set.of("repartitionSinkTopic1", "repartitionSinkTopic2")); + ConfiguredInternalTopic internalTopicMock = mock(ConfiguredInternalTopic.class); + StreamsGroupDescribeResponseData.TopicInfo topicInfo = new StreamsGroupDescribeResponseData.TopicInfo(); + when(internalTopicMock.asStreamsGroupDescribeTopicInfo()).thenReturn(topicInfo); + Map repartitionSourceTopics = Map.of("repartitionSourceTopic1", internalTopicMock); + Map stateChangelogTopics = Map.of("stateChangelogTopic1", internalTopicMock); + ConfiguredSubtopology configuredSubtopology = new ConfiguredSubtopology( + sourceTopics, repartitionSourceTopics, repartitionSinkTopics, 
stateChangelogTopics); + + StreamsGroupDescribeResponseData.Subtopology subtopology = configuredSubtopology.asStreamsGroupDescribeSubtopology(subtopologyId); + + assertEquals(subtopologyId, subtopology.subtopologyId()); + assertEquals(sourceTopics.stream().sorted().toList(), subtopology.sourceTopics()); + assertEquals(repartitionSinkTopics.stream().sorted().toList(), subtopology.repartitionSinkTopics()); + assertEquals(List.of(topicInfo), subtopology.repartitionSourceTopics()); + assertEquals(List.of(topicInfo), subtopology.stateChangelogTopics()); + } + +} \ No newline at end of file diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopologyTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopologyTest.java new file mode 100644 index 0000000000000..fc862a7a02745 --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredTopologyTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
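To make the conversion path tested above easier to follow, a minimal sketch of assembling a configured subtopology and rendering it for a StreamsGroupDescribe response; the topic names, partition count, replication factor and config are illustrative:

    // Sketch: a subtopology with one source topic and one changelog topic
    ConfiguredInternalTopic changelog = new ConfiguredInternalTopic(
        "store-changelog", 3, Optional.of((short) 2), Map.of("cleanup.policy", "compact"));

    ConfiguredSubtopology subtopology = new ConfiguredSubtopology(
        Set.of("input"),                        // source topics
        Map.of(),                               // repartition source topics
        Set.of(),                               // repartition sink topics
        Map.of("store-changelog", changelog));  // state changelog topics

    StreamsGroupDescribeResponseData.Subtopology described =
        subtopology.asStreamsGroupDescribeSubtopology("subtopology-0");
    // sourceTopics() and repartitionSinkTopics() come back sorted; the internal topics are
    // rendered via asStreamsGroupDescribeTopicInfo(), as asserted in the test above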
+ */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; + +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ConfiguredTopologyTest { + + @Test + public void testConstructorWithNullSubtopologies() { + assertThrows(NullPointerException.class, + () -> new ConfiguredTopology( + 0, + null, + Collections.emptyMap(), + Optional.empty() + ) + ); + } + + @Test + public void testConstructorWithNullInternalTopicsToBeCreated() { + assertThrows(NullPointerException.class, + () -> new ConfiguredTopology( + 0, + Collections.emptyMap(), + null, + Optional.empty() + ) + ); + } + + @Test + public void testConstructorWithNullTopicConfigurationException() { + assertThrows(NullPointerException.class, + () -> new ConfiguredTopology( + 0, + Collections.emptyMap(), + Collections.emptyMap(), + null + ) + ); + } + + @Test + public void testConstructorWithInvalidTopologyEpoch() { + assertThrows(IllegalArgumentException.class, + () -> new ConfiguredTopology( + -1, + Collections.emptyMap(), + Collections.emptyMap(), + Optional.empty() + ) + ); + } + + @Test + public void testIsReady() { + ConfiguredTopology readyTopology = new ConfiguredTopology( + 1, new HashMap<>(), new HashMap<>(), Optional.empty()); + assertTrue(readyTopology.isReady()); + + ConfiguredTopology notReadyTopology = new ConfiguredTopology( + 1, new HashMap<>(), new HashMap<>(), Optional.of(TopicConfigurationException.missingSourceTopics("missing"))); + assertFalse(notReadyTopology.isReady()); + } + + @Test + public void testAsStreamsGroupDescribeTopology() { + int topologyEpoch = 1; + ConfiguredSubtopology subtopologyMock = mock(ConfiguredSubtopology.class); + StreamsGroupDescribeResponseData.Subtopology subtopologyResponse = new StreamsGroupDescribeResponseData.Subtopology(); + when(subtopologyMock.asStreamsGroupDescribeSubtopology(Mockito.anyString())).thenReturn(subtopologyResponse); + Map subtopologies = new HashMap<>(); + subtopologies.put("subtopology1", subtopologyMock); + Map internalTopicsToBeCreated = new HashMap<>(); + Optional topicConfigurationException = Optional.empty(); + ConfiguredTopology configuredTopology = new ConfiguredTopology( + topologyEpoch, subtopologies, internalTopicsToBeCreated, topicConfigurationException); + + StreamsGroupDescribeResponseData.Topology topology = configuredTopology.asStreamsGroupDescribeTopology(); + + assertEquals(topologyEpoch, topology.epoch()); + assertEquals(1, topology.subtopologies().size()); + assertEquals(subtopologyResponse, topology.subtopologies().get(0)); + } +} \ No newline at end of file diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcerTest.java new file mode 100644 index 0000000000000..d2c466157f9c8 --- /dev/null +++ 
b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/CopartitionedTopicsEnforcerTest.java @@ -0,0 +1,242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse.Status; +import org.apache.kafka.common.utils.LogContext; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.Map; +import java.util.OptionalInt; +import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class CopartitionedTopicsEnforcerTest { + + private static final LogContext LOG_CONTEXT = new LogContext(); + private static final String REPARTITION_TOPIC_1 = "repartitioned-1"; + private static final String REPARTITION_TOPIC_2 = "repartitioned-2"; + private static final String REPARTITION_TOPIC_3 = "repartitioned-3"; + private static final String SOURCE_TOPIC_1 = "source-1"; + private static final String SOURCE_TOPIC_2 = "source-2"; + + private static Function<String, OptionalInt> topicPartitionProvider(Map<String, Integer> topicPartitionCounts) { + return topic -> { + Integer a = topicPartitionCounts.get(topic); + return a == null ?
OptionalInt.empty() : OptionalInt.of(a); + }; + } + + @Test + public void shouldThrowTopicConfigurationExceptionIfNoPartitionsFoundForCoPartitionedTopic() { + final Map topicPartitionCounts = Collections.emptyMap(); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final TopicConfigurationException ex = assertThrows(TopicConfigurationException.class, () -> + enforcer.enforce( + Set.of(SOURCE_TOPIC_1), + Set.of(), + Set.of() + )); + assertEquals(Status.MISSING_SOURCE_TOPICS, ex.status()); + assertEquals(String.format("Following topics are missing: [%s]", SOURCE_TOPIC_1), ex.getMessage()); + } + + @Test + public void shouldThrowTopicConfigurationExceptionIfPartitionCountsForCoPartitionedTopicsDontMatch() { + final Map topicPartitionCounts = Map.of(SOURCE_TOPIC_1, 2, SOURCE_TOPIC_2, 1); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final TopicConfigurationException ex = assertThrows(TopicConfigurationException.class, () -> + enforcer.enforce( + Set.of(SOURCE_TOPIC_1, SOURCE_TOPIC_2), + Set.of(), + Set.of() + ) + ); + assertEquals(Status.INCORRECTLY_PARTITIONED_TOPICS, ex.status()); + assertEquals(String.format("Following topics do not have the same number of partitions: " + + "[{%s=2, %s=1}]", SOURCE_TOPIC_1, SOURCE_TOPIC_2), ex.getMessage()); + } + + @Test + public void shouldEnforceCopartitioningOnRepartitionTopics() { + final Map topicPartitionCounts = Map.of( + SOURCE_TOPIC_1, 2, + SOURCE_TOPIC_2, 2, + REPARTITION_TOPIC_1, 10 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final Map result = + enforcer.enforce( + Set.of(SOURCE_TOPIC_1, SOURCE_TOPIC_2, REPARTITION_TOPIC_1), + Set.of(), + Set.of(REPARTITION_TOPIC_1) + ); + + assertEquals(Map.of(REPARTITION_TOPIC_1, 2), result); + } + + @Test + public void shouldSetNumPartitionsToMaximumPartitionsWhenAllTopicsAreRepartitionTopics() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 1, + REPARTITION_TOPIC_2, 15, + REPARTITION_TOPIC_3, 5 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final Map result = enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, REPARTITION_TOPIC_3), + Set.of(), + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, REPARTITION_TOPIC_3) + ); + + assertEquals(Map.of( + REPARTITION_TOPIC_1, 15, + REPARTITION_TOPIC_2, 15, + REPARTITION_TOPIC_3, 15 + ), result); + } + + @Test + public void shouldThrowAnExceptionIfTopicInfosWithEnforcedNumOfPartitionsHaveDifferentNumOfPartitions() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 10, + REPARTITION_TOPIC_2, 5 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final TopicConfigurationException ex = assertThrows( + TopicConfigurationException.class, + () -> enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2), + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2), + Set.of() + ) + ); + + final TreeMap sorted = new TreeMap<>( + Map.of(REPARTITION_TOPIC_1, 10, REPARTITION_TOPIC_2, 5) + ); + assertEquals(Status.INCORRECTLY_PARTITIONED_TOPICS, ex.status()); + assertEquals(String.format( + "Following topics do not have the same 
number of partitions: " + + "[%s]", sorted), ex.getMessage()); + } + + @Test + public void shouldReturnThePartitionCountsUnchangedWhenTopicInfosWithEnforcedNumOfPartitionsAreValid() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 10, + REPARTITION_TOPIC_2, 10 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final Map enforced = enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2), + Set.of(), + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2) + ); + + assertEquals(Map.of( + REPARTITION_TOPIC_1, 10, + REPARTITION_TOPIC_2, 10 + ), enforced); + } + + @Test + public void shouldThrowAnExceptionWhenNumberOfPartitionsOfNonRepartitionTopicAndRepartitionTopicWithEnforcedNumOfPartitionsDoNotMatch() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 10, + SOURCE_TOPIC_1, 2 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final TopicConfigurationException ex = assertThrows( + TopicConfigurationException.class, + () -> enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, SOURCE_TOPIC_1), + Set.of(REPARTITION_TOPIC_1), + Set.of()) + ); + + assertEquals(Status.INCORRECTLY_PARTITIONED_TOPICS, ex.status()); + assertEquals(String.format("Number of partitions [%s] " + + "of repartition topic [%s] " + + "doesn't match number of partitions [%s] of the source topic.", + 10, REPARTITION_TOPIC_1, 2), ex.getMessage()); + } + + @Test + public void shouldReturnThePartitionCountsUnchangedWhenNumberOfPartitionsOfNonRepartitionTopicAndRepartitionTopicWithEnforcedNumOfPartitionsMatch() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 2, + SOURCE_TOPIC_1, 2 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final Map enforced = enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, SOURCE_TOPIC_1), + Set.of(), + Set.of(REPARTITION_TOPIC_1) + ); + + assertEquals(Map.of( + REPARTITION_TOPIC_1, 2 + ), enforced); + } + + @Test + public void shouldDeductNumberOfPartitionsFromRepartitionTopicWithEnforcedNumberOfPartitions() { + final Map topicPartitionCounts = Map.of( + REPARTITION_TOPIC_1, 2, + REPARTITION_TOPIC_2, 5, + REPARTITION_TOPIC_3, 2 + ); + final CopartitionedTopicsEnforcer enforcer = + new CopartitionedTopicsEnforcer(LOG_CONTEXT, topicPartitionProvider(topicPartitionCounts)); + + final Map enforced = enforcer.enforce( + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_2, REPARTITION_TOPIC_3), + Set.of(REPARTITION_TOPIC_1, REPARTITION_TOPIC_3), + Set.of(REPARTITION_TOPIC_2) + ); + + assertEquals(Map.of( + REPARTITION_TOPIC_1, 2, + REPARTITION_TOPIC_2, 2, + REPARTITION_TOPIC_3, 2 + ), enforced); + } + +} \ No newline at end of file diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopicsTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopicsTest.java new file mode 100644 index 0000000000000..8257f42dbae3b --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/RepartitionTopicsTest.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.errors.StreamsInvalidTopologyException; +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse.Status; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.Subtopology; +import org.apache.kafka.coordinator.group.generated.StreamsGroupTopologyValue.TopicInfo; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.OptionalInt; +import java.util.function.Function; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class RepartitionTopicsTest { + + private static final LogContext LOG_CONTEXT = new LogContext(); + private static final String SOURCE_TOPIC_NAME1 = "source1"; + private static final String SOURCE_TOPIC_NAME2 = "source2"; + private static final TopicInfo REPARTITION_TOPIC1 = new TopicInfo().setName("repartition1").setPartitions(4); + private static final TopicInfo REPARTITION_TOPIC2 = new TopicInfo().setName("repartition2").setPartitions(2); + private static final TopicInfo REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT = new TopicInfo().setName("repartitionWithoutPartitionCount"); + + private static OptionalInt sourceTopicPartitionCounts(final String topicName) { + return SOURCE_TOPIC_NAME1.equals(topicName) || SOURCE_TOPIC_NAME2.equals(topicName) ? 
OptionalInt.of(3) : OptionalInt.empty(); + } + + @Test + public void shouldSetupRepartitionTopics() { + final Subtopology subtopology1 = new Subtopology() + .setSubtopologyId("subtopology1") + .setSourceTopics(List.of(SOURCE_TOPIC_NAME1, SOURCE_TOPIC_NAME2)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC1.name())); + final Subtopology subtopology2 = new Subtopology() + .setSubtopologyId("subtopology2") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC1)); + final List subtopologies = List.of(subtopology1, subtopology2); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + subtopologies, + RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final Map setup = repartitionTopics.setup(); + + assertEquals( + Map.of(REPARTITION_TOPIC1.name(), REPARTITION_TOPIC1.partitions()), + setup + ); + } + + @Test + public void shouldThrowStreamsMissingSourceTopicsExceptionIfMissingSourceTopics() { + final Subtopology subtopology1 = new Subtopology() + .setSubtopologyId("subtopology1") + .setSourceTopics(List.of(SOURCE_TOPIC_NAME1, SOURCE_TOPIC_NAME2)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC1.name())); + final Subtopology subtopology2 = new Subtopology() + .setSubtopologyId("subtopology2") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC1)); + final Function topicPartitionCountProvider = + s -> Objects.equals(s, SOURCE_TOPIC_NAME1) ? OptionalInt.empty() : sourceTopicPartitionCounts(s); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology1, subtopology2), + topicPartitionCountProvider + ); + + final TopicConfigurationException exception = assertThrows(TopicConfigurationException.class, + repartitionTopics::setup); + + assertEquals(Status.MISSING_SOURCE_TOPICS, exception.status()); + assertEquals("Missing source topics: source1", exception.getMessage()); + } + + @Test + public void shouldThrowStreamsInvalidTopologyExceptionIfPartitionCountCannotBeComputedForAllRepartitionTopicsDueToLoops() { + final Subtopology subtopology1 = new Subtopology() + .setSubtopologyId("subtopology1") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT.name())); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology1), + RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final StreamsInvalidTopologyException exception = assertThrows(StreamsInvalidTopologyException.class, repartitionTopics::setup); + + assertEquals( + "Failed to compute number of partitions for all repartition topics. 
There may be loops in the topology that cannot be resolved.", + exception.getMessage() + ); + } + + @Test + public void shouldThrowStreamsInvalidTopologyExceptionIfPartitionCountCannotBeComputedForAllRepartitionTopicsDueToMissingSinks() { + final Subtopology subtopology1 = new Subtopology() + .setSubtopologyId("subtopology1") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT)); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology1), + RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final StreamsInvalidTopologyException exception = assertThrows(StreamsInvalidTopologyException.class, repartitionTopics::setup); + + assertEquals( + "Failed to compute number of partitions for all repartition topics, because a repartition source topic is never used as a sink topic.", + exception.getMessage() + ); + } + + @Test + public void shouldSetRepartitionTopicPartitionCountFromUpstreamExternalSourceTopic() { + final Subtopology subtopology = new Subtopology() + .setSubtopologyId("subtopology0") + .setSourceTopics(List.of(SOURCE_TOPIC_NAME1)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC1.name(), REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT.name())) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC2)); + final Subtopology subtopologyWithoutPartitionCount = new Subtopology() + .setSubtopologyId("subtopologyWithoutPartitionCount") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC1, REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT)); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology, subtopologyWithoutPartitionCount), + RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final Map setup = repartitionTopics.setup(); + + assertEquals(Map.of( + REPARTITION_TOPIC1.name(), REPARTITION_TOPIC1.partitions(), + REPARTITION_TOPIC2.name(), REPARTITION_TOPIC2.partitions(), + REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT.name(), sourceTopicPartitionCounts(SOURCE_TOPIC_NAME1).getAsInt() + ), setup); + } + + @Test + public void shouldSetRepartitionTopicPartitionCountFromUpstreamInternalRepartitionSourceTopic() { + final Subtopology subtopology = new Subtopology() + .setSubtopologyId("subtopology0") + .setSourceTopics(List.of(SOURCE_TOPIC_NAME1)) + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC1)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT.name())); + final Subtopology subtopologyWithoutPartitionCount = new Subtopology() + .setSubtopologyId("subtopologyWithoutPartitionCount") + .setRepartitionSourceTopics(List.of(REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT)) + .setRepartitionSinkTopics(List.of(REPARTITION_TOPIC1.name())); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology, subtopologyWithoutPartitionCount), + RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final Map setup = repartitionTopics.setup(); + + assertEquals( + Map.of( + REPARTITION_TOPIC1.name(), REPARTITION_TOPIC1.partitions(), + REPARTITION_TOPIC_WITHOUT_PARTITION_COUNT.name(), REPARTITION_TOPIC1.partitions() + ), + setup + ); + } + + @Test + public void shouldNotSetupRepartitionTopicsWhenTopologyDoesNotContainAnyRepartitionTopics() { + final Subtopology subtopology = new Subtopology() + .setSubtopologyId("subtopology0") + .setSourceTopics(List.of(SOURCE_TOPIC_NAME1)); + final RepartitionTopics repartitionTopics = new RepartitionTopics( + LOG_CONTEXT, + List.of(subtopology), + 
RepartitionTopicsTest::sourceTopicPartitionCounts + ); + + final Map setup = repartitionTopics.setup(); + + assertEquals(Collections.emptyMap(), setup); + } + +} \ No newline at end of file diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationExceptionTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationExceptionTest.java new file mode 100644 index 0000000000000..479cef5db13aa --- /dev/null +++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/TopicConfigurationExceptionTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.coordinator.group.streams.topics; + +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse.Status; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class TopicConfigurationExceptionTest { + + @Test + public void testMissingSourceTopics() { + TopicConfigurationException exception = TopicConfigurationException.missingSourceTopics("test"); + assertEquals(Status.MISSING_SOURCE_TOPICS, exception.status()); + assertEquals("test", exception.getMessage()); + } + + @Test + public void testMissingInternalTopics() { + TopicConfigurationException exception = TopicConfigurationException.missingInternalTopics("test"); + assertEquals(Status.MISSING_INTERNAL_TOPICS, exception.status()); + assertEquals("test", exception.getMessage()); + } + + @Test + public void testIncorrectlyPartitionedTopics() { + TopicConfigurationException exception = TopicConfigurationException.incorrectlyPartitionedTopics("test"); + assertEquals(Status.INCORRECTLY_PARTITIONED_TOPICS, exception.status()); + assertEquals("test", exception.getMessage()); + } + +} diff --git a/group-coordinator/src/test/resources/log4j2.yaml b/group-coordinator/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..59b02951909e6 --- /dev/null +++ b/group-coordinator/src/test/resources/log4j2.yaml @@ -0,0 +1,31 @@ +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
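As a companion to the tests above, a hedged sketch of the partition-count resolution that RepartitionTopics.setup() performs, reusing the Subtopology/TopicInfo record types imported there; topic names, counts and the provider lambda are illustrative:

    // Sketch: a repartition topic without an explicit partition count inherits the count
    // of the upstream source topic that feeds it (6 here), per the tests above.
    Subtopology upstream = new Subtopology()
        .setSubtopologyId("sub-0")
        .setSourceTopics(List.of("input"))
        .setRepartitionSinkTopics(List.of("input-repartition"));
    Subtopology downstream = new Subtopology()
        .setSubtopologyId("sub-1")
        .setRepartitionSourceTopics(List.of(new TopicInfo().setName("input-repartition")));

    Map<String, Integer> partitions = new RepartitionTopics(
        new LogContext(),
        List.of(upstream, downstream),
        topic -> "input".equals(topic) ? OptionalInt.of(6) : OptionalInt.empty()
    ).setup();
    // partitions is expected to map "input-repartition" to 6; a source topic the provider cannot
    // resolve would instead raise TopicConfigurationException with Status.MISSING_SOURCE_TOPICS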
+Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: DEBUG + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: DEBUG diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/TargetAssignmentBuilderBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/TargetAssignmentBuilderBenchmark.java index 2a23d22b65536..6fbb7908622b0 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/TargetAssignmentBuilderBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/TargetAssignmentBuilderBenchmark.java @@ -82,7 +82,7 @@ public class TargetAssignmentBuilderBenchmark { private PartitionAssignor partitionAssignor; - private TargetAssignmentBuilder targetAssignmentBuilder; + private TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder targetAssignmentBuilder; /** The number of homogeneous subgroups to create for the heterogeneous subscription case. */ private static final int MAX_BUCKET_COUNT = 5; @@ -116,7 +116,7 @@ public void setup() { .setSubscribedTopicNames(allTopicNames) .build(); - targetAssignmentBuilder = new TargetAssignmentBuilder(GROUP_ID, GROUP_EPOCH, partitionAssignor) + targetAssignmentBuilder = new TargetAssignmentBuilder.ConsumerTargetAssignmentBuilder(GROUP_ID, GROUP_EPOCH, partitionAssignor) .withMembers(members) .withSubscriptionMetadata(subscriptionMetadata) .withSubscriptionType(subscriptionType) diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ImplicitLinkedHashCollectionBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ImplicitLinkedHashCollectionBenchmark.java index 8e814daa967f0..8861428103a52 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ImplicitLinkedHashCollectionBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ImplicitLinkedHashCollectionBenchmark.java @@ -83,8 +83,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof TestElement)) return false; - TestElement other = (TestElement) o; + if (!(o instanceof TestElement other)) return false; return value.equals(other.value); } } diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ProduceRequestBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ProduceRequestBenchmark.java index 55ccee8516e61..2238c2259439f 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ProduceRequestBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/common/ProduceRequestBenchmark.java @@ -54,7 +54,7 @@ public class ProduceRequestBenchmark { @Setup(Level.Trial) public void setup() { - this.produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData()) + this.produceRequest = ProduceRequest.builder(new ProduceRequestData()) .build(ApiKeys.PRODUCE.latestVersion()); } diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/connect/JsonConverterBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/connect/JsonConverterBenchmark.java index aa45a7e711b8b..35998a70ef08c 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/connect/JsonConverterBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/connect/JsonConverterBenchmark.java @@ -51,7 +51,7 @@ public class JsonConverterBenchmark { private JsonConverter converter; @Param({"true", 
"false"}) - private boolean afterBurnModule; + private boolean blackbirdModule; @State(Scope.Benchmark) public static class Data { @@ -425,7 +425,7 @@ private static Struct buildTransactionStruct() { @Setup(Level.Trial) public void setup(BenchmarkParams params) { - converter = new JsonConverter(Boolean.parseBoolean(params.getParam("afterBurnModule"))); + converter = new JsonConverter(Boolean.parseBoolean(params.getParam("blackbirdModule"))); converter.configure(Collections.emptyMap(), false); } diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/consumer/SubscriptionStateBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/consumer/SubscriptionStateBenchmark.java index 9652ce464c6d3..dc67a662b7ec7 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/consumer/SubscriptionStateBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/consumer/SubscriptionStateBenchmark.java @@ -18,7 +18,7 @@ package org.apache.kafka.jmh.consumer; import org.apache.kafka.clients.Metadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -66,7 +66,7 @@ public void setup() { assignment.add(new TopicPartition(String.format("topic-%04d", topicId), partitionId)) ) ); - subscriptionState = new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST); + subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); subscriptionState.assignFromUser(assignment); SubscriptionState.FetchPosition position = new SubscriptionState.FetchPosition( 0L, diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/coordinator/RegexResolutionBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/coordinator/RegexResolutionBenchmark.java new file mode 100644 index 0000000000000..08db52e4e601e --- /dev/null +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/coordinator/RegexResolutionBenchmark.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.jmh.coordinator; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.TopicRecord; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.coordinator.group.GroupMetadataManager; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.MetadataProvenance; + +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.slf4j.Logger; + +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +@State(Scope.Benchmark) +@Fork(value = 1) +@Warmup(iterations = 5) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class RegexResolutionBenchmark { + private static final Logger LOG = new LogContext().logger(RegexResolutionBenchmark.class); + private static final Time TIME = Time.SYSTEM; + private static final String GROUP_ID = "my-group-id"; + + private static final List WORDS = List.of( + "data", + "stream", + "queue", + "analytics", + "service", + "event", + "log", + "cloud", + "process", + "system", + "message", + "broker", + "partition", + "key", + "value", + "cluster", + "zookeeper", + "replication", + "topic", + "producer" + ); + + @Param({"10000", "100000", "1000000"}) + private int topicCount; + + @Param({"1", "10", "100"}) + private int regexCount; + + private MetadataImage image; + + private Set regexes; + + @Setup(Level.Trial) + public void setup() { + Random random = new Random(); + + MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); + for (int i = 0; i < topicCount; i++) { + String topicName = + WORDS.get(random.nextInt(WORDS.size())) + "_" + + WORDS.get(random.nextInt(WORDS.size())) + "_" + + i; + + delta.replay(new TopicRecord() + .setTopicId(Uuid.randomUuid()) + .setName(topicName)); + } + image = delta.apply(MetadataProvenance.EMPTY); + + regexes = new HashSet<>(); + for (int i = 0; i < regexCount; i++) { + regexes.add(".*" + WORDS.get(random.nextInt(WORDS.size())) + ".*"); + } + } + + @Benchmark + @Threads(1) + @OutputTimeUnit(TimeUnit.MILLISECONDS) + public void run() { + GroupMetadataManager.refreshRegularExpressions( + GROUP_ID, + LOG, + TIME, + image, + regexes + ); + } +} diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java index 951ce6fb6c471..bd5137e9fbac9 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java @@ -419,8 +419,7 @@ public long getDelay(TimeUnit unit) { @Override public int compareTo(Delayed o) { - if (o instanceof Scheduled) { - Scheduled other = (Scheduled) o; + if (o instanceof Scheduled other) { if (operation.completesAt < 
other.operation.completesAt) return -1; else if (operation.completesAt > other.operation.completesAt) diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java index d8ccd14bdc272..16e54582a290a 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java @@ -17,11 +17,8 @@ package org.apache.kafka.jmh.fetcher; -import kafka.cluster.AlterPartitionListener; -import kafka.cluster.DelayedOperations; import kafka.cluster.Partition; import kafka.log.LogManager; -import kafka.server.AlterPartitionManager; import kafka.server.BrokerBlockingSender; import kafka.server.FailedPartitions; import kafka.server.InitialFetchState; @@ -36,10 +33,8 @@ import kafka.server.builders.LogManagerBuilder; import kafka.server.builders.ReplicaManagerBuilder; import kafka.server.metadata.MockConfigRepository; -import kafka.server.metadata.ZkMetadataCache; import kafka.utils.Pool; import kafka.utils.TestUtils; -import kafka.zk.KafkaZkClient; import org.apache.kafka.clients.FetchSessionHandler; import org.apache.kafka.common.TopicIdPartition; @@ -49,7 +44,6 @@ import org.apache.kafka.common.message.LeaderAndIsrRequestData; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; -import org.apache.kafka.common.message.UpdateMetadataRequestData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; @@ -57,11 +51,9 @@ import org.apache.kafka.common.record.RecordsSend; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.requests.FetchResponse; -import org.apache.kafka.common.requests.UpdateMetadataRequest; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.server.BrokerFeatures; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.OffsetAndEpoch; import org.apache.kafka.server.network.BrokerEndPoint; @@ -91,20 +83,21 @@ import java.io.File; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Optional; import java.util.Properties; -import java.util.UUID; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import scala.Option; import scala.collection.Iterator; import scala.collection.Map; +import scala.jdk.javaapi.CollectionConverters; + +import static org.apache.kafka.server.common.KRaftVersion.KRAFT_VERSION_1; @State(Scope.Benchmark) @Fork(value = 1) @@ -113,7 +106,6 @@ @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.NANOSECONDS) public class ReplicaFetcherThreadBenchmark { - private final File logDir = new File(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); private final KafkaScheduler scheduler = new KafkaScheduler(1, true, "scheduler"); private final Pool pool = new Pool<>(Option.empty()); private final Metrics metrics = new Metrics(); @@ -127,18 +119,16 @@ public class ReplicaFetcherThreadBenchmark { @Setup(Level.Trial) public void 
setup() throws IOException { - if (!logDir.mkdir()) - throw new IOException("error creating test directory"); - scheduler.startup(); - Properties props = new Properties(); - props.put("zookeeper.connect", "127.0.0.1:9999"); - KafkaConfig config = new KafkaConfig(props); + KafkaConfig config = KafkaConfig.fromProps(TestUtils.createBrokerConfig( + 0, true, true, 9092, Option.empty(), Option.empty(), + Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, + (short) 1, false)); LogConfig logConfig = createLogConfig(); BrokerTopicStats brokerTopicStats = new BrokerTopicStats(false); - LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class); - List logDirs = Collections.singletonList(logDir); + LogDirFailureChannel logDirFailureChannel = new LogDirFailureChannel(config.logDirs().size()); + List logDirs = CollectionConverters.asJava(config.logDirs()).stream().map(File::new).collect(Collectors.toList()); logManager = new LogManagerBuilder(). setLogDirs(logDirs). setInitialOfflineDirs(Collections.emptyList()). @@ -156,13 +146,23 @@ public void setup() throws IOException { setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). setTime(Time.SYSTEM). - setKeepPartitionMetadataFile(true). + build(); + + replicaManager = new ReplicaManagerBuilder(). + setConfig(config). + setMetrics(metrics). + setTime(new MockTime()). + setScheduler(scheduler). + setLogManager(logManager). + setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class)). + setBrokerTopicStats(brokerTopicStats). + setMetadataCache(MetadataCache.kRaftMetadataCache(config.nodeId(), () -> KRAFT_VERSION_1)). + setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size())). + setAlterPartitionManager(TestUtils.createAlterIsrManager()). 
build(); LinkedHashMap initialFetched = new LinkedHashMap<>(); - HashMap topicIds = new HashMap<>(); scala.collection.mutable.Map initialFetchStates = new scala.collection.mutable.HashMap<>(); - List updatePartitionState = new ArrayList<>(); for (int i = 0; i < partitionCount; i++) { TopicPartition tp = new TopicPartition("topic", i); @@ -176,15 +176,10 @@ public void setup() throws IOException { .setReplicas(replicas) .setIsNew(true); - AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); - OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class); - Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Optional.of(0L)); - AlterPartitionManager isrChannelManager = Mockito.mock(AlterPartitionManager.class); - Partition partition = new Partition(tp, 100, MetadataVersion.latestTesting(), - 0, () -> -1, Time.SYSTEM, alterPartitionListener, new DelayedOperationsMock(topicId, tp), - Mockito.mock(MetadataCache.class), logManager, isrChannelManager, topicId); + OffsetCheckpoints checkpoints = (logDir, topicPartition) -> Optional.of(0L); + Partition partition = replicaManager.createPartition(tp); - partition.makeFollower(partitionState, offsetCheckpoints, topicId, Option.empty()); + partition.makeFollower(partitionState, checkpoints, topicId, Option.empty()); pool.put(tp, partition); initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0)); BaseRecords fetched = new BaseRecords() { @@ -203,39 +198,8 @@ public RecordsSend toSend() { .setLastStableOffset(0) .setLogStartOffset(0) .setRecords(fetched)); - - updatePartitionState.add( - new UpdateMetadataRequestData.UpdateMetadataPartitionState() - .setTopicName("topic") - .setPartitionIndex(i) - .setControllerEpoch(0) - .setLeader(0) - .setLeaderEpoch(0) - .setIsr(replicas) - .setZkVersion(1) - .setReplicas(replicas)); } - UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion(), - 0, 0, 0, updatePartitionState, Collections.emptyList(), topicIds).build(); - - // TODO: fix to support raft - ZkMetadataCache metadataCache = MetadataCache.zkMetadataCache(0, - config.interBrokerProtocolVersion(), BrokerFeatures.createEmpty(), false); - metadataCache.updateMetadata(0, updateMetadataRequest); - replicaManager = new ReplicaManagerBuilder(). - setConfig(config). - setMetrics(metrics). - setTime(new MockTime()). - setZkClient(Mockito.mock(KafkaZkClient.class)). - setScheduler(scheduler). - setLogManager(logManager). - setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class)). - setBrokerTopicStats(brokerTopicStats). - setMetadataCache(metadataCache). - setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size())). - setAlterPartitionManager(TestUtils.createAlterIsrManager()). 
- build(); replicaQuota = new ReplicaQuota() { @Override public boolean isQuotaExceeded() { @@ -266,7 +230,9 @@ public void tearDown() throws IOException, InterruptedException { replicaManager.shutdown(false); logManager.shutdown(-1L); scheduler.shutdown(); - Utils.delete(logDir); + for (File dir : CollectionConverters.asJava(logManager.liveLogDirs())) { + Utils.delete(dir); + } } @Benchmark @@ -275,18 +241,6 @@ public long testFetcher() { return fetcher.fetcherStats().requestRate().count(); } - // avoid mocked DelayedOperations to avoid mocked class affecting benchmark results - private static class DelayedOperationsMock extends DelayedOperations { - DelayedOperationsMock(Option topicId, TopicPartition topicPartition) { - super(topicId, topicPartition, null, null, null, null); - } - - @Override - public int numDelayedDelete() { - return 0; - } - } - private static LogConfig createLogConfig() { return new LogConfig(new Properties()); } @@ -318,7 +272,7 @@ static class ReplicaFetcherBenchThread extends ReplicaFetcherThread { replicaManager, replicaQuota, config::interBrokerProtocolVersion, - () -> -1 + () -> -1L ) { @Override public OffsetAndEpoch fetchEarliestOffset(TopicPartition topicPartition, int currentLeaderEpoch) { diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/StressTestLog.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/StressTestLog.java index 69a4fe236aa04..3b9eae7aeaa1e 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/StressTestLog.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/StressTestLog.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.record.Records; import org.apache.kafka.common.utils.Exit; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.RequestLocal; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.util.MockTime; @@ -79,7 +78,6 @@ public static void main(String[] args) throws Exception { new LogDirFailureChannel(10), true, Option.empty(), - true, new ConcurrentHashMap<>(), false, LogOffsetsListener.NO_OP_OFFSETS_LISTENER @@ -166,7 +164,6 @@ protected void work() throws Exception { LogAppendInfo logAppendInfo = log.appendAsLeader(records, 0, AppendOrigin.CLIENT, - MetadataVersion.LATEST_PRODUCTION, RequestLocal.noCaching(), VerificationGuard.SENTINEL); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/TestLinearWriteSpeed.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/TestLinearWriteSpeed.java new file mode 100644 index 0000000000000..cb22efe880863 --- /dev/null +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/log/TestLinearWriteSpeed.java @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.jmh.log; + +import kafka.log.UnifiedLog; + +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.compress.GzipCompression; +import org.apache.kafka.common.compress.Lz4Compression; +import org.apache.kafka.common.compress.ZstdCompression; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.Records; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.utils.CopyOnWriteMap; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.coordinator.transaction.TransactionLogConfig; +import org.apache.kafka.server.common.RequestLocal; +import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.server.util.KafkaScheduler; +import org.apache.kafka.server.util.Scheduler; +import org.apache.kafka.storage.internals.log.AppendOrigin; +import org.apache.kafka.storage.internals.log.LogConfig; +import org.apache.kafka.storage.internals.log.LogDirFailureChannel; +import org.apache.kafka.storage.internals.log.LogOffsetsListener; +import org.apache.kafka.storage.internals.log.ProducerStateManagerConfig; +import org.apache.kafka.storage.internals.log.VerificationGuard; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.ThreadLocalRandom; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import scala.Option; + +public class TestLinearWriteSpeed { + + public static void main(String[] args) throws Exception { + OptionParser parser = new OptionParser(); + + OptionSpec dirOpt = parser.accepts("dir", "The directory to write to.") + .withRequiredArg() + .describedAs("path") + .ofType(String.class) + .defaultsTo(System.getProperty("java.io.tmpdir")); + + OptionSpec bytesOpt = parser.accepts("bytes", "REQUIRED: The total number of bytes to write.") + .withRequiredArg() + .describedAs("num_bytes") + .ofType(Long.class); + + OptionSpec sizeOpt = parser.accepts("size", "REQUIRED: The size of each write.") + .withRequiredArg() + .describedAs("num_bytes") + .ofType(Integer.class); + + OptionSpec messageSizeOpt = parser.accepts("message-size", "REQUIRED: The size of each message in the message set.") + .withRequiredArg() + .describedAs("num_bytes") + .ofType(Integer.class) + .defaultsTo(1024); + + OptionSpec filesOpt = parser.accepts("files", "REQUIRED: The number of logs or files.") + .withRequiredArg() + .describedAs("num_files") + .ofType(Integer.class) + .defaultsTo(1); + + OptionSpec reportingIntervalOpt = parser.accepts("reporting-interval", "The number of ms between updates.") + .withRequiredArg() + .describedAs("ms") + .ofType(Long.class) + .defaultsTo(1000L); + + OptionSpec maxThroughputOpt = parser.accepts("max-throughput-mb", "The maximum throughput.") + .withRequiredArg() + .describedAs("mb") + .ofType(Integer.class) + .defaultsTo(Integer.MAX_VALUE); + + 
OptionSpec flushIntervalOpt = parser.accepts("flush-interval", "The number of messages between flushes") + .withRequiredArg() + .describedAs("message_count") + .ofType(Long.class) + .defaultsTo(Long.MAX_VALUE); + + OptionSpec compressionCodecOpt = parser.accepts("compression", "The compression codec to use") + .withRequiredArg() + .describedAs("codec") + .ofType(String.class) + .defaultsTo(CompressionType.NONE.name); + + OptionSpec compressionLevelOpt = parser.accepts("level", "The compression level to use") + .withRequiredArg() + .describedAs("level") + .ofType(Integer.class) + .defaultsTo(0); + + OptionSpec mmapOpt = parser.accepts("mmap", "Do writes to memory-mapped files."); + OptionSpec channelOpt = parser.accepts("channel", "Do writes to file channels."); + OptionSpec logOpt = parser.accepts("log", "Do writes to kafka logs."); + OptionSet options = parser.parse(args); + CommandLineUtils.checkRequiredArgs(parser, options, bytesOpt, sizeOpt, filesOpt); + + long bytesToWrite = options.valueOf(bytesOpt); + int bufferSize = options.valueOf(sizeOpt); + int numFiles = options.valueOf(filesOpt); + long reportingInterval = options.valueOf(reportingIntervalOpt); + String dir = options.valueOf(dirOpt); + long maxThroughputBytes = options.valueOf(maxThroughputOpt) * 1024L * 1024L; + ByteBuffer buffer = ByteBuffer.allocate(bufferSize); + int messageSize = options.valueOf(messageSizeOpt); + long flushInterval = options.valueOf(flushIntervalOpt); + CompressionType compressionType = CompressionType.forName(options.valueOf(compressionCodecOpt)); + Compression.Builder compressionBuilder = Compression.of(compressionType); + int compressionLevel = options.valueOf(compressionLevelOpt); + + setupCompression(compressionType, compressionBuilder, compressionLevel); + + ThreadLocalRandom.current().nextBytes(buffer.array()); + int numMessages = bufferSize / (messageSize + Records.LOG_OVERHEAD); + long createTime = System.currentTimeMillis(); + + List recordsList = new ArrayList<>(); + for (int i = 0; i < numMessages; i++) { + recordsList.add(new SimpleRecord(createTime, null, new byte[messageSize])); + } + + MemoryRecords messageSet = MemoryRecords.withRecords(Compression.NONE, recordsList.toArray(new SimpleRecord[0])); + Writable[] writables = new Writable[numFiles]; + KafkaScheduler scheduler = new KafkaScheduler(1); + scheduler.startup(); + + for (int i = 0; i < numFiles; i++) { + if (options.has(mmapOpt)) { + writables[i] = new MmapWritable(new File(dir, "kafka-test-" + i + ".dat"), bytesToWrite / numFiles, buffer); + } else if (options.has(channelOpt)) { + writables[i] = new ChannelWritable(new File(dir, "kafka-test-" + i + ".dat"), buffer); + } else if (options.has(logOpt)) { + int segmentSize = ThreadLocalRandom.current().nextInt(512) * 1024 * 1024 + 64 * 1024 * 1024; + Properties logProperties = new Properties(); + logProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, Integer.toString(segmentSize)); + logProperties.put(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, Long.toString(flushInterval)); + LogConfig logConfig = new LogConfig(logProperties); + writables[i] = new LogWritable(new File(dir, "kafka-test-" + i), logConfig, scheduler, messageSet); + } else { + System.err.println("Must specify what to write to with one of --log, --channel, or --mmap"); + Exit.exit(1); + } + } + bytesToWrite = (bytesToWrite / numFiles) * numFiles; + + System.out.printf("%10s\t%10s\t%10s%n", "mb_sec", "avg_latency", "max_latency"); + + long beginTest = System.nanoTime(); + long maxLatency = 0L; + long totalLatency = 0L; + 
long count = 0L; + long written = 0L; + long totalWritten = 0L; + long lastReport = beginTest; + + while (totalWritten + bufferSize < bytesToWrite) { + long start = System.nanoTime(); + int writeSize = writables[(int) (count % numFiles)].write(); + long elapsed = System.nanoTime() - start; + maxLatency = Math.max(elapsed, maxLatency); + totalLatency += elapsed; + written += writeSize; + count += 1; + totalWritten += writeSize; + if ((start - lastReport) / (1000.0 * 1000.0) > reportingInterval) { + double elapsedSecs = (start - lastReport) / (1000.0 * 1000.0 * 1000.0); + double mb = written / (1024.0 * 1024.0); + System.out.printf("%10.3f\t%10.3f\t%10.3f%n", mb / elapsedSecs, (totalLatency / (double) count) / (1000.0 * 1000.0), maxLatency / (1000.0 * 1000.0)); + lastReport = start; + written = 0; + maxLatency = 0L; + totalLatency = 0L; + } else if (written > maxThroughputBytes * (reportingInterval / 1000.0)) { + long lastReportMs = lastReport / (1000 * 1000); + long now = System.nanoTime() / (1000 * 1000); + long sleepMs = lastReportMs + reportingInterval - now; + if (sleepMs > 0) + Thread.sleep(sleepMs); + } + } + double elapsedSecs = (System.nanoTime() - beginTest) / (1000.0 * 1000.0 * 1000.0); + System.out.println((bytesToWrite / (1024.0 * 1024.0 * elapsedSecs)) + " MB per sec"); + scheduler.shutdown(); + } + + private static void setupCompression(CompressionType compressionType, + Compression.Builder compressionBuilder, + int compressionLevel) { + switch (compressionType) { + case GZIP: + ((GzipCompression.Builder) compressionBuilder).level(compressionLevel); + break; + case LZ4: + ((Lz4Compression.Builder) compressionBuilder).level(compressionLevel); + break; + case ZSTD: + ((ZstdCompression.Builder) compressionBuilder).level(compressionLevel); + break; + default: + break; + } + } + + interface Writable { + int write() throws IOException; + + void close() throws IOException; + } + + static class MmapWritable implements Writable { + File file; + ByteBuffer content; + RandomAccessFile raf; + MappedByteBuffer buffer; + + public MmapWritable(File file, long size, ByteBuffer content) throws IOException { + this.file = file; + this.content = content; + file.deleteOnExit(); + raf = new RandomAccessFile(file, "rw"); + raf.setLength(size); + buffer = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, raf.length()); + } + + public int write() { + buffer.put(content); + content.rewind(); + return content.limit(); + } + + public void close() throws IOException { + raf.close(); + Utils.delete(file); + } + } + + static class ChannelWritable implements Writable { + File file; + ByteBuffer content; + FileChannel channel; + + public ChannelWritable(File file, ByteBuffer content) throws IOException { + this.file = file; + this.content = content; + file.deleteOnExit(); + channel = FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE); + } + + public int write() throws IOException { + channel.write(content); + content.rewind(); + return content.limit(); + } + + public void close() throws IOException { + channel.close(); + Utils.delete(file); + } + } + + static class LogWritable implements Writable { + MemoryRecords messages; + UnifiedLog log; + + public LogWritable(File dir, LogConfig config, Scheduler scheduler, MemoryRecords messages) throws IOException { + this.messages = messages; + Utils.delete(dir); + this.log = UnifiedLog.apply( + dir, + config, + 0L, + 0L, + scheduler, + new BrokerTopicStats(), + Time.SYSTEM, + 5 * 60 * 1000, + new 
ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + new LogDirFailureChannel(10), + true, + Option.empty(), + new CopyOnWriteMap<>(), + false, + LogOffsetsListener.NO_OP_OFFSETS_LISTENER + ); + } + + public int write() { + log.appendAsLeader( + messages, + 0, + AppendOrigin.CLIENT, + RequestLocal.noCaching(), + VerificationGuard.SENTINEL + ); + return messages.sizeInBytes(); + } + + public void close() throws IOException { + log.close(); + Utils.delete(log.dir()); + } + } +} diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/metadata/KRaftMetadataRequestBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/metadata/KRaftMetadataRequestBenchmark.java index a6463cfd4433f..a46a4d94bbe97 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/metadata/KRaftMetadataRequestBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/metadata/KRaftMetadataRequestBenchmark.java @@ -29,13 +29,13 @@ import kafka.server.KafkaConfig; import kafka.server.MetadataCache; import kafka.server.QuotaFactory; -import kafka.server.RaftSupport; import kafka.server.ReplicaManager; import kafka.server.ReplicationQuotaManager; import kafka.server.SimpleApiVersionManager; import kafka.server.builders.KafkaApisBuilder; import kafka.server.metadata.KRaftMetadataCache; import kafka.server.metadata.MockConfigRepository; +import kafka.server.share.SharePartitionManager; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.memory.MemoryPool; @@ -60,6 +60,7 @@ import org.apache.kafka.network.RequestConvertToJson; import org.apache.kafka.network.metrics.RequestChannelMetrics; import org.apache.kafka.raft.QuorumConfig; +import org.apache.kafka.server.ClientMetricsManager; import org.apache.kafka.server.common.FinalizedFeatures; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.MetadataVersion; @@ -118,6 +119,8 @@ public class KRaftMetadataRequestBenchmark { clientQuotaManager, clientRequestQuotaManager, controllerMutationQuotaManager, replicaQuotaManager, replicaQuotaManager, replicaQuotaManager, Optional.empty()); private final FetchManager fetchManager = Mockito.mock(FetchManager.class); + private final SharePartitionManager sharePartitionManager = Mockito.mock(SharePartitionManager.class); + private final ClientMetricsManager clientMetricsManager = Mockito.mock(ClientMetricsManager.class); private final BrokerTopicStats brokerTopicStats = new BrokerTopicStats(false); private final KafkaPrincipal principal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user"); @Param({"500", "1000", "5000"}) @@ -187,7 +190,7 @@ private KafkaApis createKafkaApis() { KafkaConfig config = new KafkaConfig(kafkaProps); return new KafkaApisBuilder(). setRequestChannel(requestChannel). - setMetadataSupport(new RaftSupport(forwardingManager, metadataCache)). + setForwardingManager(forwardingManager). setReplicaManager(replicaManager). setGroupCoordinator(groupCoordinator). setTxnCoordinator(transactionCoordinator). @@ -200,7 +203,8 @@ private KafkaApis createKafkaApis() { setAuthorizer(Optional.empty()). setQuotas(quotaManagers). setFetchManager(fetchManager). - setSharePartitionManager(Optional.empty()). + setSharePartitionManager(sharePartitionManager). + setClientMetricsManager(clientMetricsManager). setBrokerTopicStats(brokerTopicStats). setClusterId("clusterId"). setTime(Time.SYSTEM). 
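Note: the JMH benchmarks touched in this patch (RegexResolutionBenchmark, KRaftMetadataRequestBenchmark and the others under jmh-benchmarks/) all follow the same scaffold: a @State(Scope.Benchmark) class annotated with @Fork, @Warmup, @Measurement, @BenchmarkMode and @OutputTimeUnit, one or more @Param fields that sweep the workload size, a @Setup(Level.Trial) method that builds the fixture once per trial, and a @Benchmark method that is the measured body. A minimal sketch of that scaffold follows; the package, class name, parameter values and workload are illustrative placeholders only and are not part of the patch.

package org.apache.kafka.jmh.example; // hypothetical package, mirroring org.apache.kafka.jmh.*

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

@State(Scope.Benchmark)
@Fork(value = 1)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public class ExampleScaffoldBenchmark {

    // Benchmark dimension swept by JMH, analogous to topicCount/regexCount above.
    @Param({"100", "1000"})
    private int itemCount;

    private List<String> items;

    @Setup(Level.Trial)
    public void setup() {
        // Build the fixture once per trial so construction cost is not measured.
        items = new ArrayList<>(itemCount);
        for (int i = 0; i < itemCount; i++) {
            items.add("item-" + i);
        }
    }

    @Benchmark
    public int run() {
        // The measured operation; returning the result keeps JMH from dead-code-eliminating it.
        int total = 0;
        for (String item : items) {
            total += item.length();
        }
        return total;
    }
}

Building the fixture inside @Setup(Level.Trial) keeps construction cost out of the measured method, which is why the benchmarks in this patch assemble their MetadataImage, log managers and replica managers in setup() rather than inside the @Benchmark methods themselves.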
diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java index 6b21ccefff777..3750bb47c4630 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java @@ -115,7 +115,7 @@ public void setup() throws IOException { setScheduler(scheduler). setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). - setTime(Time.SYSTEM).setKeepPartitionMetadataFile(true). + setTime(Time.SYSTEM). build(); TopicPartition tp = new TopicPartition("topic", 0); @@ -124,8 +124,7 @@ public void setup() throws IOException { Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Optional.of(0L)); AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); - partition = new Partition(tp, 100, - MetadataVersion.latestTesting(), 0, () -> -1, Time.SYSTEM, + partition = new Partition(tp, 100, 0, () -> -1, Time.SYSTEM, alterPartitionListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId); partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId, Option.empty()); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java index 2a35bb97a6c0f..e339a9e783ea3 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java @@ -106,7 +106,6 @@ public void setUp() { setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). setTime(Time.SYSTEM). - setKeepPartitionMetadataFile(true). 
build(); OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class); Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), topicPartition)).thenReturn(Optional.of(0L)); @@ -128,7 +127,7 @@ public void setUp() { AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); partition = new Partition(topicPartition, 100, - MetadataVersion.latestTesting(), 0, () -> -1, Time.SYSTEM, + 0, () -> -1, Time.SYSTEM, alterPartitionListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId); partition.makeLeader(partitionState, offsetCheckpoints, topicId, Option.empty()); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/producer/ProducerRequestBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/producer/ProducerRequestBenchmark.java index 3bde86f827e4f..f76bb728fe699 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/producer/ProducerRequestBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/producer/ProducerRequestBenchmark.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.ProduceRequest; import org.apache.kafka.common.requests.ProduceResponse; @@ -68,7 +67,7 @@ public class ProducerRequestBenchmark { .setTopicData(new ProduceRequestData.TopicProduceDataCollection(TOPIC_PRODUCE_DATA.iterator())); private static ProduceRequest request() { - return ProduceRequest.forMagic(RecordBatch.CURRENT_MAGIC_VALUE, PRODUCE_REQUEST_DATA).build(); + return ProduceRequest.builder(PRODUCE_REQUEST_DATA, false).build(); } private static final ProduceRequest REQUEST = request(); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java index a75a8a4ca2ce4..fca7fc73aa65c 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.storage.internals.log.AppendOrigin; import org.apache.kafka.storage.internals.log.LogValidator; @@ -55,8 +54,7 @@ public void measureValidateMessagesAndAssignOffsetsCompressed(Blackhole bh) { MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate()); new LogValidator(records, new TopicPartition("a", 0), Time.SYSTEM, compressionType, compression(), false, messageVersion, - TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT ).validateMessagesAndAssignOffsetsCompressed(PrimitiveRef.ofLong(startingOffset), validatorMetricsRecorder, requestLocal.bufferSupplier()); } diff --git 
a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/UncompressedRecordBatchValidationBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/UncompressedRecordBatchValidationBenchmark.java index fb05990b4dd37..a9327437e2a7d 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/UncompressedRecordBatchValidationBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/UncompressedRecordBatchValidationBenchmark.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.storage.internals.log.AppendOrigin; import org.apache.kafka.storage.internals.log.LogValidator; @@ -51,8 +50,7 @@ public void measureAssignOffsetsNonCompressed(Blackhole bh) { MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate()); new LogValidator(records, new TopicPartition("a", 0), Time.SYSTEM, CompressionType.NONE, Compression.NONE, false, - messageVersion, TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + messageVersion, TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT ).assignOffsetsNonCompressed(PrimitiveRef.ofLong(startingOffset), validatorMetricsRecorder); } } diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java index 07b1ecedd08f6..ddea968215ebd 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java @@ -102,7 +102,7 @@ public class CheckpointBench { public void setup() { this.scheduler = new KafkaScheduler(1, true, "scheduler-thread"); this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig( - 0, null, true, true, 9092, Option.empty(), Option.empty(), + 0, true, true, 9092, Option.empty(), Option.empty(), Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1, false)); this.metrics = new Metrics(); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java index c04106040a128..179bcafdfa718 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java @@ -109,7 +109,7 @@ public void setup() { this.scheduler = new KafkaScheduler(1, true, "scheduler-thread"); this.brokerProperties = KafkaConfig.fromProps(TestUtils.createBrokerConfig( - 0, null, true, true, 9092, Option.empty(), Option.empty(), + 0, true, true, 9092, Option.empty(), Option.empty(), Option.empty(), true, false, 0, false, 0, false, 0, Option.empty(), 1, true, 1, (short) 1, false)); this.metrics = new Metrics(); @@ -141,7 +141,6 @@ public void setup() { setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(failureChannel). setTime(Time.SYSTEM). - setKeepPartitionMetadataFile(true). 
build(); scheduler.startup(); this.quotaManagers = QuotaFactory.instantiate(this.brokerProperties, this.metrics, this.time, ""); diff --git a/metadata/src/main/java/org/apache/kafka/controller/AclControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/AclControlManager.java index d7c5dd1a3e585..ca324ab57888c 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/AclControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/AclControlManager.java @@ -160,6 +160,12 @@ static void validateNewAcl(AclBinding binding) { if (binding.pattern().name() == null || binding.pattern().name().isEmpty()) { throw new InvalidRequestException("Resource name should not be empty"); } + int colonIndex = binding.entry().principal().indexOf(":"); + if (colonIndex == -1) { + throw new InvalidRequestException("Could not parse principal from `" + + binding.entry().principal() + "` " + "(no colon is present separating the " + + "principal type from the principal name)"); + } } ControllerResult> deleteAcls(List filters) { diff --git a/metadata/src/main/java/org/apache/kafka/controller/ActivationRecordsGenerator.java b/metadata/src/main/java/org/apache/kafka/controller/ActivationRecordsGenerator.java index b2ef4fe4f11d0..a9ea13d40e011 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ActivationRecordsGenerator.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ActivationRecordsGenerator.java @@ -17,20 +17,22 @@ package org.apache.kafka.controller; +import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.metadata.AbortTransactionRecord; import org.apache.kafka.common.metadata.BeginTransactionRecord; +import org.apache.kafka.common.metadata.ConfigRecord; import org.apache.kafka.common.metadata.EndTransactionRecord; import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.common.MetadataVersion; import java.util.ArrayList; import java.util.List; import java.util.function.Consumer; -import static org.apache.kafka.metadata.migration.ZkMigrationState.NONE; -import static org.apache.kafka.metadata.migration.ZkMigrationState.POST_MIGRATION; +import static org.apache.kafka.common.config.ConfigResource.Type.BROKER; + public class ActivationRecordsGenerator { @@ -38,7 +40,8 @@ static ControllerResult recordsForEmptyLog( Consumer activationMessageConsumer, long transactionStartOffset, BootstrapMetadata bootstrapMetadata, - MetadataVersion metadataVersion + MetadataVersion metadataVersion, + int defaultMinInSyncReplicas ) { StringBuilder logMessageBuilder = new StringBuilder("Performing controller activation. "); List records = new ArrayList<>(); @@ -90,10 +93,13 @@ static ControllerResult recordsForEmptyLog( // initialization, etc. records.addAll(bootstrapMetadata.records()); - if (metadataVersion.isMigrationSupported()) { - logMessageBuilder.append("Setting the ZK migration state to NONE since this is a de-novo " + - "KRaft cluster. "); - records.add(NONE.toRecord()); + // If ELR is enabled, we need to set a cluster-level min.insync.replicas. + if (bootstrapMetadata.featureLevel(EligibleLeaderReplicasVersion.FEATURE_NAME) > 0) { + records.add(new ApiMessageAndVersion(new ConfigRecord(). + setResourceType(BROKER.id()). + setResourceName(""). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG). 
+ setValue(Integer.toString(defaultMinInSyncReplicas)), (short) 0)); } activationMessageConsumer.accept(logMessageBuilder.toString().trim()); @@ -108,7 +114,6 @@ static ControllerResult recordsForEmptyLog( static ControllerResult recordsForNonEmptyLog( Consumer activationMessageConsumer, long transactionStartOffset, - ZkMigrationState zkMigrationState, MetadataVersion curMetadataVersion ) { StringBuilder logMessageBuilder = new StringBuilder("Performing controller activation. "); @@ -139,24 +144,6 @@ static ControllerResult recordsForNonEmptyLog( .append(". "); } - if (curMetadataVersion.isMigrationSupported()) { - if (zkMigrationState == NONE || zkMigrationState == POST_MIGRATION) { - logMessageBuilder - .append("Loaded ZK migration state of ") - .append(zkMigrationState) - .append(". "); - if (zkMigrationState == NONE) { - logMessageBuilder.append("This is expected because this is a de-novo KRaft cluster."); - } - } else { - throw new RuntimeException("Cannot load ZkMigrationState." + zkMigrationState + - " because ZK migration is no longer supported."); - } - } else if (zkMigrationState != NONE) { - throw new RuntimeException("Should not have ZkMigrationState." + zkMigrationState + - " on a cluster running metadata version " + curMetadataVersion + "."); - } - activationMessageConsumer.accept(logMessageBuilder.toString().trim()); return ControllerResult.atomicOf(records, null); } @@ -176,15 +163,19 @@ static ControllerResult generate( boolean isEmpty, long transactionStartOffset, BootstrapMetadata bootstrapMetadata, - ZkMigrationState zkMigrationState, - MetadataVersion curMetadataVersion + MetadataVersion curMetadataVersion, + int defaultMinInSyncReplicas ) { if (isEmpty) { - return recordsForEmptyLog(activationMessageConsumer, transactionStartOffset, - bootstrapMetadata, bootstrapMetadata.metadataVersion()); + return recordsForEmptyLog(activationMessageConsumer, + transactionStartOffset, + bootstrapMetadata, + bootstrapMetadata.metadataVersion(), + defaultMinInSyncReplicas); } else { - return recordsForNonEmptyLog(activationMessageConsumer, transactionStartOffset, - zkMigrationState, curMetadataVersion); + return recordsForNonEmptyLog(activationMessageConsumer, + transactionStartOffset, + curMetadataVersion); } } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java b/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java index 660585223e770..b3b4dc9414ffd 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java +++ b/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java @@ -44,8 +44,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof BrokerControlStates)) return false; - BrokerControlStates other = (BrokerControlStates) o; + if (!(o instanceof BrokerControlStates other)) return false; return other.current == current && other.next == next; } diff --git a/metadata/src/main/java/org/apache/kafka/controller/BrokerHeartbeatManager.java b/metadata/src/main/java/org/apache/kafka/controller/BrokerHeartbeatManager.java index e63170ca5bf23..2762d36f48726 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/BrokerHeartbeatManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/BrokerHeartbeatManager.java @@ -75,8 +75,7 @@ static class BrokerHeartbeatState { /** * The offset at which the broker should complete its controlled shutdown, or -1 - * if the broker is not performing a controlled shutdown. 
When this field is - * updated, we also have to update the broker's position in the shuttingDown set. + * if the broker is not performing a controlled shutdown. */ private long controlledShutdownOffset; diff --git a/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java b/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java index a0cf60d1f6554..7ef75c4e3b331 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java +++ b/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java @@ -41,8 +41,7 @@ public long epoch() { @Override public boolean equals(Object o) { - if (o == null || (!(o instanceof BrokerIdAndEpoch))) return false; - BrokerIdAndEpoch other = (BrokerIdAndEpoch) o; + if (o == null || (!(o instanceof BrokerIdAndEpoch other))) return false; return id == other.id && epoch == other.epoch; } diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index c583906d4ee89..43edafb77f417 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -70,7 +70,6 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import static java.util.concurrent.TimeUnit.NANOSECONDS; @@ -307,8 +306,11 @@ ReplicaPlacer replicaPlacer() { */ public void activate() { heartbeatManager = new BrokerHeartbeatManager(logContext, time, sessionTimeoutNs); + long nowNs = time.nanoseconds(); for (BrokerRegistration registration : brokerRegistrations.values()) { heartbeatManager.register(registration.id(), registration.fenced()); + heartbeatManager.tracker().updateContactTime( + new BrokerIdAndEpoch(registration.id(), registration.epoch()), nowNs); } } @@ -327,14 +329,6 @@ Map brokerRegistrations() { return brokerRegistrations; } - Set fencedBrokerIds() { - return brokerRegistrations.values() - .stream() - .filter(BrokerRegistration::fenced) - .map(BrokerRegistration::id) - .collect(Collectors.toSet()); - } - /** * Process an incoming broker registration request. */ @@ -460,7 +454,11 @@ public ControllerResult registerBroker( } heartbeatManager.register(brokerId, record.fenced()); - return ControllerResult.atomicOf(records, new BrokerRegistrationReply(record.brokerEpoch())); + // A broker registration that cleans up a previous incarnation's unclean shutdown may generate a large number of records. + // It is safe to return these records as a non-atomic batch as long as the registration record is added last. + // This ensures that in case of a controller failure, the broker will re-register and the new controller + // can retry the unclean shutdown cleanup. 
+ return ControllerResult.of(records, new BrokerRegistrationReply(record.brokerEpoch())); } ControllerResult registerController(ControllerRegistrationRequestData request) { diff --git a/metadata/src/main/java/org/apache/kafka/controller/ConfigurationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ConfigurationControlManager.java index 3b776651b870c..15b5bbbf9dfda 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ConfigurationControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ConfigurationControlManager.java @@ -19,9 +19,11 @@ import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.config.ConfigResource.Type; +import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.metadata.ConfigRecord; import org.apache.kafka.common.protocol.Errors; @@ -29,11 +31,13 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.metadata.KafkaConfigSchema; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.mutable.BoundedList; import org.apache.kafka.server.policy.AlterConfigPolicy; import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; +import org.apache.kafka.timeline.TimelineHashSet; import org.slf4j.Logger; @@ -50,7 +54,12 @@ import java.util.function.Consumer; import static org.apache.kafka.clients.admin.AlterConfigOp.OpType.APPEND; +import static org.apache.kafka.clients.admin.AlterConfigOp.OpType.DELETE; +import static org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET; +import static org.apache.kafka.common.config.ConfigResource.Type.BROKER; +import static org.apache.kafka.common.config.TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG; import static org.apache.kafka.common.config.TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG; +import static org.apache.kafka.common.metadata.MetadataRecordType.CONFIG_RECORD; import static org.apache.kafka.common.protocol.Errors.INVALID_CONFIG; import static org.apache.kafka.controller.QuorumController.MAX_RECORDS_PER_USER_OP; @@ -65,8 +74,10 @@ public class ConfigurationControlManager { private final Optional alterConfigPolicy; private final ConfigurationValidator validator; private final TimelineHashMap> configData; + private final TimelineHashSet brokersWithConfigs; private final Map staticConfig; private final ConfigResource currentController; + private final FeatureControlManager featureControl; static class Builder { private LogContext logContext = null; @@ -77,6 +88,7 @@ static class Builder { private ConfigurationValidator validator = ConfigurationValidator.NO_OP; private Map staticConfig = Collections.emptyMap(); private int nodeId = 0; + private FeatureControlManager featureControl = null; Builder setLogContext(LogContext logContext) { this.logContext = logContext; @@ -118,12 +130,20 @@ Builder setNodeId(int nodeId) { return this; } + Builder setFeatureControl(FeatureControlManager featureControl) { + this.featureControl = featureControl; + return this; + } + ConfigurationControlManager build() { if (logContext == null) logContext = 
new LogContext(); if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext); if (configSchema == null) { throw new RuntimeException("You must set the configSchema."); } + if (featureControl == null) { + featureControl = new FeatureControlManager.Builder().build(); + } return new ConfigurationControlManager( logContext, snapshotRegistry, @@ -132,7 +152,8 @@ ConfigurationControlManager build() { alterConfigPolicy, validator, staticConfig, - nodeId); + nodeId, + featureControl); } } @@ -143,7 +164,8 @@ private ConfigurationControlManager(LogContext logContext, Optional alterConfigPolicy, ConfigurationValidator validator, Map staticConfig, - int nodeId) { + int nodeId, + FeatureControlManager featureControl) { this.log = logContext.logger(ConfigurationControlManager.class); this.snapshotRegistry = snapshotRegistry; this.configSchema = configSchema; @@ -151,8 +173,10 @@ private ConfigurationControlManager(LogContext logContext, this.alterConfigPolicy = alterConfigPolicy; this.validator = validator; this.configData = new TimelineHashMap<>(snapshotRegistry, 0); + this.brokersWithConfigs = new TimelineHashSet<>(snapshotRegistry, 0); this.staticConfig = Collections.unmodifiableMap(new HashMap<>(staticConfig)); this.currentController = new ConfigResource(Type.BROKER, Integer.toString(nodeId)); + this.featureControl = featureControl; } SnapshotRegistry snapshotRegistry() { @@ -268,10 +292,12 @@ private ApiError incrementalAlterConfigResource( return ApiError.NONE; } - private ApiError validateAlterConfig(ConfigResource configResource, - List recordsExplicitlyAltered, - List recordsImplicitlyDeleted, - boolean newlyCreatedResource) { + private ApiError validateAlterConfig( + ConfigResource configResource, + List recordsExplicitlyAltered, + List recordsImplicitlyDeleted, + boolean newlyCreatedResource + ) { Map allConfigs = new HashMap<>(); Map existingConfigsMap = new HashMap<>(); Map alteredConfigsForAlterConfigPolicyCheck = new HashMap<>(); @@ -282,7 +308,11 @@ private ApiError validateAlterConfig(ConfigResource configResource, } for (ApiMessageAndVersion newRecord : recordsExplicitlyAltered) { ConfigRecord configRecord = (ConfigRecord) newRecord.message(); - if (configRecord.value() == null) { + if (isDisallowedBrokerMinIsrTransition(configRecord)) { + return DISALLOWED_BROKER_MIN_ISR_TRANSITION_ERROR; + } else if (isDisallowedClusterMinIsrTransition(configRecord)) { + return DISALLOWED_CLUSTER_MIN_ISR_REMOVAL_ERROR; + } else if (configRecord.value() == null) { allConfigs.remove(configRecord.name()); } else { allConfigs.put(configRecord.name(), configRecord.value()); @@ -291,7 +321,13 @@ private ApiError validateAlterConfig(ConfigResource configResource, } for (ApiMessageAndVersion recordImplicitlyDeleted : recordsImplicitlyDeleted) { ConfigRecord configRecord = (ConfigRecord) recordImplicitlyDeleted.message(); - allConfigs.remove(configRecord.name()); + if (isDisallowedBrokerMinIsrTransition(configRecord)) { + return DISALLOWED_BROKER_MIN_ISR_TRANSITION_ERROR; + } else if (isDisallowedClusterMinIsrTransition(configRecord)) { + return DISALLOWED_CLUSTER_MIN_ISR_REMOVAL_ERROR; + } else { + allConfigs.remove(configRecord.name()); + } // As per KAFKA-14195, do not include implicit deletions caused by using the legacy AlterConfigs API // in the list passed to the policy in order to maintain backwards compatibility } @@ -316,6 +352,37 @@ private ApiError validateAlterConfig(ConfigResource configResource, return ApiError.NONE; } + private static final ApiError 
DISALLOWED_BROKER_MIN_ISR_TRANSITION_ERROR = + new ApiError(INVALID_CONFIG, "Broker-level " + MIN_IN_SYNC_REPLICAS_CONFIG + + " cannot be altered while ELR is enabled."); + + private static final ApiError DISALLOWED_CLUSTER_MIN_ISR_REMOVAL_ERROR = + new ApiError(INVALID_CONFIG, "Cluster-level " + MIN_IN_SYNC_REPLICAS_CONFIG + + " cannot be removed while ELR is enabled."); + + boolean isDisallowedBrokerMinIsrTransition(ConfigRecord configRecord) { + if (configRecord.name().equals(MIN_IN_SYNC_REPLICAS_CONFIG) && + configRecord.resourceType() == BROKER.id() && + !configRecord.resourceName().isEmpty()) { + if (featureControl.isElrFeatureEnabled()) { + return true; + } + } + return false; + } + + boolean isDisallowedClusterMinIsrTransition(ConfigRecord configRecord) { + if (configRecord.name().equals(MIN_IN_SYNC_REPLICAS_CONFIG) && + configRecord.resourceType() == BROKER.id() && + configRecord.resourceName().isEmpty() && + configRecord.value() == null) { + if (featureControl.isElrFeatureEnabled()) { + return true; + } + } + return false; + } + /** * Determine the result of applying a batch of legacy configuration changes. Note * that this method does not change the contents of memory. It just generates a @@ -415,6 +482,9 @@ public void replay(ConfigRecord record) { if (configs == null) { configs = new TimelineHashMap<>(snapshotRegistry, 0); configData.put(configResource, configs); + if (configResource.type().equals(BROKER) && !configResource.name().isEmpty()) { + brokersWithConfigs.add(Integer.parseInt(configResource.name())); + } } if (record.value() == null) { configs.remove(record.name()); @@ -423,6 +493,9 @@ public void replay(ConfigRecord record) { } if (configs.isEmpty()) { configData.remove(configResource); + if (configResource.type().equals(BROKER) && !configResource.name().isEmpty()) { + brokersWithConfigs.remove(Integer.parseInt(configResource.name())); + } } if (configSchema.isSensitive(record)) { log.info("Replayed ConfigRecord for {} which set configuration {} to {}", @@ -439,7 +512,7 @@ Map getConfigs(ConfigResource configResource) { if (map == null) { return Collections.emptyMap(); } else { - return Collections.unmodifiableMap(new HashMap<>(map)); + return Map.copyOf(map); } } @@ -501,6 +574,88 @@ void deleteTopicConfigs(String name) { configData.remove(new ConfigResource(Type.TOPIC, name)); } + int getStaticallyConfiguredMinInsyncReplicas() { + return configSchema.getStaticallyConfiguredMinInsyncReplicas(staticConfig); + } + + /** + * Generate any configuration records that are needed to make it safe to enable ELR. + * Specifically, we need to remove all broker-level configurations for min.insync.replicas, + * and create a cluster-level configuration for min.insync.replicas. It is always safe to call + * this function if ELR is already enabled; it will simply do nothing if the necessary + * configurations already exist. + * + * @param outputRecords A list to add the new records to. + * + * @return The log message to generate. + */ + String maybeGenerateElrSafetyRecords(List outputRecords) { + StringBuilder bld = new StringBuilder(); + String prefix = ""; + if (!clusterConfig().containsKey(MIN_IN_SYNC_REPLICAS_CONFIG)) { + int minInsyncReplicas = configSchema.getStaticallyConfiguredMinInsyncReplicas(staticConfig); + outputRecords.add(new ApiMessageAndVersion( + new ConfigRecord(). + setResourceType(BROKER.id()). + setResourceName(""). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).
+ setValue(Integer.toString(minInsyncReplicas)), + CONFIG_RECORD.highestSupportedVersion())); + bld.append("Generating cluster-level ").append(MIN_IN_SYNC_REPLICAS_CONFIG). + append(" of ").append(minInsyncReplicas); + prefix = ". "; + } + prefix = prefix + "Removing broker-level " + MIN_IN_SYNC_REPLICAS_CONFIG + " for brokers: "; + for (Integer brokerId : brokersWithConfigs) { + ConfigResource configResource = new ConfigResource(BROKER, brokerId.toString()); + Map configs = configData.get(configResource); + if (configs.containsKey(MIN_IN_SYNC_REPLICAS_CONFIG)) { + outputRecords.add(new ApiMessageAndVersion( + new ConfigRecord().setResourceType(BROKER.id()).setResourceName(configResource.name()). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).setValue(null), + CONFIG_RECORD.highestSupportedVersion())); + bld.append(prefix).append(brokerId); + prefix = ", "; + } + } + if (bld.isEmpty()) { + return ""; + } else { + bld.append("."); + return bld.toString(); + } + } + + /** + * Update a Kafka feature, generating any configuration changes that are required. + * + * @param updates The user-requested updates. + * @param upgradeTypes The user-requested upgrade types. + * @param validateOnly True if we should validate the request but not make changes. + * + * @return The result. + */ + ControllerResult updateFeatures( + Map updates, + Map upgradeTypes, + boolean validateOnly + ) { + ControllerResult result = featureControl.updateFeatures(updates, upgradeTypes, validateOnly); + if (result.response().isSuccess() && + !validateOnly && + updates.getOrDefault(EligibleLeaderReplicasVersion.FEATURE_NAME, (short) 0) > 0 + ) { + List records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); + String logMessage = maybeGenerateElrSafetyRecords(records); + if (!logMessage.isEmpty()) { + log.info("{}", logMessage); + } + records.addAll(result.records()); + return ControllerResult.atomicOf(records, null); + } + return result; + } + /** * Check if this topic has "unclean.leader.election.enable" set to true. * @@ -512,7 +667,6 @@ boolean uncleanLeaderElectionEnabledForTopic(String topicName) { if (!uncleanLeaderElection.isEmpty()) { return Boolean.parseBoolean(uncleanLeaderElection); } - return false; } @@ -535,4 +689,9 @@ Map currentTopicConfig(String topicName) { Map result = configData.get(new ConfigResource(Type.TOPIC, topicName)); return (result == null) ? Collections.emptyMap() : result; } + + // Visible to test + TimelineHashSet brokersWithConfigs() { + return brokersWithConfigs; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/EventPerformanceMonitor.java b/metadata/src/main/java/org/apache/kafka/controller/EventPerformanceMonitor.java new file mode 100644 index 0000000000000..fbe8b1c3cbbb8 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/EventPerformanceMonitor.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller; + +import org.apache.kafka.common.utils.LogContext; + +import org.slf4j.Logger; + +import java.text.DecimalFormat; +import java.util.AbstractMap; +import java.util.Map; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + * Track the performance of controller events. Periodically log the slowest events. + * Log any event slower than a certain threshold. + */ +class EventPerformanceMonitor { + /** + * The format to use when displaying milliseconds. + */ + private static final DecimalFormat MILLISECOND_DECIMAL_FORMAT = new DecimalFormat("#0.00"); + + static class Builder { + LogContext logContext = null; + long periodNs = SECONDS.toNanos(60); + long alwaysLogThresholdNs = SECONDS.toNanos(2); + + Builder setLogContext(LogContext logContext) { + this.logContext = logContext; + return this; + } + + Builder setPeriodNs(long periodNs) { + this.periodNs = periodNs; + return this; + } + + Builder setAlwaysLogThresholdNs(long alwaysLogThresholdNs) { + this.alwaysLogThresholdNs = alwaysLogThresholdNs; + return this; + } + + EventPerformanceMonitor build() { + if (logContext == null) logContext = new LogContext(); + return new EventPerformanceMonitor(logContext, + periodNs, + alwaysLogThresholdNs); + } + } + + /** + * The log4j object to use. + */ + private final Logger log; + + /** + * The period in nanoseconds. + */ + private long periodNs; + + /** + * The always-log threshold in nanoseconds. + */ + private long alwaysLogThresholdNs; + + /** + * The name of the slowest event we've seen so far, or null if none has been seen. + */ + private String slowestEventName; + + /** + * The duration of the slowest event we've seen so far, or 0 if none has been seen. + */ + private long slowestEventDurationNs; + + /** + * The total duration of all the events we've seen. + */ + private long totalEventDurationNs; + + /** + * The number of events we've seen. + */ + private int numEvents; + + private EventPerformanceMonitor( + LogContext logContext, + long periodNs, + long alwaysLogThresholdNs + ) { + this.log = logContext.logger(EventPerformanceMonitor.class); + this.periodNs = periodNs; + this.alwaysLogThresholdNs = alwaysLogThresholdNs; + reset(); + } + + long periodNs() { + return periodNs; + } + + Map.Entry slowestEvent() { + return new AbstractMap.SimpleImmutableEntry<>(slowestEventName, slowestEventDurationNs); + } + + /** + * Reset all internal state. + */ + void reset() { + this.slowestEventName = null; + this.slowestEventDurationNs = 0; + this.totalEventDurationNs = 0; + this.numEvents = 0; + } + + /** + * Handle a controller event being finished. + * + * @param name The name of the controller event. + * @param durationNs The duration of the controller event in nanoseconds. + */ + void observeEvent(String name, long durationNs) { + String message = doObserveEvent(name, durationNs); + if (message != null) { + log.error("{}", message); + } + } + + /** + * Handle a controller event being finished. + * + * @param name The name of the controller event. 
+ * @param durationNs The duration of the controller event in nanoseconds. + * + * @return The message to log, or null otherwise. + */ + String doObserveEvent(String name, long durationNs) { + if (slowestEventName == null || slowestEventDurationNs < durationNs) { + slowestEventName = name; + slowestEventDurationNs = durationNs; + } + totalEventDurationNs += durationNs; + numEvents++; + if (durationNs < alwaysLogThresholdNs) { + return null; + } + return "Exceptionally slow controller event " + name + " took " + + NANOSECONDS.toMillis(durationNs) + " ms."; + } + + /** + * Generate a log message summarizing the events of the last period, + * and then reset our internal state. + */ + void generatePeriodicPerformanceMessage() { + String message = periodicPerformanceMessage(); + log.info("{}", message); + reset(); + } + + /** + * Generate a log message summarizing the events of the last period. + * + * @return The summary string. + */ + String periodicPerformanceMessage() { + StringBuilder bld = new StringBuilder(); + bld.append("In the last "); + bld.append(NANOSECONDS.toMillis(periodNs)); + bld.append(" ms period, "); + if (numEvents == 0) { + bld.append("there were no controller events completed."); + } else { + bld.append(numEvents).append(" controller events were completed, which took an average of "); + bld.append(formatNsAsDecimalMs(totalEventDurationNs / numEvents)); + bld.append(" ms each. The slowest event was ").append(slowestEventName); + bld.append(", which took "); + bld.append(formatNsAsDecimalMs(slowestEventDurationNs)); + bld.append(" ms."); + } + return bld.toString(); + } + + /** + * Translate a duration in nanoseconds to a decimal duration in milliseconds. + * + * @param durationNs The duration in nanoseconds. + * @return The decimal duration in milliseconds. + */ + static String formatNsAsDecimalMs(long durationNs) { + double number = NANOSECONDS.toMicros(durationNs); + number /= 1000; + return MILLISECOND_DECIMAL_FORMAT.format(number); + } +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java index f114d594ae53f..b5cbc38665811 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java @@ -19,15 +19,14 @@ import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ApiError; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.metadata.FinalizedControllerFeatures; import org.apache.kafka.metadata.VersionRange; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.mutable.BoundedList; import org.apache.kafka.timeline.SnapshotRegistry; @@ -137,11 +136,6 @@ public FeatureControlManager build() { */ private final TimelineObject metadataVersion; - /** - * The current ZK migration state - */ - private final TimelineObject migrationControlState; - /** * The minimum bootstrap version that we can't downgrade before. 
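// --- Illustrative sketch, not part of the patch: duration formatting in EventPerformanceMonitor ---
// The formatNsAsDecimalMs helper above renders a nanosecond duration as milliseconds with two
// decimal places for the periodic performance summary. A minimal standalone version of that
// conversion follows; the class and method names are hypothetical and the printed values assume
// an English locale for the decimal separator.
import java.text.DecimalFormat;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

class DurationFormatSketch {
    private static final DecimalFormat MS_FORMAT = new DecimalFormat("#0.00");

    // Convert a duration in nanoseconds to a decimal-millisecond string.
    static String nsAsDecimalMs(long durationNs) {
        double ms = NANOSECONDS.toMicros(durationNs) / 1000.0;
        return MS_FORMAT.format(ms);
    }

    public static void main(String[] args) {
        System.out.println(nsAsDecimalMs(1_234_567L));     // "1.23"
        System.out.println(nsAsDecimalMs(2_000_000_000L)); // "2000.00"
    }
}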
*/ @@ -165,7 +159,6 @@ private FeatureControlManager( this.finalizedVersions = new TimelineHashMap<>(snapshotRegistry, 0); this.metadataVersion = new TimelineObject<>(snapshotRegistry, metadataVersion); this.minimumBootstrapVersion = minimumBootstrapVersion; - this.migrationControlState = new TimelineObject<>(snapshotRegistry, ZkMigrationState.NONE); this.clusterSupportDescriber = clusterSupportDescriber; } @@ -200,10 +193,6 @@ MetadataVersion metadataVersion() { return metadataVersion.get(); } - ZkMigrationState zkMigrationState() { - return migrationControlState.get(); - } - private ApiError updateFeature( String featureName, short newVersion, @@ -251,9 +240,9 @@ private ApiError updateFeature( } else { // Validate dependencies for features that are not metadata.version try { - Features.validateVersion( + Feature.validateVersion( // Allow unstable feature versions is true because the version range is already checked above. - Features.featureFromName(featureName).fromFeatureLevel(newVersion, true), + Feature.featureFromName(featureName).fromFeatureLevel(newVersion, true), proposedUpdatedVersions); } catch (IllegalArgumentException e) { return invalidUpdateVersion(featureName, newVersion, e.getMessage()); @@ -335,7 +324,6 @@ private ApiError updateMetadataVersion( Consumer recordConsumer ) { MetadataVersion currentVersion = metadataVersion(); - ZkMigrationState zkMigrationState = zkMigrationState(); final MetadataVersion newVersion; try { newVersion = MetadataVersion.fromFeatureLevel(newVersionLevel); @@ -343,12 +331,6 @@ private ApiError updateMetadataVersion( return invalidMetadataVersion(newVersionLevel, "Unknown metadata.version."); } - // Don't allow metadata.version changes while we're migrating - if (zkMigrationState.inProgress()) { - return invalidMetadataVersion(newVersionLevel, "Unable to modify metadata.version while a " + - "ZK migration is in progress."); - } - // We cannot set a version earlier than IBP_3_3_IV0, since that was the first version that contained // FeatureLevelRecord itself. 
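// --- Illustrative sketch, not part of the patch: the ELR min.insync.replicas guard ---
// The isElrFeatureEnabled() check added to FeatureControlManager just below is what the
// ConfigurationControlManager guards earlier in this patch consult: once the eligible leader
// replicas feature is finalized, broker-level changes to min.insync.replicas and removal of the
// cluster-level value are rejected. A self-contained sketch of that rule with hypothetical names;
// the real code works on ConfigRecord instances rather than raw strings.
class MinIsrGuardSketch {
    private static final String MIN_ISR = "min.insync.replicas";

    // resourceName is "" for the cluster-level BROKER resource and a broker id otherwise;
    // newValue == null means the configuration is being deleted.
    static boolean isDisallowed(boolean elrEnabled, String resourceName, String name, String newValue) {
        if (!elrEnabled || !MIN_ISR.equals(name)) {
            return false;
        }
        boolean brokerLevelChange = !resourceName.isEmpty();
        boolean clusterLevelRemoval = resourceName.isEmpty() && newValue == null;
        return brokerLevelChange || clusterLevelRemoval;
    }

    public static void main(String[] args) {
        System.out.println(isDisallowed(true, "1", MIN_ISR, "2"));  // true: broker-level change rejected
        System.out.println(isDisallowed(true, "", MIN_ISR, "2"));   // false: cluster-level update allowed
        System.out.println(isDisallowed(true, "", MIN_ISR, null));  // true: cluster-level removal rejected
    }
}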
if (newVersion.isLessThan(minimumBootstrapVersion)) { @@ -396,6 +378,15 @@ FinalizedControllerFeatures finalizedFeatures(long epoch) { return new FinalizedControllerFeatures(features, epoch); } + FinalizedControllerFeatures latestFinalizedFeatures() { + Map features = new HashMap<>(); + features.put(MetadataVersion.FEATURE_NAME, metadataVersion.get().featureLevel()); + for (Entry entry : finalizedVersions.entrySet()) { + features.put(entry.getKey(), entry.getValue()); + } + return new FinalizedControllerFeatures(features, -1); + } + public void replay(FeatureLevelRecord record) { VersionRange range = quorumFeatures.localSupportedFeature(record.name()); if (!range.contains(record.featureLevel())) { @@ -418,20 +409,12 @@ public void replay(FeatureLevelRecord record) { } } - public void replay(ZkMigrationStateRecord record) { - ZkMigrationState newState = ZkMigrationState.of(record.zkMigrationState()); - ZkMigrationState previousState = migrationControlState.get(); - if (previousState.equals(newState)) { - log.debug("Replayed a ZkMigrationStateRecord which did not alter the state from {}.", - previousState); - } else { - migrationControlState.set(newState); - log.info("Replayed a ZkMigrationStateRecord changing the migration state from {} to {}.", - previousState, newState); - } - } - boolean isControllerId(int nodeId) { return quorumFeatures.isControllerId(nodeId); } + + boolean isElrFeatureEnabled() { + return latestFinalizedFeatures().versionOrDefault(EligibleLeaderReplicasVersion.FEATURE_NAME, (short) 0) >= + EligibleLeaderReplicasVersion.ELRV_1.featureLevel(); + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java index 842743520b020..51fdf42bb67c1 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java +++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java @@ -152,8 +152,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof PartitionReassignmentReplicas)) return false; - PartitionReassignmentReplicas other = (PartitionReassignmentReplicas) o; + if (!(o instanceof PartitionReassignmentReplicas other)) return false; return removing.equals(other.removing) && adding.equals(other.adding) && replicas.equals(other.replicas); diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentRevert.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentRevert.java index f2e0845cf5a38..036a24808531b 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentRevert.java +++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentRevert.java @@ -86,8 +86,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof PartitionReassignmentRevert)) return false; - PartitionReassignmentRevert other = (PartitionReassignmentRevert) o; + if (!(o instanceof PartitionReassignmentRevert other)) return false; return replicas.equals(other.replicas) && isr.equals(other.isr) && unclean == other.unclean; diff --git a/metadata/src/main/java/org/apache/kafka/controller/PeriodicTaskControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/PeriodicTaskControlManager.java index a184a0f4f04bf..821fa47df208d 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/PeriodicTaskControlManager.java +++ 
b/metadata/src/main/java/org/apache/kafka/controller/PeriodicTaskControlManager.java @@ -148,7 +148,7 @@ private PeriodicTaskControlManager( Time time, QueueAccessor queueAccessor ) { - this.log = logContext.logger(OffsetControlManager.class); + this.log = logContext.logger(PeriodicTaskControlManager.class); this.time = time; this.queueAccessor = queueAccessor; this.active = false; diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java index a541be68f3519..1da98d632a32a 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java @@ -82,7 +82,6 @@ import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; import org.apache.kafka.common.metadata.UserScramCredentialRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.quota.ClientQuotaAlteration; import org.apache.kafka.common.quota.ClientQuotaEntity; @@ -176,16 +175,21 @@ */ public final class QuorumController implements Controller { /** - * The maximum records that the controller will write in a single batch. + * The default maximum records that the controller will write in a single batch. */ - private static final int MAX_RECORDS_PER_BATCH = 10000; + private static final int DEFAULT_MAX_RECORDS_PER_BATCH = 10000; + + /** + * The default minimum event time that can be logged as a slow event. + */ + private static final int DEFAULT_MIN_SLOW_EVENT_TIME_MS = 200; /** * The maximum records any user-initiated operation is allowed to generate. * * For now, this is set to the maximum records in a single batch. */ - static final int MAX_RECORDS_PER_USER_OP = MAX_RECORDS_PER_BATCH; + static final int MAX_RECORDS_PER_USER_OP = DEFAULT_MAX_RECORDS_PER_BATCH; /** * A builder class which creates the QuorumController. 
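// --- Illustrative sketch, not part of the patch: bounding records per user operation ---
// MAX_RECORDS_PER_USER_OP caps how many metadata records one user-initiated operation may
// generate; the ELR safety-record path above collects its records through
// BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP). A hypothetical stand-in for that kind of
// bound, assuming the limit is enforced by rejecting the add; the real BoundedList in
// org.apache.kafka.server.mutable may differ in detail.
import java.util.ArrayList;
import java.util.List;

class BoundedRecordListSketch {
    static <T> List<T> newBounded(int maxSize) {
        return new ArrayList<>() {
            @Override
            public boolean add(T element) {
                if (size() >= maxSize) {
                    throw new IllegalStateException("Operation would generate more than " + maxSize + " records.");
                }
                return super.add(element);
            }
        };
    }

    public static void main(String[] args) {
        List<String> records = newBounded(2);
        records.add("record-1");
        records.add("record-2");
        try {
            records.add("record-3");
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}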
@@ -207,14 +211,16 @@ public static class Builder { private OptionalLong leaderImbalanceCheckIntervalNs = OptionalLong.empty(); private OptionalLong maxIdleIntervalNs = OptionalLong.empty(); private long sessionTimeoutNs = ClusterControlManager.DEFAULT_SESSION_TIMEOUT_NS; + private OptionalLong fenceStaleBrokerIntervalNs = OptionalLong.empty(); private QuorumControllerMetrics controllerMetrics = null; private Optional createTopicPolicy = Optional.empty(); private Optional alterConfigPolicy = Optional.empty(); private ConfigurationValidator configurationValidator = ConfigurationValidator.NO_OP; private Map staticConfig = Collections.emptyMap(); private BootstrapMetadata bootstrapMetadata = null; - private int maxRecordsPerBatch = MAX_RECORDS_PER_BATCH; - private boolean eligibleLeaderReplicasEnabled = false; + private int maxRecordsPerBatch = DEFAULT_MAX_RECORDS_PER_BATCH; + private long controllerPerformanceSamplePeriodMs = 60000L; + private long controllerPerformanceAlwaysLogThresholdMs = 2000L; private DelegationTokenCache tokenCache; private String tokenSecretKeyString; private long delegationTokenMaxLifeMs; @@ -302,6 +308,11 @@ public Builder setSessionTimeoutNs(long sessionTimeoutNs) { return this; } + public Builder setFenceStaleBrokerIntervalNs(long fenceStaleBrokerIntervalNs) { + this.fenceStaleBrokerIntervalNs = OptionalLong.of(fenceStaleBrokerIntervalNs); + return this; + } + public Builder setMetrics(QuorumControllerMetrics controllerMetrics) { this.controllerMetrics = controllerMetrics; return this; @@ -317,6 +328,16 @@ public Builder setMaxRecordsPerBatch(int maxRecordsPerBatch) { return this; } + public Builder setControllerPerformanceSamplePeriodMs(long controllerPerformanceSamplePeriodMs) { + this.controllerPerformanceSamplePeriodMs = controllerPerformanceSamplePeriodMs; + return this; + } + + public Builder setControllerPerformanceAlwaysLogThresholdMs(long controllerPerformanceAlwaysLogThresholdMs) { + this.controllerPerformanceAlwaysLogThresholdMs = controllerPerformanceAlwaysLogThresholdMs; + return this; + } + public Builder setCreateTopicPolicy(Optional createTopicPolicy) { this.createTopicPolicy = createTopicPolicy; return this; @@ -337,11 +358,6 @@ public Builder setStaticConfig(Map staticConfig) { return this; } - public Builder setEligibleLeaderReplicasEnabled(boolean eligibleLeaderReplicasEnabled) { - this.eligibleLeaderReplicasEnabled = eligibleLeaderReplicasEnabled; - return this; - } - public Builder setDelegationTokenCache(DelegationTokenCache tokenCache) { this.tokenCache = tokenCache; return this; @@ -420,6 +436,7 @@ public QuorumController build() throws Exception { leaderImbalanceCheckIntervalNs, maxIdleIntervalNs, sessionTimeoutNs, + fenceStaleBrokerIntervalNs, controllerMetrics, createTopicPolicy, alterConfigPolicy, @@ -432,9 +449,10 @@ public QuorumController build() throws Exception { delegationTokenMaxLifeMs, delegationTokenExpiryTimeMs, delegationTokenExpiryCheckIntervalMs, - eligibleLeaderReplicasEnabled, uncleanLeaderElectionCheckIntervalMs, - interBrokerListenerName + interBrokerListenerName, + controllerPerformanceSamplePeriodMs, + controllerPerformanceAlwaysLogThresholdMs ); } catch (Exception e) { Utils.closeQuietly(queue, "event queue"); @@ -525,6 +543,7 @@ private void handleEventEnd(String name, long startProcessingTimeNs) { long deltaNs = endProcessingTime - startProcessingTimeNs; log.debug("Processed {} in {} us", name, MICROSECONDS.convert(deltaNs, NANOSECONDS)); + performanceMonitor.observeEvent(name, deltaNs); 
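// --- Illustrative sketch, not part of the patch: feeding event durations to the monitor ---
// handleEventEnd, and now handleEventException as well, compute how long a controller event ran
// and hand the duration to EventPerformanceMonitor.observeEvent. A minimal version of that timing
// pattern with a hypothetical observer; the 2-second threshold mirrors the default
// controllerPerformanceAlwaysLogThresholdMs above.
import java.util.function.LongConsumer;

class EventTimingSketch {
    // Run the task and report its duration in nanoseconds, even if the task throws.
    static void timed(Runnable task, LongConsumer observeDurationNs) {
        long startNs = System.nanoTime();
        try {
            task.run();
        } finally {
            observeDurationNs.accept(System.nanoTime() - startNs);
        }
    }

    public static void main(String[] args) {
        long alwaysLogThresholdNs = 2_000_000_000L;
        timed(() -> { /* simulate handling a controller event */ },
            durationNs -> {
                if (durationNs >= alwaysLogThresholdNs) {
                    System.out.println("Exceptionally slow event took " + durationNs + " ns");
                }
            });
    }
}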
controllerMetrics.updateEventQueueProcessingTime(NANOSECONDS.toMillis(deltaNs)); } @@ -537,6 +556,8 @@ private Throwable handleEventException( if (startProcessingTimeNs.isPresent()) { long endProcessingTime = time.nanoseconds(); long deltaNs = endProcessingTime - startProcessingTimeNs.getAsLong(); + performanceMonitor.observeEvent(name, deltaNs); + controllerMetrics.updateEventQueueProcessingTime(NANOSECONDS.toMillis(deltaNs)); deltaUs = OptionalLong.of(MICROSECONDS.convert(deltaNs, NANOSECONDS)); } else { deltaUs = OptionalLong.empty(); @@ -1139,8 +1160,8 @@ public ControllerResult generateRecordsAndResult() { logReplayTracker.empty(), offsetControl.transactionStartOffset(), bootstrapMetadata, - featureControl.zkMigrationState(), - featureControl.metadataVersion()); + featureControl.metadataVersion(), + configurationControl.getStaticallyConfiguredMinInsyncReplicas()); } catch (Throwable t) { throw fatalFaultHandler.handleFault("exception while completing controller " + "activation", t); @@ -1257,7 +1278,9 @@ private void replay(ApiMessage message, Optional snapshotId, lon // NoOpRecord is an empty record and doesn't need to be replayed break; case ZK_MIGRATION_STATE_RECORD: - featureControl.replay((ZkMigrationStateRecord) message); + // In 4.0, although migration is no longer supported and ZK has been removed from Kafka, + // users might migrate from ZK to KRaft in version 3.x and then perform a rolling upgrade to 4.0. + // Therefore, this case needs to be retained but will be a no-op. break; case BEGIN_TRANSACTION_RECORD: offsetControl.replay((BeginTransactionRecord) message, offset); @@ -1431,37 +1454,11 @@ private void replay(ApiMessage message, Optional snapshotId, lon */ private volatile int curClaimEpoch; - /** - * How long to delay partition leader balancing operations. - */ - private final OptionalLong leaderImbalanceCheckIntervalNs; - - private enum ImbalanceSchedule { - // The leader balancing operation has been scheduled - SCHEDULED, - // If the leader balancing operation should be scheduled, schedule it with a delay - DEFERRED, - // If the leader balancing operation should be scheduled, schedule it immediately - IMMEDIATELY - } - - /** - * Tracks the scheduling state for partition leader balancing operations. - */ - private final ImbalanceSchedule imbalancedScheduled = ImbalanceSchedule.DEFERRED; - - /** - * Tracks the scheduling state for unclean leader election operations. - */ - private final ImbalanceSchedule uncleanScheduled = ImbalanceSchedule.DEFERRED; - /** * The bootstrap metadata to use for initialization if needed. */ private final BootstrapMetadata bootstrapMetadata; - private final boolean eligibleLeaderReplicasEnabled; - /** * The maximum number of records per batch to allow. */ @@ -1472,6 +1469,11 @@ private enum ImbalanceSchedule { */ private final RecordRedactor recordRedactor; + /** + * Monitors the performance of controller events and generates logs about it. 
+ */ + private final EventPerformanceMonitor performanceMonitor; + private QuorumController( FaultHandler nonFatalFaultHandler, FaultHandler fatalFaultHandler, @@ -1489,6 +1491,7 @@ private QuorumController( OptionalLong leaderImbalanceCheckIntervalNs, OptionalLong maxIdleIntervalNs, long sessionTimeoutNs, + OptionalLong fenceStaleBrokerIntervalNs, QuorumControllerMetrics controllerMetrics, Optional createTopicPolicy, Optional alterConfigPolicy, @@ -1501,9 +1504,10 @@ private QuorumController( long delegationTokenMaxLifeMs, long delegationTokenExpiryTimeMs, long delegationTokenExpiryCheckIntervalMs, - boolean eligibleLeaderReplicasEnabled, long uncleanLeaderElectionCheckIntervalMs, - String interBrokerListenerName + String interBrokerListenerName, + long controllerPerformanceSamplePeriodMs, + long controllerPerformanceAlwaysLogThresholdMs ) { this.nonFatalFaultHandler = nonFatalFaultHandler; this.fatalFaultHandler = fatalFaultHandler; @@ -1515,23 +1519,7 @@ private QuorumController( this.controllerMetrics = controllerMetrics; this.snapshotRegistry = new SnapshotRegistry(logContext); this.deferredEventQueue = new DeferredEventQueue(logContext); - this.offsetControl = new OffsetControlManager.Builder(). - setLogContext(logContext). - setSnapshotRegistry(snapshotRegistry). - setMetrics(controllerMetrics). - setTime(time). - build(); this.resourceExists = new ConfigResourceExistenceChecker(); - this.configurationControl = new ConfigurationControlManager.Builder(). - setLogContext(logContext). - setSnapshotRegistry(snapshotRegistry). - setKafkaConfigSchema(configSchema). - setExistenceChecker(resourceExists). - setAlterConfigPolicy(alterConfigPolicy). - setValidator(configurationValidator). - setStaticConfig(staticConfig). - setNodeId(nodeId). - build(); this.clientQuotaControlManager = new ClientQuotaControlManager.Builder(). setLogContext(logContext). setSnapshotRegistry(snapshotRegistry). @@ -1566,18 +1554,27 @@ private QuorumController( setBrokerUncleanShutdownHandler(this::handleUncleanBrokerShutdown). setInterBrokerListenerName(interBrokerListenerName). build(); + this.configurationControl = new ConfigurationControlManager.Builder(). + setLogContext(logContext). + setSnapshotRegistry(snapshotRegistry). + setKafkaConfigSchema(configSchema). + setExistenceChecker(resourceExists). + setAlterConfigPolicy(alterConfigPolicy). + setValidator(configurationValidator). + setStaticConfig(staticConfig). + setNodeId(nodeId). + setFeatureControl(featureControl). + build(); this.producerIdControlManager = new ProducerIdControlManager.Builder(). setLogContext(logContext). setSnapshotRegistry(snapshotRegistry). setClusterControlManager(clusterControl). build(); - this.leaderImbalanceCheckIntervalNs = leaderImbalanceCheckIntervalNs; this.replicationControl = new ReplicationControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setLogContext(logContext). setDefaultReplicationFactor(defaultReplicationFactor). setDefaultNumPartitions(defaultNumPartitions). - setEligibleLeaderReplicasEnabled(eligibleLeaderReplicasEnabled). setMaxElectionsPerImbalance(ReplicationControlManager.MAX_ELECTIONS_PER_IMBALANCE). setConfigurationControl(configurationControl). setClusterControl(clusterControl). @@ -1608,21 +1605,34 @@ private QuorumController( this.metaLogListener = new QuorumMetaLogListener(); this.curClaimEpoch = -1; this.recordRedactor = new RecordRedactor(configSchema); - this.eligibleLeaderReplicasEnabled = eligibleLeaderReplicasEnabled; + this.performanceMonitor = new EventPerformanceMonitor.Builder(). 
+ setLogContext(logContext). + setPeriodNs(TimeUnit.MILLISECONDS.toNanos(controllerPerformanceSamplePeriodMs)). + setAlwaysLogThresholdNs(TimeUnit.MILLISECONDS.toNanos(controllerPerformanceAlwaysLogThresholdMs)). + build(); if (maxIdleIntervalNs.isPresent()) { registerWriteNoOpRecord(maxIdleIntervalNs.getAsLong()); } - registerMaybeFenceStaleBroker(sessionTimeoutNs); + if (fenceStaleBrokerIntervalNs.isPresent()) { + registerMaybeFenceStaleBroker(fenceStaleBrokerIntervalNs.getAsLong()); + } else { + registerMaybeFenceStaleBroker(maybeFenceStaleBrokerPeriodNs(sessionTimeoutNs)); + } if (leaderImbalanceCheckIntervalNs.isPresent()) { registerElectPreferred(leaderImbalanceCheckIntervalNs.getAsLong()); } registerElectUnclean(TimeUnit.MILLISECONDS.toNanos(uncleanLeaderElectionCheckIntervalMs)); registerExpireDelegationTokens(MILLISECONDS.toNanos(delegationTokenExpiryCheckIntervalMs)); - - log.info("Creating new QuorumController with clusterId {}.{}", - clusterId, - eligibleLeaderReplicasEnabled ? " Eligible leader replicas enabled." : ""); - + registerGeneratePeriodicPerformanceMessage(); + // OffsetControlManager must be initialized last, because its constructor will take the + // initial in-memory snapshot of all extant timeline data structures. + this.offsetControl = new OffsetControlManager.Builder(). + setLogContext(logContext). + setSnapshotRegistry(snapshotRegistry). + setMetrics(controllerMetrics). + setTime(time). + build(); + log.info("Creating new QuorumController with clusterId {}", clusterId); this.raftClient.register(metaLogListener); } @@ -1667,12 +1677,12 @@ static long maybeFenceStaleBrokerPeriodNs(long sessionTimeoutNs) { * This task periodically checks to see if there is a stale broker that needs to * be fenced. It will only ever remove one stale broker at a time. * - * @param sessionTimeoutNs The broker session timeout in nanoseconds. + * @param fenceStaleBrokerIntervalNs The interval to check for stale brokers in nanoseconds */ - private void registerMaybeFenceStaleBroker(long sessionTimeoutNs) { + private void registerMaybeFenceStaleBroker(long fenceStaleBrokerIntervalNs) { periodicControl.registerTask(new PeriodicTask("maybeFenceStaleBroker", replicationControl::maybeFenceOneStaleBroker, - maybeFenceStaleBrokerPeriodNs(sessionTimeoutNs), + fenceStaleBrokerIntervalNs, EnumSet.noneOf(PeriodicTaskFlag.class))); } @@ -1706,6 +1716,21 @@ private void registerElectUnclean(long checkIntervalNs) { EnumSet.of(PeriodicTaskFlag.VERBOSE))); } + /** + * Register the generatePeriodicPerformanceMessage task. + * + * This task periodically logs some statistics about controller performance. + */ + private void registerGeneratePeriodicPerformanceMessage() { + periodicControl.registerTask(new PeriodicTask("generatePeriodicPerformanceMessage", + () -> { + performanceMonitor.generatePeriodicPerformanceMessage(); + return ControllerResult.of(Collections.emptyList(), false); + }, + performanceMonitor.periodNs(), + EnumSet.noneOf(PeriodicTaskFlag.class))); + } + /** * Register the delegation token expiration task. 
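// --- Illustrative sketch, not part of the patch: periodic performance logging ---
// registerGeneratePeriodicPerformanceMessage registers a task that runs once per monitor period,
// logs the summary, and produces no records. A rough analogue using a plain
// ScheduledExecutorService; the real PeriodicTaskControlManager schedules work on the controller
// event queue rather than a separate thread pool, and the default period is 60 seconds.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PeriodicSummarySketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        long periodMs = 1_000L; // shortened for the demo
        scheduler.scheduleAtFixedRate(
            () -> System.out.println("would log the periodic performance summary here"),
            periodMs, periodMs, TimeUnit.MILLISECONDS);
        Thread.sleep(3_500L); // let a few periods fire
        scheduler.shutdownNow();
    }
}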
* @@ -2045,7 +2070,7 @@ public CompletableFuture updateFeatures( upgradeTypes.put(featureName, FeatureUpdate.UpgradeType.fromCode(featureUpdate.upgradeType())); updates.put(featureName, featureUpdate.maxVersionLevel()); }); - return featureControl.updateFeatures(updates, upgradeTypes, request.validateOnly()); + return configurationControl.updateFeatures(updates, upgradeTypes, request.validateOnly()); }).thenApply(result -> { UpdateFeaturesResponseData responseData = new UpdateFeaturesResponseData(); diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java index 17ec3acd6a257..90017d7b75e74 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java @@ -17,9 +17,8 @@ package org.apache.kafka.controller; -import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.VersionRange; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.MetadataVersion; import java.util.ArrayList; @@ -55,14 +54,14 @@ public static Optional reasonNotSupported( return Optional.empty(); } - public static Map defaultFeatureMap(boolean enableUnstable) { + public static Map defaultSupportedFeatureMap(boolean enableUnstable) { Map features = new HashMap<>(1); features.put(MetadataVersion.FEATURE_NAME, VersionRange.of( MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), enableUnstable ? MetadataVersion.latestTesting().featureLevel() : MetadataVersion.latestProduction().featureLevel())); - for (Features feature : Features.PRODUCTION_FEATURES) { + for (Feature feature : Feature.PRODUCTION_FEATURES) { short maxVersion = enableUnstable ? 
feature.latestTesting() : feature.latestProduction(); if (maxVersion > 0) { features.put(feature.featureName(), VersionRange.of(feature.minimumProduction(), maxVersion)); @@ -110,27 +109,6 @@ public Optional reasonNotLocallySupported( localSupportedFeature(featureName)); } - public Optional reasonAllControllersZkMigrationNotReady( - MetadataVersion metadataVersion, - Map controllers - ) { - if (!metadataVersion.isMigrationSupported()) { - return Optional.of("The metadata.version too low at " + metadataVersion); - } else if (!metadataVersion.isControllerRegistrationSupported()) { - return Optional.empty(); - } - for (int quorumNodeId : quorumNodeIds) { - ControllerRegistration registration = controllers.get(quorumNodeId); - if (registration == null) { - return Optional.of("No registration found for controller " + quorumNodeId); - } else if (!registration.zkMigrationReady()) { - return Optional.of("Controller " + quorumNodeId + " has not enabled " + - "zookeeper.metadata.migration.enable"); - } - } - return Optional.empty(); - } - @Override public int hashCode() { return Objects.hash(nodeId, localSupportedFeatures, quorumNodeIds); diff --git a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java index 16cc762ebc56e..b2e232cfdab34 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java @@ -165,7 +165,6 @@ static class Builder { private ClusterControlManager clusterControl = null; private Optional createTopicPolicy = Optional.empty(); private FeatureControlManager featureControl = null; - private boolean eligibleLeaderReplicasEnabled = false; Builder setSnapshotRegistry(SnapshotRegistry snapshotRegistry) { this.snapshotRegistry = snapshotRegistry; @@ -187,11 +186,6 @@ Builder setDefaultNumPartitions(int defaultNumPartitions) { return this; } - Builder setEligibleLeaderReplicasEnabled(boolean eligibleLeaderReplicasEnabled) { - this.eligibleLeaderReplicasEnabled = eligibleLeaderReplicasEnabled; - return this; - } - Builder setMaxElectionsPerImbalance(int maxElectionsPerImbalance) { this.maxElectionsPerImbalance = maxElectionsPerImbalance; return this; @@ -233,7 +227,6 @@ ReplicationControlManager build() { defaultReplicationFactor, defaultNumPartitions, maxElectionsPerImbalance, - eligibleLeaderReplicasEnabled, configurationControl, clusterControl, createTopicPolicy, @@ -305,11 +298,6 @@ static Map translateCreationConfigs(CreatableTopicConfigCollecti */ private final int defaultNumPartitions; - /** - * True if eligible leader replicas is enabled. - */ - private final boolean eligibleLeaderReplicasEnabled; - /** * Maximum number of leader elections to perform during one partition leader balancing operation. 
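// --- Illustrative sketch, not part of the patch: building the supported-feature map ---
// defaultSupportedFeatureMap maps each production feature to the version range this controller
// supports, taking the latest testing level when unstable versions are enabled and the latest
// production level otherwise, and skipping features whose maximum is 0. A condensed sketch with
// made-up feature data; the real code iterates Feature.PRODUCTION_FEATURES and handles
// metadata.version separately.
import java.util.HashMap;
import java.util.Map;

class SupportedFeatureMapSketch {
    record VersionRange(short min, short max) { }

    // values: {minimumProduction, latestProduction, latestTesting}
    static Map<String, VersionRange> supportedFeatures(boolean enableUnstable) {
        Map<String, short[]> features = Map.of(
            "eligible.leader.replicas.version", new short[]{0, 1, 1},
            "some.unreleased.feature", new short[]{0, 0, 1});
        Map<String, VersionRange> result = new HashMap<>();
        for (Map.Entry<String, short[]> entry : features.entrySet()) {
            short max = enableUnstable ? entry.getValue()[2] : entry.getValue()[1];
            if (max > 0) {
                result.put(entry.getKey(), new VersionRange(entry.getValue()[0], max));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(supportedFeatures(false)); // unreleased feature excluded
        System.out.println(supportedFeatures(true));  // unreleased feature included at its testing level
    }
}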
*/ @@ -399,7 +387,6 @@ private ReplicationControlManager( short defaultReplicationFactor, int defaultNumPartitions, int maxElectionsPerImbalance, - boolean eligibleLeaderReplicasEnabled, ConfigurationControlManager configurationControl, ClusterControlManager clusterControl, Optional createTopicPolicy, @@ -410,7 +397,6 @@ private ReplicationControlManager( this.defaultReplicationFactor = defaultReplicationFactor; this.defaultNumPartitions = defaultNumPartitions; this.maxElectionsPerImbalance = maxElectionsPerImbalance; - this.eligibleLeaderReplicasEnabled = eligibleLeaderReplicasEnabled; this.configurationControl = configurationControl; this.createTopicPolicy = createTopicPolicy; this.featureControl = featureControl; @@ -1028,10 +1014,6 @@ TimelineHashSet imbalancedPartitions() { return imbalancedPartitions; } - boolean isElrEnabled() { - return eligibleLeaderReplicasEnabled && featureControl.metadataVersion().isElrSupported(); - } - ControllerResult alterPartition( ControllerRequestContext context, AlterPartitionRequestData request @@ -1097,7 +1079,7 @@ ControllerResult alterPartition( featureControl.metadataVersion(), getTopicEffectiveMinIsr(topic.name) ) - .setEligibleLeaderReplicasEnabled(isElrEnabled()); + .setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()); if (configurationControl.uncleanLeaderElectionEnabledForTopic(topic.name())) { builder.setElection(PartitionChangeBuilder.Election.UNCLEAN); } @@ -1434,11 +1416,17 @@ void handleBrokerInControlledShutdown(int brokerId, long brokerEpoch, List records) { - if (!featureControl.metadataVersion().isElrSupported()) return; - generateLeaderAndIsrUpdates("handleBrokerUncleanShutdown", NO_LEADER, NO_LEADER, brokerId, records, - brokersToIsrs.partitionsWithBrokerInIsr(brokerId)); - generateLeaderAndIsrUpdates("handleBrokerUncleanShutdown", NO_LEADER, NO_LEADER, brokerId, records, - brokersToElrs.partitionsWithBrokerInElr(brokerId)); + if (featureControl.metadataVersion().isElrSupported()) { + // ELR is enabled, generate unclean shutdown partition change records + generateLeaderAndIsrUpdates("handleBrokerUncleanShutdown", NO_LEADER, NO_LEADER, brokerId, records, + brokersToIsrs.partitionsWithBrokerInIsr(brokerId)); + generateLeaderAndIsrUpdates("handleBrokerUncleanShutdown", NO_LEADER, NO_LEADER, brokerId, records, + brokersToElrs.partitionsWithBrokerInElr(brokerId)); + } else { + // ELR is not enabled, handle the unclean shutdown as if the broker was fenced + generateLeaderAndIsrUpdates("handleBrokerUncleanShutdown", brokerId, NO_LEADER, NO_LEADER, records, + brokersToIsrs.partitionsWithBrokerInIsr(brokerId)); + } } /** @@ -1571,10 +1559,10 @@ ApiError electLeader(String topic, int partitionId, ElectionType electionType, getTopicEffectiveMinIsr(topic) ) .setElection(election) - .setEligibleLeaderReplicasEnabled(isElrEnabled()) + .setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()) .setDefaultDirProvider(clusterDescriber) .build(); - if (!record.isPresent()) { + if (record.isEmpty()) { if (electionType == ElectionType.PREFERRED) { return new ApiError(Errors.PREFERRED_LEADER_NOT_AVAILABLE); } else { @@ -1655,7 +1643,7 @@ public ControllerResult unregisterBroker(int brokerId) { ControllerResult maybeFenceOneStaleBroker() { BrokerHeartbeatManager heartbeatManager = clusterControl.heartbeatManager(); Optional idAndEpoch = heartbeatManager.tracker().maybeRemoveExpired(); - if (!idAndEpoch.isPresent()) { + if (idAndEpoch.isEmpty()) { log.debug("No stale brokers found."); return 
ControllerResult.of(Collections.emptyList(), false); } @@ -1735,7 +1723,7 @@ void maybeTriggerLeaderChangeForPartitionsWithoutPreferredLeader( getTopicEffectiveMinIsr(topic.name) ) .setElection(PartitionChangeBuilder.Election.PREFERRED) - .setEligibleLeaderReplicasEnabled(isElrEnabled()) + .setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()) .setDefaultDirProvider(clusterDescriber) .build().ifPresent(records::add); } @@ -1773,7 +1761,7 @@ void maybeTriggerUncleanLeaderElectionForLeaderlessPartitions( ApiError result = electLeader(topic.name, topicIdPartition.partitionId(), ElectionType.UNCLEAN, records); if (result.error().equals(Errors.NONE)) { - log.error("Triggering unclean leader election for offline partition {}-{}.", + log.info("Triggering unclean leader election for offline partition {}-{}.", topic.name, topicIdPartition.partitionId()); } else { log.warn("Cannot trigger unclean leader election for offline partition {}-{}: {}", @@ -2004,7 +1992,7 @@ void generateLeaderAndIsrUpdates(String context, featureControl.metadataVersion(), getTopicEffectiveMinIsr(topic.name) ); - builder.setEligibleLeaderReplicasEnabled(isElrEnabled()); + builder.setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()); if (configurationControl.uncleanLeaderElectionEnabledForTopic(topic.name)) { builder.setElection(PartitionChangeBuilder.Election.UNCLEAN); } @@ -2122,7 +2110,7 @@ Optional cancelPartitionReassignment(String topicName, featureControl.metadataVersion(), getTopicEffectiveMinIsr(topicName) ); - builder.setEligibleLeaderReplicasEnabled(isElrEnabled()); + builder.setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()); if (configurationControl.uncleanLeaderElectionEnabledForTopic(topicName)) { builder.setElection(PartitionChangeBuilder.Election.UNCLEAN); } @@ -2183,7 +2171,7 @@ Optional changePartitionReassignment(TopicIdPartition tp, featureControl.metadataVersion(), getTopicEffectiveMinIsr(topics.get(tp.topicId()).name) ); - builder.setEligibleLeaderReplicasEnabled(isElrEnabled()); + builder.setEligibleLeaderReplicasEnabled(featureControl.isElrFeatureEnabled()); if (!reassignment.replicas().equals(currentReplicas)) { builder.setTargetReplicas(reassignment.replicas()); } diff --git a/metadata/src/main/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsPublisher.java b/metadata/src/main/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsPublisher.java index 7459fe657af1b..c4aec11079362 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsPublisher.java +++ b/metadata/src/main/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsPublisher.java @@ -115,9 +115,6 @@ private void publishDelta(MetadataDelta delta) { } } changes.apply(metrics); - if (delta.featuresDelta() != null) { - delta.featuresDelta().getZkMigrationStateChange().ifPresent(state -> metrics.setZkMigrationState(state.value())); - } } private void publishSnapshot(MetadataImage newImage) { @@ -156,7 +153,6 @@ private void publishSnapshot(MetadataImage newImage) { metrics.setGlobalPartitionCount(totalPartitions); metrics.setOfflinePartitionCount(offlinePartitions); metrics.setPreferredReplicaImbalanceCount(partitionsWithoutPreferredLeader); - metrics.setZkMigrationState(newImage.features().zkMigrationState().value()); } @Override diff --git a/metadata/src/main/java/org/apache/kafka/image/AclsImage.java b/metadata/src/main/java/org/apache/kafka/image/AclsImage.java index 371abbbb35bbd..85489a1d2622a 100644 
--- a/metadata/src/main/java/org/apache/kafka/image/AclsImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/AclsImage.java @@ -69,8 +69,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof AclsImage)) return false; - AclsImage other = (AclsImage) o; + if (!(o instanceof AclsImage other)) return false; return acls.equals(other.acls); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ClientQuotaImage.java b/metadata/src/main/java/org/apache/kafka/image/ClientQuotaImage.java index 7ae96f83a0df0..00f56fef53de3 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ClientQuotaImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ClientQuotaImage.java @@ -102,8 +102,7 @@ public boolean isEmpty() { @Override public boolean equals(Object o) { - if (!(o instanceof ClientQuotaImage)) return false; - ClientQuotaImage other = (ClientQuotaImage) o; + if (!(o instanceof ClientQuotaImage other)) return false; return quotas.equals(other.quotas); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java b/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java index 0cea475b2b427..7ad86fda5e231 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java @@ -176,8 +176,7 @@ private static EntryData toDescribeEntry(ClientQuotaEntity entity, @Override public boolean equals(Object o) { - if (!(o instanceof ClientQuotasImage)) return false; - ClientQuotasImage other = (ClientQuotasImage) o; + if (!(o instanceof ClientQuotasImage other)) return false; return entities.equals(other.entities); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java index 17bb385bad8fb..2e8951526ba95 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java @@ -100,8 +100,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof ClusterImage)) return false; - ClusterImage other = (ClusterImage) o; + if (!(o instanceof ClusterImage other)) return false; return brokers.equals(other.brokers) && controllers.equals(other.controllers); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java b/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java index 0e10579f0fbf6..b4227b3df6599 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java @@ -85,8 +85,7 @@ public void write( @Override public boolean equals(Object o) { - if (!(o instanceof ConfigurationImage)) return false; - ConfigurationImage other = (ConfigurationImage) o; + if (!(o instanceof ConfigurationImage other)) return false; return data.equals(other.data); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ConfigurationsImage.java b/metadata/src/main/java/org/apache/kafka/image/ConfigurationsImage.java index 71f9def40a5cb..20df483fecc41 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ConfigurationsImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ConfigurationsImage.java @@ -84,8 +84,7 @@ public void write(ImageWriter writer, ImageWriterOptions options) { @Override public boolean equals(Object o) { - if (!(o instanceof ConfigurationsImage)) return false; - ConfigurationsImage other = 
(ConfigurationsImage) o; + if (!(o instanceof ConfigurationsImage other)) return false; return data.equals(other.data); } diff --git a/metadata/src/main/java/org/apache/kafka/image/FeaturesDelta.java b/metadata/src/main/java/org/apache/kafka/image/FeaturesDelta.java index 66e371835fa93..587e42d7c989d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/FeaturesDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/FeaturesDelta.java @@ -18,8 +18,6 @@ package org.apache.kafka.image; import org.apache.kafka.common.metadata.FeatureLevelRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.MetadataVersion; import java.util.HashMap; @@ -38,8 +36,6 @@ public final class FeaturesDelta { private MetadataVersion metadataVersionChange = null; - private ZkMigrationState zkMigrationStateChange = null; - public FeaturesDelta(FeaturesImage image) { this.image = image; } @@ -48,10 +44,6 @@ public Map> changes() { return changes; } - public Optional getZkMigrationStateChange() { - return Optional.ofNullable(zkMigrationStateChange); - } - public Optional metadataVersionChange() { return Optional.ofNullable(metadataVersionChange); } @@ -76,10 +68,6 @@ public void replay(FeatureLevelRecord record) { } } - public void replay(ZkMigrationStateRecord record) { - this.zkMigrationStateChange = ZkMigrationState.of(record.zkMigrationState()); - } - public FeaturesImage apply() { Map newFinalizedVersions = new HashMap<>(image.finalizedVersions().size()); @@ -109,13 +97,7 @@ public FeaturesImage apply() { metadataVersion = metadataVersionChange; } - final ZkMigrationState zkMigrationState; - if (zkMigrationStateChange == null) { - zkMigrationState = image.zkMigrationState(); - } else { - zkMigrationState = zkMigrationStateChange; - } - return new FeaturesImage(newFinalizedVersions, metadataVersion, zkMigrationState); + return new FeaturesImage(newFinalizedVersions, metadataVersion); } @Override @@ -123,7 +105,6 @@ public String toString() { return "FeaturesDelta(" + "changes=" + changes + ", metadataVersionChange=" + metadataVersionChange + - ", zkMigrationStateChange=" + zkMigrationStateChange + ')'; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/FeaturesImage.java b/metadata/src/main/java/org/apache/kafka/image/FeaturesImage.java index cbdb5c6489ac0..eba2d3c26afd2 100644 --- a/metadata/src/main/java/org/apache/kafka/image/FeaturesImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/FeaturesImage.java @@ -21,7 +21,6 @@ import org.apache.kafka.image.node.FeaturesImageNode; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.MetadataVersion; import java.util.ArrayList; @@ -41,30 +40,23 @@ public final class FeaturesImage { public static final FeaturesImage EMPTY = new FeaturesImage( Collections.emptyMap(), - MetadataVersion.MINIMUM_KRAFT_VERSION, - ZkMigrationState.NONE + MetadataVersion.MINIMUM_KRAFT_VERSION ); private final Map finalizedVersions; private final MetadataVersion metadataVersion; - private final ZkMigrationState zkMigrationState; - public FeaturesImage( Map finalizedVersions, - MetadataVersion metadataVersion, - ZkMigrationState zkMigrationState - ) { + MetadataVersion metadataVersion) { this.finalizedVersions = Collections.unmodifiableMap(finalizedVersions); this.metadataVersion = 
metadataVersion; - this.zkMigrationState = zkMigrationState; } public boolean isEmpty() { return finalizedVersions.isEmpty() && - metadataVersion.equals(MetadataVersion.MINIMUM_KRAFT_VERSION) && - zkMigrationState.equals(ZkMigrationState.NONE); + metadataVersion.equals(MetadataVersion.MINIMUM_KRAFT_VERSION); } public MetadataVersion metadataVersion() { @@ -75,10 +67,6 @@ public Map finalizedVersions() { return finalizedVersions; } - public ZkMigrationState zkMigrationState() { - return zkMigrationState; - } - private Optional finalizedVersion(String feature) { return Optional.ofNullable(finalizedVersions.get(feature)); } @@ -89,14 +77,6 @@ public void write(ImageWriter writer, ImageWriterOptions options) { } else { writeFeatureLevels(writer, options); } - - if (options.metadataVersion().isMigrationSupported()) { - writer.write(0, zkMigrationState.toRecord().message()); - } else { - if (!zkMigrationState.equals(ZkMigrationState.NONE)) { - options.handleLoss("the ZK Migration state which was " + zkMigrationState); - } - } } private void handleFeatureLevelNotSupported(ImageWriterOptions options) { @@ -131,16 +111,14 @@ private void writeFeatureLevels(ImageWriter writer, ImageWriterOptions options) @Override public int hashCode() { - return Objects.hash(finalizedVersions, metadataVersion, zkMigrationState); + return Objects.hash(finalizedVersions, metadataVersion); } @Override public boolean equals(Object o) { - if (!(o instanceof FeaturesImage)) return false; - FeaturesImage other = (FeaturesImage) o; + if (!(o instanceof FeaturesImage other)) return false; return finalizedVersions.equals(other.finalizedVersions) && - metadataVersion.equals(other.metadataVersion) && - zkMigrationState.equals(other.zkMigrationState); + metadataVersion.equals(other.metadataVersion); } @Override diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index b4120ad8595c9..ae021a6f2fb4a 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -38,7 +38,6 @@ import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; import org.apache.kafka.common.metadata.UserScramCredentialRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.server.common.MetadataVersion; @@ -247,7 +246,9 @@ public void replay(ApiMessage record) { */ break; case ZK_MIGRATION_STATE_RECORD: - replay((ZkMigrationStateRecord) record); + // In 4.0, although migration is no longer supported and ZK has been removed from Kafka, + // users might migrate from ZK to KRaft in version 3.x and then perform a rolling upgrade to 4.0. + // Therefore, this case needs to be retained but will be a no-op. 
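// --- Illustrative sketch, not part of the patch: the pattern-matching equals idiom ---
// Many equals() implementations in this patch switch to the Java 16+ pattern-matching form of
// instanceof, which binds the cast in the check itself and removes the separate cast line. A
// minimal example of the idiom on a hypothetical value class.
import java.util.Objects;

class PatternMatchEqualsSketch {
    static final class Image {
        private final String name;

        Image(String name) {
            this.name = name;
        }

        @Override
        public boolean equals(Object o) {
            // Previously: if (!(o instanceof Image)) return false; Image other = (Image) o;
            if (!(o instanceof Image other)) return false;
            return name.equals(other.name);
        }

        @Override
        public int hashCode() {
            return Objects.hash(name);
        }
    }

    public static void main(String[] args) {
        System.out.println(new Image("acls").equals(new Image("acls"))); // true
        System.out.println(new Image("acls").equals("acls"));            // false
    }
}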
break; case REGISTER_CONTROLLER_RECORD: replay((RegisterControllerRecord) record); @@ -345,10 +346,6 @@ public void replay(RemoveUserScramCredentialRecord record) { getOrCreateScramDelta().replay(record); } - public void replay(ZkMigrationStateRecord record) { - getOrCreateFeaturesDelta().replay(record); - } - public void replay(RegisterControllerRecord record) { getOrCreateClusterDelta().replay(record); } diff --git a/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java b/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java index 36a1f49048dad..8b7402ed98ef6 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java @@ -61,8 +61,7 @@ public boolean isEmpty() { @Override public boolean equals(Object o) { - if (!(o instanceof ProducerIdsImage)) return false; - ProducerIdsImage other = (ProducerIdsImage) o; + if (!(o instanceof ProducerIdsImage other)) return false; return nextProducerId == other.nextProducerId; } diff --git a/metadata/src/main/java/org/apache/kafka/image/TopicImage.java b/metadata/src/main/java/org/apache/kafka/image/TopicImage.java index eaf1f5e1a8505..48c71b7da892b 100644 --- a/metadata/src/main/java/org/apache/kafka/image/TopicImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/TopicImage.java @@ -74,8 +74,7 @@ public void write(ImageWriter writer, ImageWriterOptions options) { @Override public boolean equals(Object o) { - if (!(o instanceof TopicImage)) return false; - TopicImage other = (TopicImage) o; + if (!(o instanceof TopicImage other)) return false; return name.equals(other.name) && id.equals(other.id) && partitions.equals(other.partitions); diff --git a/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java b/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java index 5ac5331973779..21dba62576ae4 100644 --- a/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java @@ -87,8 +87,7 @@ public void write(ImageWriter writer, ImageWriterOptions options) { @Override public boolean equals(Object o) { - if (!(o instanceof TopicsImage)) return false; - TopicsImage other = (TopicsImage) o; + if (!(o instanceof TopicsImage other)) return false; return topicsById.equals(other.topicsById) && topicsByName.equals(other.topicsByName); } diff --git a/metadata/src/main/java/org/apache/kafka/image/node/FeaturesImageNode.java b/metadata/src/main/java/org/apache/kafka/image/node/FeaturesImageNode.java index 8882c7fe42502..286e31dba0a15 100644 --- a/metadata/src/main/java/org/apache/kafka/image/node/FeaturesImageNode.java +++ b/metadata/src/main/java/org/apache/kafka/image/node/FeaturesImageNode.java @@ -68,8 +68,6 @@ public Collection childNames() { public MetadataNode child(String name) { if (name.equals(METADATA_VERSION)) { return new MetadataLeafNode(image.metadataVersion().toString()); - } else if (name.equals(ZK_MIGRATION_STATE)) { - return new MetadataLeafNode(image.zkMigrationState().toString()); } else if (name.startsWith(FINALIZED_PREFIX)) { String key = name.substring(FINALIZED_PREFIX.length()); return new MetadataLeafNode( diff --git a/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java b/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java index b13598d47380b..42b9ee21c3b62 100644 --- a/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java +++ 
b/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java @@ -20,8 +20,6 @@ import org.apache.kafka.image.MetadataImage; import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.function.Function; @@ -37,22 +35,18 @@ public class MetadataImageNode implements MetadataNode { */ private final MetadataImage image; - private static final Map> CHILDREN; - - static { - Map> children = new HashMap<>(); - children.put(ProvenanceNode.NAME, image -> new ProvenanceNode(image.provenance())); - children.put(FeaturesImageNode.NAME, image -> new FeaturesImageNode(image.features())); - children.put(ClusterImageNode.NAME, image -> new ClusterImageNode(image.cluster())); - children.put(TopicsImageNode.NAME, image -> new TopicsImageNode(image.topics())); - children.put(ConfigurationsImageNode.NAME, image -> new ConfigurationsImageNode(image.configs())); - children.put(ClientQuotasImageNode.NAME, image -> new ClientQuotasImageNode(image.clientQuotas())); - children.put(ProducerIdsImageNode.NAME, image -> new ProducerIdsImageNode(image.producerIds())); - children.put(AclsImageNode.NAME, image -> new AclsImageByIdNode(image.acls())); - children.put(ScramImageNode.NAME, image -> new ScramImageNode(image.scram())); - children.put(DelegationTokenImageNode.NAME, image -> new DelegationTokenImageNode(image.delegationTokens())); - CHILDREN = Collections.unmodifiableMap(children); - } + private static final Map> CHILDREN = Map.of( + ProvenanceNode.NAME, image -> new ProvenanceNode(image.provenance()), + FeaturesImageNode.NAME, image -> new FeaturesImageNode(image.features()), + ClusterImageNode.NAME, image -> new ClusterImageNode(image.cluster()), + TopicsImageNode.NAME, image -> new TopicsImageNode(image.topics()), + ConfigurationsImageNode.NAME, image -> new ConfigurationsImageNode(image.configs()), + ClientQuotasImageNode.NAME, image -> new ClientQuotasImageNode(image.clientQuotas()), + ProducerIdsImageNode.NAME, image -> new ProducerIdsImageNode(image.producerIds()), + AclsImageNode.NAME, image -> new AclsImageByIdNode(image.acls()), + ScramImageNode.NAME, image -> new ScramImageNode(image.scram()), + DelegationTokenImageNode.NAME, image -> new DelegationTokenImageNode(image.delegationTokens()) + ); public MetadataImageNode(MetadataImage image) { this.image = image; diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java index c936601e9d21e..9380bccbcfdd4 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java @@ -74,8 +74,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof BrokerHeartbeatReply)) return false; - BrokerHeartbeatReply other = (BrokerHeartbeatReply) o; + if (!(o instanceof BrokerHeartbeatReply other)) return false; return other.isCaughtUp == isCaughtUp && other.isFenced == isFenced && other.inControlledShutdown == inControlledShutdown && diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java index 2bd77f06e1134..7ad24f15b1d20 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java @@ -233,7 +233,7 @@ public Optional node(String listenerName) { if 
(endpoint == null) { return Optional.empty(); } - return Optional.of(new Node(id, endpoint.host(), endpoint.port(), rack.orElse(null))); + return Optional.of(new Node(id, endpoint.host(), endpoint.port(), rack.orElse(null), fenced)); } public Map supportedFeatures() { @@ -342,8 +342,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof BrokerRegistration)) return false; - BrokerRegistration other = (BrokerRegistration) o; + if (!(o instanceof BrokerRegistration other)) return false; return other.id == id && other.epoch == epoch && other.incarnationId.equals(incarnationId) && diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java index 40678edf644b6..cfd86e67ab5cb 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java @@ -38,8 +38,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof BrokerRegistrationReply)) return false; - BrokerRegistrationReply other = (BrokerRegistrationReply) o; + if (!(o instanceof BrokerRegistrationReply other)) return false; return other.epoch == epoch; } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java index eded4f7ef765b..2e2c4889dffa8 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java @@ -203,8 +203,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof ControllerRegistration)) return false; - ControllerRegistration other = (ControllerRegistration) o; + if (!(o instanceof ControllerRegistration other)) return false; return other.id == id && other.incarnationId.equals(incarnationId) && other.zkMigrationReady == zkMigrationReady && diff --git a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java index 8e240d9f8c704..0a5af620f2a37 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java @@ -63,8 +63,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof FinalizedControllerFeatures)) return false; - FinalizedControllerFeatures other = (FinalizedControllerFeatures) o; + if (!(o instanceof FinalizedControllerFeatures other)) return false; return featureMap.equals(other.featureMap) && epoch == other.epoch; } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/KafkaConfigSchema.java b/metadata/src/main/java/org/apache/kafka/metadata/KafkaConfigSchema.java index d2d2521c1289c..3c00390f71f5c 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/KafkaConfigSchema.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/KafkaConfigSchema.java @@ -30,10 +30,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Function; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; +import static org.apache.kafka.common.config.TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG; /** @@ -224,6 +226,24 @@ 
public ConfigEntry resolveEffectiveTopicConfig( ConfigSource.DEFAULT_CONFIG, Function.identity()); } + public String getStaticOrDefaultConfig( + String configName, + Map staticNodeConfig + ) { + ConfigDef configDef = configDefs.getOrDefault(ConfigResource.Type.BROKER, EMPTY_CONFIG_DEF); + ConfigDef.ConfigKey configKey = configDef.configKeys().get(configName); + if (configKey == null) return null; + List synonyms = logConfigSynonyms.getOrDefault(configKey.name, emptyList()); + for (ConfigSynonym synonym : synonyms) { + if (staticNodeConfig.containsKey(synonym.name())) { + return toConfigEntry(configKey, staticNodeConfig.get(synonym.name()), + ConfigSource.STATIC_BROKER_CONFIG, synonym.converter()).value(); + } + } + return toConfigEntry(configKey, configKey.hasDefault() ? configKey.defaultValue : null, + ConfigSource.DEFAULT_CONFIG, Function.identity()).value(); + } + private ConfigEntry toConfigEntry(ConfigDef.ConfigKey configKey, Object value, ConfigSource source, @@ -262,4 +282,12 @@ private ConfigEntry toConfigEntry(ConfigDef.ConfigKey configKey, translateConfigType(configKey.type()), configKey.documentation); } + + public int getStaticallyConfiguredMinInsyncReplicas(Map staticNodeConfig) { + String minInsyncReplicasString = Objects.requireNonNull( + getStaticOrDefaultConfig(MIN_IN_SYNC_REPLICAS_CONFIG, staticNodeConfig)); + return (int) ConfigDef.parseType(MIN_IN_SYNC_REPLICAS_CONFIG, + minInsyncReplicasString, + ConfigDef.Type.INT); + } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java index 59c3763ca7205..f17b14d37124e 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/PartitionRegistration.java @@ -432,8 +432,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof PartitionRegistration)) return false; - PartitionRegistration other = (PartitionRegistration) o; + if (!(o instanceof PartitionRegistration other)) return false; return Arrays.equals(replicas, other.replicas) && Arrays.equals(directories, other.directories) && Arrays.equals(isr, other.isr) && diff --git a/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java b/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java index 18c37db8e5054..b9ced14c48816 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java @@ -70,8 +70,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof VersionRange)) return false; - VersionRange other = (VersionRange) o; + if (!(o instanceof VersionRange other)) return false; return other.min == min && other.max == max; } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadata.java b/metadata/src/main/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadata.java index 2dc6d9a6eaf58..1dd6beedeafd5 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadata.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadata.java @@ -97,8 +97,7 @@ public static BootstrapMetadata fromRecords(List records, } public static Optional recordToMetadataVersion(ApiMessage record) { - if (record instanceof FeatureLevelRecord) { - FeatureLevelRecord featureLevel = (FeatureLevelRecord) record; + if (record instanceof 
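The KafkaConfigSchema methods added above resolve a broker config from static node configuration by walking the registered synonyms first and falling back to the ConfigDef default, with getStaticallyConfiguredMinInsyncReplicas() layering an int parse on top for min.insync.replicas. A minimal usage sketch follows; the generic type parameters were lost in this rendering of the patch, so the Map<String, Object> signature, the helper class, and the map contents below are assumptions for illustration only, not part of the change:

    import java.util.Map;
    import org.apache.kafka.common.config.TopicConfig;
    import org.apache.kafka.metadata.KafkaConfigSchema;

    class MinIsrLookupSketch {
        // `schema` is assumed to be built with a BROKER ConfigDef that defines min.insync.replicas.
        static int effectiveMinIsr(KafkaConfigSchema schema) {
            // Static per-node configuration, e.g. values parsed from server.properties.
            Map<String, Object> staticNodeConfig = Map.of(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2");
            // String-level lookup: resolves "2" here; with an empty map it falls back to the
            // ConfigDef default, and it returns null if the config name is unknown to the schema.
            String resolved = schema.getStaticOrDefaultConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, staticNodeConfig);
            // Convenience wrapper that performs the same lookup and parses the result as an int (2 here).
            return schema.getStaticallyConfiguredMinInsyncReplicas(staticNodeConfig);
        }
    }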
FeatureLevelRecord featureLevel) { if (featureLevel.name().equals(MetadataVersion.FEATURE_NAME)) { return Optional.of(MetadataVersion.fromFeatureLevel(featureLevel.featureLevel())); } @@ -137,8 +136,7 @@ public String source() { public short featureLevel(String featureName) { short result = 0; for (ApiMessageAndVersion record : records) { - if (record.message() instanceof FeatureLevelRecord) { - FeatureLevelRecord message = (FeatureLevelRecord) record.message(); + if (record.message() instanceof FeatureLevelRecord message) { if (message.name().equals(featureName)) { result = message.featureLevel(); } @@ -151,8 +149,7 @@ public BootstrapMetadata copyWithFeatureRecord(String featureName, short level) List newRecords = new ArrayList<>(); int i = 0; while (i < records.size()) { - if (records.get(i).message() instanceof FeatureLevelRecord) { - FeatureLevelRecord record = (FeatureLevelRecord) records.get(i).message(); + if (records.get(i).message() instanceof FeatureLevelRecord record) { if (record.name().equals(featureName)) { FeatureLevelRecord newRecord = record.duplicate(); newRecord.setFeatureLevel(level); diff --git a/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationLeadershipState.java b/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationLeadershipState.java deleted file mode 100644 index 15b8b789ae7ff..0000000000000 --- a/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationLeadershipState.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.metadata.migration; - -import org.apache.kafka.raft.OffsetAndEpoch; - -import java.util.Objects; - -/** - * Persistent state needed to recover an ongoing migration. This data is stored in ZooKeeper under the "/migration" - * ZNode and is recovered by the active KRaft controller following an election. The absence of this data in ZK indicates - * that no migration has been started. - */ -public class ZkMigrationLeadershipState { - /** - * A Kafka-internal constant used to indicate that the znode version is unknown. See ZkVersion.UnknownVersion. 
- */ - public static final int UNKNOWN_ZK_VERSION = -2; - - // Use -2 as sentinel for "unknown version" for ZK versions to avoid sending an actual -1 "any version" - // when doing ZK writes - public static final ZkMigrationLeadershipState EMPTY = - new ZkMigrationLeadershipState(-1, -1, -1, -1, -1, -2, -1, UNKNOWN_ZK_VERSION); - - private final int kraftControllerId; - - private final int kraftControllerEpoch; - - private final long kraftMetadataOffset; - - private final int kraftMetadataEpoch; - - private final long lastUpdatedTimeMs; - - private final int migrationZkVersion; - - private final int zkControllerEpoch; - - private final int zkControllerEpochZkVersion; - - - public ZkMigrationLeadershipState(int kraftControllerId, int kraftControllerEpoch, - long kraftMetadataOffset, int kraftMetadataEpoch, - long lastUpdatedTimeMs, int migrationZkVersion, - int zkControllerEpoch, int zkControllerEpochZkVersion) { - this.kraftControllerId = kraftControllerId; - this.kraftControllerEpoch = kraftControllerEpoch; - this.kraftMetadataOffset = kraftMetadataOffset; - this.kraftMetadataEpoch = kraftMetadataEpoch; - this.lastUpdatedTimeMs = lastUpdatedTimeMs; - this.migrationZkVersion = migrationZkVersion; - this.zkControllerEpoch = zkControllerEpoch; - this.zkControllerEpochZkVersion = zkControllerEpochZkVersion; - } - - public ZkMigrationLeadershipState withMigrationZkVersion(int zkVersion) { - return new ZkMigrationLeadershipState( - this.kraftControllerId, this.kraftControllerEpoch, this.kraftMetadataOffset, - this.kraftMetadataEpoch, this.lastUpdatedTimeMs, zkVersion, this.zkControllerEpoch, this.zkControllerEpochZkVersion); - } - - public ZkMigrationLeadershipState withZkController(int zkControllerEpoch, int zkControllerEpochZkVersion) { - return new ZkMigrationLeadershipState( - this.kraftControllerId, this.kraftControllerEpoch, this.kraftMetadataOffset, - this.kraftMetadataEpoch, this.lastUpdatedTimeMs, this.migrationZkVersion, zkControllerEpoch, zkControllerEpochZkVersion); - } - - public ZkMigrationLeadershipState withUnknownZkController() { - return withZkController(EMPTY.zkControllerEpoch, EMPTY.zkControllerEpochZkVersion); - } - - - public ZkMigrationLeadershipState withNewKRaftController(int controllerId, int controllerEpoch) { - return new ZkMigrationLeadershipState( - controllerId, controllerEpoch, this.kraftMetadataOffset, - this.kraftMetadataEpoch, this.lastUpdatedTimeMs, this.migrationZkVersion, this.zkControllerEpoch, this.zkControllerEpochZkVersion); - } - - public ZkMigrationLeadershipState withKRaftMetadataOffsetAndEpoch(long metadataOffset, - int metadataEpoch) { - return new ZkMigrationLeadershipState( - this.kraftControllerId, - this.kraftControllerEpoch, - metadataOffset, - metadataEpoch, - this.lastUpdatedTimeMs, - this.migrationZkVersion, - this.zkControllerEpoch, - this.zkControllerEpochZkVersion); - } - - public int kraftControllerId() { - return kraftControllerId; - } - - public int kraftControllerEpoch() { - return kraftControllerEpoch; - } - - public long kraftMetadataOffset() { - return kraftMetadataOffset; - } - - public int kraftMetadataEpoch() { - return kraftMetadataEpoch; - } - - public long lastUpdatedTimeMs() { - return lastUpdatedTimeMs; - } - - public int migrationZkVersion() { - return migrationZkVersion; - } - - public int zkControllerEpoch() { - return zkControllerEpoch; - } - - public int zkControllerEpochZkVersion() { - return zkControllerEpochZkVersion; - } - - public boolean initialZkMigrationComplete() { - return kraftMetadataOffset > 0; - } - - 
public OffsetAndEpoch offsetAndEpoch() { - return new OffsetAndEpoch(kraftMetadataOffset, kraftMetadataEpoch); - } - - public boolean loggableChangeSinceState(ZkMigrationLeadershipState other) { - if (other == null) { - return false; - } - if (this.equals(other)) { - return false; - } else { - // Did the controller change, or did we finish the migration? - return - this.kraftControllerId != other.kraftControllerId || - this.kraftControllerEpoch != other.kraftControllerEpoch || - (!other.initialZkMigrationComplete() && this.initialZkMigrationComplete()); - } - } - - @Override - public String toString() { - return "ZkMigrationLeadershipState{" + - "kraftControllerId=" + kraftControllerId + - ", kraftControllerEpoch=" + kraftControllerEpoch + - ", kraftMetadataOffset=" + kraftMetadataOffset + - ", kraftMetadataEpoch=" + kraftMetadataEpoch + - ", lastUpdatedTimeMs=" + lastUpdatedTimeMs + - ", migrationZkVersion=" + migrationZkVersion + - ", controllerZkEpoch=" + zkControllerEpoch + - ", controllerZkVersion=" + zkControllerEpochZkVersion + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ZkMigrationLeadershipState that = (ZkMigrationLeadershipState) o; - return kraftControllerId == that.kraftControllerId - && kraftControllerEpoch == that.kraftControllerEpoch - && kraftMetadataOffset == that.kraftMetadataOffset - && kraftMetadataEpoch == that.kraftMetadataEpoch - && lastUpdatedTimeMs == that.lastUpdatedTimeMs - && migrationZkVersion == that.migrationZkVersion - && zkControllerEpoch == that.zkControllerEpoch - && zkControllerEpochZkVersion == that.zkControllerEpochZkVersion; - } - - @Override - public int hashCode() { - return Objects.hash( - kraftControllerId, - kraftControllerEpoch, - kraftMetadataOffset, - kraftMetadataEpoch, - lastUpdatedTimeMs, - migrationZkVersion, - zkControllerEpoch, - zkControllerEpochZkVersion); - } -} diff --git a/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationState.java b/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationState.java deleted file mode 100644 index ff8ebd08b38a2..0000000000000 --- a/metadata/src/main/java/org/apache/kafka/metadata/migration/ZkMigrationState.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.metadata.migration; - -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; -import org.apache.kafka.server.common.ApiMessageAndVersion; - -import java.util.Optional; - -/** - * The cluster-wide ZooKeeper migration state. - *
<p>
              - * An enumeration of the possible states of the ZkMigrationState field in ZkMigrationStateRecord. - * This information is persisted in the metadata log and image. - * - * @see org.apache.kafka.common.metadata.ZkMigrationStateRecord - */ -public enum ZkMigrationState { - /** - * The cluster was created in KRaft mode. A cluster that was created in ZK mode can never attain - * this state; the endpoint of migration is POST_MIGRATION, instead. This value is also used as - * the default migration state in an empty metadata log. - */ - NONE((byte) 0), - - /** - * A KRaft controller has been elected with "zookeeper.metadata.migration.enable" set to "true". - * The controller is now awaiting the preconditions for starting the migration to KRaft. In this - * state, the metadata log does not yet contain the cluster's data. There is a metadata quorum, - * but it is not doing anything useful yet. - *
* <p>
              - * In Kafka 3.4, PRE_MIGRATION was written out as value 1 to the log, but no MIGRATION state - * was ever written. Since this would be an invalid log state in 3.5+, we have swapped the - * enum values for PRE_MIGRATION and MIGRATION. This allows us to handle the upgrade case - * from 3.4 without adding additional fields to the migration record. - */ - PRE_MIGRATION((byte) 2), - - /** - * The ZK data has been migrated, and the KRaft controller is now writing metadata to both ZK - * and the metadata log. The controller will remain in this state until all the brokers have - * been restarted in KRaft mode. - */ - MIGRATION((byte) 1), - - /** - * The migration from ZK has been fully completed. The cluster is running in KRaft mode. This state - * will persist indefinitely after the migration. In operational terms, this is the same as the NONE - * state. - */ - POST_MIGRATION((byte) 3), - - /** - * The controller is a ZK controller. No migration has been performed. This state is never persisted - * and is only used by KafkaController in order to have a unified metric that indicates what kind of - * metadata state the controller is in. - */ - ZK((byte) 4); - - private final byte value; - - ZkMigrationState(byte value) { - this.value = value; - } - - public byte value() { - return value; - } - - public ApiMessageAndVersion toRecord() { - return new ApiMessageAndVersion( - new ZkMigrationStateRecord().setZkMigrationState(value()), - (short) 0 - ); - } - - public static ZkMigrationState of(byte value) { - return optionalOf(value) - .orElseThrow(() -> new IllegalArgumentException(String.format("Value %s is not a valid Zk migration state", value))); - } - - public static Optional optionalOf(byte value) { - for (ZkMigrationState state : ZkMigrationState.values()) { - if (state.value == value) { - return Optional.of(state); - } - } - return Optional.empty(); - } - - public boolean inProgress() { - return this == PRE_MIGRATION || this == MIGRATION; - } -} diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/PartitionAssignment.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/PartitionAssignment.java index a7012d1505c03..cefba273b25fd 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/placement/PartitionAssignment.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/placement/PartitionAssignment.java @@ -19,9 +19,6 @@ import org.apache.kafka.common.Uuid; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -39,12 +36,8 @@ public class PartitionAssignment { private final List directories; public PartitionAssignment(List replicas, DefaultDirProvider defaultDirProvider) { - this.replicas = Collections.unmodifiableList(new ArrayList<>(replicas)); - Uuid[] directories = new Uuid[replicas.size()]; - for (int i = 0; i < directories.length; i++) { - directories[i] = defaultDirProvider.defaultDir(replicas.get(i)); - } - this.directories = Collections.unmodifiableList(Arrays.asList(directories)); + this.replicas = List.copyOf(replicas); + this.directories = replicas.stream().map(replica -> defaultDirProvider.defaultDir(replica)).toList(); } /** diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java index c5574819c0a25..88bdc5df96cc1 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java +++ 
b/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java @@ -17,8 +17,6 @@ package org.apache.kafka.metadata.placement; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -31,7 +29,7 @@ public class TopicAssignment { private final List assignments; public TopicAssignment(List assignments) { - this.assignments = Collections.unmodifiableList(new ArrayList<>(assignments)); + this.assignments = List.copyOf(assignments); } /** @@ -43,8 +41,7 @@ public List assignments() { @Override public boolean equals(Object o) { - if (!(o instanceof TopicAssignment)) return false; - TopicAssignment other = (TopicAssignment) o; + if (!(o instanceof TopicAssignment other)) return false; return assignments.equals(other.assignments); } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java index 75d16d7718b12..17f531d023f9c 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java @@ -54,8 +54,7 @@ public boolean fenced() { @Override public boolean equals(Object o) { - if (!(o instanceof UsableBroker)) return false; - UsableBroker other = (UsableBroker) o; + if (!(o instanceof UsableBroker other)) return false; return other.id == id && other.rack.equals(rack) && other.fenced == fenced; } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/storage/Formatter.java b/metadata/src/main/java/org/apache/kafka/metadata/storage/Formatter.java index 53013307149f2..79437d4da6d3c 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/storage/Formatter.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/storage/Formatter.java @@ -29,8 +29,8 @@ import org.apache.kafka.raft.KafkaRaftClient; import org.apache.kafka.raft.VoterSet; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.FeatureVersion; -import org.apache.kafka.server.common.Features; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.snapshot.FileRawSnapshotWriter; @@ -69,7 +69,7 @@ public class Formatter { /** * The features that are supported. */ - private List supportedFeatures = Features.PRODUCTION_FEATURES; + private List supportedFeatures = Feature.PRODUCTION_FEATURES; /** * The current node id. @@ -93,8 +93,10 @@ public class Formatter { /** * Maps feature names to the level they will start off with. + * + * Visible for testing. */ - private Map featureLevels = new TreeMap<>(); + protected Map featureLevels = new TreeMap<>(); /** * The bootstrap metadata used to format the cluster. @@ -130,13 +132,14 @@ public class Formatter { * The initial KIP-853 voters. 
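Several hunks in this patch (the CHILDREN map in MetadataImageNode, the replica and directory lists in PartitionAssignment and TopicAssignment above) replace hand-rolled unmodifiable wrappers with the JDK collection factories. The factories are slightly stricter than Collections.unmodifiableMap/unmodifiableList: they reject null keys and values, Map.of rejects duplicate keys, and List.copyOf can skip the defensive copy when its argument is already unmodifiable. A small self-contained sketch of the behavioural difference, using plain JDK types rather than the Kafka classes:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class CopyOfSketch {
        public static void main(String[] args) {
            List<String> withNull = new ArrayList<>();
            withNull.add(null);

            // The old idiom only prevents mutation through the returned view; null elements are allowed.
            List<String> wrapped = Collections.unmodifiableList(new ArrayList<>(withNull));
            System.out.println("wrapped = " + wrapped);

            try {
                List.copyOf(withNull); // the JDK factory rejects null elements eagerly
            } catch (NullPointerException e) {
                System.out.println("List.copyOf rejected the null element");
            }

            // When the input is already an unmodifiable list, List.copyOf typically avoids another copy
            // (an implementation optimisation; none of the refactored classes depend on it).
            List<String> alreadyImmutable = List.of("a", "b");
            System.out.println(List.copyOf(alreadyImmutable).equals(alreadyImmutable));
        }
    }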
*/ private Optional initialControllers = Optional.empty(); + private boolean noInitialControllersFlag = false; public Formatter setPrintStream(PrintStream printStream) { this.printStream = printStream; return this; } - public Formatter setSupportedFeatures(List supportedFeatures) { + public Formatter setSupportedFeatures(List supportedFeatures) { this.supportedFeatures = supportedFeatures; return this; } @@ -215,12 +218,17 @@ public Formatter setInitialControllers(DynamicVoters initialControllers) { return this; } + public Formatter setNoInitialControllersFlag(boolean noInitialControllersFlag) { + this.noInitialControllersFlag = noInitialControllersFlag; + return this; + } + public Optional initialVoters() { return initialControllers; } boolean hasDynamicQuorum() { - return initialControllers.isPresent(); + return initialControllers.isPresent() || noInitialControllersFlag; } public BootstrapMetadata bootstrapMetadata() { @@ -290,7 +298,7 @@ MetadataVersion verifyReleaseVersion(MetadataVersion metadataVersion) { } Map calculateEffectiveFeatureLevels() { - Map nameToSupportedFeature = new TreeMap<>(); + Map nameToSupportedFeature = new TreeMap<>(); supportedFeatures.forEach(feature -> nameToSupportedFeature.put(feature.featureName(), feature)); Map newFeatureLevels = new TreeMap<>(); // Verify that all specified features are known to us. @@ -313,7 +321,7 @@ Map calculateEffectiveFeatureLevels() { Optional.ofNullable(newFeatureLevels.get(KRaftVersion.FEATURE_NAME)))); } else if (!newFeatureLevels.containsKey(supportedFeature.featureName())) { newFeatureLevels.put(supportedFeature.featureName(), - supportedFeature.defaultValue(releaseVersion)); + supportedFeature.defaultLevel(releaseVersion)); } }); // Verify that the specified features support the given levels. This requires the full @@ -322,10 +330,10 @@ Map calculateEffectiveFeatureLevels() { String featureName = entry.getKey(); if (!featureName.equals(MetadataVersion.FEATURE_NAME)) { short level = entry.getValue(); - Features supportedFeature = nameToSupportedFeature.get(featureName); + Feature supportedFeature = nameToSupportedFeature.get(featureName); FeatureVersion featureVersion = supportedFeature.fromFeatureLevel(level, unstableFeatureVersionsEnabled); - Features.validateVersion(featureVersion, newFeatureLevels); + Feature.validateVersion(featureVersion, newFeatureLevels); } } return newFeatureLevels; diff --git a/metadata/src/main/resources/common/metadata/BrokerRegistrationChangeRecord.json b/metadata/src/main/resources/common/metadata/BrokerRegistrationChangeRecord.json index 2824c51fcf1c8..7a484a6aeb484 100644 --- a/metadata/src/main/resources/common/metadata/BrokerRegistrationChangeRecord.json +++ b/metadata/src/main/resources/common/metadata/BrokerRegistrationChangeRecord.json @@ -30,7 +30,7 @@ "about": "-1 if the broker has been unfenced, 0 if no change, 1 if the broker has been fenced." }, { "name": "InControlledShutdown", "type": "int8", "versions": "1+", "taggedVersions": "1+", "tag": 1, "about": "0 if no change, 1 if the broker is in controlled shutdown." }, - { "name": "LogDirs", "type": "[]uuid", "versions": "2+", "taggedVersions": "2+", "tag": "2", + { "name": "LogDirs", "type": "[]uuid", "versions": "2+", "taggedVersions": "2+", "tag": 2, "about": "Log directories configured in this broker which are available." 
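With the Formatter changes above, hasDynamicQuorum() now also reports true when the new no-initial-controllers flag is set, so a node can be formatted for a dynamic quorum without naming any initial voters. A rough sketch of a call site, assuming a no-arg Formatter constructor and using only the setters visible in this hunk; everything else here (the helper class and the CLI framing) is illustrative, not taken from the patch:

    import org.apache.kafka.metadata.storage.Formatter;
    import org.apache.kafka.server.common.Feature;

    class NoInitialControllersSketch {
        static Formatter dynamicQuorumFormatter() {
            // Hypothetical configuration step, e.g. driven by a "no initial controllers" style option.
            // hasDynamicQuorum() treats the quorum as dynamic even though initialVoters()
            // stays Optional.empty(), which previously required explicit initial controllers.
            return new Formatter()
                .setSupportedFeatures(Feature.PRODUCTION_FEATURES)
                .setNoInitialControllersFlag(true);
        }
    }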
} ] } diff --git a/metadata/src/main/resources/common/metadata/RegisterBrokerRecord.json b/metadata/src/main/resources/common/metadata/RegisterBrokerRecord.json index 42c1fa2fb9a27..48e5c466c69ff 100644 --- a/metadata/src/main/resources/common/metadata/RegisterBrokerRecord.json +++ b/metadata/src/main/resources/common/metadata/RegisterBrokerRecord.json @@ -57,7 +57,7 @@ "about": "True if the broker is fenced." }, { "name": "InControlledShutdown", "type": "bool", "versions": "1+", "default": "false", "about": "True if the broker is in controlled shutdown." }, - { "name": "LogDirs", "type": "[]uuid", "versions": "3+", "taggedVersions": "3+", "tag": "0", + { "name": "LogDirs", "type": "[]uuid", "versions": "3+", "taggedVersions": "3+", "tag": 0, "about": "Log directories configured in this broker which are available." } ] } diff --git a/metadata/src/main/resources/common/metadata/ZkMigrationRecord.json b/metadata/src/main/resources/common/metadata/ZkMigrationRecord.json index aaaed4f4a083f..7d7a61626dd06 100644 --- a/metadata/src/main/resources/common/metadata/ZkMigrationRecord.json +++ b/metadata/src/main/resources/common/metadata/ZkMigrationRecord.json @@ -23,7 +23,9 @@ // In 3.4, the defined values are: 0 (None), 1 (PreMigration), 2 (Migration), 3 (PostMigration). // In 3.5, the values for PreMigration and Migration were swapped: 0 (None), 2 (PreMigration), 1 (Migration), 3 (PostMigration). // This was done to work around the fact that we never wrote Migration or PostMigration records in 3.4 - // + // In 4.0, although migration is no longer supported and ZK has been removed from Kafka, + // users might migrate from ZK to KRaft in version 3.x and then perform a rolling upgrade to 4.0. + // Therefore, this generated code needs to be retained. "validVersions": "0", "flexibleVersions": "0+", "fields": [ diff --git a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java index 84143c8b3e13d..30210fe01575a 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java @@ -114,6 +114,34 @@ public void testValidateNewAcl() { getMessage()); } + /** + * Verify that validateNewAcl catches invalid ACLs with principals that do not contain a colon. + */ + @Test + public void testValidateAclWithBadPrincipal() { + assertEquals("Could not parse principal from `invalid` (no colon is present " + + "separating the principal type from the principal name)", + assertThrows(InvalidRequestException.class, () -> + AclControlManager.validateNewAcl(new AclBinding( + new ResourcePattern(TOPIC, "*", LITERAL), + new AccessControlEntry("invalid", "*", ALTER, ALLOW)))). + getMessage()); + } + + /** + * Verify that validateNewAcl catches invalid ACLs with principals that do not contain a colon. + */ + @Test + public void testValidateAclWithEmptyPrincipal() { + assertEquals("Could not parse principal from `` (no colon is present " + + "separating the principal type from the principal name)", + assertThrows(InvalidRequestException.class, () -> + AclControlManager.validateNewAcl(new AclBinding( + new ResourcePattern(TOPIC, "*", LITERAL), + new AccessControlEntry("", "*", ALTER, ALLOW)))). + getMessage()); + } + /** * Verify that validateFilter catches invalid filters. 
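For contrast with the two negative cases added above, a principal that AclControlManager.validateNewAcl accepts uses the type:name form, for example User:alice. A sketch mirroring the test setup and reusing its static imports; the principal value is illustrative and this assertion-free call is not part of the patch:

    // Parses cleanly: principal type "User", principal name "alice".
    AclControlManager.validateNewAcl(new AclBinding(
        new ResourcePattern(TOPIC, "*", LITERAL),
        new AccessControlEntry("User:alice", "*", ALTER, ALLOW)));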
*/ diff --git a/metadata/src/test/java/org/apache/kafka/controller/ActivationRecordsGeneratorTest.java b/metadata/src/test/java/org/apache/kafka/controller/ActivationRecordsGeneratorTest.java index 5f948a79885f0..1668b6bebe3f1 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ActivationRecordsGeneratorTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ActivationRecordsGeneratorTest.java @@ -17,17 +17,19 @@ package org.apache.kafka.controller; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.metadata.ConfigRecord; import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; -import org.apache.kafka.metadata.migration.ZkMigrationState; +import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * This class is for testing the log message or exception produced by ActivationRecordsGenerator. For tests that @@ -37,154 +39,82 @@ public class ActivationRecordsGeneratorTest { @Test public void testActivationMessageForEmptyLog() { - ControllerResult result; - result = ActivationRecordsGenerator.recordsForEmptyLog( + ControllerResult result = ActivationRecordsGenerator.recordsForEmptyLog( logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " + "Appending 1 bootstrap record(s) at metadata.version 3.0-IV1 from bootstrap source 'test'.", logMsg), -1L, BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, "test"), - MetadataVersion.MINIMUM_KRAFT_VERSION + MetadataVersion.MINIMUM_KRAFT_VERSION, + 2 ); assertTrue(result.isAtomic()); assertEquals(1, result.records().size()); + } - result = ActivationRecordsGenerator.recordsForEmptyLog( + @Test + public void testActivationMessageForEmptyLogAtMv3_4() { + ControllerResult result = ActivationRecordsGenerator.recordsForEmptyLog( logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " + "Appending 1 bootstrap record(s) at metadata.version 3.4-IV0 from bootstrap " + - "source 'test'. Setting the ZK migration state to NONE since this is a de-novo KRaft cluster.", logMsg), + "source 'test'.", logMsg), -1L, BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_4_IV0, "test"), - MetadataVersion.IBP_3_4_IV0 + MetadataVersion.IBP_3_4_IV0, + 2 ); assertTrue(result.isAtomic()); - assertEquals(2, result.records().size()); - + assertEquals(1, result.records().size()); + } - result = ActivationRecordsGenerator.recordsForEmptyLog( + @Test + public void testActivationMessageForEmptyLogAtMv3_6() { + ControllerResult result = ActivationRecordsGenerator.recordsForEmptyLog( logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " + "Appending 1 bootstrap record(s) in metadata transaction at metadata.version 3.6-IV1 from bootstrap " + - "source 'test'. 
Setting the ZK migration state to NONE since this is a de-novo KRaft cluster.", logMsg), + "source 'test'.", logMsg), -1L, BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"), - MetadataVersion.IBP_3_6_IV1 + MetadataVersion.IBP_3_6_IV1, + 2 ); assertFalse(result.isAtomic()); - assertEquals(4, result.records().size()); + assertEquals(3, result.records().size()); + } - result = ActivationRecordsGenerator.recordsForEmptyLog( + @Test + public void testActivationMessageForEmptyLogAtMv3_6WithTransaction() { + ControllerResult result = ActivationRecordsGenerator.recordsForEmptyLog( logMsg -> assertEquals("Performing controller activation. Aborting partial bootstrap records " + "transaction at offset 0. Re-appending 1 bootstrap record(s) in new metadata transaction at " + - "metadata.version 3.6-IV1 from bootstrap source 'test'. Setting the ZK migration state to NONE " + - "since this is a de-novo KRaft cluster.", logMsg), + "metadata.version 3.6-IV1 from bootstrap source 'test'.", logMsg), 0L, BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"), - MetadataVersion.IBP_3_6_IV1 + MetadataVersion.IBP_3_6_IV1, + 2 ); assertFalse(result.isAtomic()); - assertEquals(5, result.records().size()); - } - - @Test - public void testActivationMessageForNonEmptyLogNoMigrations() { - ControllerResult result; - - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation. No metadata.version feature level " + - "record was found in the log. Treating the log as version 3.0-IV1.", logMsg), - -1L, - ZkMigrationState.NONE, - MetadataVersion.MINIMUM_KRAFT_VERSION - ); - assertTrue(result.isAtomic()); - assertEquals(0, result.records().size()); - - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation.", logMsg), - -1L, - ZkMigrationState.NONE, - MetadataVersion.IBP_3_3_IV0 - ); - assertTrue(result.isAtomic()); - assertEquals(0, result.records().size()); - - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation. Loaded ZK migration state of NONE. " - + "This is expected because this is a de-novo KRaft cluster.", logMsg), - -1L, - ZkMigrationState.NONE, - MetadataVersion.IBP_3_4_IV0 - ); - assertTrue(result.isAtomic()); - assertEquals(0, result.records().size()); - - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation. Aborting in-progress metadata " + - "transaction at offset 42. Loaded ZK migration state of NONE. " + - "This is expected because this is a de-novo KRaft cluster.", logMsg), - 42L, - ZkMigrationState.NONE, - MetadataVersion.IBP_3_6_IV1 - ); - assertTrue(result.isAtomic()); - assertEquals(1, result.records().size()); - - assertEquals( - "Detected in-progress transaction at offset 42, but the metadata.version 3.6-IV0 does not support " + - "transactions. 
Cannot continue.", - assertThrows(RuntimeException.class, () -> - ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> fail(), - 42L, - ZkMigrationState.NONE, - MetadataVersion.IBP_3_6_IV0 - )).getMessage() - ); + assertEquals(4, result.records().size()); } @Test - public void testActivationMessageForNonEmptyLogWithMigrations() { - assertEquals( - "Should not have ZkMigrationState.MIGRATION on a cluster running metadata version 3.3-IV0.", - assertThrows(RuntimeException.class, () -> - ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> fail(), - -1L, - ZkMigrationState.MIGRATION, - MetadataVersion.IBP_3_3_IV0 - )).getMessage() - ); - - assertEquals( - "Cannot load ZkMigrationState.MIGRATION because ZK migration is no longer supported.", - assertThrows(RuntimeException.class, () -> - ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> fail(), - -1L, - ZkMigrationState.MIGRATION, - MetadataVersion.IBP_3_9_IV0 - ) - ).getMessage() - ); - - ControllerResult result; - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation. Loaded ZK migration state of " + - "POST_MIGRATION.", logMsg), - -1L, - ZkMigrationState.POST_MIGRATION, - MetadataVersion.IBP_3_4_IV0 - ); - assertTrue(result.isAtomic()); - assertEquals(0, result.records().size()); - - result = ActivationRecordsGenerator.recordsForNonEmptyLog( - logMsg -> assertEquals("Performing controller activation. Aborting in-progress metadata " + - "transaction at offset 42. Loaded ZK migration state of POST_MIGRATION.", logMsg), - 42L, - ZkMigrationState.POST_MIGRATION, - MetadataVersion.IBP_3_6_IV1 + public void testActivationMessageForEmptyLogAtMv3_6WithTransactionAndElr() { + ControllerResult result = ActivationRecordsGenerator.recordsForEmptyLog( + logMsg -> assertEquals("Performing controller activation. Aborting partial bootstrap records " + + "transaction at offset 0. Re-appending 2 bootstrap record(s) in new metadata transaction at " + + "metadata.version 4.0-IV1 from bootstrap source 'test'.", logMsg), + 0L, + BootstrapMetadata.fromVersion(MetadataVersion.IBP_4_0_IV1, "test").copyWithFeatureRecord( + EligibleLeaderReplicasVersion.FEATURE_NAME, + EligibleLeaderReplicasVersion.ELRV_1.featureLevel()), + MetadataVersion.IBP_4_0_IV1, + 2 ); - assertTrue(result.isAtomic()); - assertEquals(1, result.records().size()); + assertFalse(result.isAtomic()); + assertEquals(6, result.records().size()); + assertTrue(result.records().contains(new ApiMessageAndVersion(new ConfigRecord(). + setResourceType(ConfigResource.Type.BROKER.id()). + setResourceName(""). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG). + setValue("2"), (short) 0))); } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java index 0646d4aaa5647..e7d190339f44d 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java @@ -71,6 +71,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalLong; import java.util.stream.Stream; import static java.util.Arrays.asList; @@ -93,7 +94,7 @@ public void testReplay(MetadataVersion metadataVersion) { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). 
setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -154,7 +155,7 @@ public void testReplayRegisterBrokerRecord() { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -207,7 +208,7 @@ public void testReplayBrokerRegistrationChangeRecord() { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -262,7 +263,7 @@ public void testRegistrationWithIncorrectClusterId() { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -300,7 +301,7 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). setMetadataVersion(metadataVersion). build(); @@ -363,7 +364,7 @@ public void testUnregister() { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -402,7 +403,7 @@ public void testPlaceReplicas(int numUsableBrokers) { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). @@ -465,7 +466,7 @@ public void testRegistrationsToRecords(MetadataVersion metadataVersion) { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). setMetadataVersion(metadataVersion). 
build(); @@ -768,7 +769,8 @@ public void testRegisterWithDuplicateDirectoryId() { void registerNewBrokerWithDirs(ClusterControlManager clusterControl, int brokerId, List dirs) { BrokerRegistrationRequestData data = new BrokerRegistrationRequestData().setBrokerId(brokerId) .setClusterId(clusterControl.clusterId()) - .setIncarnationId(Uuid.randomUuid()).setLogDirs(dirs); + .setIncarnationId(new Uuid(brokerId, brokerId)) + .setLogDirs(dirs); FinalizedControllerFeatures finalizedFeatures = new FinalizedControllerFeatures(Collections.emptyMap(), 456L); ControllerResult result = clusterControl.registerBroker(data, 123L, finalizedFeatures); RecordTestUtils.replayAll(clusterControl, result.records()); @@ -852,4 +854,32 @@ public void testReRegistrationAndBrokerEpoch(boolean newIncarnationId) { clusterControl.brokerRegistrations().get(1).epoch()); } } + + @Test + public void testBrokerContactTimesAreUpdatedOnClusterControlActivation() { + MockTime time = new MockTime(0L, 20L, 1000L); + ClusterControlManager clusterControl = new ClusterControlManager.Builder(). + setClusterId("pjvUwj3ZTEeSVQmUiH3IJw"). + setFeatureControlManager(new FeatureControlManager.Builder().build()). + setBrokerUncleanShutdownHandler((brokerId, records) -> { }). + setTime(time). + build(); + clusterControl.replay(new RegisterBrokerRecord(). + setBrokerEpoch(100). + setBrokerId(0). + setLogDirs(asList(Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"))), 10002); + clusterControl.replay(new RegisterBrokerRecord(). + setBrokerEpoch(123). + setBrokerId(1). + setLogDirs(asList(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"))), 10005); + clusterControl.activate(); + assertEquals(OptionalLong.of(1000L), clusterControl.heartbeatManager().tracker(). + contactTime(new BrokerIdAndEpoch(0, 100))); + assertEquals(OptionalLong.of(1000L), clusterControl.heartbeatManager().tracker(). + contactTime(new BrokerIdAndEpoch(1, 123))); + assertEquals(OptionalLong.empty(), clusterControl.heartbeatManager().tracker(). + contactTime(new BrokerIdAndEpoch(1, 124))); + assertEquals(OptionalLong.empty(), clusterControl.heartbeatManager().tracker(). 
+ contactTime(new BrokerIdAndEpoch(2, 100))); + } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java index 2ce417f63aa90..1129784549297 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ConfigurationControlManagerTest.java @@ -18,8 +18,10 @@ package org.apache.kafka.controller; import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.errors.PolicyViolationException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.metadata.ConfigRecord; @@ -28,14 +30,19 @@ import org.apache.kafka.metadata.KafkaConfigSchema; import org.apache.kafka.metadata.RecordTestUtils; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.config.ConfigSynonym; import org.apache.kafka.server.policy.AlterConfigPolicy; import org.apache.kafka.server.policy.AlterConfigPolicy.RequestMetadata; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -43,6 +50,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Optional; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -56,6 +64,7 @@ import static org.apache.kafka.common.metadata.MetadataRecordType.CONFIG_RECORD; import static org.apache.kafka.server.config.ConfigSynonym.HOURS_TO_MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; @Timeout(value = 40) @@ -67,7 +76,10 @@ public class ConfigurationControlManagerTest { CONFIGS.put(BROKER, new ConfigDef(). define("foo.bar", ConfigDef.Type.LIST, "1", ConfigDef.Importance.HIGH, "foo bar"). define("baz", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "baz"). - define("quux", ConfigDef.Type.INT, ConfigDef.Importance.HIGH, "quux")); + define("quux", ConfigDef.Type.INT, ConfigDef.Importance.HIGH, "quux"). + define(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, + ConfigDef.Type.INT, "1", ConfigDef.Importance.HIGH, "min.isr")); + CONFIGS.put(TOPIC, new ConfigDef(). define("abc", ConfigDef.Type.LIST, ConfigDef.Importance.HIGH, "abc"). define("def", ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, "def"). 
@@ -80,6 +92,8 @@ public class ConfigurationControlManagerTest { static { SYNONYMS.put("abc", Collections.singletonList(new ConfigSynonym("foo.bar"))); SYNONYMS.put("def", Collections.singletonList(new ConfigSynonym("baz"))); + SYNONYMS.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, + Collections.singletonList(new ConfigSynonym(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG))); SYNONYMS.put("quuux", Collections.singletonList(new ConfigSynonym("quux", HOURS_TO_MILLISECONDS))); } @@ -316,10 +330,10 @@ public void testIncrementalAlterConfigsWithPolicy() { assertEquals(ControllerResult.atomicOf(asList(new ApiMessageAndVersion( new ConfigRecord().setResourceType(BROKER.id()).setResourceName("0"). setName("foo.bar").setValue("123"), CONFIG_RECORD.highestSupportedVersion()), new ApiMessageAndVersion( - new ConfigRecord().setResourceType(BROKER.id()).setResourceName("0"). - setName("quux").setValue("456"), CONFIG_RECORD.highestSupportedVersion()), new ApiMessageAndVersion( - new ConfigRecord().setResourceType(BROKER.id()).setResourceName("0"). - setName("broker.config.to.remove").setValue(null), CONFIG_RECORD.highestSupportedVersion()) + new ConfigRecord().setResourceType(BROKER.id()).setResourceName("0"). + setName("quux").setValue("456"), CONFIG_RECORD.highestSupportedVersion()), new ApiMessageAndVersion( + new ConfigRecord().setResourceType(BROKER.id()).setResourceName("0"). + setName("broker.config.to.remove").setValue(null), CONFIG_RECORD.highestSupportedVersion()) ), toMap(entry(MYTOPIC, new ApiError(Errors.POLICY_VIOLATION, "Expected: AlterConfigPolicy.RequestMetadata(resource=ConfigResource(" + @@ -391,4 +405,95 @@ expectedRecords1, toMap(entry(MYTOPIC, ApiError.NONE))), manager.legacyAlterConfigs(toMap(entry(MYTOPIC, toMap(entry("def", "901")))), true)); } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testMaybeGenerateElrSafetyRecords(boolean setStaticConfig) { + ConfigurationControlManager.Builder builder = new ConfigurationControlManager.Builder(). + setKafkaConfigSchema(SCHEMA); + if (setStaticConfig) { + builder.setStaticConfig(Map.of(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")); + } + ConfigurationControlManager manager = builder.build(); + Map> keyToOps = + toMap(entry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, entry(SET, "3"))); + ConfigResource brokerConfigResource = new ConfigResource(ConfigResource.Type.BROKER, "1"); + ControllerResult result = manager.incrementalAlterConfig(brokerConfigResource, keyToOps, true); + assertEquals(Collections.emptySet(), manager.brokersWithConfigs()); + + assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( + new ConfigRecord().setResourceType(BROKER.id()).setResourceName("1"). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).setValue("3"), (short) 0)), + ApiError.NONE), result); + + RecordTestUtils.replayAll(manager, result.records()); + assertEquals(Set.of(1), manager.brokersWithConfigs()); + + List records = new ArrayList<>(); + String effectiveMinInsync = setStaticConfig ? "2" : "1"; + assertEquals("Generating cluster-level min.insync.replicas of " + + effectiveMinInsync + ". Removing broker-level min.insync.replicas " + + "for brokers: 1.", manager.maybeGenerateElrSafetyRecords(records)); + + assertEquals(Arrays.asList(new ApiMessageAndVersion( + new ConfigRecord(). + setResourceType(BROKER.id()). + setResourceName(""). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG). + setValue(effectiveMinInsync), (short) 0), + new ApiMessageAndVersion(new ConfigRecord(). 
+ setResourceType(BROKER.id()). + setResourceName("1"). + setName(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG). + setValue(null), (short) 0)), + records); + RecordTestUtils.replayAll(manager, records); + assertEquals(Collections.emptySet(), manager.brokersWithConfigs()); + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testRejectMinIsrChangeWhenElrEnabled(boolean removal) { + FeatureControlManager featureManager = new FeatureControlManager.Builder(). + setQuorumFeatures(new QuorumFeatures(0, + QuorumFeatures.defaultSupportedFeatureMap(true), + Collections.emptyList())). + build(); + ConfigurationControlManager manager = new ConfigurationControlManager.Builder(). + setStaticConfig(Map.of(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")). + setFeatureControl(featureManager). + setKafkaConfigSchema(SCHEMA). + build(); + ControllerResult result = manager.updateFeatures( + Collections.singletonMap(EligibleLeaderReplicasVersion.FEATURE_NAME, + EligibleLeaderReplicasVersion.ELRV_1.featureLevel()), + Collections.singletonMap(EligibleLeaderReplicasVersion.FEATURE_NAME, + FeatureUpdate.UpgradeType.UPGRADE), + false); + assertNull(result.response()); + RecordTestUtils.replayAll(manager, result.records()); + RecordTestUtils.replayAll(featureManager, result.records()); + + // Broker level update is not allowed. + result = manager.incrementalAlterConfig(new ConfigResource(ConfigResource.Type.BROKER, "1"), + toMap(entry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, + removal ? entry(DELETE, null) : entry(SET, "3"))), + true); + assertEquals(Errors.INVALID_CONFIG, result.response().error()); + assertEquals("Broker-level min.insync.replicas cannot be altered while ELR is enabled.", + result.response().message()); + + // Cluster level removal is not allowed. + result = manager.incrementalAlterConfig(new ConfigResource(ConfigResource.Type.BROKER, ""), + toMap(entry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, + removal ? entry(DELETE, null) : entry(SET, "3"))), + true); + if (removal) { + assertEquals(Errors.INVALID_CONFIG, result.response().error()); + assertEquals("Cluster-level min.insync.replicas cannot be removed while ELR is enabled.", + result.response().message()); + } else { + assertEquals(Errors.NONE, result.response().error()); + } + } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/EventPerformanceMonitorTest.java b/metadata/src/test/java/org/apache/kafka/controller/EventPerformanceMonitorTest.java new file mode 100644 index 0000000000000..81e01679dc0fd --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/controller/EventPerformanceMonitorTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller; + +import org.junit.jupiter.api.Test; + +import java.util.AbstractMap; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class EventPerformanceMonitorTest { + @Test + public void testDefaultPeriodNs() { + assertEquals(SECONDS.toNanos(60), + new EventPerformanceMonitor.Builder().build().periodNs()); + } + + @Test + public void testSlowestEventWithNoEvents() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + assertEquals(new AbstractMap.SimpleImmutableEntry<>(null, 0L), + monitor.slowestEvent()); + } + + @Test + public void testSlowestEventWithThreeEvents() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + monitor.observeEvent("fastEvent", MILLISECONDS.toNanos(2)); + monitor.observeEvent("slowEvent", MILLISECONDS.toNanos(100)); + assertEquals(new AbstractMap.SimpleImmutableEntry<>("slowEvent", MILLISECONDS.toNanos(100)), + monitor.slowestEvent()); + } + + @Test + public void testLogSlowEvent() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + assertEquals("Exceptionally slow controller event slowEvent took 5000 ms.", + monitor.doObserveEvent("slowEvent", SECONDS.toNanos(5))); + } + + @Test + public void testDoNotLogFastEvent() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + assertNull(monitor.doObserveEvent("slowEvent", MILLISECONDS.toNanos(250))); + } + + @Test + public void testFormatNsAsDecimalMsWithZero() { + assertEquals("0.00", + EventPerformanceMonitor.formatNsAsDecimalMs(0)); + } + + @Test + public void testFormatNsAsDecimalMsWith100() { + assertEquals("100.00", + EventPerformanceMonitor.formatNsAsDecimalMs(MILLISECONDS.toNanos(100))); + } + + @Test + public void testFormatNsAsDecimalMsWith123456789() { + assertEquals("123.46", + EventPerformanceMonitor.formatNsAsDecimalMs(123456789)); + } + + @Test + public void testPeriodicPerformanceMessageWithNoEvents() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + assertEquals("In the last 60000 ms period, there were no controller events completed.", + monitor.periodicPerformanceMessage()); + } + + @Test + public void testPeriodicPerformanceMessageWithOneEvent() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + monitor.observeEvent("myEvent", MILLISECONDS.toNanos(12)); + assertEquals("In the last 60000 ms period, 1 controller events were completed, which took an " + + "average of 12.00 ms each. The slowest event was myEvent, which took 12.00 ms.", + monitor.periodicPerformanceMessage()); + } + + @Test + public void testPeriodicPerformanceMessageWithThreeEvents() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + monitor.observeEvent("myEvent", MILLISECONDS.toNanos(12)); + monitor.observeEvent("myEvent2", MILLISECONDS.toNanos(19)); + monitor.observeEvent("myEvent3", MILLISECONDS.toNanos(1)); + assertEquals("In the last 60000 ms period, 3 controller events were completed, which took an " + + "average of 10.67 ms each. 
The slowest event was myEvent2, which took 19.00 ms.", + monitor.periodicPerformanceMessage()); + } + + @Test + public void testGeneratePeriodicPerformanceMessageResetsState() { + EventPerformanceMonitor monitor = new EventPerformanceMonitor.Builder().build(); + monitor.observeEvent("myEvent", MILLISECONDS.toNanos(12)); + monitor.observeEvent("myEvent2", MILLISECONDS.toNanos(19)); + monitor.observeEvent("myEvent3", MILLISECONDS.toNanos(1)); + monitor.generatePeriodicPerformanceMessage(); + assertEquals("In the last 60000 ms period, there were no controller events completed.", + monitor.periodicPerformanceMessage()); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java index 378f6367cc629..87ae118ad84d7 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java @@ -25,10 +25,8 @@ import org.apache.kafka.metadata.FinalizedControllerFeatures; import org.apache.kafka.metadata.RecordTestUtils; import org.apache.kafka.metadata.VersionRange; -import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.TestFeatureVersion; import org.apache.kafka.server.common.TransactionVersion; @@ -52,6 +50,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; @Timeout(value = 40) @@ -79,7 +78,7 @@ private static Map versionMap(Object... args) { } public static QuorumFeatures features(Object... args) { - Map features = QuorumFeatures.defaultFeatureMap(true); + Map features = QuorumFeatures.defaultSupportedFeatureMap(true); features.putAll(rangeMap(args)); return new QuorumFeatures(0, features, emptyList()); } @@ -350,9 +349,9 @@ public void testCanUseUnsafeDowngradeIfMetadataChanged() { public void testCanUseSafeDowngradeIfMetadataDidNotChange() { FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, - MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV1.featureLevel())). + MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.IBP_3_3_IV1.featureLevel())). setMetadataVersion(MetadataVersion.IBP_3_1_IV0). - setMinimumBootstrapVersion(MetadataVersion.IBP_3_0_IV0). + setMinimumBootstrapVersion(MetadataVersion.IBP_3_0_IV1). build(); assertEquals(ControllerResult.of(Collections.emptyList(), ApiError.NONE), manager.updateFeatures( @@ -365,7 +364,7 @@ public void testCanUseSafeDowngradeIfMetadataDidNotChange() { public void testCannotDowngradeBefore3_3_IV0() { FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, - MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV3.featureLevel())). + MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.IBP_3_3_IV3.featureLevel())). setMetadataVersion(MetadataVersion.IBP_3_3_IV0). 
build(); assertEquals(ControllerResult.of(Collections.emptyList(), new ApiError(Errors.INVALID_UPDATE_VERSION, @@ -381,67 +380,60 @@ public void testCreateFeatureLevelRecords() { Map localSupportedFeatures = new HashMap<>(); localSupportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.latestTesting().featureLevel())); - localSupportedFeatures.put(Features.TEST_VERSION.featureName(), VersionRange.of(0, 2)); + localSupportedFeatures.put(Feature.TEST_VERSION.featureName(), VersionRange.of(0, 2)); FeatureControlManager manager = new FeatureControlManager.Builder(). setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())). setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( - Collections.singletonList(new SimpleImmutableEntry<>(1, Collections.singletonMap(Features.TEST_VERSION.featureName(), VersionRange.of(0, 3)))), + Collections.singletonList(new SimpleImmutableEntry<>(1, Collections.singletonMap(Feature.TEST_VERSION.featureName(), VersionRange.of(0, 3)))), emptyList())). build(); ControllerResult result = manager.updateFeatures( - Collections.singletonMap(Features.TEST_VERSION.featureName(), (short) 1), - Collections.singletonMap(Features.TEST_VERSION.featureName(), FeatureUpdate.UpgradeType.UPGRADE), + Collections.singletonMap(Feature.TEST_VERSION.featureName(), (short) 1), + Collections.singletonMap(Feature.TEST_VERSION.featureName(), FeatureUpdate.UpgradeType.UPGRADE), false); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( - new FeatureLevelRecord().setName(Features.TEST_VERSION.featureName()).setFeatureLevel((short) 1), (short) 0)), + new FeatureLevelRecord().setName(Feature.TEST_VERSION.featureName()).setFeatureLevel((short) 1), (short) 0)), ApiError.NONE), result); RecordTestUtils.replayAll(manager, result.records()); - assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get(Features.TEST_VERSION.featureName())); + assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get(Feature.TEST_VERSION.featureName())); ControllerResult result2 = manager.updateFeatures( - Collections.singletonMap(Features.TEST_VERSION.featureName(), (short) 0), - Collections.singletonMap(Features.TEST_VERSION.featureName(), FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), + Collections.singletonMap(Feature.TEST_VERSION.featureName(), (short) 0), + Collections.singletonMap(Feature.TEST_VERSION.featureName(), FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), false); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( - new FeatureLevelRecord().setName(Features.TEST_VERSION.featureName()).setFeatureLevel((short) 0), (short) 0)), + new FeatureLevelRecord().setName(Feature.TEST_VERSION.featureName()).setFeatureLevel((short) 0), (short) 0)), ApiError.NONE), result2); RecordTestUtils.replayAll(manager, result2.records()); - assertEquals(Optional.empty(), manager.finalizedFeatures(Long.MAX_VALUE).get(Features.TEST_VERSION.featureName())); + assertEquals(Optional.empty(), manager.finalizedFeatures(Long.MAX_VALUE).get(Feature.TEST_VERSION.featureName())); } @Test - public void testNoMetadataVersionChangeDuringMigration() { + public void testUpgradeElrFeatureLevel() { + Map localSupportedFeatures = new HashMap<>(); + localSupportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( + MetadataVersion.IBP_4_0_IV1.featureLevel(), MetadataVersion.latestTesting().featureLevel())); 
+ localSupportedFeatures.put(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName(), VersionRange.of(0, 1)); FeatureControlManager manager = new FeatureControlManager.Builder(). - setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, - MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_5_IV1.featureLevel())). - setMetadataVersion(MetadataVersion.IBP_3_4_IV0). + setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())). + setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( + Collections.singletonList(new SimpleImmutableEntry<>(1, Collections.singletonMap(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName(), VersionRange.of(0, 1)))), + emptyList())). + setMetadataVersion(MetadataVersion.IBP_4_0_IV1). build(); - BootstrapMetadata bootstrapMetadata = BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_4_IV0, "FeatureControlManagerTest"); - RecordTestUtils.replayAll(manager, bootstrapMetadata.records()); - RecordTestUtils.replayOne(manager, ZkMigrationState.PRE_MIGRATION.toRecord()); - - assertEquals(ControllerResult.of(Collections.emptyList(), new ApiError(Errors.INVALID_UPDATE_VERSION, - "Invalid metadata.version 10. Unable to modify metadata.version while a ZK migration is in progress.")), - manager.updateFeatures( - singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_5_IV1.featureLevel()), - singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), - true)); - - assertEquals(ControllerResult.of(Collections.emptyList(), new ApiError(Errors.INVALID_UPDATE_VERSION, - "Invalid metadata.version 4. Unable to modify metadata.version while a ZK migration is in progress.")), - manager.updateFeatures( - singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), - singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - true)); - - // Complete the migration - RecordTestUtils.replayOne(manager, ZkMigrationState.POST_MIGRATION.toRecord()); ControllerResult result = manager.updateFeatures( - singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_5_IV1.featureLevel()), - singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), + Collections.singletonMap(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName(), (short) 1), + Collections.singletonMap(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName(), FeatureUpdate.UpgradeType.UPGRADE), false); - assertEquals(ApiError.NONE, result.response()); + assertTrue(result.response().isSuccess()); + assertEquals(Collections.singletonList(new ApiMessageAndVersion( + new FeatureLevelRecord(). + setName(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName()). + setFeatureLevel((short) 1), (short) 0)), + result.records()); RecordTestUtils.replayAll(manager, result.records()); - assertEquals(MetadataVersion.IBP_3_5_IV1, manager.metadataVersion()); + assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE). 
+ get(Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.featureName())); } + } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java index e607f7faa1e7c..407e03f4b500d 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java @@ -47,7 +47,7 @@ public void setUp() { FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). build(); ClusterControlManager clusterControl = new ClusterControlManager.Builder(). diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java index a8331fe9f2312..2cca9ef7cc6dd 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerIntegrationTestUtils.java @@ -29,6 +29,7 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.metadata.BrokerHeartbeatReply; import org.apache.kafka.metadata.BrokerRegistrationReply; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.common.MetadataVersion; import org.slf4j.Logger; @@ -75,6 +76,32 @@ static BrokerRegistrationRequestData.FeatureCollection brokerFeatures( return features; } + /** + * Create a broker features collection for use in a registration request. MV and given features are included. + * + * @param minVersion The minimum supported MV. + * @param maxVersion The maximum supported MV. + * @param featureMaxVersions The features and their max supported versions. + */ + static BrokerRegistrationRequestData.FeatureCollection brokerFeaturesPlusFeatureVersions( + MetadataVersion minVersion, + MetadataVersion maxVersion, + Map featureMaxVersions + ) { + BrokerRegistrationRequestData.FeatureCollection features = new BrokerRegistrationRequestData.FeatureCollection(); + features.add(new BrokerRegistrationRequestData.Feature() + .setName(MetadataVersion.FEATURE_NAME) + .setMinSupportedVersion(minVersion.featureLevel()) + .setMaxSupportedVersion(maxVersion.featureLevel())); + featureMaxVersions.entrySet().forEach(entry -> { + features.add(new BrokerRegistrationRequestData.Feature() + .setName(entry.getKey()) + .setMaxSupportedVersion(entry.getValue()) + .setMinSupportedVersion((short) 0)); + }); + return features; + } + /** * Register the given number of brokers. 
* @@ -94,7 +121,8 @@ static Map registerBrokersAndUnfence( .setBrokerId(brokerId) .setRack(null) .setClusterId(controller.clusterId()) - .setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting())) + .setFeatures(brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting(), + Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_1.featureLevel()))) .setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB" + brokerId)) .setLogDirs(Collections.singletonList( Uuid.fromString("TESTBROKER" + Integer.toString(100000 + brokerId).substring(1) + "DIRAAAA") diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java index 867957513b4f6..c9e0d7b0742a5 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.errors.BrokerIdNotRegisteredException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; @@ -60,7 +61,6 @@ import org.apache.kafka.common.metadata.RegisterControllerRecord; import org.apache.kafka.common.metadata.TopicRecord; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AlterPartitionRequest; @@ -99,14 +99,14 @@ import org.apache.kafka.metadata.RecordTestUtils.TestThroughAllIntermediateImagesLeadingToFinalImageHelper; import org.apache.kafka.metadata.authorizer.StandardAuthorizer; import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.metadata.util.BatchFileWriter; import org.apache.kafka.metalog.LocalLogManager; import org.apache.kafka.metalog.LocalLogManagerTestEnv; import org.apache.kafka.raft.Batch; import org.apache.kafka.raft.OffsetAndEpoch; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.TopicIdPartition; @@ -157,6 +157,7 @@ import static org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT; import static org.apache.kafka.controller.ControllerRequestContextUtil.anonymousContextFor; import static org.apache.kafka.controller.QuorumControllerIntegrationTestUtils.brokerFeatures; +import static org.apache.kafka.controller.QuorumControllerIntegrationTestUtils.brokerFeaturesPlusFeatureVersions; import static org.apache.kafka.controller.QuorumControllerIntegrationTestUtils.pause; import static org.apache.kafka.controller.QuorumControllerIntegrationTestUtils.registerBrokersAndUnfence; import static org.apache.kafka.controller.QuorumControllerIntegrationTestUtils.sendBrokerHeartbeatToUnfenceBrokers; @@ -164,6 +165,7 @@ import static 
org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -188,7 +190,8 @@ public void testConfigurationOperations() throws Throwable {
         ) {
             controlEnv.activeController().registerBroker(ANONYMOUS_CONTEXT,
                 new BrokerRegistrationRequestData().
-                    setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting())).
+                    setFeatures(brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting(),
+                        Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_1.featureLevel()))).
                     setBrokerId(0).
                     setLogDirs(Collections.singletonList(Uuid.fromString("iiaQjkRPQcuMULNII0MUeA"))).
                     setClusterId(logEnv.clusterId())).get();
@@ -229,7 +232,8 @@ public void testDelayedConfigurationOperations() throws Throwable {
         ) {
             controlEnv.activeController().registerBroker(ANONYMOUS_CONTEXT,
                 new BrokerRegistrationRequestData().
-                    setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting())).
+                    setFeatures(brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting(),
+                        Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_1.featureLevel()))).
                     setBrokerId(0).
                     setLogDirs(Collections.singletonList(Uuid.fromString("sTbzRAMnTpahIyIPNjiLhw"))).
                     setClusterId(logEnv.clusterId())).get();
@@ -349,7 +353,32 @@ public void testFenceMultipleBrokers() throws Throwable {
     }
 
     @Test
-    public void testUncleanShutdownBroker() throws Throwable {
+    public void testElrEnabledByDefault() throws Throwable {
+        long sessionTimeoutMillis = 500;
+        try (
+            LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(1).
+                build();
+            QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv).
+                setSessionTimeoutMillis(OptionalLong.of(sessionTimeoutMillis)).
+                setBootstrapMetadata(BootstrapMetadata.fromRecords(
+                    Arrays.asList(
+                        new ApiMessageAndVersion(new FeatureLevelRecord().
+                            setName(MetadataVersion.FEATURE_NAME).
+                            setFeatureLevel(MetadataVersion.IBP_4_0_IV1.featureLevel()), (short) 0),
+                        new ApiMessageAndVersion(new FeatureLevelRecord().
+                            setName(EligibleLeaderReplicasVersion.FEATURE_NAME).
+                            setFeatureLevel(EligibleLeaderReplicasVersion.ELRV_1.featureLevel()), (short) 0)
+                    ),
+                    "test-provided bootstrap ELR enabled"
+                )).
+                build()
+        ) {
+            controlEnv.activeController(true);
+            assertTrue(controlEnv.activeController().configurationControl().clusterConfig().containsKey(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG));
+        }
+    }
+
+    @Test
+    public void testUncleanShutdownBrokerElrEnabled() throws Throwable {
         List allBrokers = Arrays.asList(1, 2, 3);
         short replicationFactor = (short) allBrokers.size();
         long sessionTimeoutMillis = 500;
@@ -359,7 +388,6 @@ public void testUncleanShutdownBroker() throws Throwable {
                 build();
             QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv).
                 setSessionTimeoutMillis(OptionalLong.of(sessionTimeoutMillis)).
-                setBootstrapMetadata(BootstrapMetadata.fromVersion(MetadataVersion.IBP_4_0_IV1, "test-provided bootstrap ELR enabled")).
build() ) { @@ -367,14 +395,16 @@ public void testUncleanShutdownBroker() throws Throwable { listeners.add(new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092)); QuorumController active = controlEnv.activeController(); Map brokerEpochs = new HashMap<>(); - + BrokerRegistrationRequestData.FeatureCollection features = + brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_4_0_IV1, + Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_1.featureLevel())); for (Integer brokerId : allBrokers) { CompletableFuture reply = active.registerBroker( anonymousContextFor(ApiKeys.BROKER_REGISTRATION), new BrokerRegistrationRequestData(). setBrokerId(brokerId). setClusterId(active.clusterId()). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_4_0_IV1)). + setFeatures(features). setIncarnationId(Uuid.randomUuid()). setLogDirs(Collections.singletonList(Uuid.randomUuid())). setListeners(listeners)); @@ -442,7 +472,7 @@ public void testUncleanShutdownBroker() throws Throwable { new BrokerRegistrationRequestData(). setBrokerId(brokerToUncleanShutdown). setClusterId(active.clusterId()). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_4_0_IV1)). + setFeatures(features). setIncarnationId(Uuid.randomUuid()). setLogDirs(Collections.singletonList(Uuid.randomUuid())). setListeners(listeners)).get(); @@ -455,7 +485,7 @@ public void testUncleanShutdownBroker() throws Throwable { new BrokerRegistrationRequestData(). setBrokerId(lastKnownElr[0]). setClusterId(active.clusterId()). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_4_0_IV1)). + setFeatures(features). setIncarnationId(Uuid.randomUuid()). setLogDirs(Collections.singletonList(Uuid.randomUuid())). setListeners(listeners)).get(); @@ -476,6 +506,100 @@ public void testUncleanShutdownBroker() throws Throwable { } } + @Test + public void testUncleanShutdownElrDisabled() throws Exception { + List allBrokers = Arrays.asList(1, 2, 3); + short replicationFactor = (short) allBrokers.size(); + long sessionTimeoutMillis = 500; + + try ( + LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(1). + build(); + QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv) + .setControllerBuilderInitializer(controllerBuilder -> + controllerBuilder.setFenceStaleBrokerIntervalNs(TimeUnit.SECONDS.toNanos(15))) + .setSessionTimeoutMillis(OptionalLong.of(sessionTimeoutMillis)) + .setBootstrapMetadata(BootstrapMetadata.fromVersion(MetadataVersion.IBP_4_0_IV0, "test-provided bootstrap ELR not supported")) + .build() + ) { + ListenerCollection listeners = new ListenerCollection(); + listeners.add(new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092)); + QuorumController active = controlEnv.activeController(); + Map brokerEpochs = new HashMap<>(); + BrokerRegistrationRequestData.FeatureCollection features = + brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_4_0_IV0, + Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_0.featureLevel())); + for (Integer brokerId : allBrokers) { + CompletableFuture reply = active.registerBroker( + anonymousContextFor(ApiKeys.BROKER_REGISTRATION), + new BrokerRegistrationRequestData(). + setBrokerId(brokerId). + setClusterId(active.clusterId()). + setFeatures(features). + setIncarnationId(Uuid.randomUuid()). + setLogDirs(Collections.singletonList(Uuid.randomUuid())). 
+ setListeners(listeners)); + brokerEpochs.put(brokerId, reply.get().epoch()); + } + + // Brokers are only registered and should still be fenced + allBrokers.forEach(brokerId -> + assertFalse(active.clusterControl().isUnfenced(brokerId), "Broker " + brokerId + " should have been fenced") + ); + + // Unfence all brokers and create a topic foo + sendBrokerHeartbeatToUnfenceBrokers(active, allBrokers, brokerEpochs); + CreateTopicsRequestData createTopicsRequestData = new CreateTopicsRequestData().setTopics( + new CreatableTopicCollection(Collections.singleton( + new CreatableTopic().setName("foo").setNumPartitions(1). + setReplicationFactor(replicationFactor)).iterator())); + CreateTopicsResponseData createTopicsResponseData = active.createTopics( + ANONYMOUS_CONTEXT, createTopicsRequestData, + Collections.singleton("foo")).get(); + assertEquals(Errors.NONE, Errors.forCode(createTopicsResponseData.topics().find("foo").errorCode())); + Uuid topicIdFoo = createTopicsResponseData.topics().find("foo").topicId(); + + // wait for brokers to become inactive + active.time().sleep(sessionTimeoutMillis); + + // unclean shutdown for each replica + for (int i = 0; i < (int) replicationFactor; i++) { + // Verify that ELR is disabled + PartitionRegistration partition = active.replicationControl().getPartition(topicIdFoo, 0); + assertEquals(0, partition.elr.length, partition.toString()); + assertEquals(0, partition.lastKnownElr.length, partition.toString()); + + boolean lastStandingIsr = i == (replicationFactor - 1); + int prevLeader = partition.leader; + int prevLeaderEpoch = partition.leaderEpoch; + // Unclean shutdown should remove the broker from the ISR and reassign leadership + active.registerBroker( + anonymousContextFor(ApiKeys.BROKER_REGISTRATION), + new BrokerRegistrationRequestData(). + setBrokerId(prevLeader). + setClusterId(active.clusterId()). + setFeatures(features). + setIncarnationId(Uuid.randomUuid()). + setLogDirs(Collections.singletonList(Uuid.randomUuid())). 
+ setListeners(listeners)).get(); + partition = active.replicationControl().getPartition(topicIdFoo, 0); + // leader should always change, leader epoch should always be incremented + int currentLeader = partition.leader; + int currentLeaderEpoch = partition.leaderEpoch; + assertNotEquals(currentLeader, prevLeader); + assertNotEquals(currentLeaderEpoch, prevLeaderEpoch); + // if the broker is not the last standing ISR, it should be removed from the ISR + if (lastStandingIsr) { + assertArrayEquals(new int[]{prevLeader}, partition.isr); + assertEquals(NO_LEADER, currentLeader); + } else { + List isr = Arrays.stream(partition.isr).boxed().toList(); + assertFalse(isr.contains(prevLeader)); + } + } + } + } + @Test public void testBalancePartitionLeaders() throws Throwable { List allBrokers = Arrays.asList(1, 2, 3); @@ -594,7 +718,7 @@ public void testBalancePartitionLeaders() throws Throwable { alterPartitionRequest.topics().add(topicData); active.alterPartition(ANONYMOUS_CONTEXT, new AlterPartitionRequest - .Builder(alterPartitionRequest, false).build((short) 0).data()).get(); + .Builder(alterPartitionRequest).build((short) 0).data()).get(); AtomicLong lastHeartbeatMs = new AtomicLong(getMonotonicMs(active.time())); sendBrokerHeartbeatToUnfenceBrokers(active, allBrokers, brokerEpochs); @@ -690,7 +814,7 @@ public void testRegisterBrokerKRaftVersions(short finalizedKraftVersion, short b if (brokerMaxSupportedKraftVersion != 0) { brokerFeatures.add(new BrokerRegistrationRequestData.Feature() .setName(KRaftVersion.FEATURE_NAME) - .setMinSupportedVersion(Features.KRAFT_VERSION.minimumProduction()) + .setMinSupportedVersion(Feature.KRAFT_VERSION.minimumProduction()) .setMaxSupportedVersion(brokerMaxSupportedKraftVersion)); } BrokerRegistrationRequestData request = new BrokerRegistrationRequestData(). @@ -714,7 +838,7 @@ public void testRegisterBrokerKRaftVersions(short finalizedKraftVersion, short b BrokerRegistrationReply reply = active.registerBroker( ANONYMOUS_CONTEXT, request).get(); - assertTrue(reply.epoch() >= 5, "Unexpected broker epoch " + reply.epoch()); + assertTrue(reply.epoch() >= 4, "Unexpected broker epoch " + reply.epoch()); } } } @@ -737,10 +861,11 @@ public void testUnregisterBroker() throws Throwable { setBrokerId(0). setClusterId(active.clusterId()). setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwBA")). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting())). + setFeatures(brokerFeaturesPlusFeatureVersions(MetadataVersion.IBP_3_0_IV1, MetadataVersion.latestTesting(), + Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.ELRV_1.featureLevel()))). setLogDirs(Collections.singletonList(Uuid.fromString("vBpaRsZVSaGsQT53wtYGtg"))). setListeners(listeners)); - assertEquals(5L, reply.get().epoch()); + assertEquals(4L, reply.get().epoch()); CreateTopicsRequestData createTopicsRequestData = new CreateTopicsRequestData().setTopics( new CreatableTopicCollection(Collections.singleton( @@ -756,7 +881,7 @@ public void testUnregisterBroker() throws Throwable { get().topics().find("foo").errorMessage()); assertEquals(new BrokerHeartbeatReply(true, false, false, false), active.processBrokerHeartbeat(ANONYMOUS_CONTEXT, new BrokerHeartbeatRequestData(). - setWantFence(false).setBrokerEpoch(5L).setBrokerId(0). + setWantFence(false).setBrokerEpoch(4L).setBrokerId(0). 
setCurrentMetadataOffset(100000L)).get()); assertEquals(Errors.NONE.code(), active.createTopics(ANONYMOUS_CONTEXT, createTopicsRequestData, Collections.singleton("foo")). @@ -860,10 +985,10 @@ public void testSnapshotSaveAndLoad() throws Throwable { Arrays.asList(new CreatableReplicaAssignment(). setPartitionIndex(0). setBrokerIds(Arrays.asList(0, 1, 2)), - new CreatableReplicaAssignment(). - setPartitionIndex(1). - setBrokerIds(Arrays.asList(1, 2, 0))). - iterator()))).iterator())), + new CreatableReplicaAssignment(). + setPartitionIndex(1). + setBrokerIds(Arrays.asList(1, 2, 0))). + iterator()))).iterator())), Collections.singleton("foo")).get(); fooId = fooData.topics().find("foo").topicId(); active.allocateProducerIds(ANONYMOUS_CONTEXT, @@ -882,8 +1007,6 @@ private List generateTestRecords(Uuid fooId, Map PRE_PRODUCTION_RECORDS = - Collections.unmodifiableList(Arrays.asList( + List.of( new ApiMessageAndVersion(new RegisterBrokerRecord(). setBrokerEpoch(42). setBrokerId(123). @@ -1255,7 +1378,7 @@ public void close() throws Exception { new ApiMessageAndVersion(new TopicRecord(). setName("bar"). setTopicId(Uuid.fromString("cxBT72dK4si8Ied1iP4wBA")), - (short) 0))); + (short) 0)); private static final BootstrapMetadata COMPLEX_BOOTSTRAP = BootstrapMetadata.fromRecords( Arrays.asList( @@ -1376,10 +1499,7 @@ public void testAppendRecordsAtomically() { appender)).getMessage()); } - FeatureControlManager getActivationRecords( - MetadataVersion metadataVersion, - Optional stateInLog - ) { + FeatureControlManager getActivationRecords(MetadataVersion metadataVersion) { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControlManager = new FeatureControlManager.Builder() .setSnapshotRegistry(snapshotRegistry) @@ -1388,11 +1508,11 @@ FeatureControlManager getActivationRecords( ControllerResult result = ActivationRecordsGenerator.generate( msg -> { }, - stateInLog.isEmpty(), + true, -1L, BootstrapMetadata.fromVersion(metadataVersion, "test"), - stateInLog.orElse(ZkMigrationState.NONE), - metadataVersion); + metadataVersion, + 3); RecordTestUtils.replayAll(featureControlManager, result.records()); return featureControlManager; } @@ -1401,34 +1521,23 @@ FeatureControlManager getActivationRecords( public void testActivationRecords33() { FeatureControlManager featureControl; - featureControl = getActivationRecords(MetadataVersion.IBP_3_3_IV0, Optional.empty()); + featureControl = getActivationRecords(MetadataVersion.IBP_3_3_IV0); assertEquals(MetadataVersion.IBP_3_3_IV0, featureControl.metadataVersion()); - assertEquals(ZkMigrationState.NONE, featureControl.zkMigrationState()); - - featureControl = getActivationRecords(MetadataVersion.IBP_3_3_IV0, Optional.of(ZkMigrationState.NONE)); - assertEquals(MetadataVersion.IBP_3_3_IV0, featureControl.metadataVersion()); - assertEquals(ZkMigrationState.NONE, featureControl.zkMigrationState()); } @Test public void testActivationRecords34() { FeatureControlManager featureControl; - featureControl = getActivationRecords(MetadataVersion.IBP_3_4_IV0, Optional.empty()); - assertEquals(MetadataVersion.IBP_3_4_IV0, featureControl.metadataVersion()); - assertEquals(ZkMigrationState.NONE, featureControl.zkMigrationState()); - - featureControl = getActivationRecords(MetadataVersion.IBP_3_4_IV0, Optional.of(ZkMigrationState.NONE)); + featureControl = getActivationRecords(MetadataVersion.IBP_3_4_IV0); assertEquals(MetadataVersion.IBP_3_4_IV0, featureControl.metadataVersion()); - assertEquals(ZkMigrationState.NONE, 
featureControl.zkMigrationState()); } @Test public void testActivationRecordsNonEmptyLog() { FeatureControlManager featureControl = getActivationRecords( - MetadataVersion.IBP_3_9_IV0, Optional.empty()); + MetadataVersion.IBP_3_9_IV0); assertEquals(MetadataVersion.IBP_3_9_IV0, featureControl.metadataVersion()); - assertEquals(ZkMigrationState.NONE, featureControl.zkMigrationState()); } @Test @@ -1438,8 +1547,8 @@ public void testActivationRecordsPartialBootstrap() { true, 0L, BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"), - ZkMigrationState.NONE, - MetadataVersion.IBP_3_6_IV1); + MetadataVersion.IBP_3_6_IV1, + 3); assertFalse(result.isAtomic()); assertTrue(RecordTestUtils.recordAtIndexAs( AbortTransactionRecord.class, result.records(), 0).isPresent()); @@ -1487,8 +1596,8 @@ public void testActivationRecordsPartialTransaction() { false, offsetControlManager.transactionStartOffset(), BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"), - ZkMigrationState.NONE, - MetadataVersion.IBP_3_6_IV1); + MetadataVersion.IBP_3_6_IV1, + 3); assertTrue(result.isAtomic()); offsetControlManager.replay( @@ -1511,8 +1620,7 @@ public void testActivationRecordsPartialTransactionNoSupport() { false, offsetControlManager.transactionStartOffset(), BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV0, "test"), - ZkMigrationState.NONE, - MetadataVersion.IBP_3_6_IV0) - ); + MetadataVersion.IBP_3_6_IV0, + 3)); } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java index 8bece2bb86cf9..a788dd22e6598 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java @@ -21,6 +21,7 @@ import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; import org.apache.kafka.metalog.LocalLogManagerTestEnv; import org.apache.kafka.raft.LeaderAndEpoch; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.fault.MockFaultHandler; import org.apache.kafka.test.TestUtils; @@ -106,7 +107,7 @@ private QuorumControllerTestEnv( builder.setRaftClient(logEnv.logManagers().get(nodeId)); builder.setBootstrapMetadata(bootstrapMetadata); builder.setLeaderImbalanceCheckIntervalNs(leaderImbalanceCheckIntervalNs); - builder.setQuorumFeatures(new QuorumFeatures(nodeId, QuorumFeatures.defaultFeatureMap(true), nodeIds)); + builder.setQuorumFeatures(new QuorumFeatures(nodeId, QuorumFeatures.defaultSupportedFeatureMap(true), nodeIds)); sessionTimeoutMillis.ifPresent(timeout -> builder.setSessionTimeoutNs(NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)) ); @@ -115,11 +116,16 @@ private QuorumControllerTestEnv( fatalFaultHandlers.put(nodeId, fatalFaultHandler); MockFaultHandler nonFatalFaultHandler = new MockFaultHandler("nonFatalFaultHandler"); builder.setNonFatalFaultHandler(nonFatalFaultHandler); - builder.setEligibleLeaderReplicasEnabled(eligibleLeaderReplicasEnabled); builder.setConfigSchema(FakeKafkaConfigSchema.INSTANCE); nonFatalFaultHandlers.put(nodeId, fatalFaultHandler); controllerBuilderInitializer.accept(builder); - this.controllers.add(builder.build()); + QuorumController controller = builder.build(); + if (eligibleLeaderReplicasEnabled) { + bootstrapMetadata = bootstrapMetadata.copyWithFeatureRecord( + EligibleLeaderReplicasVersion.FEATURE_NAME, + 
EligibleLeaderReplicasVersion.ELRV_1.featureLevel()); + } + this.controllers.add(controller); } } catch (Exception e) { close(); diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java index 5df71a043ae8e..cfe320bb38d0b 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java @@ -17,12 +17,8 @@ package org.apache.kafka.controller; -import org.apache.kafka.common.Endpoint; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.VersionRange; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Test; @@ -30,7 +26,6 @@ import org.junit.jupiter.params.provider.ValueSource; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -41,18 +36,14 @@ import static org.junit.jupiter.api.Assertions.assertTrue; public class QuorumFeaturesTest { - private static final Map LOCAL; + private static final Map LOCAL = Map.of( + "foo", VersionRange.of(0, 3), + "bar", VersionRange.of(0, 4), + "baz", VersionRange.of(2, 2) + ); - private static final QuorumFeatures QUORUM_FEATURES; - - static { - Map local = new HashMap<>(); - local.put("foo", VersionRange.of(0, 3)); - local.put("bar", VersionRange.of(0, 4)); - local.put("baz", VersionRange.of(2, 2)); - LOCAL = Collections.unmodifiableMap(local); - QUORUM_FEATURES = new QuorumFeatures(0, LOCAL, Arrays.asList(0, 1, 2)); - } + private static final QuorumFeatures QUORUM_FEATURES = new QuorumFeatures(0, LOCAL, + Arrays.asList(0, 1, 2)); @Test public void testDefaultFeatureMap() { @@ -60,8 +51,8 @@ public void testDefaultFeatureMap() { expectedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), MetadataVersion.LATEST_PRODUCTION.featureLevel())); - for (Features feature : Features.PRODUCTION_FEATURES) { - short maxVersion = feature.defaultValue(MetadataVersion.LATEST_PRODUCTION); + for (Feature feature : Feature.PRODUCTION_FEATURES) { + short maxVersion = feature.latestProduction(); if (maxVersion > 0) { expectedFeatures.put(feature.featureName(), VersionRange.of( feature.minimumProduction(), @@ -69,7 +60,7 @@ public void testDefaultFeatureMap() { )); } } - assertEquals(expectedFeatures, QuorumFeatures.defaultFeatureMap(false)); + assertEquals(expectedFeatures, QuorumFeatures.defaultSupportedFeatureMap(false)); } @Test @@ -78,8 +69,8 @@ public void testDefaultFeatureMapWithUnstable() { expectedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), MetadataVersion.latestTesting().featureLevel())); - for (Features feature : Features.PRODUCTION_FEATURES) { - short maxVersion = feature.defaultValue(MetadataVersion.latestTesting()); + for (Feature feature : Feature.PRODUCTION_FEATURES) { + short maxVersion = feature.defaultLevel(MetadataVersion.latestTesting()); if (maxVersion > 0) { expectedFeatures.put(feature.featureName(), VersionRange.of( feature.minimumProduction(), @@ -87,13 +78,13 @@ public void testDefaultFeatureMapWithUnstable() { )); } } - assertEquals(expectedFeatures, 
QuorumFeatures.defaultFeatureMap(true)); + assertEquals(expectedFeatures, QuorumFeatures.defaultSupportedFeatureMap(true)); } @ParameterizedTest @ValueSource(booleans = {true, false}) public void ensureDefaultSupportedFeaturesRangeMaxNotZero(boolean unstableVersionsEnabled) { - Map quorumFeatures = QuorumFeatures.defaultFeatureMap(unstableVersionsEnabled); + Map quorumFeatures = QuorumFeatures.defaultSupportedFeatureMap(unstableVersionsEnabled); for (VersionRange range : quorumFeatures.values()) { assertNotEquals(0, range.max()); } @@ -124,56 +115,4 @@ public void testIsControllerId() { assertTrue(QUORUM_FEATURES.isControllerId(2)); assertFalse(QUORUM_FEATURES.isControllerId(3)); } - - @Test - public void testZkMigrationNotReadyIfMetadataVersionTooLow() { - assertEquals(Optional.of("The metadata.version too low at 3.0-IV1"), - QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( - MetadataVersion.IBP_3_0_IV1, Collections.emptyMap())); - } - - @Test - public void testZkMigrationReadyIfControllerRegistrationNotSupported() { - assertEquals(Optional.empty(), - QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( - MetadataVersion.IBP_3_4_IV0, Collections.emptyMap())); - } - - @Test - public void testZkMigrationNotReadyIfNotAllControllersRegistered() { - assertEquals(Optional.of("No registration found for controller 0"), - QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( - MetadataVersion.IBP_3_7_IV0, Collections.emptyMap())); - } - - @Test - public void testZkMigrationNotReadyIfControllerNotReady() { - assertEquals(Optional.of("Controller 0 has not enabled zookeeper.metadata.migration.enable"), - QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( - MetadataVersion.IBP_3_7_IV0, Collections.singletonMap(0, - new ControllerRegistration.Builder(). - setId(0). - setZkMigrationReady(false). - setIncarnationId(Uuid.fromString("kCBJaDGNQk6x3y5xbtQOpg")). - setListeners(Collections.singletonMap("CONTROLLER", - new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093))). - build()))); - } - - @Test - public void testZkMigrationReadyIfAllControllersReady() { - Map controllers = new HashMap<>(); - QUORUM_FEATURES.quorumNodeIds().forEach(id -> - controllers.put(id, - new ControllerRegistration.Builder(). - setId(id). - setZkMigrationReady(true). - setIncarnationId(Uuid.fromString("kCBJaDGNQk6x3y5xbtQOpg")). - setListeners(Collections.singletonMap("CONTROLLER", - new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093))). 
- build()) - ); - assertEquals(Optional.empty(), QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( - MetadataVersion.IBP_3_7_IV0, controllers)); - } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java index 194c295de2822..0012046080e52 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java @@ -63,6 +63,7 @@ import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment; import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.ConfigRecord; +import org.apache.kafka.common.metadata.FeatureLevelRecord; import org.apache.kafka.common.metadata.PartitionChangeRecord; import org.apache.kafka.common.metadata.PartitionRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; @@ -92,6 +93,7 @@ import org.apache.kafka.metadata.placement.StripedReplicaPlacer; import org.apache.kafka.metadata.placement.UsableBroker; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.TopicIdPartition; import org.apache.kafka.server.policy.CreateTopicPolicy; @@ -218,6 +220,7 @@ ReplicationControlTestContext build() { final ClusterControlManager clusterControl; final ConfigurationControlManager configurationControl; final ReplicationControlManager replicationControl; + final OffsetControlManager offsetControlManager; void replay(List records) { RecordTestUtils.replayAll(clusterControl, records); @@ -233,18 +236,19 @@ private ReplicationControlTestContext( Map staticConfig ) { this.time = time; - this.configurationControl = new ConfigurationControlManager.Builder(). - setSnapshotRegistry(snapshotRegistry). - setStaticConfig(staticConfig). - setKafkaConfigSchema(FakeKafkaConfigSchema.INSTANCE). - build(); this.featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setQuorumFeatures(new QuorumFeatures(0, - QuorumFeatures.defaultFeatureMap(true), + QuorumFeatures.defaultSupportedFeatureMap(true), Collections.singletonList(0))). setMetadataVersion(metadataVersion). build(); + featureControl.replay(new FeatureLevelRecord() + .setName(EligibleLeaderReplicasVersion.FEATURE_NAME) + .setFeatureLevel(isElrEnabled ? + EligibleLeaderReplicasVersion.ELRV_1.featureLevel() : + EligibleLeaderReplicasVersion.ELRV_0.featureLevel()) + ); this.clusterControl = new ClusterControlManager.Builder(). setLogContext(logContext). setTime(time). @@ -254,7 +258,15 @@ private ReplicationControlTestContext( setFeatureControlManager(featureControl). setBrokerUncleanShutdownHandler(this::handleUncleanBrokerShutdown). build(); - + this.configurationControl = new ConfigurationControlManager.Builder(). + setSnapshotRegistry(snapshotRegistry). + setFeatureControl(featureControl). + setStaticConfig(staticConfig). + setKafkaConfigSchema(FakeKafkaConfigSchema.INSTANCE). + build(); + this.offsetControlManager = new OffsetControlManager.Builder(). + setSnapshotRegistry(snapshotRegistry). + build(); this.replicationControl = new ReplicationControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). setLogContext(logContext). 
@@ -263,7 +275,6 @@ private ReplicationControlTestContext( setClusterControl(clusterControl). setCreateTopicPolicy(createTopicPolicy). setFeatureControl(featureControl). - setEligibleLeaderReplicasEnabled(isElrEnabled). build(); clusterControl.activate(); } @@ -501,7 +512,7 @@ void fenceBrokers(Set brokerIds) { replay(fenceResult.records()); } while (fenceResult.response().booleanValue()); - assertEquals(brokerIds, clusterControl.fencedBrokerIds()); + assertEquals(brokerIds, fencedBrokerIds()); } long currentBrokerEpoch(int brokerId) { @@ -525,6 +536,15 @@ ControllerResult assignReplicasToDirs(int brok replay(result.records()); return result; } + + Set fencedBrokerIds() { + return clusterControl.brokerRegistrations().values() + .stream() + .filter(BrokerRegistration::fenced) + .map(BrokerRegistration::id) + .collect(Collectors.toSet()); + } + } static CreateTopicsResponseData withoutConfigs(CreateTopicsResponseData data) { @@ -1781,7 +1801,7 @@ public void testReassignPartitions(short version) { setReplicas(asList(0, 2, 1)), new ReassignablePartition().setPartitionIndex(2). setReplicas(asList(0, 2, 1)))), - new ReassignableTopic().setName("bar")))); + new ReassignableTopic().setName("bar")))); assertEquals(new AlterPartitionReassignmentsResponseData(). setErrorMessage(null).setResponses(asList( new ReassignableTopicResponse().setName("foo").setPartitions(asList( @@ -1866,7 +1886,7 @@ public void testReassignPartitions(short version) { setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); ControllerResult alterPartitionResult = replication.alterPartition( requestContext, - new AlterPartitionRequest.Builder(alterPartitionRequestData, version > 1).build(version).data()); + new AlterPartitionRequest.Builder(alterPartitionRequestData).build(version).data()); Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH; assertEquals(new AlterPartitionResponseData().setTopics(singletonList( new AlterPartitionResponseData.TopicData(). @@ -1930,7 +1950,7 @@ public void testAlterPartitionShouldRejectFencedBrokers(short version) { anonymousContextFor(ApiKeys.ALTER_PARTITION, version); ControllerResult alterPartitionResult = - replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); + replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data()); Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( @@ -2015,7 +2035,7 @@ public void testAlterPartitionShouldRejectBrokersWithStaleEpoch(short version) { anonymousContextFor(ApiKeys.ALTER_PARTITION, version); ControllerResult alterPartitionResult = - replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); + replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data()); // The late arrived AlterPartition request should be rejected when version >= 3. 
if (version >= 3) { @@ -2080,7 +2100,7 @@ public void testAlterPartitionShouldRejectShuttingDownBrokers(short version) { anonymousContextFor(ApiKeys.ALTER_PARTITION, version); ControllerResult alterPartitionResult = - replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); + replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data()); Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; assertEquals( @@ -2132,27 +2152,21 @@ public void testCancelReassignPartitions() { setReplicas(asList(5, 6, 7)), new ReassignablePartition().setPartitionIndex(3). setReplicas(Collections.emptyList()))), - new ReassignableTopic().setName("bar").setPartitions(singletonList( + new ReassignableTopic().setName("bar").setPartitions(singletonList( new ReassignablePartition().setPartitionIndex(0). setReplicas(asList(1, 2, 3, 4, 0))))))); assertEquals(new AlterPartitionReassignmentsResponseData(). - setErrorMessage(null).setResponses(asList( - new ReassignableTopicResponse().setName("foo").setPartitions(asList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null), - new ReassignablePartitionResponse().setPartitionIndex(1). - setErrorMessage(null), - new ReassignablePartitionResponse().setPartitionIndex(2). - setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). - setErrorMessage("The manual partition assignment includes broker 5, " + - "but no such broker is registered."), - new ReassignablePartitionResponse().setPartitionIndex(3). - setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). - setErrorMessage("The manual partition assignment includes an empty " + - "replica list."))), - new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( - new ReassignablePartitionResponse().setPartitionIndex(0). - setErrorMessage(null))))), + setErrorMessage(null). + setResponses(asList( + new ReassignableTopicResponse().setName("foo").setPartitions(asList( + new ReassignablePartitionResponse().setPartitionIndex(0).setErrorMessage(null), + new ReassignablePartitionResponse().setPartitionIndex(1).setErrorMessage(null), + new ReassignablePartitionResponse().setPartitionIndex(2).setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). + setErrorMessage("The manual partition assignment includes broker 5, but no such broker is registered."), + new ReassignablePartitionResponse().setPartitionIndex(3).setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()). + setErrorMessage("The manual partition assignment includes an empty replica list."))), + new ReassignableTopicResponse().setName("bar").setPartitions(singletonList( + new ReassignablePartitionResponse().setPartitionIndex(0).setErrorMessage(null))))), alterResult.response()); ctx.replay(alterResult.records()); assertEquals(new PartitionRegistration.Builder().setReplicas(new int[] {1, 2, 4}).setIsr(new int[] {1, 2, 4}). 
@@ -2443,7 +2457,7 @@ public void testFenceMultipleBrokers() { Uuid fooId = ctx.createTestTopic("foo", new int[][]{ new int[]{1, 2, 3}, new int[]{2, 3, 4}, new int[]{0, 2, 1}}).topicId(); - assertTrue(ctx.clusterControl.fencedBrokerIds().isEmpty()); + assertTrue(ctx.fencedBrokerIds().isEmpty()); ctx.fenceBrokers(Set.of(2, 3)); PartitionRegistration partition0 = replication.getPartition(fooId, 0); @@ -2941,7 +2955,7 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); ControllerResult alterPartitionResult = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), - new AlterPartitionRequest.Builder(alterPartitionRequestData, true).build().data()); + new AlterPartitionRequest.Builder(alterPartitionRequestData).build().data()); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( new AlterPartitionResponseData.TopicData(). setTopicId(topicId). @@ -3016,7 +3030,7 @@ public void testReassignPartitionsHandlesNewReassignmentThatRemovesPreviouslyAdd setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); ControllerResult alterPartitionResultTwo = replication.alterPartition( anonymousContextFor(ApiKeys.ALTER_PARTITION), - new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo, true).build().data()); + new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo).build().data()); assertEquals(new AlterPartitionResponseData().setTopics(singletonList( new AlterPartitionResponseData.TopicData(). setTopicId(topicId). diff --git a/metadata/src/test/java/org/apache/kafka/image/FeaturesImageTest.java b/metadata/src/test/java/org/apache/kafka/image/FeaturesImageTest.java index a23678098fb02..1df5ff655639f 100644 --- a/metadata/src/test/java/org/apache/kafka/image/FeaturesImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/FeaturesImageTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.image.writer.ImageWriterOptions; import org.apache.kafka.image.writer.RecordListWriter; import org.apache.kafka.metadata.RecordTestUtils; -import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.server.common.ApiMessageAndVersion; import org.apache.kafka.server.common.MetadataVersion; @@ -54,7 +53,7 @@ public class FeaturesImageTest { Map map1 = new HashMap<>(); map1.put("foo", (short) 2); map1.put("bar", (short) 1); - IMAGE1 = new FeaturesImage(map1, MetadataVersion.latestTesting(), ZkMigrationState.NONE); + IMAGE1 = new FeaturesImage(map1, MetadataVersion.latestTesting()); DELTA1_RECORDS = new ArrayList<>(); // change feature level @@ -76,7 +75,7 @@ public class FeaturesImageTest { Map map2 = new HashMap<>(); map2.put("foo", (short) 3); map2.put("baz", (short) 8); - IMAGE2 = new FeaturesImage(map2, MetadataVersion.latestTesting(), ZkMigrationState.NONE); + IMAGE2 = new FeaturesImage(map2, MetadataVersion.latestTesting()); DELTA2_RECORDS = new ArrayList<>(); // remove all features @@ -95,7 +94,7 @@ public class FeaturesImageTest { RecordTestUtils.replayAll(DELTA2, DELTA2_RECORDS); Map map3 = Collections.singletonMap("bar", (short) 1); - IMAGE3 = new FeaturesImage(map3, MetadataVersion.latestTesting(), ZkMigrationState.NONE); + IMAGE3 = new FeaturesImage(map3, MetadataVersion.latestTesting()); } @Test @@ -162,10 +161,9 @@ private static List getImageRecords(FeaturesImage image) { public void testEmpty() { assertTrue(FeaturesImage.EMPTY.isEmpty()); assertFalse(new FeaturesImage(Collections.singletonMap("foo", (short) 1), - 
FeaturesImage.EMPTY.metadataVersion(), FeaturesImage.EMPTY.zkMigrationState()).isEmpty()); + FeaturesImage.EMPTY.metadataVersion()).isEmpty()); assertFalse(new FeaturesImage(FeaturesImage.EMPTY.finalizedVersions(), - MetadataVersion.IBP_3_3_IV0, FeaturesImage.EMPTY.zkMigrationState()).isEmpty()); - assertFalse(new FeaturesImage(FeaturesImage.EMPTY.finalizedVersions(), - FeaturesImage.EMPTY.metadataVersion(), ZkMigrationState.MIGRATION).isEmpty()); + MetadataVersion.IBP_3_3_IV0).isEmpty()); + assertTrue(new FeaturesImage(FeaturesImage.EMPTY.finalizedVersions(), FeaturesImage.EMPTY.metadataVersion()).isEmpty()); } } diff --git a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java index 68fc8053bd2dd..ba636f3bdba96 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ImageDowngradeTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.metadata.PartitionRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; import org.apache.kafka.common.metadata.TopicRecord; -import org.apache.kafka.common.metadata.ZkMigrationStateRecord; import org.apache.kafka.image.writer.ImageWriterOptions; import org.apache.kafka.image.writer.RecordListWriter; import org.apache.kafka.image.writer.UnwritableMetadataException; @@ -183,7 +182,6 @@ void testDirectoryAssignmentState() { (short) 2)), Arrays.asList( metadataVersionRecord(outputMetadataVersion), - new ApiMessageAndVersion(new ZkMigrationStateRecord(), (short) 0), TEST_RECORDS.get(0), new ApiMessageAndVersion( testPartitionRecord.duplicate().setDirectories(Collections.emptyList()), diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotEmitterTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotEmitterTest.java index b92a1876e6c59..d866c1b00ba27 100644 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotEmitterTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/SnapshotEmitterTest.java @@ -148,7 +148,7 @@ public void testEmit() { assertEquals(0L, emitter.metrics().latestSnapshotGeneratedBytes()); emitter.maybeEmit(MetadataImageTest.IMAGE1); assertEquals(0L, emitter.metrics().latestSnapshotGeneratedAgeMs()); - assertEquals(1600L, emitter.metrics().latestSnapshotGeneratedBytes()); + assertEquals(1500L, emitter.metrics().latestSnapshotGeneratedBytes()); FakeSnapshotWriter writer = mockRaftClient.writers.get( MetadataImageTest.IMAGE1.provenance().snapshotId()); assertNotNull(writer); diff --git a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java index 485a39aad8b0f..e45234d225ee0 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/BrokerRegistrationTest.java @@ -159,7 +159,7 @@ public void testToNode() { assertEquals(Optional.empty(), REGISTRATIONS.get(0).node("NONEXISTENT")); assertEquals(Optional.of(new Node(0, "localhost", 9090, null)), REGISTRATIONS.get(0).node("INTERNAL")); - assertEquals(Optional.of(new Node(1, "localhost", 9091, null)), + assertEquals(Optional.of(new Node(1, "localhost", 9091, null, true)), REGISTRATIONS.get(1).node("INTERNAL")); assertEquals(Optional.of(new Node(2, "localhost", 9092, "myrack")), REGISTRATIONS.get(2).node("INTERNAL")); diff --git 
a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java index 3b0ef9c4375ab..2005fab92a55d 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java @@ -276,8 +276,7 @@ public static void assertBatchIteratorContains(List> public static void deepSortRecords(Object o) throws Exception { if (o == null) { return; - } else if (o instanceof List) { - List list = (List) o; + } else if (o instanceof List list) { for (Object entry : list) { if (entry != null) { if (Number.class.isAssignableFrom(entry.getClass())) { @@ -287,8 +286,7 @@ public static void deepSortRecords(Object o) throws Exception { } } list.sort(Comparator.comparing(Object::toString)); - } else if (o instanceof ImplicitLinkedHashCollection) { - ImplicitLinkedHashCollection coll = (ImplicitLinkedHashCollection) o; + } else if (o instanceof ImplicitLinkedHashCollection coll) { for (Object entry : coll) { deepSortRecords(entry); } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapDirectoryTest.java b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapDirectoryTest.java index 8e35ea6741ef2..4eabc02f5183d 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapDirectoryTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapDirectoryTest.java @@ -31,20 +31,18 @@ import java.util.List; import java.util.Optional; -import static java.util.Arrays.asList; -import static java.util.Collections.unmodifiableList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @Timeout(40) public class BootstrapDirectoryTest { - static final List SAMPLE_RECORDS1 = unmodifiableList(asList( + static final List SAMPLE_RECORDS1 = List.of( new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). 
setFeatureLevel((short) 7), (short) 0), new ApiMessageAndVersion(new NoOpRecord(), (short) 0), - new ApiMessageAndVersion(new NoOpRecord(), (short) 0))); + new ApiMessageAndVersion(new NoOpRecord(), (short) 0)); static class BootstrapTestDirectory implements AutoCloseable { File directory = null; @@ -85,7 +83,7 @@ public void testReadFromConfigurationWithAncientVersion() throws Exception { try (BootstrapTestDirectory testDirectory = new BootstrapTestDirectory().createDirectory()) { assertEquals(BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, "the minimum version bootstrap with metadata.version 3.3-IV0"), - new BootstrapDirectory(testDirectory.path(), Optional.of("2.7")).read()); + new BootstrapDirectory(testDirectory.path(), Optional.of("3.0")).read()); } } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java index fd41fefabf0f4..550f6ed966a6d 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java @@ -28,9 +28,7 @@ import java.util.Collections; import java.util.List; -import static java.util.Arrays.asList; import static java.util.Collections.emptyList; -import static java.util.Collections.unmodifiableList; import static org.apache.kafka.server.common.MetadataVersion.FEATURE_NAME; import static org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1; import static org.apache.kafka.server.common.MetadataVersion.IBP_3_3_IV2; @@ -40,14 +38,14 @@ @Timeout(60) public class BootstrapMetadataTest { - static final List SAMPLE_RECORDS1 = unmodifiableList(asList( + static final List SAMPLE_RECORDS1 = List.of( new ApiMessageAndVersion(new FeatureLevelRecord(). setName(FEATURE_NAME). setFeatureLevel((short) 7), (short) 0), new ApiMessageAndVersion(new NoOpRecord(), (short) 0), new ApiMessageAndVersion(new FeatureLevelRecord(). setName(FEATURE_NAME). 
- setFeatureLevel((short) 6), (short) 0))); + setFeatureLevel((short) 6), (short) 0)); @Test public void testFromVersion() { diff --git a/metadata/src/test/java/org/apache/kafka/metadata/storage/FormatterTest.java b/metadata/src/test/java/org/apache/kafka/metadata/storage/FormatterTest.java index 45a896c47c44c..fd0a4086adddd 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/storage/FormatterTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/storage/FormatterTest.java @@ -29,7 +29,7 @@ import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble; import org.apache.kafka.raft.DynamicVoters; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.GroupVersion; import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.TestFeatureVersion; @@ -331,7 +331,7 @@ public void testFormatWithScram() throws Exception { public void testFeatureFlag(short version) throws Exception { try (TestEnv testEnv = new TestEnv(1)) { FormatterContext formatter1 = testEnv.newFormatter(); - formatter1.formatter.setSupportedFeatures(Arrays.asList(Features.values())); + formatter1.formatter.setSupportedFeatures(Feature.TEST_AND_PRODUCTION_FEATURES); formatter1.formatter.setFeatureLevel(TestFeatureVersion.FEATURE_NAME, version); formatter1.formatter.run(); BootstrapMetadata bootstrapMetadata = @@ -357,10 +357,11 @@ public void testFeatureFlag(short version) throws Exception { public void testInvalidFeatureFlag() throws Exception { try (TestEnv testEnv = new TestEnv(2)) { FormatterContext formatter1 = testEnv.newFormatter(); - formatter1.formatter.setSupportedFeatures(Arrays.asList(Features.values())); + formatter1.formatter.setSupportedFeatures(Feature.TEST_AND_PRODUCTION_FEATURES); formatter1.formatter.setFeatureLevel("nonexistent.feature", (short) 1); assertEquals("Unsupported feature: nonexistent.feature. Supported features " + - "are: group.version, kraft.version, test.feature.version, transaction.version", + "are: eligible.leader.replicas.version, group.version, kraft.version, " + + "test.feature.version, transaction.version", assertThrows(FormatterException.class, () -> formatter1.formatter.run()). getMessage()); @@ -379,6 +380,7 @@ public void testFormatWithInitialVoters(boolean specifyKRaftVersion) throws Exce formatter1.formatter.setInitialControllers(DynamicVoters. 
parse("1@localhost:8020:4znU-ou9Taa06bmEJxsjnw")); formatter1.formatter.run(); + assertEquals((short) 1, formatter1.formatter.featureLevels.getOrDefault("kraft.version", (short) 0)); assertEquals(Arrays.asList( String.format("Formatting data directory %s with %s %s.", testEnv.directory(1), @@ -446,4 +448,68 @@ public void testFormatWithInitialVotersFailsWithOlderMetadataVersion() throws Ex () -> formatter1.formatter.run()).getMessage()); } } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testFormatWithNoInitialControllers(boolean specifyKRaftVersion) throws Exception { + try (TestEnv testEnv = new TestEnv(2)) { + FormatterContext formatter1 = testEnv.newFormatter(); + if (specifyKRaftVersion) { + formatter1.formatter.setFeatureLevel("kraft.version", (short) 1); + } + formatter1.formatter.setUnstableFeatureVersionsEnabled(true); + formatter1.formatter.setNoInitialControllersFlag(true); + assertTrue(formatter1.formatter.hasDynamicQuorum()); + + formatter1.formatter.run(); + assertEquals((short) 1, formatter1.formatter.featureLevels.getOrDefault("kraft.version", (short) 0)); + assertEquals(Arrays.asList( + String.format("Formatting data directory %s with %s %s.", + testEnv.directory(1), + MetadataVersion.FEATURE_NAME, + MetadataVersion.latestTesting()), + String.format("Formatting metadata directory %s with %s %s.", + testEnv.directory(0), + MetadataVersion.FEATURE_NAME, + MetadataVersion.latestTesting())), + formatter1.outputLines().stream().sorted().collect(Collectors.toList())); + MetaPropertiesEnsemble ensemble = new MetaPropertiesEnsemble.Loader(). + addLogDirs(testEnv.directories). + load(); + MetaProperties logDirProps0 = ensemble.logDirProps().get(testEnv.directory(0)); + assertNotNull(logDirProps0); + MetaProperties logDirProps1 = ensemble.logDirProps().get(testEnv.directory(1)); + assertNotNull(logDirProps1); + } + } + + @Test + public void testFormatWithoutNoInitialControllersFailsWithNewerKraftVersion() throws Exception { + try (TestEnv testEnv = new TestEnv(2)) { + FormatterContext formatter1 = testEnv.newFormatter(); + formatter1.formatter.setFeatureLevel("kraft.version", (short) 1); + formatter1.formatter.setUnstableFeatureVersionsEnabled(true); + formatter1.formatter.setNoInitialControllersFlag(false); + assertFalse(formatter1.formatter.hasDynamicQuorum()); + assertEquals("Cannot set kraft.version to 1 unless KIP-853 configuration is present. " + + "Try removing the --feature flag for kraft.version.", + assertThrows(FormatterException.class, + formatter1.formatter::run).getMessage()); + } + } + + @Test + public void testFormatWithNoInitialControllersFailsWithOlderKraftVersion() throws Exception { + try (TestEnv testEnv = new TestEnv(2)) { + FormatterContext formatter1 = testEnv.newFormatter(); + formatter1.formatter.setFeatureLevel("kraft.version", (short) 0); + formatter1.formatter.setUnstableFeatureVersionsEnabled(true); + formatter1.formatter.setNoInitialControllersFlag(true); + assertTrue(formatter1.formatter.hasDynamicQuorum()); + assertEquals("Cannot set kraft.version to 0 if KIP-853 configuration is present. 
" + + "Try removing the --feature flag for kraft.version.", + assertThrows(FormatterException.class, + formatter1.formatter::run).getMessage()); + } + } } diff --git a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java index 70e39251b0ba7..db974195493a0 100644 --- a/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java +++ b/metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java @@ -96,8 +96,7 @@ public int size() { @Override public boolean equals(Object o) { - if (!(o instanceof LeaderChangeBatch)) return false; - LeaderChangeBatch other = (LeaderChangeBatch) o; + if (!(o instanceof LeaderChangeBatch other)) return false; return other.newLeader.equals(newLeader); } @@ -135,8 +134,7 @@ public int size() { @Override public boolean equals(Object o) { - if (!(o instanceof LocalRecordBatch)) return false; - LocalRecordBatch other = (LocalRecordBatch) o; + if (!(o instanceof LocalRecordBatch other)) return false; return leaderEpoch == other.leaderEpoch && appendTimestamp == other.appendTimestamp && @@ -261,8 +259,7 @@ public synchronized long append( long nextEndOffset = prevOffset + batch.size(); log.debug("append(batch={}, nextEndOffset={})", batch, nextEndOffset); batches.put(nextEndOffset, batch); - if (batch instanceof LeaderChangeBatch) { - LeaderChangeBatch leaderChangeBatch = (LeaderChangeBatch) batch; + if (batch instanceof LeaderChangeBatch leaderChangeBatch) { leader = leaderChangeBatch.newLeader; } for (LocalLogManager logManager : logManagers.values()) { @@ -373,8 +370,7 @@ synchronized long appendedBytes() { .values() .stream() .flatMapToInt(batch -> { - if (batch instanceof LocalRecordBatch) { - LocalRecordBatch localBatch = (LocalRecordBatch) batch; + if (batch instanceof LocalRecordBatch localBatch) { return localBatch.records.stream().mapToInt(record -> messageSize(record, objectCache)); } else { return IntStream.empty(); @@ -398,8 +394,7 @@ public long initialMaxReadOffset() { public synchronized List allRecords() { List allRecords = new ArrayList<>(); for (LocalBatch batch : batches.values()) { - if (batch instanceof LocalRecordBatch) { - LocalRecordBatch recordBatch = (LocalRecordBatch) batch; + if (batch instanceof LocalRecordBatch recordBatch) { allRecords.addAll(recordBatch.records); } } @@ -554,8 +549,7 @@ private void scheduleLogCheck() { nodeId, numEntriesFound, entryOffset, maxReadOffset); break; } - if (entry.getValue() instanceof LeaderChangeBatch) { - LeaderChangeBatch batch = (LeaderChangeBatch) entry.getValue(); + if (entry.getValue() instanceof LeaderChangeBatch batch) { log.trace("Node {}: handling LeaderChange to {}.", nodeId, batch.newLeader); // Only notify the listener if it equals the shared leader state @@ -572,8 +566,7 @@ private void scheduleLogCheck() { nodeId, batch.newLeader, sharedLeader); listenerData.setOffset(entryOffset); } - } else if (entry.getValue() instanceof LocalRecordBatch) { - LocalRecordBatch batch = (LocalRecordBatch) entry.getValue(); + } else if (entry.getValue() instanceof LocalRecordBatch batch) { log.trace("Node {}: handling LocalRecordBatch with offset {}.", nodeId, entryOffset); ObjectSerializationCache objectCache = new ObjectSerializationCache(); diff --git a/metadata/src/test/resources/log4j2.yaml b/metadata/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..fd94a4974e2dc --- /dev/null +++ b/metadata/src/test/resources/log4j2.yaml @@ -0,0 +1,36 @@ + +# Licensed to the Apache Software 
Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: DEBUG + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: DEBUG diff --git a/raft/bin/test-kraft-server-start.sh b/raft/bin/test-kraft-server-start.sh index 701bc1864a458..7c62e0c91b6f7 100755 --- a/raft/bin/test-kraft-server-start.sh +++ b/raft/bin/test-kraft-server-start.sh @@ -16,8 +16,8 @@ base_dir=$(dirname $0) -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/kraft-log4j.properties" +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=file:$base_dir/../config/kraft-log4j2.yaml" fi if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then diff --git a/raft/config/kraft-log4j2.yaml b/raft/config/kraft-log4j2.yaml new file mode 100644 index 0000000000000..3bfd01ca5cfcf --- /dev/null +++ b/raft/config/kraft-log4j2.yaml @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
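+# Log4j 2 replacement for the old kraft-log4j.properties config; test-kraft-server-start.sh above now
+# points KAFKA_LOG4J_OPTS at this file via -Dlog4j2.configurationFile.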
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c)%n" + + Appenders: + Console: + name: STDERR + target: SYSTEM_ERR + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDERR + Logger: + - name: org.apache.kafka.raft + level: INFO + + - name: org.apache.kafka.snapshot + level: INFO diff --git a/raft/src/main/java/org/apache/kafka/raft/CandidateState.java b/raft/src/main/java/org/apache/kafka/raft/CandidateState.java index 175df7760de60..d66de84e28445 100644 --- a/raft/src/main/java/org/apache/kafka/raft/CandidateState.java +++ b/raft/src/main/java/org/apache/kafka/raft/CandidateState.java @@ -20,38 +20,33 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; +import org.apache.kafka.raft.internals.EpochElection; import org.slf4j.Logger; -import java.util.HashMap; -import java.util.Map; import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; -public class CandidateState implements EpochState { +public class CandidateState implements NomineeState { private final int localId; private final Uuid localDirectoryId; private final int epoch; private final int retries; - private final Map voteStates = new HashMap<>(); + private final EpochElection epochElection; private final Optional highWatermark; private final int electionTimeoutMs; private final Timer electionTimer; private final Timer backoffTimer; private final Logger log; + private boolean isBackingOff; /** * The lifetime of a candidate state is the following. * - * 1. Once started, it would keep record of the received votes. - * 2. If majority votes granted, it can then end its life and will be replaced by a leader state; - * 3. If majority votes rejected or election timed out, it would transit into a backing off phase; - * after the backoff phase completes, it would end its left and be replaced by a new candidate state with bumped retry. + * 1. Once started, it will send vote requests and keep record of the received vote responses. + * 2. If majority votes granted, it will transition to leader state. + * 3. If majority votes rejected, it will transition to prospective after a backoff phase. + * 4. If election times out, it will transition immediately to prospective. */ - private boolean isBackingOff; - protected CandidateState( Time time, int localId, @@ -85,26 +80,8 @@ protected CandidateState( this.backoffTimer = time.timer(0); this.log = logContext.logger(CandidateState.class); - for (ReplicaKey voter : voters.voterKeys()) { - voteStates.put(voter.id(), new VoterState(voter)); - } - voteStates.get(localId).setState(State.GRANTED); - } - - public int localId() { - return localId; - } - - public int majoritySize() { - return voteStates.size() / 2 + 1; - } - - private long numGranted() { - return votersInState(State.GRANTED).count(); - } - - private long numUnrecorded() { - return votersInState(State.UNRECORDED).count(); + this.epochElection = new EpochElection(voters.voterKeys()); + epochElection.recordVote(localId, true); } /** @@ -118,69 +95,27 @@ public int retries() { return retries; } - /** - * Check whether we have received enough votes to conclude the election and become leader. 
- * - * @return true if at least a majority of nodes have granted the vote - */ - public boolean isVoteGranted() { - return numGranted() >= majoritySize(); - } - - /** - * Check if we have received enough rejections that it is no longer possible to reach a - * majority of grants. - * - * @return true if the vote is rejected, false if the vote is already or can still be granted - */ - public boolean isVoteRejected() { - return numGranted() + numUnrecorded() < majoritySize(); + @Override + public EpochElection epochElection() { + return epochElection; } - /** - * Record a granted vote from one of the voters. - * - * @param remoteNodeId The id of the voter - * @return true if the voter had not been previously recorded - * @throws IllegalArgumentException if the remote node is not a voter or if the vote had already been - * rejected by this node - */ + @Override public boolean recordGrantedVote(int remoteNodeId) { - VoterState voterState = voteStates.get(remoteNodeId); - if (voterState == null) { - throw new IllegalArgumentException("Attempt to grant vote to non-voter " + remoteNodeId); - } else if (voterState.state().equals(State.REJECTED)) { + if (epochElection().isRejectedVoter(remoteNodeId)) { throw new IllegalArgumentException("Attempt to grant vote from node " + remoteNodeId + " which previously rejected our request"); } - - boolean recorded = voterState.state().equals(State.UNRECORDED); - voterState.setState(State.GRANTED); - - return recorded; + return epochElection().recordVote(remoteNodeId, true); } - /** - * Record a rejected vote from one of the voters. - * - * @param remoteNodeId The id of the voter - * @return true if the rejected vote had not been previously recorded - * @throws IllegalArgumentException if the remote node is not a voter or if the vote had already been - * granted by this node - */ + @Override public boolean recordRejectedVote(int remoteNodeId) { - VoterState voterState = voteStates.get(remoteNodeId); - if (voterState == null) { - throw new IllegalArgumentException("Attempt to reject vote to non-voter " + remoteNodeId); - } else if (voterState.state().equals(State.GRANTED)) { + if (epochElection().isGrantedVoter(remoteNodeId)) { throw new IllegalArgumentException("Attempt to reject vote from node " + remoteNodeId + " which previously granted our request"); } - - boolean recorded = voterState.state().equals(State.UNRECORDED); - voterState.setState(State.REJECTED); - - return recorded; + return epochElection().recordVote(remoteNodeId, false); } /** @@ -192,41 +127,7 @@ public void startBackingOff(long currentTimeMs, long backoffDurationMs) { this.isBackingOff = true; } - /** - * Get the set of voters which have not been counted as granted or rejected yet. - * - * @return The set of unrecorded voters - */ - public Set unrecordedVoters() { - return votersInState(State.UNRECORDED).collect(Collectors.toSet()); - } - - /** - * Get the set of voters that have granted our vote requests. - * - * @return The set of granting voters, which should always contain the ID of the candidate - */ - public Set grantingVoters() { - return votersInState(State.GRANTED).map(ReplicaKey::id).collect(Collectors.toSet()); - } - - /** - * Get the set of voters that have rejected our candidacy. 
- * - * @return The set of rejecting voters - */ - public Set rejectingVoters() { - return votersInState(State.REJECTED).map(ReplicaKey::id).collect(Collectors.toSet()); - } - - private Stream votersInState(State state) { - return voteStates - .values() - .stream() - .filter(voterState -> voterState.state().equals(state)) - .map(VoterState::replicaKey); - } - + @Override public boolean hasElectionTimeoutExpired(long currentTimeMs) { electionTimer.update(currentTimeMs); return electionTimer.isExpired(); @@ -245,6 +146,7 @@ public long remainingBackoffMs(long currentTimeMs) { return backoffTimer.remainingMs(); } + @Override public long remainingElectionTimeMs(long currentTimeMs) { electionTimer.update(currentTimeMs); return electionTimer.remainingMs(); @@ -255,7 +157,7 @@ public ElectionState election() { return ElectionState.withVotedCandidate( epoch, ReplicaKey.of(localId, localDirectoryId), - voteStates.keySet() + epochElection.voterIds() ); } @@ -276,15 +178,22 @@ public Optional highWatermark() { @Override public boolean canGrantVote( - ReplicaKey candidateKey, - boolean isLogUpToDate + ReplicaKey replicaKey, + boolean isLogUpToDate, + boolean isPreVote ) { - // Still reject vote request even candidateId = localId, Although the candidate votes for + if (isPreVote && isLogUpToDate) { + return true; + } + // Reject standard vote requests even if replicaId = localId, although the replica votes for // itself, this vote is implicit and not "granted". log.debug( - "Rejecting vote request from candidate ({}) since we are already candidate in epoch {}", - candidateKey, - epoch + "Rejecting Vote request (preVote={}) from replica ({}) since we are in CandidateState in epoch {} " + + "and the replica's log is up-to-date={}", + isPreVote, + replicaKey, + epoch, + isLogUpToDate ); return false; } @@ -292,13 +201,13 @@ public boolean canGrantVote( @Override public String toString() { return String.format( - "CandidateState(localId=%d, localDirectoryId=%s,epoch=%d, retries=%d, voteStates=%s, " + + "CandidateState(localId=%d, localDirectoryId=%s, epoch=%d, retries=%d, epochElection=%s, " + "highWatermark=%s, electionTimeoutMs=%d)", localId, localDirectoryId, epoch, retries, - voteStates, + epochElection(), highWatermark, electionTimeoutMs ); @@ -311,31 +220,4 @@ public String name() { @Override public void close() {} - - private static final class VoterState { - private final ReplicaKey replicaKey; - private State state = State.UNRECORDED; - - private VoterState(ReplicaKey replicaKey) { - this.replicaKey = replicaKey; - } - - public State state() { - return state; - } - - public void setState(State state) { - this.state = state; - } - - public ReplicaKey replicaKey() { - return replicaKey; - } - } - - private enum State { - UNRECORDED, - GRANTED, - REJECTED - } } diff --git a/raft/src/main/java/org/apache/kafka/raft/ElectionState.java b/raft/src/main/java/org/apache/kafka/raft/ElectionState.java index e65e72890f5c8..6b4f775caeebc 100644 --- a/raft/src/main/java/org/apache/kafka/raft/ElectionState.java +++ b/raft/src/main/java/org/apache/kafka/raft/ElectionState.java @@ -73,11 +73,11 @@ public boolean isLeader(int nodeId) { public boolean isVotedCandidate(ReplicaKey nodeKey) { if (nodeKey.id() < 0) { throw new IllegalArgumentException("Invalid node key " + nodeKey); - } else if (!votedKey.isPresent()) { + } else if (votedKey.isEmpty()) { return false; } else if (votedKey.get().id() != nodeKey.id()) { return false; - } else if (!votedKey.get().directoryId().isPresent()) { + } else if 
(votedKey.get().directoryId().isEmpty()) { // when the persisted voted directory id is not present assume that we voted for this candidate; // this happens when the kraft version is 0. return true; @@ -87,7 +87,7 @@ public boolean isVotedCandidate(ReplicaKey nodeKey) { } public int leaderId() { - if (!leaderId.isPresent()) + if (leaderId.isEmpty()) throw new IllegalStateException("Attempt to access nil leaderId"); return leaderId.getAsInt(); } @@ -101,7 +101,7 @@ public OptionalInt optionalLeaderId() { } public ReplicaKey votedKey() { - if (!votedKey.isPresent()) { + if (votedKey.isEmpty()) { throw new IllegalStateException("Attempt to access nil votedId"); } @@ -185,12 +185,17 @@ public static ElectionState withVotedCandidate(int epoch, ReplicaKey votedKey, S return new ElectionState(epoch, OptionalInt.empty(), Optional.of(votedKey), voters); } - public static ElectionState withElectedLeader(int epoch, int leaderId, Set voters) { + public static ElectionState withElectedLeader( + int epoch, + int leaderId, + Optional votedKey, + Set voters + ) { if (leaderId < 0) { throw new IllegalArgumentException("Illegal leader Id " + leaderId + ": must be non-negative"); } - return new ElectionState(epoch, OptionalInt.of(leaderId), Optional.empty(), voters); + return new ElectionState(epoch, OptionalInt.of(leaderId), votedKey, voters); } public static ElectionState withUnknownLeader(int epoch, Set voters) { diff --git a/raft/src/main/java/org/apache/kafka/raft/EpochState.java b/raft/src/main/java/org/apache/kafka/raft/EpochState.java index 4f6baec79da61..338f660318140 100644 --- a/raft/src/main/java/org/apache/kafka/raft/EpochState.java +++ b/raft/src/main/java/org/apache/kafka/raft/EpochState.java @@ -26,16 +26,17 @@ default Optional highWatermark() { } /** - * Decide whether to grant a vote to a candidate. + * Decide whether to grant a vote to a replica. * * It is the responsibility of the caller to invoke - * {@link QuorumState#transitionToUnattachedVotedState(int, ReplicaKey)} if vote is granted. + * {@link QuorumState#unattachedAddVotedState(int, ReplicaKey)} if a standard vote is granted. * - * @param candidateKey the id and directory of the candidate - * @param isLogUpToDate whether the candidate’s log is at least as up-to-date as receiver’s log + * @param replicaKey the id and directory of the replica requesting the vote + * @param isLogUpToDate whether the replica's log is at least as up-to-date as receiver’s log + * @param isPreVote whether the vote request is a PreVote (non-binding) or standard vote * @return true if it can grant the vote, false otherwise */ - boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate); + boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote); /** * Get the current election state, which is guaranteed to be immutable. 
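The EpochState change above threads a new isPreVote flag through every state's vote-granting decision: a PreVote is non-binding, while a granted standard vote must also be recorded by the caller. As a rough illustration only (the class name SketchUnattachedState, the votedId field, and the int replicaId parameter are invented here, and the per-state rules in this patch are richer), a leaderless state might decide like this:

import java.util.Optional;

// Illustrative sketch, not Kafka code: a minimal stand-in for a leaderless EpochState
// showing how the isPreVote parameter can change the outcome of canGrantVote().
final class SketchUnattachedState {
    // Hypothetical stand-in for the persisted votedKey of the real state classes.
    private Optional<Integer> votedId = Optional.empty();

    boolean canGrantVote(int replicaId, boolean isLogUpToDate, boolean isPreVote) {
        if (isPreVote) {
            // PreVotes are non-binding, so they can be granted on log freshness alone.
            return isLogUpToDate;
        }
        // A standard vote is binding: grant it only if no vote has been cast in this epoch
        // and the requesting replica's log is at least as up-to-date as the local log.
        return votedId.isEmpty() && isLogUpToDate;
    }
}

The states in the patch apply stricter rules on top of this idea; FollowerState, for instance, only grants a PreVote if it has not yet fetched successfully from its leader in the current epoch.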
diff --git a/raft/src/main/java/org/apache/kafka/raft/FileQuorumStateStore.java b/raft/src/main/java/org/apache/kafka/raft/FileQuorumStateStore.java index 50b519bf3b151..d7738d18f8d3f 100644 --- a/raft/src/main/java/org/apache/kafka/raft/FileQuorumStateStore.java +++ b/raft/src/main/java/org/apache/kafka/raft/FileQuorumStateStore.java @@ -95,11 +95,10 @@ private QuorumStateData readStateFromFile(File file) { final ObjectMapper objectMapper = new ObjectMapper(); JsonNode readNode = objectMapper.readTree(line); - if (!(readNode instanceof ObjectNode)) { + if (!(readNode instanceof ObjectNode dataObject)) { throw new IOException("Deserialized node " + readNode + " is not an object node"); } - final ObjectNode dataObject = (ObjectNode) readNode; JsonNode dataVersionNode = dataObject.get(DATA_VERSION); if (dataVersionNode == null) { diff --git a/raft/src/main/java/org/apache/kafka/raft/FollowerState.java b/raft/src/main/java/org/apache/kafka/raft/FollowerState.java index 49eecab5d610b..4d2357fdef58e 100644 --- a/raft/src/main/java/org/apache/kafka/raft/FollowerState.java +++ b/raft/src/main/java/org/apache/kafka/raft/FollowerState.java @@ -34,9 +34,14 @@ public class FollowerState implements EpochState { private final int epoch; private final int leaderId; private final Endpoints leaderEndpoints; + private final Optional votedKey; private final Set voters; // Used for tracking the expiration of both the Fetch and FetchSnapshot requests private final Timer fetchTimer; + /* Used to track if the replica has fetched successfully from the leader at least once since the transition to + * follower in this epoch. If the replica has not yet fetched successfully, it may be able to grant PreVotes. + */ + private boolean hasFetchedFromLeader; private Optional highWatermark; /* Used to track the currently fetching snapshot. 
When fetching snapshot regular * Fetch request are paused @@ -52,6 +57,7 @@ public FollowerState( int epoch, int leaderId, Endpoints leaderEndpoints, + Optional votedKey, Set voters, Optional highWatermark, int fetchTimeoutMs, @@ -61,16 +67,18 @@ public FollowerState( this.epoch = epoch; this.leaderId = leaderId; this.leaderEndpoints = leaderEndpoints; + this.votedKey = votedKey; this.voters = voters; this.fetchTimer = time.timer(fetchTimeoutMs); this.updateVoterPeriodTimer = time.timer(updateVoterPeriodMs()); this.highWatermark = highWatermark; this.log = logContext.logger(FollowerState.class); + this.hasFetchedFromLeader = false; } @Override public ElectionState election() { - return ElectionState.withElectedLeader(epoch, leaderId, voters); + return ElectionState.withElectedLeader(epoch, leaderId, votedKey, voters); } @Override @@ -118,9 +126,10 @@ public boolean hasFetchTimeoutExpired(long currentTimeMs) { return fetchTimer.isExpired(); } - public void resetFetchTimeout(long currentTimeMs) { + public void resetFetchTimeoutForSuccessfulFetch(long currentTimeMs) { fetchTimer.update(currentTimeMs); fetchTimer.reset(fetchTimeoutMs); + hasFetchedFromLeader = true; } public void overrideFetchTimeout(long currentTimeMs, long timeoutMs) { @@ -131,7 +140,7 @@ public void overrideFetchTimeout(long currentTimeMs, long timeoutMs) { private long updateVoterPeriodMs() { // Allow for a few rounds of fetch request before attempting to update // the voter state - return fetchTimeoutMs * 3; + return fetchTimeoutMs * 3L; } public boolean hasUpdateVoterPeriodExpired(long currentTimeMs) { @@ -150,7 +159,7 @@ public void resetUpdateVoterPeriod(long currentTimeMs) { } public boolean updateHighWatermark(OptionalLong newHighWatermark) { - if (!newHighWatermark.isPresent() && highWatermark.isPresent()) { + if (newHighWatermark.isEmpty() && highWatermark.isPresent()) { throw new IllegalArgumentException( String.format("Attempt to overwrite current high watermark %s with unknown value", highWatermark) ); @@ -202,12 +211,19 @@ public void setFetchingSnapshot(Optional newSnapshot) { } @Override - public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { + if (isPreVote && !hasFetchedFromLeader && isLogUpToDate) { + return true; + } log.debug( - "Rejecting vote request from candidate ({}) since we already have a leader {} in epoch {}", - candidateKey, + "Rejecting Vote request (preVote={}) from replica ({}) since we are in FollowerState with leader {} in " + + "epoch {}, hasFetchedFromLeader={}, replica's log is up-to-date={}", + isPreVote, + replicaKey, leaderId, - epoch + epoch, + hasFetchedFromLeader, + isLogUpToDate ); return false; } @@ -215,12 +231,13 @@ public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { @Override public String toString() { return String.format( - "FollowerState(fetchTimeoutMs=%d, epoch=%d, leader=%d, leaderEndpoints=%s, " + + "FollowerState(fetchTimeoutMs=%d, epoch=%d, leader=%d, leaderEndpoints=%s, votedKey=%s, " + "voters=%s, highWatermark=%s, fetchingSnapshot=%s)", fetchTimeoutMs, epoch, leaderId, leaderEndpoints, + votedKey, voters, highWatermark, fetchingSnapshot diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java b/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java index 68224a8c2410d..688a55abfd740 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java +++ 
b/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java @@ -24,6 +24,7 @@ import org.apache.kafka.common.message.EndQuorumEpochRequestData; import org.apache.kafka.common.message.FetchRequestData; import org.apache.kafka.common.message.FetchSnapshotRequestData; +import org.apache.kafka.common.message.UpdateRaftVoterRequestData; import org.apache.kafka.common.message.VoteRequestData; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.protocol.ApiKeys; @@ -35,6 +36,7 @@ import org.apache.kafka.common.requests.EndQuorumEpochRequest; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.requests.FetchSnapshotRequest; +import org.apache.kafka.common.requests.UpdateRaftVoterRequest; import org.apache.kafka.common.requests.VoteRequest; import org.apache.kafka.common.utils.Time; import org.apache.kafka.server.util.InterBrokerSendThread; @@ -187,6 +189,8 @@ static AbstractRequest.Builder buildRequest(ApiMessag return new FetchRequest.SimpleBuilder((FetchRequestData) requestData); if (requestData instanceof FetchSnapshotRequestData) return new FetchSnapshotRequest.Builder((FetchSnapshotRequestData) requestData); + if (requestData instanceof UpdateRaftVoterRequestData) + return new UpdateRaftVoterRequest.Builder((UpdateRaftVoterRequestData) requestData); if (requestData instanceof ApiVersionsRequestData) return new ApiVersionsRequest.Builder((ApiVersionsRequestData) requestData, ApiKeys.API_VERSIONS.oldestVersion(), diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java index 51aa5e59f2f4e..f1e0e4e41b43d 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java @@ -159,6 +159,7 @@ * as FileRecords, but we use {@link UnalignedRecords} in FetchSnapshotResponse because the records * are not necessarily offset-aligned. */ +@SuppressWarnings({ "ClassDataAbstractionCoupling", "ClassFanOutComplexity", "ParameterNumber", "NPathComplexity" }) public final class KafkaRaftClient implements RaftClient { private static final int RETRY_BACKOFF_BASE_MS = 100; private static final int MAX_NUMBER_OF_BATCHES = 10; @@ -543,17 +544,17 @@ public void initialize( long currentTimeMs = time.milliseconds(); if (quorum.isLeader()) { throw new IllegalStateException("Voter cannot initialize as a Leader"); + } else if (quorum.isOnlyVoter() && (quorum.isUnattached() || quorum.isFollower() || quorum.isResigned())) { + // When there is only a single voter, become leader immediately. 
+ // transitionToProspective will handle short-circuiting voter to candidate state + // and transitionToCandidate will handle short-circuiting voter to leader state + transitionToProspective(currentTimeMs); } else if (quorum.isCandidate()) { onBecomeCandidate(currentTimeMs); } else if (quorum.isFollower()) { onBecomeFollower(currentTimeMs); } - // When there is only a single voter, become candidate immediately - if (quorum.isOnlyVoter() && !quorum.isCandidate()) { - transitionToCandidate(currentTimeMs); - } - // Specialized add voter handler this.addVoterHandler = new AddVoterHandler( partitionState, @@ -657,7 +658,7 @@ private void flushLeaderLog(LeaderState state, long currentTimeMs) { } private boolean maybeTransitionToLeader(CandidateState state, long currentTimeMs) { - if (state.isVoteGranted()) { + if (state.epochElection().isVoteGranted()) { onBecomeLeader(currentTimeMs); return true; } else { @@ -665,6 +666,32 @@ private boolean maybeTransitionToLeader(CandidateState state, long currentTimeMs } } + private boolean maybeTransitionToCandidate(ProspectiveState state, long currentTimeMs) { + if (state.epochElection().isVoteGranted()) { + transitionToCandidate(currentTimeMs); + return true; + } else { + return false; + } + } + + /** + * Only applies to NomineeStates (Prospective or Candidate). If enough votes were granted + * then this method is called to transition the state forward - either from Prospective to Candidate + * or from Candidate to Leader. + */ + private void maybeTransitionForward(NomineeState state, long currentTimeMs) { + if (state instanceof ProspectiveState prospective) { + maybeTransitionToCandidate(prospective, currentTimeMs); + } else if (state instanceof CandidateState candidate) { + maybeTransitionToLeader(candidate, currentTimeMs); + } else { + throw new IllegalStateException( + "Expected to be a NomineeState (Prospective or Candidate), but current state is " + state + ); + } + } + private void onBecomeCandidate(long currentTimeMs) { CandidateState state = quorum.candidateStateOrThrow(); if (!maybeTransitionToLeader(state, currentTimeMs)) { @@ -679,8 +706,21 @@ private void transitionToCandidate(long currentTimeMs) { onBecomeCandidate(currentTimeMs); } - private void transitionToUnattached(int epoch) { - quorum.transitionToUnattached(epoch); + private void onBecomeProspective(long currentTimeMs) { + ProspectiveState state = quorum.prospectiveStateOrThrow(); + if (!maybeTransitionToCandidate(state, currentTimeMs)) { + resetConnections(); + kafkaRaftMetrics.updateElectionStartMs(currentTimeMs); + } + } + + private void transitionToProspective(long currentTimeMs) { + quorum.transitionToProspective(); + onBecomeProspective(currentTimeMs); + } + + private void transitionToUnattached(int epoch, OptionalInt leaderId) { + quorum.transitionToUnattached(epoch, leaderId); maybeFireLeaderChange(); resetConnections(); } @@ -692,10 +732,6 @@ private void transitionToResigned(List preferredSuccessors) { resetConnections(); } - private void transitionToUnattachedVoted(ReplicaKey candidateKey, int epoch) { - quorum.transitionToUnattachedVotedState(epoch, candidateKey); - } - private void onBecomeFollower(long currentTimeMs) { kafkaRaftMetrics.maybeUpdateElectionLatency(currentTimeMs); @@ -779,12 +815,32 @@ private VoteResponseData handleVoteRequest( VoteRequestData.PartitionData partitionRequest = request.topics().get(0).partitions().get(0); - int candidateId = partitionRequest.candidateId(); - int candidateEpoch = partitionRequest.candidateEpoch(); + int replicaId = 
partitionRequest.replicaId(); + int replicaEpoch = partitionRequest.replicaEpoch(); + boolean preVote = partitionRequest.preVote(); int lastEpoch = partitionRequest.lastOffsetEpoch(); long lastEpochEndOffset = partitionRequest.lastOffset(); - if (lastEpochEndOffset < 0 || lastEpoch < 0 || lastEpoch >= candidateEpoch) { + /* Validate the replica epoch and the log's last epoch. + * + * For a standard vote, the candidate replica increases the epoch before sending a vote request. + * So we expect the replicaEpoch to be strictly greater than the log's last epoch. This is always true because + * the candidate has never seen a leader at replicaEpoch. + * + * For a PreVote, the prospective replica doesn't increase the epoch so it is possible for there to be a leader + * and a record in the log at the prospective replica's replicaEpoch. + */ + boolean isIllegalEpoch = preVote ? lastEpoch > replicaEpoch : lastEpoch >= replicaEpoch; + if (isIllegalEpoch) { + logger.info( + "Received a vote request from replica {} with illegal epoch {}, last epoch {}, preVote={}", + replicaId, + replicaEpoch, + lastEpoch, + preVote + ); + } + if (lastEpochEndOffset < 0 || lastEpoch < 0 || isIllegalEpoch) { return buildVoteResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), @@ -793,7 +849,7 @@ private VoteResponseData handleVoteRequest( ); } - Optional errorOpt = validateVoterOnlyRequest(candidateId, candidateEpoch); + Optional errorOpt = validateVoterOnlyRequest(replicaId, replicaEpoch); if (errorOpt.isPresent()) { return buildVoteResponse( requestMetadata.listenerName(), @@ -803,15 +859,15 @@ private VoteResponseData handleVoteRequest( ); } - if (candidateEpoch > quorum.epoch()) { - transitionToUnattached(candidateEpoch); + if (replicaEpoch > quorum.epoch()) { + transitionToUnattached(replicaEpoch, OptionalInt.empty()); } // Check that the request was intended for this replica Optional voterKey = RaftUtil.voteRequestVoterKey(request, partitionRequest); if (!isValidVoterKey(voterKey)) { logger.info( - "Candidate sent a voter key ({}) in the VOTE request that doesn't match the " + + "A replica sent a voter key ({}) in the VOTE request that doesn't match the " + "local key ({}, {}); rejecting the vote", voterKey, nodeId, @@ -827,20 +883,30 @@ private VoteResponseData handleVoteRequest( } OffsetAndEpoch lastEpochEndOffsetAndEpoch = new OffsetAndEpoch(lastEpochEndOffset, lastEpoch); - ReplicaKey candidateKey = ReplicaKey.of( - candidateId, - partitionRequest.candidateDirectoryId() + ReplicaKey replicaKey = ReplicaKey.of( + replicaId, + partitionRequest.replicaDirectoryId() ); boolean voteGranted = quorum.canGrantVote( - candidateKey, - lastEpochEndOffsetAndEpoch.compareTo(endOffset()) >= 0 + replicaKey, + lastEpochEndOffsetAndEpoch.compareTo(endOffset()) >= 0, + preVote ); - if (voteGranted && quorum.isUnattachedNotVoted()) { - transitionToUnattachedVoted(candidateKey, candidateEpoch); + if (!preVote && voteGranted) { + if (quorum.isUnattachedNotVoted()) { + quorum.unattachedAddVotedState(replicaEpoch, replicaKey); + } else if (quorum.isProspectiveNotVoted()) { + quorum.prospectiveAddVotedState(replicaEpoch, replicaKey); + } } - logger.info("Vote request {} with epoch {} is {}", request, candidateEpoch, voteGranted ? "granted" : "rejected"); + logger.info( + "Vote request {} with epoch {} is {}", + request, + replicaEpoch, + voteGranted ? 
"granted" : "rejected" + ); return buildVoteResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), @@ -856,7 +922,15 @@ private boolean handleVoteResponse( int remoteNodeId = responseMetadata.source().id(); VoteResponseData response = (VoteResponseData) responseMetadata.data(); Errors topLevelError = Errors.forCode(response.errorCode()); - if (topLevelError != Errors.NONE) { + if (topLevelError == Errors.UNSUPPORTED_VERSION && quorum.isProspective()) { + logger.info( + "Prospective received unsupported version error in vote response in epoch {}, " + + "transitioning to Candidate state immediately since at least one voter doesn't support PreVote.", + quorum.epoch() + ); + transitionToCandidate(currentTimeMs); + return true; + } else if (topLevelError != Errors.NONE) { return handleTopLevelError(topLevelError, responseMetadata); } @@ -900,30 +974,22 @@ private boolean handleVoteResponse( if (quorum.isLeader()) { logger.debug("Ignoring vote response {} since we already became leader for epoch {}", partitionResponse, quorum.epoch()); - } else if (quorum.isCandidate()) { - CandidateState state = quorum.candidateStateOrThrow(); + } else if (quorum.isNomineeState()) { + NomineeState state = quorum.nomineeStateOrThrow(); if (partitionResponse.voteGranted()) { state.recordGrantedVote(remoteNodeId); - maybeTransitionToLeader(state, currentTimeMs); + maybeTransitionForward(state, currentTimeMs); } else { state.recordRejectedVote(remoteNodeId); - - // If our vote is rejected, we go immediately to the random backoff. This - // ensures that we are not stuck waiting for the election timeout when the - // vote has become gridlocked. - if (state.isVoteRejected() && !state.isBackingOff()) { - logger.info("Insufficient remaining votes to become leader (rejected by {}). " + - "We will backoff before retrying election again", state.rejectingVoters()); - - state.startBackingOff( - currentTimeMs, - binaryExponentialElectionBackoffMs(state.retries()) - ); - } + maybeHandleElectionLoss(state, currentTimeMs); } } else { - logger.debug("Ignoring vote response {} since we are no longer a candidate in epoch {}", - partitionResponse, quorum.epoch()); + logger.debug( + "Ignoring vote response {} since we are no longer a NomineeState " + + "(Prospective or Candidate) in epoch {}", + partitionResponse, + quorum.epoch() + ); } return true; } else { @@ -931,13 +997,52 @@ private boolean handleVoteResponse( } } + /** + * On election loss, if replica is prospective it will transition to unattached or follower state. + * If replica is candidate, it will start backing off. + */ + private void maybeHandleElectionLoss(NomineeState state, long currentTimeMs) { + if (state instanceof CandidateState candidate) { + if (candidate.epochElection().isVoteRejected() && !candidate.isBackingOff()) { + logger.info( + "Insufficient remaining votes to become leader. We will backoff before retrying election again. " + + "Current epoch election state is {}.", + candidate.epochElection() + ); + // Go immediately to a random, exponential backoff. The backoff starts low to prevent + // needing to wait the entire election timeout when the vote result has already been + // determined. The randomness prevents the next election from being gridlocked with + // another nominee due to timing. The exponential aspect limits epoch churn when the + // replica has failed multiple elections in succession. 
+ candidate.startBackingOff( + currentTimeMs, + binaryExponentialElectionBackoffMs(candidate.retries()) + ); + } + } else if (state instanceof ProspectiveState prospective) { + if (prospective.epochElection().isVoteRejected()) { + logger.info( + "Insufficient remaining votes to become candidate. Current epoch election state is {}. ", + prospective.epochElection() + ); + prospectiveTransitionAfterElectionLoss(prospective, currentTimeMs); + } + } else { + throw new IllegalStateException( + "Expected to be a NomineeState (Prospective or Candidate), but current state is " + state + ); + } + } + private int binaryExponentialElectionBackoffMs(int retries) { if (retries <= 0) { throw new IllegalArgumentException("Retries " + retries + " should be larger than zero"); } // upper limit exponential co-efficients at 20 to avoid overflow - return Math.min(RETRY_BACKOFF_BASE_MS * random.nextInt(2 << Math.min(20, retries - 1)), - quorumConfig.electionBackoffMaxMs()); + return Math.min( + RETRY_BACKOFF_BASE_MS * random.nextInt(2 << Math.min(20, retries - 1)), + quorumConfig.electionBackoffMaxMs() + ); } private int strictExponentialElectionBackoffMs(int positionInSuccessors, int totalNumSuccessors) { @@ -1210,7 +1315,7 @@ private long endEpochElectionBackoff(Collection preferredCandidates) int position = 0; for (ReplicaKey candidate : preferredCandidates) { if (candidate.id() == quorum.localIdOrThrow()) { - if (!candidate.directoryId().isPresent() || + if (candidate.directoryId().isEmpty() || candidate.directoryId().get().equals(quorum.localDirectoryId()) ) { // Found ourselves in the preferred candidate list @@ -1684,7 +1789,7 @@ private boolean handleFetchResponse( updateFollowerHighWatermark(state, highWatermark); } - state.resetFetchTimeout(currentTimeMs); + state.resetFetchTimeoutForSuccessfulFetch(currentTimeMs); return true; } else { return handleUnexpectedError(error, responseMetadata); @@ -1788,7 +1893,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( Optional partitionSnapshotOpt = FetchSnapshotRequest .forTopicPartition(data, log.topicPartition()); - if (!partitionSnapshotOpt.isPresent()) { + if (partitionSnapshotOpt.isEmpty()) { // The Raft client assumes that there is only one topic partition. TopicPartition unknownTopicPartition = new TopicPartition( data.topics().get(0).name(), @@ -1828,7 +1933,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( ); Optional snapshotOpt = log.readSnapshot(snapshotId); - if (!snapshotOpt.isPresent() || snapshotId.equals(BOOTSTRAP_SNAPSHOT_ID)) { + if (snapshotOpt.isEmpty() || snapshotId.equals(BOOTSTRAP_SNAPSHOT_ID)) { // The bootstrap checkpoint should not be replicated. 
The first leader will // make sure that the content of the bootstrap checkpoint is included in the // partition log @@ -1944,7 +2049,7 @@ private boolean handleFetchSnapshotResponse( Optional partitionSnapshotOpt = FetchSnapshotResponse .forTopicPartition(data, log.topicPartition()); - if (!partitionSnapshotOpt.isPresent()) { + if (partitionSnapshotOpt.isEmpty()) { return false; } @@ -1988,7 +2093,7 @@ private boolean handleFetchSnapshotResponse( partitionSnapshot.snapshotId() ); state.setFetchingSnapshot(Optional.empty()); - state.resetFetchTimeout(currentTimeMs); + state.resetFetchTimeoutForSuccessfulFetch(currentTimeMs); return true; } @@ -2066,7 +2171,7 @@ private boolean handleFetchSnapshotResponse( } } - state.resetFetchTimeout(currentTimeMs); + state.resetFetchTimeoutForSuccessfulFetch(currentTimeMs); return true; } @@ -2098,7 +2203,7 @@ private CompletableFuture handleAddVoterRequest( } Optional newVoter = RaftUtil.addVoterRequestVoterKey(data); - if (!newVoter.isPresent() || !newVoter.get().directoryId().isPresent()) { + if (newVoter.isEmpty() || newVoter.get().directoryId().isEmpty()) { return completedFuture( new AddRaftVoterResponseData() .setErrorCode(Errors.INVALID_REQUEST.code()) @@ -2107,7 +2212,7 @@ private CompletableFuture handleAddVoterRequest( } Endpoints newVoterEndpoints = Endpoints.fromAddVoterRequest(data.listeners()); - if (!newVoterEndpoints.address(channel.listenerName()).isPresent()) { + if (newVoterEndpoints.address(channel.listenerName()).isEmpty()) { return completedFuture( new AddRaftVoterResponseData() .setErrorCode(Errors.INVALID_REQUEST.code()) @@ -2181,7 +2286,7 @@ private CompletableFuture handleRemoveVoterRequest( } Optional oldVoter = RaftUtil.removeVoterRequestVoterKey(data); - if (!oldVoter.isPresent() || !oldVoter.get().directoryId().isPresent()) { + if (oldVoter.isEmpty() || oldVoter.get().directoryId().isEmpty()) { return completedFuture( new RemoveRaftVoterResponseData() .setErrorCode(Errors.INVALID_REQUEST.code()) @@ -2226,7 +2331,7 @@ private CompletableFuture handleUpdateVoterRequest( } Optional voter = RaftUtil.updateVoterRequestVoterKey(data); - if (!voter.isPresent() || !voter.get().directoryId().isPresent()) { + if (voter.isEmpty() || voter.get().directoryId().isEmpty()) { return completedFuture( RaftUtil.updateVoterResponse( Errors.INVALID_REQUEST, @@ -2238,7 +2343,7 @@ private CompletableFuture handleUpdateVoterRequest( } Endpoints voterEndpoints = Endpoints.fromUpdateVoterRequest(data.listeners()); - if (!voterEndpoints.address(channel.listenerName()).isPresent()) { + if (voterEndpoints.address(channel.listenerName()).isEmpty()) { return completedFuture( RaftUtil.updateVoterResponse( Errors.INVALID_REQUEST, @@ -2319,8 +2424,8 @@ private boolean hasConsistentLeader(int epoch, OptionalInt leaderId) { return quorum.isLeader(); } else { return epoch != quorum.epoch() - || !leaderId.isPresent() - || !quorum.leaderId().isPresent() + || leaderId.isEmpty() + || quorum.leaderId().isEmpty() || leaderId.equals(quorum.leaderId()); } } @@ -2342,7 +2447,7 @@ private boolean hasConsistentLeader(int epoch, OptionalInt leaderId) { * - Optional.of(true) indicates that the response was successfully handled here and * the node can become ready * - Optional.of(false) indicates that the response was handled here, but that the - * node should got in to backoff + * node should go into backoff */ private Optional maybeHandleCommonResponse( Errors error, @@ -2419,7 +2524,7 @@ private void maybeTransition( if (leaderId.isPresent()) { transitionToFollower(epoch, 
leaderId.getAsInt(), leaderEndpoints, currentTimeMs); } else { - transitionToUnattached(epoch); + transitionToUnattached(epoch, OptionalInt.empty()); } } else if ( leaderId.isPresent() && @@ -2516,7 +2621,7 @@ private boolean isValidVoterKey(Optional voterKey) { return voterKey .map(key -> { if (!OptionalInt.of(key.id()).equals(nodeId)) return false; - if (!key.directoryId().isPresent()) return true; + if (key.directoryId().isEmpty()) return true; return key.directoryId().get().equals(nodeDirectoryId); }) @@ -2604,11 +2709,9 @@ private void handleRequest(RaftRequest.Inbound request, long currentTimeMs) { private void handleInboundMessage(RaftMessage message, long currentTimeMs) { logger.trace("Received inbound message {}", message); - if (message instanceof RaftRequest.Inbound) { - RaftRequest.Inbound request = (RaftRequest.Inbound) message; + if (message instanceof RaftRequest.Inbound request) { handleRequest(request, currentTimeMs); - } else if (message instanceof RaftResponse.Inbound) { - RaftResponse.Inbound response = (RaftResponse.Inbound) message; + } else if (message instanceof RaftResponse.Inbound response) { if (requestManager.isResponseExpected(response.source(), response.correlationId())) { handleResponse(response, currentTimeMs); } else { @@ -2724,7 +2827,7 @@ private BeginQuorumEpochRequestData buildBeginQuorumEpochRequest(ReplicaKey remo ); } - private VoteRequestData buildVoteRequest(ReplicaKey remoteVoter) { + private VoteRequestData buildVoteRequest(ReplicaKey remoteVoter, boolean preVote) { OffsetAndEpoch endOffset = endOffset(); return RaftUtil.singletonVoteRequest( log.topicPartition(), @@ -2733,7 +2836,8 @@ private VoteRequestData buildVoteRequest(ReplicaKey remoteVoter) { quorum.localReplicaKeyOrThrow(), remoteVoter, endOffset.epoch(), - endOffset.offset() + endOffset.offset(), + preVote ); } @@ -2755,7 +2859,7 @@ private FetchRequestData buildFetchRequest() { .setReplicaState(new FetchRequestData.ReplicaState().setReplicaId(quorum.localIdOrSentinel())); } - private long maybeSendAnyVoterFetch(long currentTimeMs) { + private long maybeSendFetchToAnyBootstrap(long currentTimeMs) { Optional readyNode = requestManager.findReadyBootstrapServer(currentTimeMs); if (readyNode.isPresent()) { return maybeSendRequest( @@ -2904,18 +3008,16 @@ private long pollResigned(long currentTimeMs) { GracefulShutdown shutdown = this.shutdown.get(); final long stateTimeoutMs; if (shutdown != null) { - // If we are shutting down, then we will remain in the resigned state + // If the replica is shutting down, it will remain in the resigned state // until either the shutdown expires or an election bumps the epoch stateTimeoutMs = shutdown.remainingTimeMs(); } else if (state.hasElectionTimeoutExpired(currentTimeMs)) { - if (quorum.isVoter()) { - transitionToCandidate(currentTimeMs); - } else { - // It is possible that the old leader is not a voter in the new voter set. - // In that case increase the epoch and transition to unattached. The epoch needs - // to be increased to avoid FETCH responses with the leader being this replica. - transitionToUnattached(quorum.epoch() + 1); - } + // The replica stays in resigned state for an election timeout period to allow end quorum requests + // to be processed, and to give other replicas a chance to become leader. When transitioning out + // of resigned state, the epoch must be increased to avoid FETCH responses with the leader + // being this replica, and to avoid this replica attempting to transition into follower state with + // itself as the leader. 
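+            // OptionalInt.empty() signals that no leader is known for the bumped epoch, so the replica
+            // starts the new epoch unattached without a leader.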
+ transitionToUnattached(quorum.epoch() + 1, OptionalInt.empty()); stateTimeoutMs = 0L; } else { stateTimeoutMs = state.remainingElectionTimeMs(currentTimeMs); @@ -2959,15 +3061,16 @@ private long pollLeader(long currentTimeMs) { } private long maybeSendVoteRequests( - CandidateState state, + NomineeState state, long currentTimeMs ) { // Continue sending Vote requests as long as we still have a chance to win the election - if (!state.isVoteRejected()) { + if (!state.epochElection().isVoteRejected()) { VoterSet voters = partitionState.lastVoterSet(); + boolean preVote = quorum.isProspective(); return maybeSendRequest( currentTimeMs, - state.unrecordedVoters(), + state.epochElection().unrecordedVoters(), voterId -> voters .voterNode(voterId, channel.listenerName()) .orElseThrow(() -> @@ -2979,7 +3082,7 @@ private long maybeSendVoteRequests( ) ) ), - this::buildVoteRequest + voterId -> buildVoteRequest(voterId, preVote) ); } return Long.MAX_VALUE; @@ -2990,7 +3093,7 @@ private long pollCandidate(long currentTimeMs) { GracefulShutdown shutdown = this.shutdown.get(); if (shutdown != null) { - // If we happen to shutdown while we are a candidate, we will continue + // If we happen to shut down while we are a candidate, we will continue // with the current election until one of the following conditions is met: // 1) we are elected as leader (which allows us to resign) // 2) another leader is elected @@ -2999,20 +3102,53 @@ private long pollCandidate(long currentTimeMs) { return Math.min(shutdown.remainingTimeMs(), minRequestBackoffMs); } else if (state.isBackingOff()) { if (state.isBackoffComplete(currentTimeMs)) { - logger.info("Re-elect as candidate after election backoff has completed"); - transitionToCandidate(currentTimeMs); + logger.info("Transition to prospective after election backoff has completed"); + transitionToProspective(currentTimeMs); return 0L; } return state.remainingBackoffMs(currentTimeMs); } else if (state.hasElectionTimeoutExpired(currentTimeMs)) { - long backoffDurationMs = binaryExponentialElectionBackoffMs(state.retries()); - logger.info("Election has timed out, backing off for {}ms before becoming a candidate again", - backoffDurationMs); - state.startBackingOff(currentTimeMs, backoffDurationMs); - return backoffDurationMs; + logger.info("Election was not granted, transitioning to prospective"); + transitionToProspective(currentTimeMs); + return 0L; } else { + long minVoteRequestBackoffMs = maybeSendVoteRequests(state, currentTimeMs); + return Math.min(minVoteRequestBackoffMs, state.remainingElectionTimeMs(currentTimeMs)); + } + } + + private long pollProspective(long currentTimeMs) { + ProspectiveState state = quorum.prospectiveStateOrThrow(); + GracefulShutdown shutdown = this.shutdown.get(); + + if (shutdown != null) { long minRequestBackoffMs = maybeSendVoteRequests(state, currentTimeMs); - return Math.min(minRequestBackoffMs, state.remainingElectionTimeMs(currentTimeMs)); + return Math.min(shutdown.remainingTimeMs(), minRequestBackoffMs); + } else if (state.hasElectionTimeoutExpired(currentTimeMs)) { + logger.info( + "Election timed out before receiving sufficient vote responses to become candidate. 
" + + "Current epoch election state: {}", + state.epochElection() + ); + prospectiveTransitionAfterElectionLoss(state, currentTimeMs); + return 0L; + } else { + long minVoteRequestBackoffMs = maybeSendVoteRequests(state, currentTimeMs); + return Math.min(minVoteRequestBackoffMs, state.remainingElectionTimeMs(currentTimeMs)); + } + } + + private void prospectiveTransitionAfterElectionLoss(ProspectiveState prospective, long currentTimeMs) { + // If the replica knows of a leader, it transitions to follower. Otherwise, it transitions to unattached. + if (prospective.election().hasLeader() && !prospective.leaderEndpoints().isEmpty()) { + transitionToFollower( + quorum().epoch(), + prospective.election().leaderId(), + prospective.leaderEndpoints(), + currentTimeMs + ); + } else { + transitionToUnattached(quorum().epoch(), prospective.election().optionalLeaderId()); } } @@ -3033,8 +3169,8 @@ private long pollFollowerAsVoter(FollowerState state, long currentTimeMs) { // skip the transition to candidate in any case. backoffMs = 0; } else if (state.hasFetchTimeoutExpired(currentTimeMs)) { - logger.info("Become candidate due to fetch timeout"); - transitionToCandidate(currentTimeMs); + logger.info("Transitioning to Prospective state due to fetch timeout"); + transitionToProspective(currentTimeMs); backoffMs = 0; } else if (state.hasUpdateVoterPeriodExpired(currentTimeMs)) { if (partitionState.lastKraftVersion().isReconfigSupported() && @@ -3045,7 +3181,7 @@ private long pollFollowerAsVoter(FollowerState state, long currentTimeMs) { } state.resetUpdateVoterPeriod(currentTimeMs); } else { - backoffMs = maybeSendFetchOrFetchSnapshot(state, currentTimeMs); + backoffMs = maybeSendFetchToBestNode(state, currentTimeMs); } return Math.min( @@ -3059,28 +3195,30 @@ private long pollFollowerAsVoter(FollowerState state, long currentTimeMs) { private long pollFollowerAsObserver(FollowerState state, long currentTimeMs) { if (state.hasFetchTimeoutExpired(currentTimeMs)) { - return maybeSendAnyVoterFetch(currentTimeMs); + return maybeSendFetchToAnyBootstrap(currentTimeMs); } else { - final long backoffMs; - - // If the current leader is backing off due to some failure or if the - // request has timed out, then we attempt to send the Fetch to another - // voter in order to discover if there has been a leader change. - Node leaderNode = state.leaderNode(channel.listenerName()); - if (requestManager.hasRequestTimedOut(leaderNode, currentTimeMs)) { - // Once the request has timed out backoff the connection - requestManager.reset(leaderNode); - backoffMs = maybeSendAnyVoterFetch(currentTimeMs); - } else if (requestManager.isBackingOff(leaderNode, currentTimeMs)) { - backoffMs = maybeSendAnyVoterFetch(currentTimeMs); - } else if (!requestManager.hasAnyInflightRequest(currentTimeMs)) { - backoffMs = maybeSendFetchOrFetchSnapshot(state, currentTimeMs); - } else { - backoffMs = requestManager.backoffBeforeAvailableBootstrapServer(currentTimeMs); - } + return maybeSendFetchToBestNode(state, currentTimeMs); + } + } - return Math.min(backoffMs, state.remainingFetchTimeMs(currentTimeMs)); + private long maybeSendFetchToBestNode(FollowerState state, long currentTimeMs) { + // If the current leader is backing off due to some failure or if the + // request has timed out, then we attempt to send the Fetch to another + // voter in order to discover if there has been a leader change. 
+ final long backoffMs; + Node leaderNode = state.leaderNode(channel.listenerName()); + if (requestManager.hasRequestTimedOut(leaderNode, currentTimeMs)) { + // Once the request has timed out backoff the connection + requestManager.reset(leaderNode); + backoffMs = maybeSendFetchToAnyBootstrap(currentTimeMs); + } else if (requestManager.isBackingOff(leaderNode, currentTimeMs)) { + backoffMs = maybeSendFetchToAnyBootstrap(currentTimeMs); + } else if (!requestManager.hasAnyInflightRequest(currentTimeMs)) { + backoffMs = maybeSendFetchOrFetchSnapshot(state, currentTimeMs); + } else { + backoffMs = requestManager.backoffBeforeAvailableBootstrapServer(currentTimeMs); } + return Math.min(backoffMs, state.remainingFetchTimeMs(currentTimeMs)); } private long maybeSendFetchOrFetchSnapshot(FollowerState state, long currentTimeMs) { @@ -3125,7 +3263,7 @@ private long pollUnattached(long currentTimeMs) { if (quorum.isVoter()) { return pollUnattachedAsVoter(state, currentTimeMs); } else { - return pollUnattachedAsObserver(state, currentTimeMs); + return pollUnattachedCommon(state, currentTimeMs); } } @@ -3136,15 +3274,15 @@ private long pollUnattachedAsVoter(UnattachedState state, long currentTimeMs) { // shutdown completes or an epoch bump forces another state transition return shutdown.remainingTimeMs(); } else if (state.hasElectionTimeoutExpired(currentTimeMs)) { - transitionToCandidate(currentTimeMs); + transitionToProspective(currentTimeMs); return 0L; } else { - return state.remainingElectionTimeMs(currentTimeMs); + return pollUnattachedCommon(state, currentTimeMs); } } - private long pollUnattachedAsObserver(UnattachedState state, long currentTimeMs) { - long fetchBackoffMs = maybeSendAnyVoterFetch(currentTimeMs); + private long pollUnattachedCommon(UnattachedState state, long currentTimeMs) { + long fetchBackoffMs = maybeSendFetchToAnyBootstrap(currentTimeMs); return Math.min(fetchBackoffMs, state.remainingElectionTimeMs(currentTimeMs)); } @@ -3153,6 +3291,8 @@ private long pollCurrentState(long currentTimeMs) { return pollLeader(currentTimeMs); } else if (quorum.isCandidate()) { return pollCandidate(currentTimeMs); + } else if (quorum.isProspective()) { + return pollProspective(currentTimeMs); } else if (quorum.isFollower()) { return pollFollower(currentTimeMs); } else if (quorum.isUnattached()) { @@ -3399,7 +3539,7 @@ public void resign(int epoch) { // Note that if we transition to another state before we have a chance to // request resignation, then we consider the call fulfilled. 
Optional> leaderStateOpt = quorum.maybeLeaderState(); - if (!leaderStateOpt.isPresent()) { + if (leaderStateOpt.isEmpty()) { logger.debug("Ignoring call to resign from epoch {} since this node is " + "no longer the leader", epoch); return; @@ -3467,8 +3607,7 @@ public void close() { if (kafkaRaftMetrics != null) { kafkaRaftMetrics.close(); } - if (memoryPool instanceof BatchMemoryPool) { - BatchMemoryPool batchMemoryPool = (BatchMemoryPool) memoryPool; + if (memoryPool instanceof BatchMemoryPool batchMemoryPool) { batchMemoryPool.releaseRetained(); } } @@ -3702,7 +3841,7 @@ private boolean shouldFireLeaderChange(LeaderAndEpoch leaderAndEpoch) { return true; } else { return leaderAndEpoch.leaderId().isPresent() && - !lastFiredLeaderChange.leaderId().isPresent(); + lastFiredLeaderChange.leaderId().isEmpty(); } } diff --git a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java index c09282c87c9ea..36579499e625c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java +++ b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java @@ -35,7 +35,6 @@ import org.slf4j.Logger; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -112,7 +111,7 @@ protected LeaderState( new ReplicaState(voterNode.voterKey(), hasAcknowledgedLeader, voterNode.listeners()) ); } - this.grantingVoters = Collections.unmodifiableSet(new HashSet<>(grantingVoters)); + this.grantingVoters = Set.copyOf(grantingVoters); this.log = logContext.logger(LeaderState.class); this.accumulator = Objects.requireNonNull(accumulator, "accumulator must be non-null"); // use the 1.5x of fetch timeout to tolerate some network transition time or other IO time. 
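The constructor comment above sizes the check-quorum timer at 1.5x the fetch timeout so a leader tolerates some network and I/O jitter before concluding it can no longer reach a majority of voters. A minimal sketch of that bookkeeping, assuming invented names (CheckQuorumTracker, recordFetch) rather than the real LeaderState fields, and simplifying the exact majority accounting:

import java.util.HashSet;
import java.util.Set;

// Simplified sketch, not the real LeaderState: reset a deadline of 1.5x the
// fetch timeout whenever a majority of the voters (leader included) has fetched.
final class CheckQuorumTracker {
    private final Set<Integer> voterIds;
    private final int localId;
    private final long checkQuorumTimeoutMs;
    private final Set<Integer> fetchedVoters = new HashSet<>();
    private long deadlineMs;

    CheckQuorumTracker(Set<Integer> voterIds, int localId, long fetchTimeoutMs, long nowMs) {
        this.voterIds = Set.copyOf(voterIds);
        this.localId = localId;
        this.checkQuorumTimeoutMs = fetchTimeoutMs + fetchTimeoutMs / 2; // 1.5x fetch timeout
        this.deadlineMs = nowMs + checkQuorumTimeoutMs;
        this.fetchedVoters.add(localId); // the leader always counts itself
    }

    // Record a fetch from a voter; once a majority has fetched, restart the timer
    // and start counting a fresh round (keeping the leader itself in the tally).
    void recordFetch(int voterId, long nowMs) {
        if (!voterIds.contains(voterId)) {
            return;
        }
        fetchedVoters.add(voterId);
        if (fetchedVoters.size() >= voterIds.size() / 2 + 1) {
            fetchedVoters.clear();
            fetchedVoters.add(localId);
            deadlineMs = nowMs + checkQuorumTimeoutMs;
        }
    }

    // Time left before the leader should resign because it cannot reach a majority.
    long remainingMs(long nowMs) {
        return Math.max(0L, deadlineMs - nowMs);
    }
}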
@@ -157,7 +156,10 @@ public long timeUntilCheckQuorumExpires(long currentTimeMs) { "Current fetched voters are {}, and voters are {}", checkQuorumTimeoutMs, fetchedVoters, - voterStates.values().stream().map(voter -> voter.replicaKey) + voterStates.values() + .stream() + .map(voter -> voter.replicaKey) + .collect(Collectors.toUnmodifiableSet()) ); } return remainingMs; @@ -407,7 +409,7 @@ public Optional highWatermark() { @Override public ElectionState election() { - return ElectionState.withElectedLeader(epoch, localReplicaKey.id(), voterStates.keySet()); + return ElectionState.withElectedLeader(epoch, localReplicaKey.id(), Optional.empty(), voterStates.keySet()); } @Override @@ -809,9 +811,9 @@ void updateFollowerState( public int compareTo(ReplicaState that) { if (this.endOffset.equals(that.endOffset)) return this.replicaKey.compareTo(that.replicaKey); - else if (!this.endOffset.isPresent()) + else if (this.endOffset.isEmpty()) return 1; - else if (!that.endOffset.isPresent()) + else if (that.endOffset.isEmpty()) return -1; else return Long.compare(that.endOffset.get().offset(), this.endOffset.get().offset()); @@ -832,10 +834,11 @@ public String toString() { } @Override - public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { log.debug( - "Rejecting vote request from candidate ({}) since we are already leader in epoch {}", - candidateKey, + "Rejecting Vote request (preVote={}) from replica ({}) since we are already leader in epoch {}", + isPreVote, + replicaKey, epoch ); return false; diff --git a/raft/src/main/java/org/apache/kafka/raft/LogOffsetMetadata.java b/raft/src/main/java/org/apache/kafka/raft/LogOffsetMetadata.java index 38a90c84a4e44..216ac874e6231 100644 --- a/raft/src/main/java/org/apache/kafka/raft/LogOffsetMetadata.java +++ b/raft/src/main/java/org/apache/kafka/raft/LogOffsetMetadata.java @@ -51,8 +51,7 @@ public String toString() { @Override public boolean equals(Object obj) { - if (obj instanceof LogOffsetMetadata) { - LogOffsetMetadata other = (LogOffsetMetadata) obj; + if (obj instanceof LogOffsetMetadata other) { return this.offset == other.offset && this.metadata.equals(other.metadata); } else { diff --git a/raft/src/main/java/org/apache/kafka/raft/NomineeState.java b/raft/src/main/java/org/apache/kafka/raft/NomineeState.java new file mode 100644 index 0000000000000..dc8952048c688 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/NomineeState.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft; + +import org.apache.kafka.raft.internals.EpochElection; + +interface NomineeState extends EpochState { + EpochElection epochElection(); + + /** + * Record a granted vote from one of the voters. + * + * @param remoteNodeId The id of the voter + * @return true if the voter had not been previously recorded + * @throws IllegalArgumentException + */ + boolean recordGrantedVote(int remoteNodeId); + + /** + * Record a rejected vote from one of the voters. + * + * @param remoteNodeId The id of the voter + * @return true if the voter had not been previously recorded + * @throws IllegalArgumentException + */ + boolean recordRejectedVote(int remoteNodeId); + + /** + * Returns true if the election timeout has expired, false otherwise. + * @param currentTimeMs The current time in milliseconds + */ + boolean hasElectionTimeoutExpired(long currentTimeMs); + + /** + * Returns the remaining time in milliseconds until the election timeout expires. + * @param currentTimeMs The current time in milliseconds + */ + long remainingElectionTimeMs(long currentTimeMs); +} diff --git a/raft/src/main/java/org/apache/kafka/raft/ProspectiveState.java b/raft/src/main/java/org/apache/kafka/raft/ProspectiveState.java new file mode 100644 index 0000000000000..d66fe2c3e5f5f --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/ProspectiveState.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft; + +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Timer; +import org.apache.kafka.raft.internals.EpochElection; + +import org.slf4j.Logger; + +import java.util.Optional; +import java.util.OptionalInt; + +import static org.apache.kafka.raft.QuorumState.unattachedOrProspectiveCanGrantVote; + +public class ProspectiveState implements NomineeState { + private final int localId; + private final int epoch; + private final OptionalInt leaderId; + private final Endpoints leaderEndpoints; + private final Optional<ReplicaKey> votedKey; + private final VoterSet voters; + private final EpochElection epochElection; + private final Optional<LogOffsetMetadata> highWatermark; + private final int retries; + private final long electionTimeoutMs; + private final Timer electionTimer; + private final Logger log; + + /** + * The lifetime of a prospective state is the following. + * + * 1. Once started, it will send prevote requests and keep record of the received vote responses + * 2. If it receives a message denoting a leader with a higher epoch, it will transition to follower state. + * 3. If majority votes granted, it will transition to candidate state. + * 4.
If majority votes rejected or election times out, it will transition to unattached or follower state + * depending on if it knows the leader id and endpoints or not + */ + public ProspectiveState( + Time time, + int localId, + int epoch, + OptionalInt leaderId, + Endpoints leaderEndpoints, + Optional<ReplicaKey> votedKey, + VoterSet voters, + Optional<LogOffsetMetadata> highWatermark, + int retries, + int electionTimeoutMs, + LogContext logContext + ) { + this.localId = localId; + this.epoch = epoch; + this.leaderId = leaderId; + this.leaderEndpoints = leaderEndpoints; + this.votedKey = votedKey; + this.voters = voters; + this.highWatermark = highWatermark; + this.retries = retries; + this.electionTimeoutMs = electionTimeoutMs; + this.electionTimer = time.timer(electionTimeoutMs); + this.log = logContext.logger(ProspectiveState.class); + + this.epochElection = new EpochElection(voters.voterKeys()); + epochElection.recordVote(localId, true); + } + + public Optional<ReplicaKey> votedKey() { + return votedKey; + } + + @Override + public EpochElection epochElection() { + return epochElection; + } + + public int retries() { + return retries; + } + + @Override + public boolean recordGrantedVote(int remoteNodeId) { + return epochElection().recordVote(remoteNodeId, true); + } + + @Override + public boolean recordRejectedVote(int remoteNodeId) { + if (remoteNodeId == localId) { + throw new IllegalArgumentException("Attempted to reject vote from ourselves"); + } + return epochElection().recordVote(remoteNodeId, false); + } + + @Override + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { + return unattachedOrProspectiveCanGrantVote( + leaderId, + votedKey, + epoch, + replicaKey, + isLogUpToDate, + isPreVote, + log + ); + } + + @Override + public boolean hasElectionTimeoutExpired(long currentTimeMs) { + electionTimer.update(currentTimeMs); + return electionTimer.isExpired(); + } + + @Override + public long remainingElectionTimeMs(long currentTimeMs) { + electionTimer.update(currentTimeMs); + return electionTimer.remainingMs(); + } + + @Override + public ElectionState election() { + if (leaderId.isPresent()) { + return ElectionState.withElectedLeader(epoch, leaderId.getAsInt(), votedKey, voters.voterIds()); + } else if (votedKey.isPresent()) { + return ElectionState.withVotedCandidate(epoch, votedKey.get(), voters.voterIds()); + } else { + return ElectionState.withUnknownLeader(epoch, voters.voterIds()); + } + } + + @Override + public int epoch() { + return epoch; + } + + @Override + public Endpoints leaderEndpoints() { + return leaderEndpoints; + } + + @Override + public Optional<LogOffsetMetadata> highWatermark() { + return highWatermark; + } + + @Override + public String toString() { + return String.format( + "ProspectiveState(epoch=%d, leaderId=%s, retries=%d, votedKey=%s, epochElection=%s, " + + "electionTimeoutMs=%s, highWatermark=%s)", + epoch, + leaderId, + retries, + votedKey, + epochElection, + electionTimeoutMs, + highWatermark + ); + } + + @Override + public String name() { + return "Prospective"; + } + + @Override + public void close() {} +} diff --git a/raft/src/main/java/org/apache/kafka/raft/QuorumState.java b/raft/src/main/java/org/apache/kafka/raft/QuorumState.java index 0598ce062dfa5..9233cc1951276 100644 --- a/raft/src/main/java/org/apache/kafka/raft/QuorumState.java +++ b/raft/src/main/java/org/apache/kafka/raft/QuorumState.java @@ -39,44 +39,46 @@ * how they are triggered: * * Resigned transitions to: - * Unattached: After learning of a new election with a higher epoch - * Candidate: After
expiration of the election timeout - * Follower: After discovering a leader with an equal or larger epoch + * Unattached: After learning of a new election with a higher epoch, or expiration of the election timeout + * Follower: After discovering a leader with a larger epoch * * Unattached transitions to: - * Unattached: After learning of a new election with a higher epoch or after voting - * Candidate: After expiration of the election timeout - * Follower: After discovering a leader with an equal or larger epoch + * Unattached: After learning of a new election with a higher epoch or after giving a binding vote + * Prospective: After expiration of the election timeout + * Follower: After discovering a leader with an equal or larger epoch * - * Voted transitions to: - * Unattached: After learning of a new election with a higher epoch - * Candidate: After expiration of the election timeout + * Prospective transitions to: + * Unattached: After learning of an election with a higher epoch, or node did not have last + * known leader and loses/times out election + * Candidate: After receiving a majority of PreVotes granted + * Follower: After discovering a leader with a larger epoch, or node had a last known leader + * and loses/times out election * * Candidate transitions to: - * Unattached: After learning of a new election with a higher epoch - * Candidate: After expiration of the election timeout - * Leader: After receiving a majority of votes + * Unattached: After learning of a new election with a higher epoch + * Prospective: After expiration of the election timeout or loss of election + * Leader: After receiving a majority of votes * * Leader transitions to: - * Unattached: After learning of a new election with a higher epoch - * Resigned: When shutting down gracefully + * Unattached: After learning of a new election with a higher epoch + * Resigned: When shutting down gracefully * * Follower transitions to: - * Unattached: After learning of a new election with a higher epoch - * Candidate: After expiration of the fetch timeout - * Follower: After discovering a leader with a larger epoch + * Unattached: After learning of a new election with a higher epoch + * Prospective: After expiration of the fetch timeout + * Follower: After discovering a leader with a larger epoch * - * Observers follow a simpler state machine. The Voted/Candidate/Leader/Resigned + * Observers follow a simpler state machine. The Prospective/Candidate/Leader/Resigned * states are not possible for observers, so the only transitions that are possible * are between Unattached and Follower. 
* * Unattached transitions to: * Unattached: After learning of a new election with a higher epoch - * Follower: After discovering a leader with an equal or larger epoch + * Follower: After discovering a leader with an equal or larger epoch * * Follower transitions to: * Unattached: After learning of a new election with a higher epoch - * Follower: After discovering a leader with a larger epoch + * Follower: After discovering a leader with a larger epoch * */ public class QuorumState { @@ -140,7 +142,7 @@ public void initialize(OffsetAndEpoch logEndOffsetAndEpoch) throws IllegalStateE ElectionState election = readElectionState(); final EpochState initialState; - if (election.hasVoted() && !localId.isPresent()) { + if (election.hasVoted() && localId.isEmpty()) { throw new IllegalStateException( String.format( "Initialized quorum state (%s) with a voted candidate but without a local id", @@ -197,17 +199,6 @@ public void initialize(OffsetAndEpoch logEndOffsetAndEpoch) throws IllegalStateE randomElectionTimeoutMs(), logContext ); - } else if (election.hasVoted()) { - initialState = new UnattachedState( - time, - election.epoch(), - OptionalInt.empty(), - Optional.of(election.votedKey()), - partitionState.lastVoterSet().voterIds(), - Optional.empty(), - randomElectionTimeoutMs(), - logContext - ); } else if (election.hasLeader()) { VoterSet voters = partitionState.lastVoterSet(); Endpoints leaderEndpoints = voters.listeners(election.leaderId()); @@ -230,7 +221,7 @@ public void initialize(OffsetAndEpoch logEndOffsetAndEpoch) throws IllegalStateE time, election.epoch(), OptionalInt.of(election.leaderId()), - Optional.empty(), + election.optionalVotedKey(), partitionState.lastVoterSet().voterIds(), Optional.empty(), randomElectionTimeoutMs(), @@ -242,6 +233,7 @@ public void initialize(OffsetAndEpoch logEndOffsetAndEpoch) throws IllegalStateE election.epoch(), election.leaderId(), leaderEndpoints, + election.optionalVotedKey(), voters.voterIds(), Optional.empty(), fetchTimeoutMs, @@ -253,7 +245,7 @@ public void initialize(OffsetAndEpoch logEndOffsetAndEpoch) throws IllegalStateE time, election.epoch(), OptionalInt.empty(), - Optional.empty(), + election.optionalVotedKey(), partitionState.lastVoterSet().voterIds(), Optional.empty(), randomElectionTimeoutMs(), @@ -319,6 +311,10 @@ public OptionalInt leaderId() { return OptionalInt.empty(); } + public Optional votedKey() { + return state.election().optionalVotedKey(); + } + public boolean hasLeader() { return leaderId().isPresent(); } @@ -332,7 +328,7 @@ public Endpoints leaderEndpoints() { } public boolean isVoter() { - if (!localId.isPresent()) { + if (localId.isEmpty()) { return false; } @@ -372,14 +368,23 @@ public void transitionToResigned(List preferredSuccessors) { } /** - * Transition to the "unattached" state. This means we have found an epoch greater than the current epoch, - * but we do not yet know of the elected leader. + * Transition to the "unattached" state. This means one of the following + * 1. the replica has found an epoch greater than the current epoch. + * 2. the replica has transitioned from Prospective with the same epoch. + * 3. the replica has transitioned from Resigned with current epoch + 1. + * Note, if the replica is transitioning from unattached to add voted state and there is no epoch change, + * it takes the route of unattachedAddVotedState instead. 
*/ - public void transitionToUnattached(int epoch) { + public void transitionToUnattached(int epoch, OptionalInt leaderId) { int currentEpoch = state.epoch(); - if (epoch <= currentEpoch) { - throw new IllegalStateException("Cannot transition to Unattached with epoch= " + epoch + - " from current state " + state); + if (epoch < currentEpoch || (epoch == currentEpoch && !isProspective())) { + throw new IllegalStateException( + String.format( + "Cannot transition to Unattached with epoch %d from current state %s", + epoch, + state + ) + ); } final long electionTimeoutMs; @@ -389,15 +394,22 @@ public void transitionToUnattached(int epoch) { electionTimeoutMs = candidateStateOrThrow().remainingElectionTimeMs(time.milliseconds()); } else if (isUnattached()) { electionTimeoutMs = unattachedStateOrThrow().remainingElectionTimeMs(time.milliseconds()); + } else if (isProspective() && !prospectiveStateOrThrow().epochElection().isVoteRejected()) { + electionTimeoutMs = prospectiveStateOrThrow().remainingElectionTimeMs(time.milliseconds()); + } else if (isResigned()) { + electionTimeoutMs = resignedStateOrThrow().remainingElectionTimeMs(time.milliseconds()); } else { electionTimeoutMs = randomElectionTimeoutMs(); } + // If the local replica is transitioning to Unattached in the same epoch (i.e. from Prospective), it + // should retain its voted key if it exists, so that it will not vote again in the same epoch. + Optional votedKey = epoch == currentEpoch ? votedKey() : Optional.empty(); durableTransitionTo(new UnattachedState( time, epoch, - OptionalInt.empty(), - Optional.empty(), + leaderId, + votedKey, partitionState.lastVoterSet().voterIds(), state.highWatermark(), electionTimeoutMs, @@ -406,12 +418,12 @@ public void transitionToUnattached(int epoch) { } /** - * Grant a vote to a candidate. We will transition/remain in Unattached - * state until either the election timeout expires or a leader is elected. In particular, - * we do not begin fetching until the election has concluded and - * {@link #transitionToFollower(int, int, Endpoints)} is invoked. + * Grant a vote to a candidate as Unattached. The replica will transition to Unattached with votedKey + * state in the same epoch and remain there until either the election timeout expires or it discovers the leader. + * Note, if the replica discovers a higher epoch or is transitioning from Prospective, it takes + * the route of transitionToUnattached instead. 
*/ - public void transitionToUnattachedVotedState( + public void unattachedAddVotedState( int epoch, ReplicaKey candidateKey ) { @@ -419,50 +431,93 @@ public void transitionToUnattachedVotedState( if (localId.isPresent() && candidateKey.id() == localId.getAsInt()) { throw new IllegalStateException( String.format( - "Cannot transition to Voted for %s and epoch %d since it matches the local " + + "Cannot add voted key (%s) to current state (%s) in epoch %d since it matches the local " + "broker.id", candidateKey, + state, epoch ) ); - } else if (!localId.isPresent()) { - throw new IllegalStateException("Cannot transition to voted without a replica id"); - } else if (epoch < currentEpoch) { + } else if (localId.isEmpty()) { + throw new IllegalStateException("Cannot add voted state without a replica id"); + } else if (epoch != currentEpoch || !isUnattachedNotVoted()) { throw new IllegalStateException( String.format( - "Cannot transition to Voted for %s and epoch %d since the current epoch " + - "(%d) is larger", + "Cannot add voted key (%s) to current state (%s) in epoch %d", candidateKey, - epoch, - currentEpoch + state, + epoch + ) + ); + } + + // Note that we reset the election timeout after voting for a candidate because we + // know that the candidate has at least as good of a chance of getting elected as us + durableTransitionTo( + new UnattachedState( + time, + epoch, + state.election().optionalLeaderId(), + Optional.of(candidateKey), + partitionState.lastVoterSet().voterIds(), + state.highWatermark(), + randomElectionTimeoutMs(), + logContext + ) + ); + } + + /** + * Grant a vote to a candidate as Prospective. The replica will transition to Prospective with votedKey + * state in the same epoch. Note, if the replica is transitioning to Prospective due to a fetch/election timeout + * or loss of election as candidate, it takes the route of transitionToProspective instead. 
+ */ + public void prospectiveAddVotedState( + int epoch, + ReplicaKey candidateKey + ) { + int currentEpoch = state.epoch(); + if (localId.isPresent() && candidateKey.id() == localId.getAsInt()) { + throw new IllegalStateException( + String.format( + "Cannot add voted key (%s) to current state (%s) in epoch %d since it matches the local " + + "broker.id", + candidateKey, + state, + epoch ) ); - } else if (epoch == currentEpoch && !isUnattachedNotVoted()) { + } else if (localId.isEmpty()) { + throw new IllegalStateException("Cannot add voted state without a replica id"); + } else if (epoch != currentEpoch || !isProspectiveNotVoted()) { throw new IllegalStateException( String.format( - "Cannot transition to Voted for %s and epoch %d from the current state (%s)", + "Cannot add voted key (%s) to current state (%s) in epoch %d", candidateKey, - epoch, - state + state, + epoch ) ); } + ProspectiveState prospectiveState = prospectiveStateOrThrow(); // Note that we reset the election timeout after voting for a candidate because we // know that the candidate has at least as good of a chance of getting elected as us durableTransitionTo( - new UnattachedState( + new ProspectiveState( time, + localIdOrThrow(), epoch, - OptionalInt.empty(), + state.election().optionalLeaderId(), + state.leaderEndpoints(), Optional.of(candidateKey), - partitionState.lastVoterSet().voterIds(), + partitionState.lastVoterSet(), state.highWatermark(), + prospectiveState.retries(), randomElectionTimeoutMs(), logContext ) ); - log.debug("Voted for candidate {} in epoch {}", candidateKey, epoch); } /** @@ -519,12 +574,18 @@ public void transitionToFollower(int epoch, int leaderId, Endpoints endpoints) { } } + // State transitions within the same epoch should preserve voted key if it exists. This prevents + // replicas from voting multiple times in the same epoch, which could violate the Raft invariant of + // at most one leader elected in an epoch. + Optional votedKey = epoch == currentEpoch ? votedKey() : Optional.empty(); + durableTransitionTo( new FollowerState( time, epoch, leaderId, endpoints, + votedKey, partitionState.lastVoterSet().voterIds(), state.highWatermark(), fetchTimeoutMs, @@ -533,26 +594,55 @@ public void transitionToFollower(int epoch, int leaderId, Endpoints endpoints) { ); } - public void transitionToCandidate() { + /** + * Transition to the "prospective" state. This means the replica experienced a fetch/election timeout or + * loss of election as candidate. Note, if the replica is transitioning from prospective to add voted state + * and there is no epoch change, it takes the route of prospectiveAddVotedState instead. + */ + public void transitionToProspective() { if (isObserver()) { throw new IllegalStateException( String.format( - "Cannot transition to Candidate since the local id (%s) and directory id (%s) " + + "Cannot transition to Prospective since the local id (%s) and directory id (%s) " + "is not one of the voters %s", localId, localDirectoryId, partitionState.lastVoterSet() ) ); - } else if (isLeader()) { - throw new IllegalStateException("Cannot transition to Candidate since the local broker.id=" + localId + - " since this node is already a Leader with state " + state); + } else if (isLeader() || isProspective()) { + throw new IllegalStateException("Cannot transition to Prospective since the local broker.id=" + localId + + " is state " + state); } int retries = isCandidate() ? 
candidateStateOrThrow().retries() + 1 : 1; + + // Durable transition is not necessary since there is no change to the persisted electionState + memoryTransitionTo( + new ProspectiveState( + time, + localIdOrThrow(), + epoch(), + leaderId(), + state.leaderEndpoints(), + votedKey(), + partitionState.lastVoterSet(), + state.highWatermark(), + retries, + randomElectionTimeoutMs(), + logContext + ) + ); + } + + public void transitionToCandidate() { + checkValidTransitionToCandidate(); + int newEpoch = epoch() + 1; int electionTimeoutMs = randomElectionTimeoutMs(); + int retries = isProspective() ? prospectiveStateOrThrow().retries() : 1; + durableTransitionTo(new CandidateState( time, localIdOrThrow(), @@ -566,6 +656,30 @@ public void transitionToCandidate() { )); } + private void checkValidTransitionToCandidate() { + if (isObserver()) { + throw new IllegalStateException( + String.format( + "Cannot transition to Candidate since the local id (%s) and directory id (%s) " + + "is not one of the voters %s", + localId, + localDirectoryId, + partitionState.lastVoterSet() + ) + ); + } + // Only Prospective is allowed to transition to Candidate + if (!isProspective()) { + throw new IllegalStateException( + String.format( + "Cannot transition to Candidate since the local broker.id=%s is state %s", + localId, + state + ) + ); + } + } + public LeaderState transitionToLeader(long epochStartOffset, BatchAccumulator accumulator) { if (isObserver()) { throw new IllegalStateException( @@ -582,7 +696,7 @@ public LeaderState transitionToLeader(long epochStartOffset, BatchAccumul } CandidateState candidateState = candidateStateOrThrow(); - if (!candidateState.isVoteGranted()) + if (!candidateState.epochElection().isVoteGranted()) throw new IllegalStateException("Cannot become leader without majority votes granted"); // Note that the leader does not retain the high watermark that was known @@ -604,7 +718,7 @@ public LeaderState transitionToLeader(long epochStartOffset, BatchAccumul partitionState.lastVoterSet(), partitionState.lastVoterSetOffset(), partitionState.lastKraftVersion(), - candidateState.grantingVoters(), + candidateState.epochElection().grantingVoters(), accumulator, localListeners, fetchTimeoutMs, @@ -641,8 +755,8 @@ private int randomElectionTimeoutMs() { return electionTimeoutMs + random.nextInt(electionTimeoutMs); } - public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { - return state.canGrantVote(candidateKey, isLogUpToDate); + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { + return state.canGrantVote(replicaKey, isLogUpToDate, isPreVote); } public FollowerState followerStateOrThrow() { @@ -661,9 +775,9 @@ public Optional maybeUnattachedState() { } public UnattachedState unattachedStateOrThrow() { - if (isUnattached()) - return (UnattachedState) state; - throw new IllegalStateException("Expected to be Unattached, but current state is " + state); + return maybeUnattachedState().orElseThrow( + () -> new IllegalStateException("Expected to be Unattached, but current state is " + state) + ); } public LeaderState leaderStateOrThrow() { @@ -687,12 +801,42 @@ public ResignedState resignedStateOrThrow() { throw new IllegalStateException("Expected to be Resigned, but current state is " + state); } + public Optional maybeProspectiveState() { + EpochState fixedState = state; + if (fixedState instanceof ProspectiveState) { + return Optional.of((ProspectiveState) fixedState); + } else { + return Optional.empty(); + } + } + + public 
ProspectiveState prospectiveStateOrThrow() { + return maybeProspectiveState().orElseThrow( + () -> new IllegalStateException("Expected to be Prospective, but current state is " + state) + ); + } + + public boolean isProspectiveNotVoted() { + return maybeProspectiveState().filter(prospective -> prospective.votedKey().isEmpty()).isPresent(); + } + + public boolean isProspectiveAndVoted() { + return maybeProspectiveState().flatMap(ProspectiveState::votedKey).isPresent(); + } + public CandidateState candidateStateOrThrow() { if (isCandidate()) return (CandidateState) state; throw new IllegalStateException("Expected to be Candidate, but current state is " + state); } + public NomineeState nomineeStateOrThrow() { + if (isNomineeState()) + return (NomineeState) state; + throw new IllegalStateException("Expected to be a NomineeState (Prospective or Candidate), " + + "but current state is " + state); + } + public LeaderAndEpoch leaderAndEpoch() { ElectionState election = state.election(); return new LeaderAndEpoch(election.optionalLeaderId(), election.epoch()); @@ -707,7 +851,7 @@ public boolean isUnattached() { } public boolean isUnattachedNotVoted() { - return maybeUnattachedState().filter(unattached -> !unattached.votedKey().isPresent()).isPresent(); + return maybeUnattachedState().filter(unattached -> unattached.votedKey().isEmpty()).isPresent(); } public boolean isUnattachedAndVoted() { @@ -722,7 +866,76 @@ public boolean isResigned() { return state instanceof ResignedState; } + public boolean isProspective() { + return state instanceof ProspectiveState; + } + public boolean isCandidate() { return state instanceof CandidateState; } + + public boolean isNomineeState() { + return state instanceof NomineeState; + } + + /** + * Determines if replica in unattached or prospective state can grant a vote request. + * + * @param leaderId local replica's optional leader id. + * @param votedKey local replica's optional voted key. 
+ * @param epoch local replica's epoch + * @param replicaKey replicaKey of nominee which sent the vote request + * @param isLogUpToDate whether the log of the nominee is up-to-date with the local replica's log + * @param isPreVote whether the vote request is a PreVote request + * @param log logger + * @return true if the local replica can grant the vote request, false otherwise + */ + public static boolean unattachedOrProspectiveCanGrantVote( + OptionalInt leaderId, + Optional votedKey, + int epoch, + ReplicaKey replicaKey, + boolean isLogUpToDate, + boolean isPreVote, + Logger log + ) { + if (isPreVote) { + if (!isLogUpToDate) { + log.debug( + "Rejecting Vote request (preVote=true) from prospective ({}) since prospective's log is not up to date with us", + replicaKey + ); + } + return isLogUpToDate; + } else if (votedKey.isPresent()) { + ReplicaKey votedReplicaKey = votedKey.get(); + if (votedReplicaKey.id() == replicaKey.id()) { + return votedReplicaKey.directoryId().isEmpty() || votedReplicaKey.directoryId().equals(replicaKey.directoryId()); + } + log.debug( + "Rejecting Vote request (preVote=false) from candidate ({}), already have voted for another " + + "candidate ({}) in epoch {}", + replicaKey, + votedKey, + epoch + ); + return false; + } else if (leaderId.isPresent()) { + // If the leader id is known it should behave similar to the follower state + log.debug( + "Rejecting Vote request (preVote=false) from candidate ({}) since we already have a leader {} in epoch {}", + replicaKey, + leaderId.getAsInt(), + epoch + ); + return false; + } else if (!isLogUpToDate) { + log.debug( + "Rejecting Vote request (preVote=false) from candidate ({}) since candidate's log is not up to date with us", + replicaKey + ); + } + + return isLogUpToDate; + } } diff --git a/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java b/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java index 018bec0d632ad..12c48955b39b7 100644 --- a/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java +++ b/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java @@ -144,11 +144,12 @@ public static FetchResponseData singletonFetchResponse( public static VoteRequestData singletonVoteRequest( TopicPartition topicPartition, String clusterId, - int candidateEpoch, - ReplicaKey candidateKey, + int replicaEpoch, + ReplicaKey replicaKey, ReplicaKey voterKey, int lastEpoch, - long lastEpochEndOffset + long lastEpochEndOffset, + boolean preVote ) { return new VoteRequestData() .setClusterId(clusterId) @@ -161,10 +162,10 @@ public static VoteRequestData singletonVoteRequest( Collections.singletonList( new VoteRequestData.PartitionData() .setPartitionIndex(topicPartition.partition()) - .setCandidateEpoch(candidateEpoch) - .setCandidateId(candidateKey.id()) - .setCandidateDirectoryId( - candidateKey + .setReplicaEpoch(replicaEpoch) + .setReplicaId(replicaKey.id()) + .setReplicaDirectoryId( + replicaKey .directoryId() .orElse(ReplicaKey.NO_DIRECTORY_ID) ) @@ -175,6 +176,7 @@ public static VoteRequestData singletonVoteRequest( ) .setLastOffsetEpoch(lastEpoch) .setLastOffset(lastEpochEndOffset) + .setPreVote(preVote) ) ) ) @@ -192,17 +194,18 @@ public static VoteResponseData singletonVoteResponse( boolean voteGranted, Endpoints endpoints ) { + VoteResponseData.PartitionData partitionData = new VoteResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + .setVoteGranted(voteGranted); + VoteResponseData response = new VoteResponseData() 
.setErrorCode(topLevelError.code()) .setTopics(Collections.singletonList( new VoteResponseData.TopicData() .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList( - new VoteResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - .setVoteGranted(voteGranted))))); + .setPartitions(Collections.singletonList(partitionData)))); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName); diff --git a/raft/src/main/java/org/apache/kafka/raft/ReplicaKey.java b/raft/src/main/java/org/apache/kafka/raft/ReplicaKey.java index a1acc39d57cff..f25a1d55ba4a7 100644 --- a/raft/src/main/java/org/apache/kafka/raft/ReplicaKey.java +++ b/raft/src/main/java/org/apache/kafka/raft/ReplicaKey.java @@ -70,7 +70,7 @@ public int hashCode() { @Override public String toString() { - return String.format("ReplicaKey(id=%d, directoryId=%s)", id, directoryId); + return String.format("ReplicaKey(id=%d, directoryId=%s)", id, directoryId.map(Uuid::toString).orElse("")); } public static ReplicaKey of(int id, Uuid directoryId) { diff --git a/raft/src/main/java/org/apache/kafka/raft/ResignedState.java b/raft/src/main/java/org/apache/kafka/raft/ResignedState.java index eaee0496b82f5..2d5fd27919f8c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/ResignedState.java +++ b/raft/src/main/java/org/apache/kafka/raft/ResignedState.java @@ -24,6 +24,7 @@ import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; /** @@ -75,7 +76,7 @@ public ResignedState( @Override public ElectionState election() { - return ElectionState.withElectedLeader(epoch, localId, voters); + return ElectionState.withElectedLeader(epoch, localId, Optional.empty(), voters); } @Override @@ -140,11 +141,17 @@ public List preferredSuccessors() { } @Override - public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { + if (isPreVote && isLogUpToDate) { + return true; + } log.debug( - "Rejecting vote request from candidate ({}) since we have resigned as candidate/leader in epoch {}", - candidateKey, - epoch + "Rejecting Vote request (preVote={}) from replica ({}) since we are in ResignedState in epoch {} " + + "and the replica's log is up-to-date={}", + isPreVote, + replicaKey, + epoch, + isLogUpToDate ); return false; diff --git a/raft/src/main/java/org/apache/kafka/raft/SegmentPosition.java b/raft/src/main/java/org/apache/kafka/raft/SegmentPosition.java new file mode 100644 index 0000000000000..933c4f2ee1149 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/SegmentPosition.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft; + +public record SegmentPosition(long baseOffset, int relativePosition) implements OffsetMetadata { + + @Override + public String toString() { + return "(segmentBaseOffset=" + baseOffset + ",relativePositionInSegment=" + relativePosition + ")"; + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/TimingWheelExpirationService.java b/raft/src/main/java/org/apache/kafka/raft/TimingWheelExpirationService.java new file mode 100644 index 0000000000000..f0b28b00fdbe4 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/TimingWheelExpirationService.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft; + +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.server.util.ShutdownableThread; +import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; + +import java.util.concurrent.CompletableFuture; + +public class TimingWheelExpirationService implements ExpirationService { + + private static final long WORK_TIMEOUT_MS = 200L; + + private final ExpiredOperationReaper expirationReaper; + private final Timer timer; + + public TimingWheelExpirationService(Timer timer) { + this.timer = timer; + this.expirationReaper = new ExpiredOperationReaper(); + expirationReaper.start(); + } + + @Override + public <T> CompletableFuture<T> failAfter(long timeoutMs) { + TimerTaskCompletableFuture<T> task = new TimerTaskCompletableFuture<>(timeoutMs); + task.future.whenComplete((t, throwable) -> task.cancel()); + timer.add(task); + return task.future; + } + + public void shutdown() throws InterruptedException { + expirationReaper.shutdown(); + } + + private static class TimerTaskCompletableFuture<T> extends TimerTask { + + private final CompletableFuture<T> future = new CompletableFuture<>(); + + TimerTaskCompletableFuture(long delayMs) { + super(delayMs); + } + + @Override + public void run() { + future.completeExceptionally(new TimeoutException("Future failed to be completed before timeout of " + delayMs + " ms was reached")); + } + } + + private class ExpiredOperationReaper extends ShutdownableThread { + + ExpiredOperationReaper() { + super("raft-expiration-reaper", false); + } + + @Override + public void doWork() { + try { + timer.advanceClock(WORK_TIMEOUT_MS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/UnattachedState.java b/raft/src/main/java/org/apache/kafka/raft/UnattachedState.java index 4b21849f81866..6b7e4b700f241 100644 --- a/raft/src/main/java/org/apache/kafka/raft/UnattachedState.java +++ b/raft/src/main/java/org/apache/kafka/raft/UnattachedState.java @@ -26,6 +26,8 @@ import java.util.OptionalInt; import
java.util.Set; +import static org.apache.kafka.raft.QuorumState.unattachedOrProspectiveCanGrantVote; + /** * A replica is "unattached" when it doesn't know the leader or the leader's endpoint. * @@ -71,10 +73,10 @@ public UnattachedState( @Override public ElectionState election() { - if (votedKey.isPresent()) { - return ElectionState.withVotedCandidate(epoch, votedKey().get(), voters); - } else if (leaderId.isPresent()) { - return ElectionState.withElectedLeader(epoch, leaderId.getAsInt(), voters); + if (leaderId.isPresent()) { + return ElectionState.withElectedLeader(epoch, leaderId.getAsInt(), votedKey, voters); + } else if (votedKey.isPresent()) { + return ElectionState.withVotedCandidate(epoch, votedKey.get(), voters); } else { return ElectionState.withUnknownLeader(epoch, voters); } @@ -119,48 +121,30 @@ public Optional highWatermark() { } @Override - public boolean canGrantVote(ReplicaKey candidateKey, boolean isLogUpToDate) { - if (votedKey.isPresent()) { - ReplicaKey votedReplicaKey = votedKey.get(); - if (votedReplicaKey.id() == candidateKey.id()) { - return !votedReplicaKey.directoryId().isPresent() || votedReplicaKey.directoryId().equals(candidateKey.directoryId()); - } - log.debug( - "Rejecting vote request from candidate ({}), already have voted for another " + - "candidate ({}) in epoch {}", - candidateKey, - votedKey, - epoch - ); - return false; - } else if (leaderId.isPresent()) { - // If the leader id is known it should behave similar to the follower state - log.debug( - "Rejecting vote request from candidate ({}) since we already have a leader {} in epoch {}", - candidateKey, - leaderId, - epoch - ); - return false; - } else if (!isLogUpToDate) { - log.debug( - "Rejecting vote request from candidate ({}) since candidate epoch/offset is not up to date with us", - candidateKey - ); - } - - return isLogUpToDate; + public boolean canGrantVote(ReplicaKey replicaKey, boolean isLogUpToDate, boolean isPreVote) { + return unattachedOrProspectiveCanGrantVote( + leaderId, + votedKey, + epoch, + replicaKey, + isLogUpToDate, + isPreVote, + log + ); } @Override public String toString() { - return "Unattached(" + - "epoch=" + epoch + - ", votedKey=" + votedKey.map(ReplicaKey::toString).orElse("null") + - ", voters=" + voters + - ", electionTimeoutMs=" + electionTimeoutMs + - ", highWatermark=" + highWatermark + - ')'; + return String.format( + "UnattachedState(epoch=%d, leaderId=%s, votedKey=%s, voters=%s, " + + "electionTimeoutMs=%d, highWatermark=%s)", + epoch, + leaderId, + votedKey, + voters, + electionTimeoutMs, + highWatermark + ); } @Override diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java index 44b9eb2a39dc0..1f7ea2f61c47b 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java @@ -101,7 +101,7 @@ public CompletableFuture handleAddVoterRequest( // Check that the leader has established a HWM and committed the current epoch Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); - if (!highWatermark.isPresent()) { + if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.addVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -127,7 +127,7 @@ public CompletableFuture handleAddVoterRequest( // Check that there are no uncommitted VotersRecord Optional> votersEntry = partitionState.lastVoterSetEntry(); - if 
(!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { + if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( RaftUtil.addVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -172,7 +172,7 @@ public CompletableFuture handleAddVoterRequest( this::buildApiVersionsRequest, currentTimeMs ); - if (!timeout.isPresent()) { + if (timeout.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.addVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -203,7 +203,7 @@ public boolean handleApiVersionsResponse( long currentTimeMs ) { Optional handlerState = leaderState.addVoterHandlerState(); - if (!handlerState.isPresent()) { + if (handlerState.isEmpty()) { // There are no pending add operation just ignore the api response return true; } @@ -242,7 +242,7 @@ public boolean handleApiVersionsResponse( return false; } - // Check that the new voter supports the kraft.verion for reconfiguration + // Check that the new voter supports the kraft.version for reconfiguration KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!validVersionRange(kraftVersion, supportedKraftVersions)) { logger.info( diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandlerState.java b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandlerState.java index c403d0e0cd2e0..b43197c273dc2 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandlerState.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandlerState.java @@ -49,7 +49,7 @@ public long timeUntilOperationExpiration(long currentTimeMs) { } public boolean expectingApiResponse(int replicaId) { - return !lastOffset.isPresent() && replicaId == voterKey.id(); + return lastOffset.isEmpty() && replicaId == voterKey.id(); } public void setLastOffset(long lastOffset) { diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/BatchAccumulator.java b/raft/src/main/java/org/apache/kafka/raft/internals/BatchAccumulator.java index 89ea44d9e5987..1a68c82514bb7 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/BatchAccumulator.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/BatchAccumulator.java @@ -41,7 +41,6 @@ import java.util.Objects; import java.util.Optional; import java.util.OptionalInt; -import java.util.OptionalLong; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; @@ -484,7 +483,7 @@ public int epoch() { * This call will not block, but the drain may require multiple attempts before * it can be completed if the thread responsible for appending is holding the * append lock. In the worst case, the append will be completed on the next - * call to {@link #append(int, List, OptionalLong, boolean)} following the + * call to {@link #append(int, List, boolean)} following the * initial call to this method. * * The caller should respect the time to the next flush as indicated by diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/EpochElection.java b/raft/src/main/java/org/apache/kafka/raft/internals/EpochElection.java new file mode 100644 index 0000000000000..8cebe1becd955 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/internals/EpochElection.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft.internals; + +import org.apache.kafka.raft.ReplicaKey; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Tracks the votes cast by voters in an election held by a Nominee. + */ +public class EpochElection { + private Map voterStates; + + public EpochElection(Set voters) { + this.voterStates = voters.stream() + .collect( + Collectors.toMap( + ReplicaKey::id, + VoterState::new + ) + ); + } + + /** + * Record a vote from a voter. + * + * @param voterId The id of the voter + * @param isGranted true if the vote is granted, false if it is rejected + * @return true if the voter had not been previously recorded + */ + public boolean recordVote(int voterId, boolean isGranted) { + VoterState voterState = getVoterStateOrThrow(voterId); + boolean wasUnrecorded = voterState.state == VoterState.State.UNRECORDED; + voterState.setState( + isGranted ? VoterState.State.GRANTED : VoterState.State.REJECTED + ); + return wasUnrecorded; + } + + /** + * Returns if a voter has granted the vote. + * + * @param voterId The id of the voter + * @throws IllegalArgumentException if the voter is not in the set of voters + */ + public boolean isGrantedVoter(int voterId) { + return getVoterStateOrThrow(voterId).state == VoterState.State.GRANTED; + } + + /** + * Returns if a voter has rejected the vote. + * + * @param voterId The id of the voter + * @throws IllegalArgumentException if the voter is not in the set of voters + */ + public boolean isRejectedVoter(int voterId) { + return getVoterStateOrThrow(voterId).state == VoterState.State.REJECTED; + } + + /** + * The set of voter ids. + */ + public Set voterIds() { + return Collections.unmodifiableSet(voterStates.keySet()); + } + + /** + * Check whether we have received enough votes to conclude the election and become leader. + * + * @return true if at least a majority of nodes have granted the vote + */ + public boolean isVoteGranted() { + return numGranted() >= majoritySize(); + } + + /** + * Check if we have received enough rejections that it is no longer possible to reach a + * majority of grants. + * + * @return true if the vote is rejected, false if the vote is already or can still be granted + */ + public boolean isVoteRejected() { + return numGranted() + numUnrecorded() < majoritySize(); + } + + /** + * Get the set of voters which have not been counted as granted or rejected yet. + * + * @return The set of unrecorded voters + */ + public Set unrecordedVoters() { + return votersOfState(VoterState.State.UNRECORDED).collect(Collectors.toSet()); + } + + /** + * Get the set of voters that have granted our vote requests. 
+ * + * @return The set of granting voters, which should always contain the localId + */ + public Set grantingVoters() { + return votersOfState(VoterState.State.GRANTED).map(ReplicaKey::id).collect(Collectors.toSet()); + } + + /** + * Get the set of voters that have rejected our candidacy. + * + * @return The set of rejecting voters + */ + public Set rejectingVoters() { + return votersOfState(VoterState.State.REJECTED).map(ReplicaKey::id).collect(Collectors.toSet()); + } + + private VoterState getVoterStateOrThrow(int voterId) { + VoterState voterState = voterStates.get(voterId); + if (voterState == null) { + throw new IllegalArgumentException("Attempt to access voter state of non-voter " + voterId); + } + return voterState; + } + + private Stream votersOfState(VoterState.State state) { + return voterStates + .values() + .stream() + .filter(voterState -> voterState.state().equals(state)) + .map(VoterState::replicaKey); + } + + private long numGranted() { + return votersOfState(VoterState.State.GRANTED).count(); + } + + private long numUnrecorded() { + return votersOfState(VoterState.State.UNRECORDED).count(); + } + + private int majoritySize() { + return voterStates.size() / 2 + 1; + } + + @Override + public String toString() { + return String.format( + "EpochElection(voterStates=%s)", + voterStates + ); + } + + private static final class VoterState { + private final ReplicaKey replicaKey; + private State state = State.UNRECORDED; + + VoterState(ReplicaKey replicaKey) { + this.replicaKey = replicaKey; + } + + public State state() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + public ReplicaKey replicaKey() { + return replicaKey; + } + + enum State { + UNRECORDED, + GRANTED, + REJECTED + } + + @Override + public String toString() { + return String.format( + "VoterState(replicaKey=%s, state=%s)", + replicaKey, + state + ); + } + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/KRaftControlRecordStateMachine.java b/raft/src/main/java/org/apache/kafka/raft/internals/KRaftControlRecordStateMachine.java index c1d4a0b2f2dbb..e2b6f7e43e4d4 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/KRaftControlRecordStateMachine.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/KRaftControlRecordStateMachine.java @@ -90,7 +90,7 @@ public KRaftControlRecordStateMachine( LogContext logContext ) { this.log = log; - this.voterSetHistory = new VoterSetHistory(staticVoterSet); + this.voterSetHistory = new VoterSetHistory(staticVoterSet, logContext); this.serde = serde; this.bufferSupplier = bufferSupplier; this.maxBatchSizeBytes = maxBatchSizeBytes; diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/KafkaRaftMetrics.java b/raft/src/main/java/org/apache/kafka/raft/internals/KafkaRaftMetrics.java index 64230b45f7292..fa93633b48016 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/KafkaRaftMetrics.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/KafkaRaftMetrics.java @@ -71,10 +71,14 @@ public KafkaRaftMetrics(Metrics metrics, String metricGrpPrefix, QuorumState sta Gauge stateProvider = (mConfig, currentTimeMs) -> { if (state.isLeader()) { return "leader"; + } else if (state.isProspectiveNotVoted()) { + return "prospective"; + } else if (state.isProspectiveAndVoted()) { + return "prospective-voted"; } else if (state.isCandidate()) { return "candidate"; } else if (state.isUnattachedAndVoted()) { - return "voted"; + return "unattached-voted"; } else if (state.isFollower()) { // a broker 
is special kind of follower, as not being a voter, it's an observer if (state.isObserver()) { @@ -96,9 +100,7 @@ public KafkaRaftMetrics(Metrics metrics, String metricGrpPrefix, QuorumState sta if (state.isLeader() || state.isCandidate()) { return state.localIdOrThrow(); } else { - return (double) state.maybeUnattachedState() - .flatMap(votedState -> votedState.votedKey().map(ReplicaKey::id)) - .orElse(-1); + return state.votedKey().map(ReplicaKey::id).orElse(-1); } }); @@ -111,10 +113,7 @@ public KafkaRaftMetrics(Metrics metrics, String metricGrpPrefix, QuorumState sta if (state.isLeader() || state.isCandidate()) { return state.localDirectoryId().toString(); } else { - return state.maybeUnattachedState() - .flatMap(votedState -> votedState.votedKey().flatMap(ReplicaKey::directoryId)) - .orElse(Uuid.ZERO_UUID) - .toString(); + return state.votedKey().flatMap(ReplicaKey::directoryId).orElse(Uuid.ZERO_UUID).toString(); } }; metrics.addMetric(this.currentVotedDirectoryIdMetricName, null, votedDirectoryIdProvider); diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/RecordsBatchReader.java b/raft/src/main/java/org/apache/kafka/raft/internals/RecordsBatchReader.java index a9f4e106aa1d7..64572b6bc4991 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/RecordsBatchReader.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/RecordsBatchReader.java @@ -51,7 +51,7 @@ private RecordsBatchReader( public boolean hasNext() { ensureOpen(); - if (!nextBatch.isPresent()) { + if (nextBatch.isEmpty()) { nextBatch = nextBatch(); } diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/RecordsIterator.java b/raft/src/main/java/org/apache/kafka/raft/internals/RecordsIterator.java index 8299138974802..2ea3126de775c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/RecordsIterator.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/RecordsIterator.java @@ -86,7 +86,7 @@ public RecordsIterator( public boolean hasNext() { ensureOpen(); - if (!nextBatch.isPresent()) { + if (nextBatch.isEmpty()) { nextBatch = nextBatch(); } @@ -334,7 +334,7 @@ private T decodeDataRecord(Optional key, Optional value) throw new IllegalArgumentException("Got key in the record when no key was expected"); } - if (!value.isPresent()) { + if (value.isEmpty()) { throw new IllegalArgumentException("Missing value in the record when a value was expected"); } else if (value.get().remaining() == 0) { throw new IllegalArgumentException("Got an unexpected empty value in the record"); @@ -346,13 +346,13 @@ private T decodeDataRecord(Optional key, Optional value) } private static ControlRecord decodeControlRecord(Optional key, Optional value) { - if (!key.isPresent()) { + if (key.isEmpty()) { throw new IllegalArgumentException("Missing key in the record when a key was expected"); } else if (key.get().remaining() == 0) { throw new IllegalArgumentException("Got an unexpected empty key in the record"); } - if (!value.isPresent()) { + if (value.isEmpty()) { throw new IllegalArgumentException("Missing value in the record when a value was expected"); } else if (value.get().remaining() == 0) { throw new IllegalArgumentException("Got an unexpected empty value in the record"); diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java index 29093cc30b6e9..2dea86d593bfe 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java +++ 
b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java @@ -91,7 +91,7 @@ public CompletableFuture handleRemoveVoterRequest( // Check that the leader has established a HWM and committed the current epoch Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); - if (!highWatermark.isPresent()) { + if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.removeVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -117,7 +117,7 @@ public CompletableFuture handleRemoveVoterRequest( // Check that there are no uncommitted VotersRecord Optional> votersEntry = partitionState.lastVoterSetEntry(); - if (!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { + if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( RaftUtil.removeVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -132,7 +132,7 @@ public CompletableFuture handleRemoveVoterRequest( // Remove the voter from the set of voters Optional newVoters = votersEntry.get().value().removeVoter(voterKey); - if (!newVoters.isPresent()) { + if (newVoters.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.removeVoterResponse( Errors.VOTER_NOT_FOUND, diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java index 417c1decad758..335e1b02a22c1 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java @@ -97,7 +97,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that the leader has established a HWM and committed the current epoch Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); - if (!highWatermark.isPresent()) { + if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.updateVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -130,7 +130,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that there are no uncommitted VotersRecord Optional> votersEntry = partitionState.lastVoterSetEntry(); - if (!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { + if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( RaftUtil.updateVoterResponse( Errors.REQUEST_TIMED_OUT, @@ -160,7 +160,7 @@ public CompletableFuture handleUpdateVoterRequest( } // Check that endpoinds includes the default listener - if (!voterEndpoints.address(defaultListenerName).isPresent()) { + if (voterEndpoints.address(defaultListenerName).isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.updateVoterResponse( Errors.INVALID_REQUEST, @@ -188,7 +188,7 @@ public CompletableFuture handleUpdateVoterRequest( ) ) ); - if (!updatedVoters.isPresent()) { + if (updatedVoters.isEmpty()) { return CompletableFuture.completedFuture( RaftUtil.updateVoterResponse( Errors.VOTER_NOT_FOUND, diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/VoterSetHistory.java b/raft/src/main/java/org/apache/kafka/raft/internals/VoterSetHistory.java index 6ab304f8c163d..5e25c6035420b 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/VoterSetHistory.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/VoterSetHistory.java @@ -16,8 +16,11 @@ */ package org.apache.kafka.raft.internals; +import 
org.apache.kafka.common.utils.LogContext; import org.apache.kafka.raft.VoterSet; +import org.slf4j.Logger; + import java.util.Optional; import java.util.OptionalLong; @@ -31,9 +34,11 @@ public final class VoterSetHistory { private final VoterSet staticVoterSet; private final LogHistory votersHistory = new TreeMapLogHistory<>(); + private final Logger logger; - VoterSetHistory(VoterSet staticVoterSet) { + VoterSetHistory(VoterSet staticVoterSet, LogContext logContext) { this.staticVoterSet = staticVoterSet; + this.logger = logContext.logger(getClass()); } /** @@ -55,12 +60,10 @@ public void addAt(long offset, VoterSet voters) { // all replicas. VoterSet lastVoterSet = lastEntry.get().value(); if (!lastVoterSet.hasOverlappingMajority(voters)) { - throw new IllegalArgumentException( - String.format( - "Last voter set %s doesn't have an overlapping majority with the new voter set %s", - lastVoterSet, - voters - ) + logger.info( + "Last voter set ({}) doesn't have an overlapping majority with the new voter set ({})", + lastVoterSet, + voters ); } } diff --git a/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotReader.java b/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotReader.java index c865dc2a1a469..fc815621d839d 100644 --- a/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotReader.java +++ b/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotReader.java @@ -61,7 +61,7 @@ public int lastContainedLogEpoch() { @Override public long lastContainedLogTimestamp() { - if (!lastContainedLogTimestamp.isPresent()) { + if (lastContainedLogTimestamp.isEmpty()) { nextBatch.ifPresent(batch -> { throw new IllegalStateException( String.format( @@ -83,7 +83,7 @@ public long lastContainedLogTimestamp() { @Override public boolean hasNext() { - if (!nextBatch.isPresent()) { + if (nextBatch.isEmpty()) { nextBatch = nextBatch(); } @@ -127,7 +127,7 @@ private Optional> nextBatch() { if (iterator.hasNext()) { Batch batch = iterator.next(); - if (!lastContainedLogTimestamp.isPresent()) { + if (lastContainedLogTimestamp.isEmpty()) { // This must be the first batch which is expected to be a control batch with at least one record for // the snapshot header. 
if (batch.controlRecords().isEmpty()) { diff --git a/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotWriter.java b/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotWriter.java index ef26ce3bc0598..47683d68bdbc2 100644 --- a/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotWriter.java +++ b/raft/src/main/java/org/apache/kafka/snapshot/RecordsSnapshotWriter.java @@ -192,7 +192,7 @@ public Builder setVoterSet(Optional voterSet) { } public RecordsSnapshotWriter build(RecordSerde serde) { - if (!rawSnapshotWriter.isPresent()) { + if (rawSnapshotWriter.isEmpty()) { throw new IllegalStateException("Builder::build called without a RawSnapshotWriter"); } else if (rawSnapshotWriter.get().sizeInBytes() != 0) { throw new IllegalStateException( diff --git a/raft/src/main/resources/common/message/QuorumStateData.json b/raft/src/main/resources/common/message/QuorumStateData.json index fdfe45cc2c6e3..846f925c8dc70 100644 --- a/raft/src/main/resources/common/message/QuorumStateData.json +++ b/raft/src/main/resources/common/message/QuorumStateData.json @@ -19,14 +19,22 @@ "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ - { "name": "ClusterId", "type": "string", "versions": "0" }, - { "name": "LeaderId", "type": "int32", "versions": "0+", "default": "-1" }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "default": "-1" }, - { "name": "VotedId", "type": "int32", "versions": "0+", "default": "-1" }, - { "name": "VotedDirectoryId", "type": "uuid", "versions": "1+" }, - { "name": "AppliedOffset", "type": "int64", "versions": "0" }, - { "name": "CurrentVoters", "type": "[]Voter", "versions": "0", "nullableVersions": "0", "fields": [ - { "name": "VoterId", "type": "int32", "versions": "0" } + { "name": "ClusterId", "type": "string", "versions": "0", + "about": "The cluster id."}, + { "name": "LeaderId", "type": "int32", "versions": "0+", "default": "-1", + "about": "The leader id."}, + { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "default": "-1", + "about": "The leader epoch."}, + { "name": "VotedId", "type": "int32", "versions": "0+", "default": "-1", + "about": "The voted id."}, + { "name": "VotedDirectoryId", "type": "uuid", "versions": "1+", + "about": "The voted directory id."}, + { "name": "AppliedOffset", "type": "int64", "versions": "0", + "about": "The applied offset."}, + { "name": "CurrentVoters", "type": "[]Voter", "versions": "0", "nullableVersions": "0", + "about": "The current voters.", "fields": [ + { "name": "VoterId", "type": "int32", "versions": "0", + "about": "The voter id."} ]} ] } diff --git a/raft/src/test/java/org/apache/kafka/raft/CandidateStateTest.java b/raft/src/test/java/org/apache/kafka/raft/CandidateStateTest.java index 4f764d43a211e..217efad22f5f8 100644 --- a/raft/src/test/java/org/apache/kafka/raft/CandidateStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/CandidateStateTest.java @@ -45,15 +45,15 @@ public class CandidateStateTest { private CandidateState newCandidateState(VoterSet voters) { return new CandidateState( - time, - localReplicaKey.id(), - localReplicaKey.directoryId().get(), - epoch, - voters, - Optional.empty(), - 0, - electionTimeoutMs, - logContext + time, + localReplicaKey.id(), + localReplicaKey.directoryId().get(), + epoch, + voters, + Optional.empty(), + 1, + electionTimeoutMs, + logContext ); } @@ -61,9 +61,9 @@ private CandidateState newCandidateState(VoterSet voters) { @ValueSource(booleans = { true, false }) public void testSingleNodeQuorum(boolean withDirectoryId) { 
CandidateState state = newCandidateState(voterSetWithLocal(IntStream.empty(), withDirectoryId)); - assertTrue(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); - assertEquals(Collections.emptySet(), state.unrecordedVoters()); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); } @ParameterizedTest @@ -73,12 +73,12 @@ public void testTwoNodeQuorumVoteRejected(boolean withDirectoryId) { CandidateState state = newCandidateState( voterSetWithLocal(Stream.of(otherNode), withDirectoryId) ); - assertFalse(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); - assertEquals(Collections.singleton(otherNode), state.unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.singleton(otherNode), state.epochElection().unrecordedVoters()); assertTrue(state.recordRejectedVote(otherNode.id())); - assertFalse(state.isVoteGranted()); - assertTrue(state.isVoteRejected()); + assertFalse(state.epochElection().isVoteGranted()); + assertTrue(state.epochElection().isVoteRejected()); } @ParameterizedTest @@ -88,13 +88,13 @@ public void testTwoNodeQuorumVoteGranted(boolean withDirectoryId) { CandidateState state = newCandidateState( voterSetWithLocal(Stream.of(otherNode), withDirectoryId) ); - assertFalse(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); - assertEquals(Collections.singleton(otherNode), state.unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.singleton(otherNode), state.epochElection().unrecordedVoters()); assertTrue(state.recordGrantedVote(otherNode.id())); - assertEquals(Collections.emptySet(), state.unrecordedVoters()); - assertFalse(state.isVoteRejected()); - assertTrue(state.isVoteGranted()); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteRejected()); + assertTrue(state.epochElection().isVoteGranted()); } @ParameterizedTest @@ -105,17 +105,17 @@ public void testThreeNodeQuorumVoteGranted(boolean withDirectoryId) { CandidateState state = newCandidateState( voterSetWithLocal(Stream.of(node1, node2), withDirectoryId) ); - assertFalse(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); - assertEquals(Set.of(node1, node2), state.unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Set.of(node1, node2), state.epochElection().unrecordedVoters()); assertTrue(state.recordGrantedVote(node1.id())); - assertEquals(Collections.singleton(node2), state.unrecordedVoters()); - assertTrue(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); + assertEquals(Collections.singleton(node2), state.epochElection().unrecordedVoters()); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); assertTrue(state.recordRejectedVote(node2.id())); - assertEquals(Collections.emptySet(), state.unrecordedVoters()); - assertTrue(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); } @ParameterizedTest @@ -126,17 +126,17 @@ 
public void testThreeNodeQuorumVoteRejected(boolean withDirectoryId) { CandidateState state = newCandidateState( voterSetWithLocal(Stream.of(node1, node2), withDirectoryId) ); - assertFalse(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); - assertEquals(Set.of(node1, node2), state.unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Set.of(node1, node2), state.epochElection().unrecordedVoters()); assertTrue(state.recordRejectedVote(node1.id())); - assertEquals(Collections.singleton(node2), state.unrecordedVoters()); - assertFalse(state.isVoteGranted()); - assertFalse(state.isVoteRejected()); + assertEquals(Collections.singleton(node2), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); assertTrue(state.recordRejectedVote(node2.id())); - assertEquals(Collections.emptySet(), state.unrecordedVoters()); - assertFalse(state.isVoteGranted()); - assertTrue(state.isVoteRejected()); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertTrue(state.epochElection().isVoteRejected()); } @ParameterizedTest @@ -161,7 +161,7 @@ public void testCannotChangeVoteGrantedToRejected(boolean withDirectoryId) { ); assertTrue(state.recordGrantedVote(otherNodeId)); assertThrows(IllegalArgumentException.class, () -> state.recordRejectedVote(otherNodeId)); - assertTrue(state.isVoteGranted()); + assertTrue(state.epochElection().isVoteGranted()); } @ParameterizedTest @@ -173,7 +173,7 @@ public void testCannotChangeVoteRejectedToGranted(boolean withDirectoryId) { ); assertTrue(state.recordRejectedVote(otherNodeId)); assertThrows(IllegalArgumentException.class, () -> state.recordGrantedVote(otherNodeId)); - assertTrue(state.isVoteRejected()); + assertTrue(state.epochElection().isVoteRejected()); } @ParameterizedTest @@ -219,10 +219,15 @@ public void testGrantVote(boolean isLogUpToDate, boolean withDirectoryId) { voterSetWithLocal(Stream.of(node1, node2, node3), withDirectoryId) ); - assertFalse(state.canGrantVote(node0, isLogUpToDate)); - assertFalse(state.canGrantVote(node1, isLogUpToDate)); - assertFalse(state.canGrantVote(node2, isLogUpToDate)); - assertFalse(state.canGrantVote(node3, isLogUpToDate)); + assertEquals(isLogUpToDate, state.canGrantVote(node0, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node1, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node2, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node3, isLogUpToDate, true)); + + assertFalse(state.canGrantVote(node0, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node1, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node2, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node3, isLogUpToDate, false)); } @ParameterizedTest diff --git a/raft/src/test/java/org/apache/kafka/raft/ElectionStateTest.java b/raft/src/test/java/org/apache/kafka/raft/ElectionStateTest.java index 43ac53d11ac90..85a3ca3951a07 100644 --- a/raft/src/test/java/org/apache/kafka/raft/ElectionStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/ElectionStateTest.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -70,26 +71,34 @@ 
void testQuorumStateDataRoundTrip(short version) { ReplicaKey votedKey = ReplicaKey.of(1, Uuid.randomUuid()); List electionStates = Arrays.asList( ElectionState.withUnknownLeader(5, Set.of(1, 2, 3)), - ElectionState.withElectedLeader(5, 1, Set.of(1, 2, 3)), - ElectionState.withVotedCandidate(5, votedKey, Set.of(1, 2, 3)) + ElectionState.withElectedLeader(5, 1, Optional.empty(), Set.of(1, 2, 3)), + ElectionState.withVotedCandidate(5, votedKey, Set.of(1, 2, 3)), + ElectionState.withElectedLeader(5, 1, Optional.of(votedKey), Set.of(1, 2, 3)) ); final List expected; if (version == 0) { expected = Arrays.asList( ElectionState.withUnknownLeader(5, Set.of(1, 2, 3)), - ElectionState.withElectedLeader(5, 1, Set.of(1, 2, 3)), + ElectionState.withElectedLeader(5, 1, Optional.empty(), Set.of(1, 2, 3)), ElectionState.withVotedCandidate( 5, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), Set.of(1, 2, 3) + ), + ElectionState.withElectedLeader( + 5, + 1, + Optional.of(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID)), + Set.of(1, 2, 3) ) ); } else { expected = Arrays.asList( ElectionState.withUnknownLeader(5, Collections.emptySet()), - ElectionState.withElectedLeader(5, 1, Collections.emptySet()), - ElectionState.withVotedCandidate(5, votedKey, Collections.emptySet()) + ElectionState.withElectedLeader(5, 1, Optional.empty(), Collections.emptySet()), + ElectionState.withVotedCandidate(5, votedKey, Collections.emptySet()), + ElectionState.withElectedLeader(5, 1, Optional.of(votedKey), Collections.emptySet()) ); } diff --git a/raft/src/test/java/org/apache/kafka/raft/EndpointsTest.java b/raft/src/test/java/org/apache/kafka/raft/EndpointsTest.java index a4a39de097239..b83a46bf89fac 100644 --- a/raft/src/test/java/org/apache/kafka/raft/EndpointsTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/EndpointsTest.java @@ -41,8 +41,8 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; final class EndpointsTest { - private ListenerName testListener = ListenerName.normalised("listener"); - private InetSocketAddress testSocketAddress = InetSocketAddress.createUnresolved("localhost", 9092); + private final ListenerName testListener = ListenerName.normalised("listener"); + private final InetSocketAddress testSocketAddress = InetSocketAddress.createUnresolved("localhost", 9092); @Test void testAddressWithValidEndpoint() { diff --git a/raft/src/test/java/org/apache/kafka/raft/EpochElectionTest.java b/raft/src/test/java/org/apache/kafka/raft/EpochElectionTest.java new file mode 100644 index 0000000000000..e14e4cb17ffe8 --- /dev/null +++ b/raft/src/test/java/org/apache/kafka/raft/EpochElectionTest.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.raft.internals.EpochElection; + +import org.junit.jupiter.api.Test; + +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class EpochElectionTest { + private final int voter1 = randomReplicaId(); + private final Set voters = Set.of( + ReplicaKey.of(voter1, Uuid.randomUuid()), + ReplicaKey.of(voter1 + 1, Uuid.randomUuid()), + ReplicaKey.of(voter1 + 2, Uuid.randomUuid()) + ); + @Test + public void testStateOnInitialization() { + EpochElection epochElection = new EpochElection(voters); + + assertEquals(voters, epochElection.unrecordedVoters()); + assertTrue(epochElection.grantingVoters().isEmpty()); + assertTrue(epochElection.rejectingVoters().isEmpty()); + assertFalse(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + assertFalse(epochElection.isGrantedVoter(voter1)); + assertFalse(epochElection.isRejectedVoter(voter1)); + } + + @Test + public void testRecordGrantedVote() { + EpochElection epochElection = new EpochElection(voters); + + assertTrue(epochElection.recordVote(voter1, true)); + assertEquals(1, epochElection.grantingVoters().size()); + assertTrue(epochElection.grantingVoters().contains(voter1)); + assertEquals(0, epochElection.rejectingVoters().size()); + assertEquals(2, epochElection.unrecordedVoters().size()); + assertTrue(epochElection.isGrantedVoter(voter1)); + assertFalse(epochElection.isRejectedVoter(voter1)); + assertFalse(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + + // recording same id as granted + assertFalse(epochElection.recordVote(voter1, true)); + assertTrue(epochElection.isGrantedVoter(voter1)); + assertFalse(epochElection.isVoteGranted()); + + // recording majority as granted + assertTrue(epochElection.recordVote(voter1 + 1, true)); + assertEquals(2, epochElection.grantingVoters().size()); + assertEquals(0, epochElection.rejectingVoters().size()); + assertEquals(1, epochElection.unrecordedVoters().size()); + assertTrue(epochElection.isGrantedVoter(voter1 + 1)); + assertFalse(epochElection.isRejectedVoter(voter1 + 1)); + assertTrue(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + } + + @Test + public void testRecordRejectedVote() { + EpochElection epochElection = new EpochElection(voters); + + assertTrue(epochElection.recordVote(voter1, false)); + assertEquals(0, epochElection.grantingVoters().size()); + assertEquals(1, epochElection.rejectingVoters().size()); + assertTrue(epochElection.rejectingVoters().contains(voter1)); + assertEquals(2, epochElection.unrecordedVoters().size()); + assertFalse(epochElection.isGrantedVoter(voter1)); + assertTrue(epochElection.isRejectedVoter(voter1)); + assertFalse(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + + // recording same id as rejected + assertFalse(epochElection.recordVote(voter1, false)); + assertFalse(epochElection.isGrantedVoter(voter1)); + assertFalse(epochElection.isVoteRejected()); + + // recording majority as rejected + assertTrue(epochElection.recordVote(voter1 + 1, false)); + assertEquals(0, epochElection.grantingVoters().size()); + assertEquals(2, epochElection.rejectingVoters().size()); + assertEquals(1, epochElection.unrecordedVoters().size()); + 
assertFalse(epochElection.isGrantedVoter(voter1 + 1)); + assertTrue(epochElection.isRejectedVoter(voter1 + 1)); + assertFalse(epochElection.isVoteGranted()); + assertTrue(epochElection.isVoteRejected()); + } + + @Test + public void testOverWritingVote() { + EpochElection epochElection = new EpochElection(voters); + + assertTrue(epochElection.recordVote(voter1, true)); + assertFalse(epochElection.recordVote(voter1, false)); + assertEquals(0, epochElection.grantingVoters().size()); + assertEquals(1, epochElection.rejectingVoters().size()); + assertTrue(epochElection.rejectingVoters().contains(voter1)); + assertFalse(epochElection.isGrantedVoter(voter1)); + assertTrue(epochElection.isRejectedVoter(voter1)); + assertFalse(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + + assertTrue(epochElection.recordVote(voter1 + 2, false)); + assertFalse(epochElection.recordVote(voter1 + 2, true)); + assertEquals(1, epochElection.grantingVoters().size()); + assertEquals(1, epochElection.rejectingVoters().size()); + assertTrue(epochElection.grantingVoters().contains(voter1 + 2)); + assertTrue(epochElection.isGrantedVoter(voter1 + 2)); + assertFalse(epochElection.isRejectedVoter(voter1 + 2)); + assertFalse(epochElection.isVoteGranted()); + assertFalse(epochElection.isVoteRejected()); + } + + private static int randomReplicaId() { + return ThreadLocalRandom.current().nextInt(1025); + } +} diff --git a/raft/src/test/java/org/apache/kafka/raft/FileQuorumStateStoreTest.java b/raft/src/test/java/org/apache/kafka/raft/FileQuorumStateStoreTest.java index 9782bc05a2283..a0893ae2aa0f9 100644 --- a/raft/src/test/java/org/apache/kafka/raft/FileQuorumStateStoreTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/FileQuorumStateStoreTest.java @@ -56,20 +56,28 @@ void testWriteReadElectedLeader(KRaftVersion kraftVersion) throws IOException { final int voter1 = 1; final int voter2 = 2; final int voter3 = 3; + ReplicaKey votedKey = ReplicaKey.of(voter1, Uuid.randomUuid()); Set voters = Set.of(voter1, voter2, voter3); stateStore.writeElectionState( - ElectionState.withElectedLeader(epoch, voter1, voters), + ElectionState.withElectedLeader(epoch, voter1, Optional.of(votedKey), voters), kraftVersion ); final Optional expected; if (kraftVersion.isReconfigSupported()) { expected = Optional.of( - ElectionState.withElectedLeader(epoch, voter1, Collections.emptySet()) + ElectionState.withElectedLeader(epoch, voter1, Optional.of(votedKey), Collections.emptySet()) ); } else { - expected = Optional.of(ElectionState.withElectedLeader(epoch, voter1, voters)); + expected = Optional.of( + ElectionState.withElectedLeader( + epoch, + voter1, + Optional.of(ReplicaKey.of(voter1, ReplicaKey.NO_DIRECTORY_ID)), + voters + ) + ); } assertEquals(expected, stateStore.readElectionState()); diff --git a/raft/src/test/java/org/apache/kafka/raft/FollowerStateTest.java b/raft/src/test/java/org/apache/kafka/raft/FollowerStateTest.java index 8c9c874e52690..c7e86c3fe4946 100644 --- a/raft/src/test/java/org/apache/kafka/raft/FollowerStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/FollowerStateTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.raft; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -47,18 +48,19 @@ public class FollowerStateTest { InetSocketAddress.createUnresolved("mock-host-3", 1234) ) ); + private final ReplicaKey votedKey = ReplicaKey.of(2, 
Uuid.randomUuid()); private FollowerState newFollowerState( - Set voters, - Optional highWatermark + Set voters ) { return new FollowerState( time, epoch, leaderId, leaderEndpoints, + Optional.of(votedKey), voters, - highWatermark, + Optional.empty(), fetchTimeoutMs, logContext ); @@ -66,7 +68,7 @@ private FollowerState newFollowerState( @Test public void testFetchTimeoutExpiration() { - FollowerState state = newFollowerState(Set.of(1, 2, 3), Optional.empty()); + FollowerState state = newFollowerState(Set.of(1, 2, 3)); assertFalse(state.hasFetchTimeoutExpired(time.milliseconds())); assertEquals(fetchTimeoutMs, state.remainingFetchTimeMs(time.milliseconds())); @@ -82,7 +84,7 @@ public void testFetchTimeoutExpiration() { @Test public void testMonotonicHighWatermark() { - FollowerState state = newFollowerState(Set.of(1, 2, 3), Optional.empty()); + FollowerState state = newFollowerState(Set.of(1, 2, 3)); OptionalLong highWatermark = OptionalLong.of(15L); state.updateHighWatermark(highWatermark); @@ -94,20 +96,46 @@ public void testMonotonicHighWatermark() { @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testGrantVote(boolean isLogUpToDate) { - FollowerState state = newFollowerState( - Set.of(1, 2, 3), - Optional.empty() - ); + public void testPreVoteIfHasNotFetchedFromLeaderYet(boolean isLogUpToDate) { + FollowerState state = newFollowerState(Set.of(1, 2, 3)); + + assertEquals(isLogUpToDate, state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testPreVoteAfterSuccessfulFetchFromLeader(boolean isLogUpToDate) { + FollowerState state = newFollowerState(Set.of(1, 2, 3)); + state.resetFetchTimeoutForSuccessfulFetch(time.milliseconds()); + + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testGrantStandardVote(boolean isLogUpToDate) { + FollowerState state = newFollowerState(Set.of(1, 2, 3)); - assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, 
false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); } @Test public void testLeaderIdAndEndpoint() { - FollowerState state = newFollowerState(Set.of(0, 1, 2), Optional.empty()); + FollowerState state = newFollowerState(Set.of(0, 1, 2)); assertEquals(leaderId, state.leaderId()); assertEquals(leaderEndpoints, state.leaderEndpoints()); diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java index 96a5df1845fcb..ba49a9c14310b 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java @@ -21,12 +21,14 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.feature.SupportedVersionRange; import org.apache.kafka.common.message.ApiVersionsResponseData; import org.apache.kafka.common.message.BeginQuorumEpochResponseData; import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.message.FetchRequestData; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.message.FetchSnapshotResponseData; +import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.protocol.ApiKeys; @@ -42,6 +44,7 @@ import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.common.requests.FetchResponse; import org.apache.kafka.common.requests.FetchSnapshotResponse; +import org.apache.kafka.common.requests.UpdateRaftVoterResponse; import org.apache.kafka.common.requests.VoteRequest; import org.apache.kafka.common.requests.VoteResponse; import org.apache.kafka.common.utils.MockTime; @@ -85,7 +88,8 @@ public void update(Time time, MockClient.MetadataUpdate update) { } ApiKeys.BEGIN_QUORUM_EPOCH, ApiKeys.END_QUORUM_EPOCH, ApiKeys.FETCH, - ApiKeys.FETCH_SNAPSHOT + ApiKeys.FETCH_SNAPSHOT, + ApiKeys.UPDATE_RAFT_VOTER ); private final int requestTimeoutMs = 30000; @@ -281,7 +285,7 @@ private ApiMessage buildTestRequest(ApiKeys key) { case VOTE: int lastEpoch = 4; - return VoteRequest.singletonRequest(topicPartition, clusterId, leaderEpoch, leaderId, lastEpoch, 329); + return VoteRequest.singletonRequest(topicPartition, clusterId, leaderEpoch, leaderId, lastEpoch, 329, true); case FETCH: FetchRequestData request = RaftUtil.singletonFetchRequest(topicPartition, topicId, fetchPartition -> @@ -304,6 +308,15 @@ private ApiMessage buildTestRequest(ApiKeys key) { 10 ); + case UPDATE_RAFT_VOTER: + return RaftUtil.updateVoterRequest( + clusterId, + ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), + 5, + new SupportedVersionRange((short) 1, (short) 1), + Endpoints.empty() + ); + default: throw new AssertionError("Unexpected api " + key); } @@ -337,6 +350,8 @@ private ApiMessage buildTestErrorResponse(ApiKeys key, Errors error) { return new FetchResponseData().setErrorCode(error.code()); case FETCH_SNAPSHOT: return new FetchSnapshotResponseData().setErrorCode(error.code()); + case UPDATE_RAFT_VOTER: + return new 
UpdateRaftVoterResponseData().setErrorCode(error.code()); default: throw new AssertionError("Unexpected api " + key); } @@ -354,6 +369,8 @@ private Errors extractError(ApiMessage response) { code = ((VoteResponseData) response).errorCode(); } else if (response instanceof FetchSnapshotResponseData) { code = ((FetchSnapshotResponseData) response).errorCode(); + } else if (response instanceof UpdateRaftVoterResponseData) { + code = ((UpdateRaftVoterResponseData) response).errorCode(); } else { throw new IllegalArgumentException("Unexpected type for responseData: " + response); } @@ -372,6 +389,8 @@ private AbstractResponse buildResponse(ApiMessage responseData) { return new FetchResponse((FetchResponseData) responseData); } else if (responseData instanceof FetchSnapshotResponseData) { return new FetchSnapshotResponse((FetchSnapshotResponseData) responseData); + } else if (responseData instanceof UpdateRaftVoterResponseData) { + return new UpdateRaftVoterResponse((UpdateRaftVoterResponseData) responseData); } else { throw new IllegalArgumentException("Unexpected type for responseData: " + responseData); } diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java new file mode 100644 index 0000000000000..43cfeb29fe1b0 --- /dev/null +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java @@ -0,0 +1,1160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.VoteResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.raft.RaftClientTestContext.RaftProtocol; +import org.apache.kafka.server.common.KRaftVersion; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.OptionalInt; +import java.util.stream.Stream; + +import static org.apache.kafka.raft.KafkaRaftClientTest.randomReplicaId; +import static org.apache.kafka.raft.KafkaRaftClientTest.replicaKey; +import static org.apache.kafka.raft.RaftClientTestContext.RaftProtocol.KIP_996_PROTOCOL; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class KafkaRaftClientPreVoteTest { + @ParameterizedTest + @MethodSource("kraftVersionHasFetchedCombinations") + public void testHandlePreVoteRequestAsFollower( + KRaftVersion kraftVersion, + boolean hasFetchedFromLeader + ) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + ReplicaKey electedLeader = replicaKey(localId + 2, true); + ReplicaKey observer = replicaKey(localId + 3, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, otherNodeKey, electedLeader)), kraftVersion) + .withElectedLeader(epoch, electedLeader.id()) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + if (hasFetchedFromLeader) { + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); + context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); + + context.deliverResponse( + fetchRequest.correlationId(), + fetchRequest.destination(), + context.fetchResponse(epoch, electedLeader.id(), MemoryRecords.EMPTY, 0L, Errors.NONE) + ); + } + + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + + // follower should reject pre-vote requests if it has successfully fetched from the leader + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(electedLeader.id()), !hasFetchedFromLeader); + context.assertElectedLeader(epoch, electedLeader.id()); + + // same with observers + context.deliverRequest(context.preVoteRequest(epoch, observer, epoch, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(electedLeader.id()), !hasFetchedFromLeader); + context.assertElectedLeader(epoch, electedLeader.id()); + + // follower will transition to unattached if pre-vote request has a higher epoch + context.deliverRequest(context.preVoteRequest(epoch + 1, otherNodeKey, epoch + 1, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.of(-1), true); + assertEquals(context.currentEpoch(), epoch + 1); + assertTrue(context.client.quorum().isUnattachedNotVoted()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void 
testHandlePreVoteRequestAsFollowerWithVotedCandidate(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + int epoch = 2; + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + ReplicaKey votedCandidateKey = replicaKey(localId + 2, true); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey, votedCandidateKey)); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(voters, kraftVersion) + .withVotedCandidate(epoch, votedCandidateKey) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + // unattached will send fetch request before transitioning to follower, proactively clear the mock sent queue + context.client.poll(); + context.assertSentFetchRequest(); + + context.deliverRequest(context.beginEpochRequest(epoch, votedCandidateKey.id(), voters.listeners(votedCandidateKey.id()))); + context.pollUntilResponse(); + context.assertSentBeginQuorumEpochResponse(Errors.NONE); + assertTrue(context.client.quorum().isFollower()); + + // follower can grant PreVotes if it has not fetched successfully from leader yet + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(votedCandidateKey.id()), true); + + // after fetching from leader, follower should reject PreVote requests + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); + context.deliverResponse( + fetchRequest.correlationId(), + fetchRequest.destination(), + context.fetchResponse(epoch, votedCandidateKey.id(), MemoryRecords.EMPTY, 0L, Errors.NONE) + ); + + context.client.poll(); + assertTrue(context.client.quorum().isFollower()); + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(votedCandidateKey.id()), false); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandlePreVoteRequestAsCandidate(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + ReplicaKey observer = replicaKey(localId + 2, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withVotedCandidate(epoch, ReplicaKey.of(localId, localKey.directoryId().get())) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + assertTrue(context.client.quorum().isCandidate()); + + // candidate should grant pre-vote requests with the same epoch if log is up-to-date + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + context.assertVotedCandidate(epoch, localKey); + assertTrue(context.client.quorum().isCandidate()); + + // if an observer with up-to-date log sends a pre-vote request for the same epoch, it should also be granted + context.deliverRequest(context.preVoteRequest(epoch, observer, epoch, 2)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), 
true); + context.assertVotedCandidate(epoch, localKey); + assertTrue(context.client.quorum().isCandidate()); + + // candidate will transition to unattached if pre-vote request has a higher epoch + context.deliverRequest(context.preVoteRequest(epoch + 1, otherNodeKey, epoch + 1, 2)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.of(-1), true); + assertTrue(context.client.quorum().isUnattached()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandlePreVoteRequestAsUnattachedObserver(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + int epoch = 2; + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); + ReplicaKey observer = replicaKey(localId + 3, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(replica1, replica2)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().isObserver()); + + // if a voter with up-to-date log sends a pre-vote request, it should be granted + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if same voter sends another pre-vote request, it can be granted if the sender's log is still up-to-date + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if different voter with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, replica2, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if an observer with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, observer, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + assertEquals(epoch, context.currentEpoch()); + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().isObserver()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandlePreVoteRequestAsUnattachedVoted(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); + ReplicaKey observer = replicaKey(localId + 3, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(replica1, replica2)), kraftVersion) + .withVotedCandidate(epoch, replica2) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + assertTrue(context.client.quorum().isUnattachedAndVoted()); + + // if a voter with up-to-date log sends a pre-vote request, it should be granted + 
context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if same voter sends another pre-vote request, it can be granted if the sender's log is still up-to-date + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if different voter with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, replica2, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + // if an observer with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, observer, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), true); + + assertEquals(epoch, context.currentEpoch()); + assertTrue(context.client.quorum().isUnattachedAndVoted()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandlePreVoteRequestAsUnattachedWithLeader(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); + ReplicaKey leader = replicaKey(localId + 3, true); + ReplicaKey observer = replicaKey(localId + 4, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(replica1, replica2)), kraftVersion) + .withElectedLeader(epoch, leader.id()) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + assertTrue(context.client.quorum().isUnattachedNotVoted()); + + // if a voter with up-to-date log sends a pre-vote request, it should be granted + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(leader.id()), true); + + // if same voter sends another pre-vote request, it can be granted if the sender's log is still up-to-date + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(leader.id()), true); + + // if different voter with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, replica2, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(leader.id()), true); + + // if an observer with up-to-date log sends a pre-vote request for the same epoch, it will be granted + context.deliverRequest(context.preVoteRequest(epoch, observer, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(leader.id()), true); + + assertEquals(epoch, context.currentEpoch()); + assertTrue(context.client.quorum().isUnattachedNotVoted()); + } + + @ParameterizedTest + @MethodSource("kraftVersionHasFetchedCombinations") + public void testHandlePreVoteRequestAsFollowerObserver( + KRaftVersion kraftVersion, + boolean 
hasFetchedFromLeader + ) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey leader = replicaKey(localId + 1, true); + ReplicaKey follower = replicaKey(localId + 2, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(leader, follower)), kraftVersion) + .withElectedLeader(epoch, leader.id()) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + context.assertElectedLeader(epoch, leader.id()); + assertTrue(context.client.quorum().isFollower()); + assertTrue(context.client.quorum().isObserver()); + + if (hasFetchedFromLeader) { + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); + context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); + + context.deliverResponse( + fetchRequest.correlationId(), + fetchRequest.destination(), + context.fetchResponse(epoch, leader.id(), MemoryRecords.EMPTY, 0L, Errors.NONE) + ); + } + + context.deliverRequest(context.preVoteRequest(epoch, follower, epoch, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(leader.id()), !hasFetchedFromLeader); + assertTrue(context.client.quorum().isFollower()); + assertTrue(context.client.quorum().isObserver()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandleInvalidPreVoteRequestWithOlderEpoch(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, otherNodeKey)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + context.deliverRequest(context.preVoteRequest(epoch - 1, otherNodeKey, epoch - 1, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.FENCED_LEADER_EPOCH, epoch, OptionalInt.empty(), false); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderRejectPreVoteRequestOnSameEpoch(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withUnknownLeader(2) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + context.unattachedToLeader(); + int leaderEpoch = context.currentEpoch(); + + context.deliverRequest(context.preVoteRequest(leaderEpoch, otherNodeKey, leaderEpoch, 1)); + + context.client.poll(); + + context.assertSentVoteResponse(Errors.NONE, leaderEpoch, OptionalInt.of(localId), false); + context.assertElectedLeader(leaderEpoch, localId); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testPreVoteRequestClusterIdValidation(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = 
replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + context.unattachedToLeader(); + int epoch = context.currentEpoch(); + + // valid cluster id is accepted + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, 0)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), false); + + // null cluster id is accepted + context.deliverRequest(context.voteRequest(null, epoch, otherNodeKey, epoch, 0, true)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), false); + + // empty cluster id is rejected + context.deliverRequest(context.voteRequest("", epoch, otherNodeKey, epoch, 0, true)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.INCONSISTENT_CLUSTER_ID); + + // invalid cluster id is rejected + context.deliverRequest(context.voteRequest("invalid-uuid", epoch, otherNodeKey, epoch, 0, true)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.INCONSISTENT_CLUSTER_ID); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testInvalidVoterReplicaPreVoteRequest(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + context.unattachedToLeader(); + int epoch = context.currentEpoch(); + + // invalid voter id is rejected + context.deliverRequest( + context.voteRequest( + context.clusterId.toString(), + epoch, + otherNodeKey, + ReplicaKey.of(10, Uuid.randomUuid()), + epoch, + 100, + true + ) + ); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.INVALID_VOTER_KEY, epoch, OptionalInt.of(localId), false); + + // invalid voter directory id is rejected + context.deliverRequest( + context.voteRequest( + context.clusterId.toString(), + epoch, + otherNodeKey, + ReplicaKey.of(0, Uuid.randomUuid()), + epoch, + 100, + true + ) + ); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.INVALID_VOTER_KEY, epoch, OptionalInt.of(localId), false); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderAcceptPreVoteFromObserver(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withUnknownLeader(4) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + + context.unattachedToLeader(); + int epoch = context.currentEpoch(); + + ReplicaKey observerKey = replicaKey(localId + 2, true); + context.deliverRequest(context.preVoteRequest(epoch - 1, observerKey, 0, 0)); + context.client.poll(); + context.assertSentVoteResponse(Errors.FENCED_LEADER_EPOCH, 
epoch, OptionalInt.of(localId), false); + + context.deliverRequest(context.preVoteRequest(epoch, observerKey, 0, 0)); + context.client.poll(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), false); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHandlePreVoteRequestAsResigned(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + context.unattachedToLeader(); + context.client.quorum().transitionToResigned(Collections.emptyList()); + assertTrue(context.client.quorum().isResigned()); + + // resigned should grant pre-vote requests with the same epoch if log is up-to-date + int epoch = context.currentEpoch(); + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch, context.log.endOffset().offset())); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), true); + + // resigned will transition to unattached if pre-vote request has a higher epoch + context.deliverRequest(context.preVoteRequest(epoch + 1, otherNodeKey, epoch + 1, context.log.endOffset().offset())); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.of(-1), true); + assertTrue(context.client.quorum().isUnattached()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testInvalidPreVoteRequest(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey localKey = replicaKey(localId, true); + ReplicaKey otherNodeKey = replicaKey(localId + 1, true); + int epoch = 5; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + localKey.id(), + localKey.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(localKey, otherNodeKey)), kraftVersion) + .withElectedLeader(epoch, otherNodeKey.id()) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + assertEquals(epoch, context.currentEpoch()); + context.assertElectedLeader(epoch, otherNodeKey.id()); + + // invalid offset + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, 0, -5L)); + context.pollUntilResponse(); + context.assertSentVoteResponse( + Errors.INVALID_REQUEST, + epoch, + OptionalInt.of(otherNodeKey.id()), + false + ); + assertEquals(epoch, context.currentEpoch()); + context.assertElectedLeader(epoch, otherNodeKey.id()); + + // invalid epoch + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, -1, 0L)); + context.pollUntilResponse(); + context.assertSentVoteResponse( + Errors.INVALID_REQUEST, + epoch, + OptionalInt.of(otherNodeKey.id()), + false + ); + assertEquals(epoch, context.currentEpoch()); + context.assertElectedLeader(epoch, otherNodeKey.id()); + + // lastEpoch > replicaEpoch + context.deliverRequest(context.preVoteRequest(epoch, otherNodeKey, epoch + 1, 0L)); + context.pollUntilResponse(); + context.assertSentVoteResponse( + Errors.INVALID_REQUEST, + epoch, + OptionalInt.of(otherNodeKey.id()), + false + ); + assertEquals(epoch, context.currentEpoch()); + context.assertElectedLeader(epoch, otherNodeKey.id()); + } + + @ParameterizedTest + @EnumSource(value = 
KRaftVersion.class) + public void testFollowerGrantsPreVoteIfHasNotFetchedYet(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); + int epoch = 2; +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(replica1, replica2)), kraftVersion) + .withElectedLeader(epoch, replica1.id()) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); +
+ context.assertElectedLeader(epoch, replica1.id()); +
+ assertTrue(context.client.quorum().isFollower()); +
+ // Follower will grant PreVotes before fetching successfully from the leader; the response still includes the current leaderId + context.deliverRequest(context.preVoteRequest(epoch, replica2, epoch, 1)); + context.pollUntilResponse(); +
+ assertTrue(context.client.quorum().isFollower()); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(replica1.id()), true); +
+ // After fetching successfully from the leader once, follower will no longer grant PreVotes + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); + context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); +
+ context.deliverResponse( + fetchRequest.correlationId(), + fetchRequest.destination(), + context.fetchResponse(epoch, replica1.id(), MemoryRecords.EMPTY, 0L, Errors.NONE) + ); + assertTrue(context.client.quorum().isFollower()); +
+ context.deliverRequest(context.preVoteRequest(epoch, replica2, epoch, 1)); + context.pollUntilResponse(); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(replica1.id()), false); +
+ assertTrue(context.client.quorum().isFollower()); + } +
+ @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testRejectPreVoteIfRemoteLogIsNotUpToDate(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + int epoch = 2; + ReplicaKey local = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, replica1, replica2)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(KIP_996_PROTOCOL) + .appendToLog(epoch, Arrays.asList("a", "b", "c")) + .build(); + assertTrue(context.client.quorum().isUnattached()); + assertEquals(3, context.log.endOffset().offset()); +
+ // older epoch + context.deliverRequest(context.preVoteRequest(epoch - 1, replica1, epoch - 1, 0)); + context.pollUntilResponse(); +
+ assertTrue(context.client.quorum().isUnattached()); + context.assertSentVoteResponse(Errors.FENCED_LEADER_EPOCH, epoch, OptionalInt.empty(), false); +
+ // older offset + context.deliverRequest(context.preVoteRequest(epoch, replica1, epoch - 1, context.log.endOffset().offset() - 1)); + context.pollUntilResponse(); +
+ assertTrue(context.client.quorum().isUnattached()); + context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.empty(), false); + } +
+ @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testPreVoteResponseIgnoredAfterBecomingFollower(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); +
ReplicaKey voter2 = replicaKey(localId + 1, true); + ReplicaKey voter3 = replicaKey(localId + 2, true); + int epoch = 5; +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, voter2, voter3)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); +
+ context.assertUnknownLeaderAndNoVotedCandidate(epoch); +
+ // Sleep a little to ensure transition to prospective + context.time.sleep(context.electionTimeoutMs() * 2L); +
+ // Wait until the vote requests are inflight + context.pollUntilRequest(); + assertTrue(context.client.quorum().isProspective()); + List voteRequests = context.collectPreVoteRequests(epoch, 0, 0); + assertEquals(2, voteRequests.size()); +
+ // While the vote requests are still inflight, replica receives a BeginEpoch for the same epoch + context.deliverRequest(context.beginEpochRequest(epoch, voter3.id())); + context.client.poll(); + context.assertElectedLeader(epoch, voter3.id()); +
+ // If PreVote responses are received now they should be ignored + VoteResponseData voteResponse1 = context.voteResponse(true, OptionalInt.empty(), epoch); + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + voteResponse1 + ); +
+ VoteResponseData voteResponse2 = context.voteResponse(true, OptionalInt.of(voter3.id()), epoch); + context.deliverResponse( + voteRequests.get(1).correlationId(), + voteRequests.get(1).destination(), + voteResponse2 + ); +
+ context.client.poll(); + context.assertElectedLeader(epoch, voter3.id()); + } +
+ @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testPreVoteNotSupportedByRemote(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey voter2Key = replicaKey(localId + 1, true); + ReplicaKey voter3Key = replicaKey(localId + 2, true); + int epoch = 5; +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, voter2Key, voter3Key)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); +
+ context.assertUnknownLeaderAndNoVotedCandidate(epoch); +
+ // Sleep a little to ensure transition to Prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + assertEquals(epoch, context.currentEpoch()); + assertTrue(context.client.quorum().isProspective()); +
+ // Simulate one remote node not supporting PreVote with UNSUPPORTED_VERSION response. + // Note: with the mocked network client we simulate this a bit differently; in reality this response would + // be generated by the network client and not sent from the remote node. + List voteRequests = context.collectPreVoteRequests(epoch, 0, 0); + assertEquals(2, voteRequests.size()); + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ); +
+ // Local should transition to Candidate since it realizes the remote node does not support PreVote.
+ context.client.poll(); + assertEquals(epoch + 1, context.currentEpoch()); + assertTrue(context.client.quorum().isCandidate()); +
+ // Any further PreVote responses should be ignored + context.deliverResponse( + voteRequests.get(1).correlationId(), + voteRequests.get(1).destination(), + context.voteResponse(true, OptionalInt.empty(), epoch) + ); + context.client.poll(); + assertEquals(epoch + 1, context.currentEpoch()); + assertTrue(context.client.quorum().isCandidate()); + context.collectVoteRequests(epoch + 1, 0, 0); +
+ // Sleep to transition back to Prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertEquals(epoch + 1, context.currentEpoch()); + assertTrue(context.client.quorum().isProspective()); +
+ // Simulate receiving enough valid PreVote responses for election to succeed + context.pollUntilRequest(); + voteRequests = context.collectPreVoteRequests(epoch + 1, 0, 0); + assertEquals(2, voteRequests.size()); + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + context.voteResponse(true, OptionalInt.empty(), epoch + 1) + ); + context.client.poll(); + assertEquals(epoch + 2, context.currentEpoch()); + assertTrue(context.client.quorum().isCandidate()); +
+ // Any further PreVote responses should be ignored + context.deliverResponse( + voteRequests.get(1).correlationId(), + voteRequests.get(1).destination(), + RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ); + context.client.poll(); + assertEquals(epoch + 2, context.currentEpoch()); + assertTrue(context.client.quorum().isCandidate()); + } +
+ @ParameterizedTest + @MethodSource("kraftVersionRaftProtocolCombinations") + public void testProspectiveReceivesBeginQuorumRequest( + KRaftVersion kraftVersion, + RaftProtocol raftProtocol + ) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey leader = replicaKey(localId + 1, true); + int epoch = 5; +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, leader)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(raftProtocol) + .build(); +
+ context.assertUnknownLeaderAndNoVotedCandidate(epoch); +
+ // Sleep a little to ensure transition to prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); +
+ assertTrue(context.client.quorum().isProspective()); +
+ context.deliverRequest(context.beginEpochRequest(epoch, leader.id())); + context.client.poll(); +
+ assertTrue(context.client.quorum().isFollower()); + context.assertElectedLeader(epoch, leader.id()); + } +
+ @ParameterizedTest + @MethodSource("kraftVersionRaftProtocolCombinations") + public void testProspectiveTransitionsToUnattachedOnElectionFailure( + KRaftVersion kraftVersion, + RaftProtocol raftProtocol + ) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey otherNode = replicaKey(localId + 1, true); + int epoch = 5; +
+ RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, otherNode)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(raftProtocol) + .build(); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); +
+ // Sleep a little to ensure transition to prospective + context.time.sleep(context.electionTimeoutMs() * 2L); +
context.pollUntilRequest(); + assertTrue(context.client.quorum().isProspective()); + context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + + // If election timeout expires, replica should transition to unattached to attempt re-discovering leader + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isUnattached()); + + // After election times out again, replica will transition back to prospective and send PreVote requests + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + RaftRequest.Outbound voteRequest = context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + + // If prospective receives enough rejected votes, it also transitions to unattached immediately + context.deliverResponse( + voteRequest.correlationId(), + voteRequest.destination(), + context.voteResponse(false, OptionalInt.empty(), epoch) + ); + context.client.poll(); + assertTrue(context.client.quorum().isUnattached()); + + // After election times out again, replica will transition back to prospective and send PreVote requests + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + } + + @ParameterizedTest + @MethodSource("kraftVersionRaftProtocolCombinations") + public void testProspectiveWithLeaderTransitionsToFollower( + KRaftVersion kraftVersion, + RaftProtocol raftProtocol + ) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey replica1 = replicaKey(localId + 1, true); + ReplicaKey replica2 = replicaKey(localId + 2, true); + int epoch = 5; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, replica1, replica2)), kraftVersion) + .withElectedLeader(epoch, replica1.id()) + .withRaftProtocol(raftProtocol) + .build(); + context.assertElectedLeader(epoch, replica1.id()); + assertTrue(context.client.quorum().isFollower()); + + // Sleep a little to ensure transition to prospective + context.time.sleep(context.fetchTimeoutMs); + context.pollUntilRequest(); + assertTrue(context.client.quorum().isProspective()); + context.assertSentPreVoteRequest(epoch, 0, 0L, 2); + + // If election timeout expires, replica should transition back to follower if it hasn't found new leader yet + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + context.assertSentFetchRequest(); + assertTrue(context.client.quorum().isFollower()); + context.assertElectedLeader(epoch, replica1.id()); + + // After election times out again, replica will transition back to prospective and send PreVote requests + context.time.sleep(context.fetchTimeoutMs); + context.pollUntilRequest(); + List voteRequests = context.collectPreVoteRequests(epoch, 0, 0); + assertEquals(2, voteRequests.size()); + assertTrue(context.client.quorum().isProspective()); + context.assertElectedLeader(epoch, replica1.id()); + + // If prospective receives enough rejected votes without leaderId, it also transitions to follower immediately + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + context.voteResponse(false, OptionalInt.empty(), epoch) + ); + context.client.poll(); + + context.deliverResponse( + voteRequests.get(1).correlationId(), + voteRequests.get(1).destination(), + context.voteResponse(false, OptionalInt.empty(), epoch) + ); + 
context.client.poll(); + assertTrue(context.client.quorum().isFollower()); + + context.client.poll(); + context.assertSentFetchRequest(); + + // After election times out again, transition back to prospective and send PreVote requests + context.time.sleep(context.fetchTimeoutMs); + context.pollUntilRequest(); + voteRequests = context.collectPreVoteRequests(epoch, 0, 0); + assertEquals(2, voteRequests.size()); + assertTrue(context.client.quorum().isProspective()); + context.assertElectedLeader(epoch, replica1.id()); + + // If prospective receives vote response with different leaderId, it will transition to follower immediately + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + context.voteResponse(Errors.FENCED_LEADER_EPOCH, OptionalInt.of(replica2.id()), epoch + 1)); + context.client.poll(); + assertTrue(context.client.quorum().isFollower()); + context.assertElectedLeader(epoch + 1, replica2.id()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveLosesElectionHasLeaderButMissingEndpoint(KRaftVersion kraftVersion) throws Exception { + int localId = randomReplicaId(); + ReplicaKey local = replicaKey(localId, true); + ReplicaKey voter1 = replicaKey(localId + 1, true); + int electedLeaderId = localId + 3; + int epoch = 2; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, voter1)), kraftVersion) + .withElectedLeader(epoch, electedLeaderId) + .withRaftProtocol(KIP_996_PROTOCOL) + .build(); + context.assertElectedLeader(epoch, electedLeaderId); + assertTrue(context.client.quorum().isUnattached()); + // Sleep a little to ensure that we become a prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + // Sleep past election timeout + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + + // Prospective should transition to unattached + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().hasLeader()); + + // If election timeout expires again, it should transition back to prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + assertTrue(context.client.quorum().hasLeader()); + } + + @ParameterizedTest + @MethodSource("kraftVersionRaftProtocolCombinations") + public void testProspectiveWithoutLeaderTransitionsToFollower( + KRaftVersion kraftVersion, + RaftProtocol raftProtocol + ) throws Exception { + ReplicaKey local = replicaKey(randomReplicaId(), true); + ReplicaKey leader = replicaKey(local.id() + 1, true); + ReplicaKey follower = replicaKey(local.id() + 2, true); + int epoch = 5; + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, leader, follower)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(raftProtocol) + .build(); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); + + // Sleep a little to ensure that we transition to Prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + assertTrue(context.client.quorum().isProspective()); + List voteRequests = context.collectPreVoteRequests(epoch, 0, 0); + assertEquals(2, 
voteRequests.size()); + + // Simulate PreVote response with granted=true and a leaderId + VoteResponseData voteResponse1 = context.voteResponse(true, OptionalInt.of(leader.id()), epoch); + context.deliverResponse( + voteRequests.get(0).correlationId(), + voteRequests.get(0).destination(), + voteResponse1 + ); + + // Prospective should transition to Follower + context.client.poll(); + assertTrue(context.client.quorum().isFollower()); + assertEquals(OptionalInt.of(leader.id()), context.client.quorum().leaderId()); + } + + @ParameterizedTest + @MethodSource("kraftVersionRaftProtocolCombinations") + public void testPreVoteRequestTimeout( + KRaftVersion kraftVersion, + RaftProtocol raftProtocol + ) throws Exception { + int localId = randomReplicaId(); + int epoch = 1; + ReplicaKey local = replicaKey(localId, true); + ReplicaKey otherNode = replicaKey(localId + 1, true); + + RaftClientTestContext context = new RaftClientTestContext.Builder( + local.id(), + local.directoryId().get() + ) + .withStartingVoters(VoterSetTest.voterSet(Stream.of(local, otherNode)), kraftVersion) + .withUnknownLeader(epoch) + .withRaftProtocol(raftProtocol) + .build(); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + // Simulate a request timeout + context.pollUntilRequest(); + RaftRequest.Outbound request = context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + context.time.sleep(context.requestTimeoutMs()); + + // Prospective should retry the request + context.client.poll(); + RaftRequest.Outbound retryRequest = context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + + // Ignore the timed out response if it arrives late + context.deliverResponse( + request.correlationId(), + request.destination(), + context.voteResponse(true, OptionalInt.empty(), epoch) + ); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + // Become candidate after receiving the retry response + context.deliverResponse( + retryRequest.correlationId(), + retryRequest.destination(), + context.voteResponse(true, OptionalInt.empty(), epoch) + ); + context.client.poll(); + assertTrue(context.client.quorum().isCandidate()); + context.assertVotedCandidate(epoch + 1, local); + } + + static Stream kraftVersionRaftProtocolCombinations() { + return Stream.of(KRaftVersion.values()) + .flatMap(enum1 -> Stream.of(RaftProtocol.values()) + .map(enum2 -> Arguments.of(enum1, enum2))); + } + + static Stream kraftVersionHasFetchedCombinations() { + return Stream.of(KRaftVersion.values()) + .flatMap(enum1 -> Stream.of(true, false) + .map(enum2 -> Arguments.of(enum1, enum2))); + } +} diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java index 9e5d68d5e6a0c..493083831d1de 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java @@ -36,7 +36,7 @@ import org.apache.kafka.common.record.Records; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.BufferSupplier; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.snapshot.RecordsSnapshotReader; import org.apache.kafka.snapshot.SnapshotReader; @@ -121,7 +121,7 @@ public void 
testLeaderWritesBootstrapRecords() throws Exception { SnapshotWriterReaderTest.assertControlSnapshot(expectedBootstrapRecords, reader); } - context.becomeLeader(); + context.unattachedToLeader(); // check if leader writes 3 bootstrap records to the log Records records = context.log.read(0, Isolation.UNCOMMITTED).records; @@ -155,7 +155,7 @@ public void testBootstrapCheckpointIsNotReturnedOnFetch() throws Exception { .withUnknownLeader(0) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // check that leader does not respond with bootstrap snapshot id when follower fetches offset 0 @@ -203,7 +203,7 @@ public void testLeaderDoesNotBootstrapRecordsWithKraftVersion0() throws Exceptio ); // check leader does not write bootstrap records to log - context.becomeLeader(); + context.unattachedToLeader(); Records records = context.log.read(0, Isolation.UNCOMMITTED).records; RecordBatch batch = records.batches().iterator().next(); @@ -333,7 +333,7 @@ public void testAddVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -380,7 +380,7 @@ public void testAddVoter() throws Exception { apiVersionsResponse(Errors.NONE) ); - // Handle the the API_VERSIONS response + // Handle the API_VERSIONS response context.client.poll(); // Append new VotersRecord to log context.client.poll(); @@ -412,7 +412,7 @@ void testAddVoterInvalidClusterId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); InetSocketAddress newAddress = InetSocketAddress.createUnresolved( @@ -477,7 +477,7 @@ void testAddVoterWithMissingDefaultListener() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); InetSocketAddress newAddress = InetSocketAddress.createUnresolved( @@ -507,7 +507,7 @@ void testAddVoterWithPendingAddVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -563,7 +563,7 @@ void testAddVoterWithoutFencedPreviousLeaders() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -601,7 +601,7 @@ void testAddVoterWithKraftVersion0() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -646,7 +646,7 @@ void testAddVoterWithExistingVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(follower.id(), true); @@ -691,7 +691,7 @@ void testAddVoterTimeout() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -735,7 +735,7 @@ void testAddVoterTimeout() throws Exception { apiVersionsResponse(Errors.NONE) ); - // Handle the the API_VERSIONS response + // Handle the API_VERSIONS 
response context.client.poll(); // Wait for request timeout without sending a FETCH request to timeout the add voter RPC @@ -763,7 +763,7 @@ void testAddVoterWithApiVersionsFromIncorrectNode() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -823,7 +823,7 @@ void testAddVoterInvalidFeatureVersion() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -883,7 +883,7 @@ void testAddVoterWithLaggingNewVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -936,7 +936,7 @@ void testAddVoterFailsWhenLosingLeadership() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -985,7 +985,7 @@ void testAddVoterWithMissingDirectoryId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, false); @@ -1024,7 +1024,7 @@ public void testRemoveVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertTrue(context.client.quorum().isVoter(follower2)); @@ -1073,7 +1073,7 @@ public void testRemoveVoterIsLeader() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1101,7 +1101,7 @@ public void testRemoveVoterIsLeader() throws Exception { context.pollUntilResponse(); context.assertSentFetchPartitionResponse(Errors.NONE, epoch, OptionalInt.of(local.id())); - // Send a FETCH request for follower2 and increaes the HWM + // Send a FETCH request for follower2 and increase the HWM context.deliverRequest( context.fetchRequest(epoch, follower2, context.log.endOffset().offset(), epoch, 0) ); @@ -1123,8 +1123,8 @@ public void testRemoveVoterIsLeader() throws Exception { // Calls to resign should be allowed and not throw an exception context.client.resign(epoch); - // Election timeout is random numer in [electionTimeoutMs, 2 * electionTimeoutMs) - context.time.sleep(2 * context.electionTimeoutMs()); + // Election timeout is random number in [electionTimeoutMs, 2 * electionTimeoutMs) + context.time.sleep(2L * context.electionTimeoutMs()); context.client.poll(); assertTrue(context.client.quorum().isObserver()); @@ -1145,7 +1145,7 @@ public void testRemoveVoterInvalidClusterId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); // empty cluster id is rejected context.deliverRequest(context.removeVoterRequest("", follower1)); @@ -1194,7 +1194,7 @@ void testRemoveVoterWithPendingRemoveVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1232,7 +1232,7 @@ void testRemoveVoterWithoutFencedPreviousLeaders() throws Exception { .withUnknownLeader(3) 
.build(); - context.becomeLeader(); + context.unattachedToLeader(); // Attempt to remove follower2 context.deliverRequest(context.removeVoterRequest(follower2)); @@ -1254,7 +1254,7 @@ void testRemoveVoterWithKraftVersion0() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1284,7 +1284,7 @@ void testRemoveVoterWithNoneVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1314,7 +1314,7 @@ void testRemoveVoterWithNoneVoterId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1366,7 +1366,7 @@ void testRemoveVoterTimedOut() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1410,7 +1410,7 @@ void testRemoveVoterFailsWhenLosingLeadership() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1452,7 +1452,7 @@ void testAddVoterWithPendingRemoveVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1497,7 +1497,7 @@ void testRemoveVoterWithPendingAddVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -1545,7 +1545,7 @@ void testUpdateVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertTrue(context.client.quorum().isVoter(follower)); @@ -1573,12 +1573,12 @@ void testUpdateVoter() throws Exception { context.deliverRequest( context.updateVoterRequest( follower, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); - // Expect reply for UpdateVoter request without commiting the record + // Expect reply for UpdateVoter request without committing the record context.pollUntilResponse(); context.assertSentUpdateVoterResponse( Errors.NONE, @@ -1615,7 +1615,7 @@ void testLeaderUpdatesVoter() throws Exception { .withLocalListeners(localListeners) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertTrue(context.client.quorum().isVoter(follower)); @@ -1631,7 +1631,7 @@ void testLeaderUpdatesVoter() throws Exception { VoterSet.VoterNode.of( local, localListeners, - Features.KRAFT_VERSION.supportedVersionRange() + Feature.KRAFT_VERSION.supportedVersionRange() ) ); assertEquals(updatedVoterSet, context.listener.lastCommittedVoterSet()); @@ -1650,7 +1650,7 @@ public void testUpdateVoterInvalidClusterId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // empty cluster id is rejected @@ -1659,7 +1659,7 @@ public void testUpdateVoterInvalidClusterId() throws Exception { "", 
follower, epoch, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), Endpoints.empty() ) ); @@ -1676,7 +1676,7 @@ public void testUpdateVoterInvalidClusterId() throws Exception { "invalid-uuid", follower, epoch, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), Endpoints.empty() ) ); @@ -1701,7 +1701,7 @@ void testUpdateVoterOldEpoch() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -1709,7 +1709,7 @@ void testUpdateVoterOldEpoch() throws Exception { context.clusterId, follower, epoch - 1, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), Endpoints.empty() ) ); @@ -1734,7 +1734,7 @@ void testUpdateVoterNewEpoch() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -1742,7 +1742,7 @@ void testUpdateVoterNewEpoch() throws Exception { context.clusterId, follower, epoch + 1, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), Endpoints.empty() ) ); @@ -1767,11 +1767,11 @@ void testUpdateVoterToNotLeader() throws Exception { .withUnknownLeader(3) .build(); - // Attempt to uodate voter in the quorum + // Attempt to update voter in the quorum context.deliverRequest( context.updateVoterRequest( follower, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), Endpoints.empty() ) ); @@ -1796,7 +1796,7 @@ void testUpdateVoterWithoutFencedPreviousLeaders() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Attempt to update the follower @@ -1815,7 +1815,7 @@ void testUpdateVoterWithoutFencedPreviousLeaders() throws Exception { context.deliverRequest( context.updateVoterRequest( follower, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); @@ -1841,7 +1841,7 @@ void testUpdateVoterWithKraftVersion0() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1867,7 +1867,7 @@ void testUpdateVoterWithKraftVersion0() throws Exception { context.deliverRequest( context.updateVoterRequest( follower, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); @@ -1892,7 +1892,7 @@ void testUpdateVoterWithNoneVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1918,7 +1918,7 @@ void testUpdateVoterWithNoneVoter() throws Exception { context.deliverRequest( context.updateVoterRequest( replicaKey(follower.id(), true), - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); @@ -1943,7 +1943,7 @@ void testUpdateVoterWithNoneVoterId() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Establish a HWM and fence previous leaders @@ -1969,7 +1969,7 @@ void testUpdateVoterWithNoneVoterId() throws 
Exception { context.deliverRequest( context.updateVoterRequest( ReplicaKey.of(follower.id() + 1, follower.directoryId().get()), - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); @@ -1994,7 +1994,7 @@ void testUpdateVoterWithPendingAddVoter() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey newVoter = replicaKey(local.id() + 2, true); @@ -2039,7 +2039,7 @@ void testUpdateVoterWithPendingAddVoter() throws Exception { context.deliverRequest( context.updateVoterRequest( follower, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), newListeners ) ); @@ -2105,7 +2105,7 @@ void testFollowerSendsUpdateVoter() throws Exception { RaftRequest.Outbound updateRequest = context.assertSentUpdateVoterRequest( local, epoch, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), localListeners ); context.deliverResponse( @@ -2226,7 +2226,7 @@ void testUpdateVoterResponseCausesEpochChange() throws Exception { RaftRequest.Outbound updateRequest = context.assertSentUpdateVoterRequest( local, epoch, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), localListeners ); context.deliverResponse( @@ -2248,14 +2248,14 @@ void testUpdateVoterResponseCausesEpochChange() throws Exception { @Test void testObserverDiscoversLeaderWithUnknownVoters() throws Exception { ReplicaKey local = replicaKey(randomReplicaId(), true); - InetSocketAddress bootstrapAdddress = InetSocketAddress.createUnresolved("localhost", 1234); + InetSocketAddress bootstrapAddress = InetSocketAddress.createUnresolved("localhost", 1234); int epoch = 3; RaftClientTestContext context = new RaftClientTestContext.Builder(local.id(), local.directoryId().get()) .withKip853Rpc(true) .withBootstrapSnapshot(Optional.empty()) .withUnknownLeader(epoch) - .withBootstrapServers(Optional.of(Collections.singletonList(bootstrapAdddress))) + .withBootstrapServers(Optional.of(Collections.singletonList(bootstrapAddress))) .build(); context.pollUntilRequest(); @@ -2331,7 +2331,7 @@ private int randomReplicaId() { } private static ApiVersionsResponseData apiVersionsResponse(Errors error) { - return apiVersionsResponse(error, Features.KRAFT_VERSION.supportedVersionRange()); + return apiVersionsResponse(error, Feature.KRAFT_VERSION.supportedVersionRange()); } private static ApiVersionsResponseData apiVersionsResponse(Errors error, SupportedVersionRange supportedVersions) { diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java index 1fc43cd2e14f8..1f3307f9adaef 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java @@ -114,7 +114,7 @@ public void testLeaderListenerNotified(boolean entireLog, boolean withKip853Rpc) RaftClientTestContext context = contextBuilder.build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Advance the highWatermark @@ -236,7 +236,7 @@ public void testListenerRenotified(boolean withKip853Rpc) throws Exception { .deleteBeforeSnapshot(snapshotId) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Stop the listener from 
reading commit batches @@ -293,7 +293,7 @@ public void testLeaderImmediatelySendsSnapshotId(boolean withKip853Rpc) throws E .deleteBeforeSnapshot(snapshotId) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Send a fetch request for an end offset and epoch which has been snapshotted @@ -321,7 +321,7 @@ public void testFetchRequestOffsetLessThanLogStart(boolean withKip853Rpc) throws .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); List appendRecords = Arrays.asList("a", "b", "c"); @@ -371,7 +371,7 @@ public void testFetchRequestOffsetAtZero(boolean withKip853Rpc) throws Exception .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); List appendRecords = Arrays.asList("a", "b", "c"); @@ -423,7 +423,7 @@ public void testFetchRequestWithLargerLastFetchedEpoch(boolean withKip853Rpc) th .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(oldestSnapshotId.epoch() + 1, epoch); @@ -465,7 +465,7 @@ public void testFetchRequestTruncateToLogStart(boolean withKip853Rpc) throws Exc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(oldestSnapshotId.epoch() + 2 + 1, epoch); @@ -516,7 +516,7 @@ public void testFetchRequestAtLogStartOffsetWithValidEpoch(boolean withKip853Rpc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(oldestSnapshotId.epoch() + 2 + 1, epoch); @@ -562,7 +562,7 @@ public void testFetchRequestAtLogStartOffsetWithInvalidEpoch(boolean withKip853R .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(oldestSnapshotId.epoch() + 2 + 1, epoch); @@ -616,7 +616,7 @@ public void testFetchRequestWithLastFetchedEpochLessThanOldestSnapshot( .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(oldestSnapshotId.epoch() + 2 + 1, epoch); @@ -660,7 +660,7 @@ public void testFetchSnapshotRequestMissingSnapshot(boolean withKip853Rpc) throw .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -693,7 +693,7 @@ public void testFetchSnapshotRequestBootstrapSnapshot() throws Exception { .withUnknownLeader(3) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -724,7 +724,7 @@ public void testFetchSnapshotRequestUnknownPartition(boolean withKip853Rpc) thro .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -756,7 +756,7 @@ public void testFetchSnapshotRequestAsLeader(boolean withKip853Rpc) throws Excep .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.advanceLocalLeaderHighWatermarkToLogEndOffset(); @@ -813,7 +813,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchSnapshotRequestFromMajo .build(); int resignLeadershipTimeout = 
context.checkQuorumTimeoutMs; - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); FetchSnapshotRequestData voter1FetchSnapshotRequest = fetchSnapshotRequest( @@ -902,7 +902,7 @@ public void testPartialFetchSnapshotRequestAsLeader(boolean withKip853Rpc) throw .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.advanceLocalLeaderHighWatermarkToLogEndOffset(); @@ -1015,7 +1015,7 @@ public void testFetchSnapshotRequestWithInvalidPosition(boolean withKip853Rpc) t .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.advanceLocalLeaderHighWatermarkToLogEndOffset(); @@ -1074,7 +1074,7 @@ public void testFetchSnapshotRequestWithOlderEpoch(boolean withKip853Rpc) throws .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -1107,7 +1107,7 @@ public void testFetchSnapshotRequestWithNewerEpoch(boolean withKip853Rpc) throws .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest( @@ -1182,13 +1182,13 @@ public void testFetchResponseWithInvalidSnapshotId(boolean withKip853Rpc) throws fetchRequest = context.assertSentFetchRequest(); context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); - // Fetch timer is not reset; sleeping for remainder should transition to candidate + // Fetch timer is not reset; sleeping for remainder should transition to prospective context.time.sleep(context.fetchTimeoutMs - slept); context.pollUntilRequest(); - context.assertSentVoteRequest(epoch + 1, 0, 0L, 1); - context.assertVotedCandidate(epoch + 1, localId); + context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + assertTrue(context.client.quorum().isProspective()); } @ParameterizedTest @@ -1777,13 +1777,13 @@ public void testFetchSnapshotResponseToNotFollower(boolean withKip853Rpc) throws assertEquals(snapshotId.epoch(), request.snapshotId().epoch()); assertEquals(0, request.position()); - // Sleeping for fetch timeout should transition to candidate + // Sleeping for fetch timeout should transition to prospective context.time.sleep(context.fetchTimeoutMs); context.pollUntilRequest(); - context.assertSentVoteRequest(epoch + 1, 0, 0L, 1); - context.assertVotedCandidate(epoch + 1, localId); + context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + assertTrue(context.client.quorum().isProspective()); // Send the response late context.deliverResponse( @@ -1809,9 +1809,9 @@ public void testFetchSnapshotResponseToNotFollower(boolean withKip853Rpc) throws ) ); - // Assert that the response is ignored and the replicas stays as a candidate + // Assert that the response is ignored and the replicas stays as a prospective context.client.poll(); - context.assertVotedCandidate(epoch + 1, localId); + assertTrue(context.client.quorum().isProspective()); } @ParameterizedTest @@ -1828,7 +1828,7 @@ public void testFetchSnapshotRequestClusterIdValidation( .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // valid cluster id is accepted @@ -1909,13 +1909,20 @@ public void testCreateSnapshotAsLeaderWithInvalidSnapshotId(boolean withKip853Rp .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int 
currentEpoch = context.currentEpoch(); // When leader creating snapshot: // 1.1 high watermark cannot be empty assertEquals(OptionalLong.empty(), context.client.highWatermark()); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId1, 0)); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId1, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=4, epoch=2)) greater than the high-watermark (0)", + exception.getMessage() + ); // 1.2 high watermark must larger than or equal to the snapshotId's endOffset context.advanceLocalLeaderHighWatermarkToLogEndOffset(); @@ -1927,18 +1934,52 @@ public void testCreateSnapshotAsLeaderWithInvalidSnapshotId(boolean withKip853Rp context.client.poll(); assertEquals(context.log.endOffset().offset(), context.client.highWatermark().getAsLong() + newRecords.size()); - OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 2, currentEpoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId2, 0)); + OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + newRecords.size(), currentEpoch); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId2, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=7, epoch=3)) greater than the high-watermark (4)", + exception.getMessage() + ); // 2 the quorum epoch must larger than or equal to the snapshotId's epoch OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong(), currentEpoch + 1); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId3, 0)); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId3, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=4, epoch=4)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=7, epoch=3))", + exception.getMessage() + ); // 3 the snapshotId should be validated against endOffsetForEpoch OffsetAndEpoch endOffsetForEpoch = context.log.endOffsetForEpoch(epoch); assertEquals(epoch, endOffsetForEpoch.epoch()); - OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 2, epoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId4, 0)); + OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 1, epoch); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId4, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=4, epoch=2)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=3, epoch=2))", + exception.getMessage() + ); + + // 4 snapshotId offset must be at a batch boundary + context.advanceLocalLeaderHighWatermarkToLogEndOffset(); + OffsetAndEpoch invalidSnapshotId5 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() - 1, currentEpoch); + // this points to the "f" offset, which is not batch aligned + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId5, 0) + ); + assertEquals( + 
"Cannot create snapshot at offset (6) because it is not batch aligned. The batch containing the requested offset has a base offset of (4)", + exception.getMessage() + ); } @ParameterizedTest @@ -1951,6 +1992,7 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 Set voters = Set.of(localId, leaderId, otherFollowerId); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .appendToLog(1, List.of("a")) .withElectedLeader(epoch, leaderId) .withKip853Rpc(withKip853Rpc) .build(); @@ -1959,18 +2001,25 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 // When follower creating snapshot: // 1) The high watermark cannot be empty assertEquals(OptionalLong.empty(), context.client.highWatermark()); - OffsetAndEpoch invalidSnapshotId1 = new OffsetAndEpoch(1, 0); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId1, 0)); + OffsetAndEpoch invalidSnapshotId1 = new OffsetAndEpoch(1, 1); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId1, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=1, epoch=1)) greater than the high-watermark (0)", + exception.getMessage() + ); // Poll for our first fetch request context.pollUntilRequest(); RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); assertTrue(voters.contains(fetchRequest.destination().id())); - context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); + context.assertFetchRequestData(fetchRequest, epoch, 1L, 1); // The response does not advance the high watermark - List records1 = Arrays.asList("a", "b", "c"); - MemoryRecords batch1 = context.buildBatch(0L, 3, records1); + List records1 = Arrays.asList("b", "c"); + MemoryRecords batch1 = context.buildBatch(1L, 3, records1); context.deliverResponse( fetchRequest.correlationId(), fetchRequest.destination(), @@ -1981,11 +2030,14 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 // 2) The high watermark must be larger than or equal to the snapshotId's endOffset int currentEpoch = context.currentEpoch(); OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 1, currentEpoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId2, 0)); - - // 3) The quorum epoch must be larger than or equal to the snapshotId's epoch - OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 1, currentEpoch + 1); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId3, 0)); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId2, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=1, epoch=5)) greater than the high-watermark (0)", + exception.getMessage() + ); // The high watermark advances to be larger than log.endOffsetForEpoch(3), to test the case 3 context.pollUntilRequest(); @@ -1994,7 +2046,8 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 context.assertFetchRequestData(fetchRequest, epoch, 3L, 3); List records2 = Arrays.asList("d", "e", "f"); - MemoryRecords batch2 = context.buildBatch(3L, 4, records2); + int batch2Epoch = 4; + MemoryRecords batch2 = context.buildBatch(3L, batch2Epoch, 
records2); context.deliverResponse( fetchRequest.correlationId(), fetchRequest.destination(), @@ -2003,11 +2056,44 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 context.client.poll(); assertEquals(6L, context.client.highWatermark().getAsLong()); + // 3) The quorum epoch must be larger than or equal to the snapshotId's epoch + OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong(), currentEpoch + 1); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId3, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=6, epoch=6)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=6, epoch=4))", + exception.getMessage() + ); + // 4) The snapshotId should be validated against endOffsetForEpoch OffsetAndEpoch endOffsetForEpoch = context.log.endOffsetForEpoch(3); assertEquals(3, endOffsetForEpoch.epoch()); - OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 1, epoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId4, 0)); + OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 3, 3); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId4, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=6, epoch=3)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=3, epoch=3))", + exception.getMessage() + ); + + // 5) The snapshotId should be batch-aligned + endOffsetForEpoch = context.log.endOffsetForEpoch(batch2Epoch); + assertEquals(4, endOffsetForEpoch.epoch()); + assertEquals(6, endOffsetForEpoch.offset()); + OffsetAndEpoch invalidSnapshotId5 = new OffsetAndEpoch(endOffsetForEpoch.offset() - 1, batch2Epoch); + // this points to the "f" offset, which is not batch aligned + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId5, 0) + ); + assertEquals( + "Cannot create snapshot at offset (5) because it is not batch aligned. 
The batch containing the requested offset has a base offset of (3)", + exception.getMessage() + ); } private static ReplicaKey replicaKey(int id, boolean withDirectoryId) { diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java index fd0a2eef89a64..48b8c8b72c107 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientTest.java @@ -70,6 +70,7 @@ import static java.util.Collections.singletonList; import static org.apache.kafka.raft.RaftClientTestContext.Builder.DEFAULT_ELECTION_TIMEOUT_MS; +import static org.apache.kafka.raft.RaftClientTestContext.RaftProtocol.KIP_853_PROTOCOL; import static org.apache.kafka.test.TestUtils.assertFutureThrows; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -79,6 +80,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +@SuppressWarnings({"ClassDataAbstractionCoupling", "ClassFanOutComplexity"}) public class KafkaRaftClientTest { @Test public void testNodeDirectoryId() { @@ -215,7 +217,7 @@ public void testGrantVotesFromHigherEpochAfterResigningLeadership(boolean withKi ); context.client.poll(); - // We will first transition to unattached and then grant vote and then transition to voted + // Replica will first transition to unattached, then grant vote, then transition to unattached voted assertTrue(context.client.quorum().isUnattachedAndVoted()); context.assertVotedCandidate(epoch + 1, remoteKey.id()); context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.empty(), true); @@ -252,7 +254,7 @@ public void testGrantVotesFromHigherEpochAfterResigningCandidacy(boolean withKip ); context.client.poll(); - // We will first transition to unattached and then grant vote and then transition to voted + // Replica will first transition to unattached, then grant vote, then transition to unattached voted assertTrue(context.client.quorum().isUnattachedAndVoted()); context.assertVotedCandidate(epoch + 1, remoteKey.id()); context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.empty(), true); @@ -270,7 +272,7 @@ public void testGrantVotesWhenShuttingDown(boolean withKip853Rpc) throws Excepti .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Beginning shutdown @@ -288,7 +290,7 @@ public void testGrantVotesWhenShuttingDown(boolean withKip853Rpc) throws Excepti ); context.client.poll(); - // We will first transition to unattached and then grant vote and then transition to voted + // Replica will first transition to unattached, then grant vote, then transition to unattached voted assertTrue( context.client.quorum().isUnattachedAndVoted(), "Local Id: " + localId + @@ -302,7 +304,7 @@ public void testGrantVotesWhenShuttingDown(boolean withKip853Rpc) throws Excepti @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testInitializeAsResignedAndBecomeCandidate(boolean withKip853Rpc) throws Exception { + public void testInitializeAsResignedAndUnableToContactQuorum(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int remoteId = localId + 1; Set voters = Set.of(localId, remoteId); @@ -323,9 +325,19 @@ public void testInitializeAsResignedAndBecomeCandidate(boolean withKip853Rpc) th 
context.time.sleep(context.electionTimeoutMs()); context.client.poll(); - // Become candidate in a new epoch - assertTrue(context.client.quorum().isCandidate()); - context.assertVotedCandidate(epoch + 1, localId); + // Become unattached with expired election timeout + assertTrue(context.client.quorum().isUnattached()); + assertEquals(epoch + 1, context.currentEpoch()); + + // Become prospective immediately + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + // Become unattached again after election timeout + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isUnattached()); + assertEquals(epoch + 1, context.currentEpoch()); } @ParameterizedTest @@ -358,10 +370,13 @@ public void testInitializeAsResignedLeaderFromStateStore(boolean withKip853Rpc) ); context.client.poll(); + // The node will transition to unattached with epoch + 1 after election timeout passes context.time.sleep(context.electionTimeoutMs()); - context.pollUntilRequest(); - context.assertVotedCandidate(epoch + 1, localId); - context.assertSentVoteRequest(epoch + 1, 0, 0L, 1); + context.client.poll(); + assertTrue(context.client.quorum().isUnattached()); + assertEquals(epoch + 1, context.currentEpoch()); + UnattachedState unattached = context.client.quorum().unattachedStateOrThrow(); + assertEquals(0, unattached.remainingElectionTimeMs(context.time.milliseconds())); } @ParameterizedTest @@ -399,7 +414,7 @@ public void testAppendFailedWithBufferAllocationException(boolean withKip853Rpc) .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -418,7 +433,7 @@ public void testAppendFailedWithFencedEpoch(boolean withKip853Rpc) throws Except .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -439,7 +454,7 @@ public void testAppendFailedWithRecordBatchTooLargeException(boolean withKip853R .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -512,7 +527,7 @@ public void testResignWillCompleteFetchPurgatory(boolean withKip853Rpc) throws E .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); // send fetch request when become leader @@ -550,7 +565,7 @@ public void testResignInOlderEpochIgnored(boolean withKip853Rpc) throws Exceptio .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int currentEpoch = context.currentEpoch(); @@ -577,7 +592,7 @@ public void testHandleBeginQuorumEpochAfterUserInitiatedResign( .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int resignedEpoch = context.currentEpoch(); @@ -605,7 +620,7 @@ public void testBeginQuorumEpochHeartbeat(boolean withKip853Rpc) throws Exceptio .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(OptionalInt.of(localId), 
context.currentLeader()); @@ -641,7 +656,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchRequestFromMajorityVote .build(); int resignLeadershipTimeout = context.checkQuorumTimeoutMs; - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); assertEquals(OptionalInt.of(localId), context.currentLeader()); @@ -714,7 +729,7 @@ public void testElectionTimeoutAfterUserInitiatedResign(boolean withKip853Rpc) t .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int resignedEpoch = context.currentEpoch(); @@ -733,7 +748,7 @@ public void testElectionTimeoutAfterUserInitiatedResign(boolean withKip853Rpc) t context.deliverResponse(request.correlationId(), request.destination(), response); context.client.poll(); - // We do not resend `EndQuorumRequest` once the other voter has acknowledged it. + // Local does not resend `EndQuorumRequest` once the other voter has acknowledged it. context.time.sleep(context.retryBackoffMs); context.client.poll(); assertFalse(context.channel.hasSentRequests()); @@ -748,12 +763,17 @@ public void testElectionTimeoutAfterUserInitiatedResign(boolean withKip853Rpc) t OptionalInt.of(localId) ); - // After the election timer, we should become a candidate. + // After the election timer, local should become unattached. context.time.sleep(2L * context.electionTimeoutMs()); - context.pollUntil(context.client.quorum()::isCandidate); + context.pollUntil(context.client.quorum()::isUnattached); assertEquals(resignedEpoch + 1, context.currentEpoch()); assertEquals(new LeaderAndEpoch(OptionalInt.empty(), resignedEpoch + 1), context.listener.currentLeaderAndEpoch()); + + // Local will become prospective right away + assertEquals(0, context.client.quorum().unattachedStateOrThrow().electionTimeoutMs()); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); } @ParameterizedTest @@ -766,7 +786,7 @@ public void testCannotResignWithLargerEpochThanCurrentEpoch(boolean withKip853Rp RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertThrows(IllegalArgumentException.class, () -> context.client.resign(context.currentEpoch() + 1)); @@ -839,7 +859,7 @@ public void testInitializeAsCandidateFromStateStore(boolean withKip853Rpc) throw @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testInitializeAsCandidateAndBecomeLeader(boolean withKip853Rpc) throws Exception { + public void testInitializeAsUnattachedAndBecomeLeader(boolean withKip853Rpc) throws Exception { final int localId = randomReplicaId(); final int otherNodeId = localId + 1; Set voters = Set.of(localId, otherNodeId); @@ -847,13 +867,36 @@ public void testInitializeAsCandidateAndBecomeLeader(boolean withKip853Rpc) thro .withKip853Rpc(withKip853Rpc) .build(); - context.assertUnknownLeader(0); - context.time.sleep(2L * context.electionTimeoutMs()); + context.assertUnknownLeaderAndNoVotedCandidate(0); + context.pollUntilRequest(); + RaftRequest.Outbound request = context.assertSentFetchRequest(0, 0L, 0); + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().isVoter()); + + // receives a fetch response which does not specify who the leader is + context.time.sleep(context.electionTimeoutMs() / 2); + context.deliverResponse( + request.correlationId(), + request.destination(), 
+ context.fetchResponse(0, -1, MemoryRecords.EMPTY, -1, Errors.NOT_LEADER_OR_FOLLOWER) + ); + + // should remain unattached voter + context.client.poll(); + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().isVoter()); + // after election timeout should become prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.pollUntilRequest(); + assertTrue(context.client.quorum().isProspective()); + + // after receiving enough granted prevotes, should become candidate + context.expectAndGrantPreVotes(context.currentEpoch()); context.pollUntilRequest(); context.assertVotedCandidate(1, localId); - RaftRequest.Outbound request = context.assertSentVoteRequest(1, 0, 0L, 1); + request = context.assertSentVoteRequest(1, 0, 0L, 1); context.deliverResponse( request.correlationId(), request.destination(), @@ -891,20 +934,18 @@ public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree(boolean withKi final int secondNodeId = localId + 2; Set voters = Set.of(localId, firstNodeId, secondNodeId); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withVotedCandidate(2, ReplicaKey.of(localId, ReplicaKey.NO_DIRECTORY_ID)) .withKip853Rpc(withKip853Rpc) .build(); - - context.assertUnknownLeader(0); - context.time.sleep(2L * context.electionTimeoutMs()); - + assertTrue(context.client.quorum().isCandidate()); context.pollUntilRequest(); - context.assertVotedCandidate(1, localId); + context.assertVotedCandidate(2, localId); - RaftRequest.Outbound request = context.assertSentVoteRequest(1, 0, 0L, 2); + RaftRequest.Outbound request = context.assertSentVoteRequest(2, 0, 0L, 2); context.deliverResponse( request.correlationId(), request.destination(), - context.voteResponse(true, OptionalInt.empty(), 1) + context.voteResponse(true, OptionalInt.empty(), 2) ); VoteRequestData voteRequest = (VoteRequestData) request.data(); @@ -913,7 +954,7 @@ public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree(boolean withKi // Become leader after receiving the vote context.pollUntil(() -> context.log.endOffset().offset() == 1L); - context.assertElectedLeader(1, localId); + context.assertElectedLeader(2, localId); long electionTimestamp = context.time.milliseconds(); // Leader change record appended @@ -922,7 +963,7 @@ public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree(boolean withKi // Send BeginQuorumEpoch to voters context.client.poll(); - context.assertSentBeginQuorumEpochRequest(1, Set.of(firstNodeId, secondNodeId)); + context.assertSentBeginQuorumEpochRequest(2, Set.of(firstNodeId, secondNodeId)); Records records = context.log.read(0, Isolation.UNCOMMITTED).records; RecordBatch batch = records.batches().iterator().next(); @@ -934,6 +975,52 @@ public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree(boolean withKi Arrays.asList(voterId, localId), record.key(), record.value()); } + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testInitializeAsOnlyVoterWithEmptyElectionState(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, Set.of(localId)) + .withKip853Rpc(withKip853Rpc) + .build(); + context.assertElectedLeader(1, localId); + assertEquals(0L, context.log.endOffset().offset()); + assertTrue(context.client.quorum().isLeader()); + } + + @Test + public void testInitializeAsFollowerAndOnlyVoter() throws Exception { + int localId = randomReplicaId(); + RaftClientTestContext 
context = new RaftClientTestContext.Builder(localId, Set.of(localId)) + .withRaftProtocol(KIP_853_PROTOCOL) + .withElectedLeader(2, localId + 1) + .build(); + context.assertElectedLeader(3, localId); + assertEquals(0L, context.log.endOffset().offset()); + assertTrue(context.client.quorum().isLeader()); + } + + @Test + public void testInitializeAsCandidateAndOnlyVoter() throws Exception { + int localId = randomReplicaId(); + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, Set.of(localId)) + .withRaftProtocol(KIP_853_PROTOCOL) + .withVotedCandidate(2, ReplicaKey.of(localId, ReplicaKey.NO_DIRECTORY_ID)) + .build(); + context.assertElectedLeader(2, localId); + assertTrue(context.client.quorum().isLeader()); + } + + @Test + public void testInitializeAsResignedAndOnlyVoter() throws Exception { + int localId = randomReplicaId(); + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, Set.of(localId)) + .withRaftProtocol(KIP_853_PROTOCOL) + .withElectedLeader(2, localId) + .build(); + context.assertElectedLeader(3, localId); + assertTrue(context.client.quorum().isLeader()); + } + @ParameterizedTest @ValueSource(booleans = { true, false }) public void testHandleBeginQuorumRequest(boolean withKip853Rpc) throws Exception { @@ -950,7 +1037,7 @@ public void testHandleBeginQuorumRequest(boolean withKip853Rpc) throws Exception context.deliverRequest(context.beginEpochRequest(votedCandidateEpoch, otherNodeKey.id())); context.pollUntilResponse(); - context.assertElectedLeader(votedCandidateEpoch, otherNodeKey.id()); + context.assertElectedLeaderAndVotedKey(votedCandidateEpoch, otherNodeKey.id(), otherNodeKey); context.assertSentBeginQuorumEpochResponse( Errors.NONE, @@ -1032,9 +1119,7 @@ public void testEndQuorumIgnoredAsCandidateIfOlderEpoch(boolean withKip853Rpc) t .withKip853Rpc(withKip853Rpc) .build(); - // Sleep a little to ensure that we become a candidate - context.time.sleep(context.electionTimeoutMs() + jitterMs); - context.client.poll(); + context.unattachedToCandidate(); context.assertVotedCandidate(epoch, localId); context.deliverRequest( @@ -1048,7 +1133,7 @@ public void testEndQuorumIgnoredAsCandidateIfOlderEpoch(boolean withKip853Rpc) t context.client.poll(); context.assertSentEndQuorumEpochResponse(Errors.FENCED_LEADER_EPOCH, epoch, OptionalInt.empty()); - // We should still be candidate until expiration of election timeout + // Replica should still be candidate until expiration of election timeout context.time.sleep(context.electionTimeoutMs() + jitterMs - 1); context.client.poll(); context.assertVotedCandidate(epoch, localId); @@ -1058,10 +1143,10 @@ public void testEndQuorumIgnoredAsCandidateIfOlderEpoch(boolean withKip853Rpc) t context.client.poll(); context.assertVotedCandidate(epoch, localId); - // After backoff, we will become a candidate again + // After backoff, replica will become prospective again context.time.sleep(context.electionBackoffMaxMs); context.client.poll(); - context.assertVotedCandidate(epoch + 1, localId); + assertTrue(context.client.quorum().isProspective()); } @ParameterizedTest @@ -1076,7 +1161,7 @@ public void testEndQuorumIgnoredAsLeaderIfOlderEpoch(boolean withKip853Rpc) thro .withUnknownLeader(6) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // One of the voters may have sent EndQuorumEpoch from an earlier epoch @@ -1087,7 +1172,7 @@ public void testEndQuorumIgnoredAsLeaderIfOlderEpoch(boolean withKip853Rpc) thro context.pollUntilResponse(); 
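The four only-voter initialization tests above all encode the same rule: with a single voter there is no quorum to win over, so the replica can elect itself immediately, and it bumps the epoch whenever the persisted state already recorded an elected leader (including itself, the resigned case, or a stale remote id). The toy model below is a rough sketch of that rule using hypothetical names and record types, not Kafka's ElectionState or QuorumState; the four calls in main mirror the four test cases.

import java.util.OptionalInt;

final class SingleVoterInit {
    record ElectionState(int epoch, OptionalInt electedLeaderId, OptionalInt votedId) { }
    record LeaderAndEpoch(int leaderId, int epoch) { }

    // Decide the initial leader and epoch for a quorum whose only voter is localId.
    static LeaderAndEpoch initialize(int localId, ElectionState persisted) {
        if (persisted.votedId().isPresent() && persisted.electedLeaderId().isEmpty()) {
            // Initialized as a candidate that already voted for itself:
            // that vote wins the election in the same epoch.
            return new LeaderAndEpoch(localId, persisted.epoch());
        }
        // Empty state, a resigned local leader, or a stale remote leader:
        // bump the epoch and become leader of the new epoch.
        return new LeaderAndEpoch(localId, persisted.epoch() + 1);
    }

    public static void main(String[] args) {
        int localId = 0;
        // empty state -> leader at epoch 1
        System.out.println(initialize(localId, new ElectionState(0, OptionalInt.empty(), OptionalInt.empty())));
        // follower of localId + 1 at epoch 2 -> leader at epoch 3
        System.out.println(initialize(localId, new ElectionState(2, OptionalInt.of(localId + 1), OptionalInt.empty())));
        // candidate voted for itself at epoch 2 -> leader at epoch 2
        System.out.println(initialize(localId, new ElectionState(2, OptionalInt.empty(), OptionalInt.of(localId))));
        // resigned leader at epoch 2 -> leader at epoch 3
        System.out.println(initialize(localId, new ElectionState(2, OptionalInt.of(localId), OptionalInt.empty())));
    }
}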
context.assertSentEndQuorumEpochResponse(Errors.FENCED_LEADER_EPOCH, epoch, OptionalInt.of(localId)); - // We should still be leader as long as fetch timeout has not expired + // Replica should still be leader as long as fetch timeout has not expired context.time.sleep(context.fetchTimeoutMs - 1); context.client.poll(); context.assertElectedLeader(epoch, localId); @@ -1120,9 +1205,9 @@ public void testEndQuorumStartsNewElectionImmediatelyIfFollowerUnattached( context.pollUntilResponse(); context.assertSentEndQuorumEpochResponse(Errors.NONE, epoch, OptionalInt.of(voter2)); - // Should become a candidate immediately + // Should become a prospective immediately context.client.poll(); - context.assertVotedCandidate(epoch + 1, localId); + context.client.quorum().isProspective(); } @ParameterizedTest @@ -1144,7 +1229,7 @@ public void testAccumulatorClearedAfterBecomingFollower(boolean withKip853Rpc) t .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -1177,7 +1262,7 @@ public void testAccumulatorClearedAfterBecomingVoted(boolean withKip853Rpc) thro .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -1211,7 +1296,7 @@ public void testAccumulatorClearedAfterBecomingUnattached(boolean withKip853Rpc) .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); int epoch = context.currentEpoch(); @@ -1220,7 +1305,7 @@ public void testAccumulatorClearedAfterBecomingUnattached(boolean withKip853Rpc) context.deliverRequest(context.voteRequest(epoch + 1, otherNodeKey, epoch, 0L)); context.pollUntilResponse(); - context.assertUnknownLeader(epoch + 1); + context.assertUnknownLeaderAndNoVotedCandidate(epoch + 1); // Expect two calls one for the leader change control batch and one for the data batch Mockito.verify(memoryPool, Mockito.times(2)).release(buffer); } @@ -1240,7 +1325,7 @@ public void testChannelWokenUpIfLingerTimeoutReachedWithoutAppend(boolean withKi .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); assertEquals(1L, context.log.endOffset().offset()); @@ -1276,7 +1361,7 @@ public void testChannelWokenUpIfLingerTimeoutReachedDuringAppend(boolean withKip .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(OptionalInt.of(localId), context.currentLeader()); assertEquals(1L, context.log.endOffset().offset()); @@ -1323,7 +1408,8 @@ public void testHandleEndQuorumRequest(boolean withKip853Rpc) throws Exception { context.assertSentEndQuorumEpochResponse(Errors.NONE, leaderEpoch, OptionalInt.of(oldLeaderId)); context.client.poll(); - context.assertVotedCandidate(leaderEpoch + 1, localId); + assertTrue(context.client.quorum().isProspective()); + context.assertElectedLeader(leaderEpoch, oldLeaderId); } @ParameterizedTest @@ -1354,19 +1440,19 @@ public void testHandleEndQuorumRequestWithLowerPriorityToBecomeLeader(boolean wi // The election won't trigger by one round retry backoff context.time.sleep(1); - context.pollUntilRequest(); - + context.client.poll(); context.assertSentFetchRequest(leaderEpoch, 0, 0); - 
context.time.sleep(context.retryBackoffMs); - - context.pollUntilRequest(); + context.time.sleep(context.electionBackoffMaxMs); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); - List voteRequests = context.collectVoteRequests(leaderEpoch + 1, 0, 0); + context.client.poll(); + List voteRequests = context.collectPreVoteRequests(leaderEpoch, 0, 0); assertEquals(2, voteRequests.size()); - // Should have already done self-voting - context.assertVotedCandidate(leaderEpoch + 1, localId); + assertTrue(context.client.quorum().isProspective()); + assertEquals(leaderEpoch, context.currentEpoch()); } @ParameterizedTest @@ -1380,9 +1466,9 @@ public void testVoteRequestTimeout(boolean withKip853Rpc) throws Exception { RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) .withKip853Rpc(withKip853Rpc) .build(); - context.assertUnknownLeader(0); + context.assertUnknownLeaderAndNoVotedCandidate(0); - context.time.sleep(2L * context.electionTimeoutMs()); + context.unattachedToCandidate(); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); @@ -1475,6 +1561,61 @@ public void testHandleVoteRequestAsFollowerWithVotedCandidate(boolean withKip853 context.assertVotedCandidate(epoch, votedCandidateKey.id()); } + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testHandleVoteRequestAsProspective(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int epoch = 2; + ReplicaKey otherNodeKey = replicaKey(localId + 1, withKip853Rpc); + int electedLeaderId = localId + 2; + Set voters = Set.of(localId, otherNodeKey.id(), electedLeaderId); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withElectedLeader(epoch, electedLeaderId) + .withKip853Rpc(withKip853Rpc) + .build(); + + // Sleep a little to ensure that we become a prospective + context.time.sleep(context.fetchTimeoutMs); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + context.deliverRequest(context.voteRequest(epoch + 1, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.empty(), true); + assertTrue(context.client.quorum().isUnattachedAndVoted()); + assertEquals(epoch + 1, context.currentEpoch()); + assertFalse(context.client.quorum().hasLeader()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testHandleVoteRequestAsProspectiveWithVotedCandidate(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int epoch = 2; + ReplicaKey otherNodeKey = replicaKey(localId + 1, withKip853Rpc); + ReplicaKey votedCandidateKey = replicaKey(localId + 2, withKip853Rpc); + Set voters = Set.of(localId, otherNodeKey.id(), votedCandidateKey.id()); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withVotedCandidate(epoch, votedCandidateKey) + .withKip853Rpc(withKip853Rpc) + .build(); + + // Sleep a little to ensure that we become a prospective + context.time.sleep(context.electionTimeoutMs() * 2L); + context.client.poll(); + assertTrue(context.client.quorum().isProspectiveAndVoted()); + context.assertVotedCandidate(epoch, votedCandidateKey.id()); + + context.deliverRequest(context.voteRequest(epoch + 1, otherNodeKey, epoch, 1)); + context.pollUntilResponse(); + + context.assertSentVoteResponse(Errors.NONE, epoch + 1, OptionalInt.empty(), true); + context.assertVotedCandidate(epoch + 1, 
otherNodeKey.id()); + } + @ParameterizedTest @ValueSource(booleans = { true, false }) public void testHandleInvalidVoteRequestWithOlderEpoch(boolean withKip853Rpc) throws Exception { @@ -1492,7 +1633,7 @@ public void testHandleInvalidVoteRequestWithOlderEpoch(boolean withKip853Rpc) th context.pollUntilResponse(); context.assertSentVoteResponse(Errors.FENCED_LEADER_EPOCH, epoch, OptionalInt.empty(), false); - context.assertUnknownLeader(epoch); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); } @ParameterizedTest @@ -1528,7 +1669,7 @@ public void testLeaderIgnoreVoteRequestOnSameEpoch(boolean withKip853Rpc) throws .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int leaderEpoch = context.currentEpoch(); context.deliverRequest(context.voteRequest(leaderEpoch, otherNodeKey, leaderEpoch - 1, 1)); @@ -1551,7 +1692,7 @@ public void testListenerCommitCallbackAfterLeaderWrite(boolean withKip853Rpc) th .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // First poll has no high watermark advance @@ -1601,7 +1742,7 @@ public void testLeaderImmediatelySendsDivergingEpoch(boolean withKip853Rpc) thro .build(); // Start off as the leader - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Send a fetch request for an end offset and epoch which has diverged @@ -1640,11 +1781,11 @@ public void testCandidateIgnoreVoteRequestOnSameEpoch(boolean withKip853Rpc) thr @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testRetryElection(boolean withKip853Rpc) throws Exception { + public void testCandidateBackoffElection(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int otherNodeId = localId + 1; int epoch = 1; - int exponentialFactor = 85; // set it large enough so that we will bound on jitter + int exponentialFactor = 85; // set it large enough so that replica will bound on jitter Set voters = Set.of(localId, otherNodeId); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) @@ -1652,13 +1793,20 @@ public void testRetryElection(boolean withKip853Rpc) throws Exception { .withKip853Rpc(withKip853Rpc) .build(); - context.assertUnknownLeader(0); + context.assertUnknownLeaderAndNoVotedCandidate(0); - context.time.sleep(2L * context.electionTimeoutMs()); + context.unattachedToCandidate(); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); + CandidateState candidate = context.client.quorum().candidateStateOrThrow(); + assertEquals(1, candidate.retries()); + assertEquals( + context.electionTimeoutMs() + exponentialFactor, + candidate.remainingElectionTimeMs(context.time.milliseconds()) + ); + assertFalse(candidate.isBackingOff()); - // Quorum size is two. If the other member rejects, then we need to schedule a revote. + // Quorum size is two. If the other member rejects, then the local replica will lose the election. 
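testCandidateBackoffElection (continued below) and testCandidateElectionTimeout lean on a specific timing model: the candidate's election deadline is the configured election timeout plus a random jitter, a rejected election backs off for electionBackoffMaxMs before the replica drops back to prospective, and the retry counter carries across that transition. The sketch below is a hypothetical illustration of that model; the class name and the jitter bound are assumptions, not Kafka's CandidateState.

import java.util.Random;

final class ElectionTimer {
    private final int electionTimeoutMs;
    private final Random random;
    private long electionDeadlineMs;
    private long backoffDeadlineMs = -1;
    private int retries;

    ElectionTimer(int electionTimeoutMs, Random random, long nowMs) {
        this.electionTimeoutMs = electionTimeoutMs;
        this.random = random;
        startElection(nowMs); // first campaign, retries becomes 1
    }

    void startElection(long nowMs) {
        retries++;
        backoffDeadlineMs = -1;
        // deadline = timeout + jitter, matching the "electionTimeoutMs() + jitter" assertions
        electionDeadlineMs = nowMs + electionTimeoutMs + random.nextInt(electionTimeoutMs);
    }

    void electionLost(long nowMs, int electionBackoffMaxMs) {
        backoffDeadlineMs = nowMs + electionBackoffMaxMs;
    }

    boolean isBackingOff() { return backoffDeadlineMs >= 0; }
    int retries() { return retries; }
    long remainingElectionTimeMs(long nowMs) { return Math.max(0, electionDeadlineMs - nowMs); }
    long remainingBackoffMs(long nowMs) { return Math.max(0, backoffDeadlineMs - nowMs); }

    public static void main(String[] args) {
        long now = 0L;
        ElectionTimer timer = new ElectionTimer(1000, new Random(42), now);
        System.out.println(timer.retries() + " " + timer.remainingElectionTimeMs(now)); // 1, timeout + jitter
        timer.electionLost(now, 5000);
        System.out.println(timer.isBackingOff() + " " + timer.remainingBackoffMs(now)); // true, 5000
        timer.startElection(now + 5000); // drop to prospective and campaign again: retries -> 2
        System.out.println(timer.retries());
    }
}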
RaftRequest.Outbound request = context.assertSentVoteRequest(epoch, 0, 0L, 1); context.deliverResponse( request.correlationId(), @@ -1667,22 +1815,87 @@ public void testRetryElection(boolean withKip853Rpc) throws Exception { ); context.client.poll(); + assertTrue(candidate.isBackingOff()); + assertEquals( + context.electionBackoffMaxMs, + candidate.remainingBackoffMs(context.time.milliseconds()) + ); - // All nodes have rejected our candidacy, but we should still remember that we had voted + // Election is lost, but local replica should still remember that it has voted context.assertVotedCandidate(epoch, localId); - // Even though our candidacy was rejected, we will backoff for jitter period - // before we bump the epoch and start a new election. + // Even though candidacy was rejected, local replica will backoff for jitter period + // before transitioning to prospective and starting a new election. context.time.sleep(context.electionBackoffMaxMs - 1); context.client.poll(); context.assertVotedCandidate(epoch, localId); - // After jitter expires, we become a candidate again + // After jitter expires, become a prospective again context.time.sleep(1); context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + ProspectiveState prospective = context.client.quorum().prospectiveStateOrThrow(); + assertEquals(2, prospective.retries()); context.pollUntilRequest(); + request = context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + assertEquals( + context.electionTimeoutMs() + exponentialFactor, + prospective.remainingElectionTimeMs(context.time.milliseconds()) + ); + + // After becoming candidate again, retries should be 2 + context.deliverResponse( + request.correlationId(), + request.destination(), + context.voteResponse(true, OptionalInt.empty(), 1) + ); + context.client.poll(); context.assertVotedCandidate(epoch + 1, localId); - context.assertSentVoteRequest(epoch + 1, 0, 0L, 1); + candidate = context.client.quorum().candidateStateOrThrow(); + assertEquals(2, candidate.retries()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testCandidateElectionTimeout(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int otherNodeId = localId + 1; + int epoch = 1; + int jitter = 100; + Set voters = Set.of(localId, otherNodeId); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .updateRandom(r -> r.mockNextInt(jitter)) + .withKip853Rpc(withKip853Rpc) + .build(); + + context.assertUnknownLeaderAndNoVotedCandidate(0); + + context.unattachedToCandidate(); + context.pollUntilRequest(); + context.assertVotedCandidate(epoch, localId); + context.assertSentVoteRequest(epoch, 0, 0L, 1); + CandidateState candidate = context.client.quorum().candidateStateOrThrow(); + assertEquals(1, candidate.retries()); + assertEquals( + context.electionTimeoutMs() + jitter, + candidate.remainingElectionTimeMs(context.time.milliseconds()) + ); + assertFalse(candidate.isBackingOff()); + + // If election times out, replica transition to prospective without any additional backoff + context.time.sleep(candidate.remainingElectionTimeMs(context.time.milliseconds())); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + + ProspectiveState prospective = context.client.quorum().prospectiveStateOrThrow(); + assertEquals(2, prospective.retries()); + context.pollUntilRequest(); + context.assertSentPreVoteRequest(epoch, 0, 0L, 1); + assertEquals( + context.electionTimeoutMs() + jitter, + 
prospective.remainingElectionTimeMs(context.time.milliseconds()) + ); } @ParameterizedTest @@ -1728,7 +1941,7 @@ public void testInitializeAsFollowerNonEmptyLog(boolean withKip853Rpc) throws Ex @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testVoterBecomeCandidateAfterFetchTimeout(boolean withKip853Rpc) throws Exception { + public void testVoterBecomeProspectiveAfterFetchTimeout(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int otherNodeId = localId + 1; int epoch = 5; @@ -1746,14 +1959,15 @@ public void testVoterBecomeCandidateAfterFetchTimeout(boolean withKip853Rpc) thr context.assertSentFetchRequest(epoch, 1L, lastEpoch); context.time.sleep(context.fetchTimeoutMs); - context.pollUntilRequest(); - context.assertSentVoteRequest(epoch + 1, lastEpoch, 1L, 1); - context.assertVotedCandidate(epoch + 1, localId); + context.client.poll(); + assertTrue(context.client.quorum().isProspective()); + context.client.poll(); + context.assertSentPreVoteRequest(epoch, lastEpoch, 1L, 1); } @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testFollowerAsObserverDoesNotBecomeCandidateAfterFetchTimeout(boolean withKip853Rpc) throws Exception { + public void testFollowerAsObserverDoesNotBecomeProspectiveAfterFetchTimeout(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int otherNodeId = localId + 1; int epoch = 5; @@ -1783,7 +1997,7 @@ public void testFollowerAsObserverDoesNotBecomeCandidateAfterFetchTimeout(boolea @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testUnattachedAsObserverDoesNotBecomeCandidateAfterElectionTimeout(boolean withKip853Rpc) throws Exception { + public void testUnattachedAsObserverDoesNotBecomeProspectiveAfterElectionTimeout(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int otherNodeId = localId + 1; int epoch = 5; @@ -1812,12 +2026,43 @@ public void testUnattachedAsObserverDoesNotBecomeCandidateAfterElectionTimeout(b context.time.sleep(context.electionTimeoutMs() * 2); context.pollUntilRequest(); - // observer cannot transition to candidate though + // observer cannot transition to prospective though assertTrue(context.client.quorum().isUnattached()); context.assertSentFetchRequest(epoch + 1, 0L, 0); assertEquals(0, context.channel.drainSendQueue().size()); } + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testUnattachedAsVoterCanBecomeFollowerAfterFindingLeader(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int otherNodeId = localId + 1; + int leaderNodeId = localId + 2; + int epoch = 5; + Set voters = Set.of(localId, otherNodeId, leaderNodeId); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withUnknownLeader(epoch) + .withKip853Rpc(withKip853Rpc) + .build(); + + context.pollUntilRequest(); + RaftRequest.Outbound request = context.assertSentFetchRequest(epoch, 0L, 0); + assertTrue(context.client.quorum().isUnattached()); + assertTrue(context.client.quorum().isVoter()); + + // receives a fetch response specifying who the leader is + Errors responseError = (request.destination().id() == otherNodeId) ? 
Errors.NOT_LEADER_OR_FOLLOWER : Errors.NONE; + context.deliverResponse( + request.correlationId(), + request.destination(), + context.fetchResponse(epoch, leaderNodeId, MemoryRecords.EMPTY, 0L, responseError) + ); + + context.client.poll(); + assertTrue(context.client.quorum().isFollower()); + } + @ParameterizedTest @ValueSource(booleans = { true, false }) public void testInitializeObserverNoPreviousState(boolean withKip853Rpc) throws Exception { @@ -1935,7 +2180,7 @@ public void testObserverSendDiscoveryFetchAfterFetchTimeout(boolean withKip853Rp @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testObserverHandleRetryFetchtToBootstrapServer(boolean withKip853Rpc) throws Exception { + public void testObserverHandleRetryFetchToBootstrapServer(boolean withKip853Rpc) throws Exception { // This test tries to check that KRaft is able to handle a retrying Fetch request to // a boostrap server after a Fetch request to the leader. int localId = randomReplicaId(); @@ -2078,7 +2323,7 @@ public void testInvalidFetchRequest(boolean withKip853Rpc) throws Exception { .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.deliverRequest(context.fetchRequest(epoch, otherNodeKey, -5L, 0, 0)); @@ -2120,7 +2365,11 @@ public void testLeaderStateUpdateWithDifferentFetchRequestVersions(short version int epoch = 5; Set voters = Set.of(localId, otherNodeKey.id()); - RaftClientTestContext context = RaftClientTestContext.initializeAsLeader(localId, voters, epoch); + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withUnknownLeader(epoch - 1) + .build(); + context.assertUnknownLeaderAndNoVotedCandidate(epoch - 1); + context.unattachedToLeader(); // First poll has no high watermark advance. 
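Throughout this file the old becomeLeader() helper is replaced by unattachedToLeader(), and an expired timeout now lands in a prospective (pre-vote) phase before a real candidacy. The toy state machine below only makes that extra hop explicit: the pre-vote round runs at the current epoch, consistent with the assertSentPreVoteRequest(epoch, ...) assertions, and the epoch is bumped only when the replica becomes a real candidate. It is a hypothetical sketch, not Kafka's QuorumState.

enum RaftRole { UNATTACHED, FOLLOWER, PROSPECTIVE, CANDIDATE, LEADER }

final class ToyQuorum {
    private RaftRole role = RaftRole.UNATTACHED;
    private int epoch = 0;

    RaftRole role() { return role; }
    int epoch() { return epoch; }

    // An expired election/fetch timeout no longer jumps straight to CANDIDATE:
    // the replica first runs a pre-vote round as PROSPECTIVE, epoch unchanged.
    void electionTimeoutExpired() {
        if (role == RaftRole.UNATTACHED || role == RaftRole.FOLLOWER) {
            role = RaftRole.PROSPECTIVE;
        }
    }

    // Only after a majority grants pre-votes does the replica bump the epoch
    // and start a real election as CANDIDATE.
    void preVotesGranted() {
        if (role == RaftRole.PROSPECTIVE) {
            epoch++;
            role = RaftRole.CANDIDATE;
        }
    }

    void votesGranted() {
        if (role == RaftRole.CANDIDATE) {
            role = RaftRole.LEADER;
        }
    }

    public static void main(String[] args) {
        ToyQuorum q = new ToyQuorum();
        q.electionTimeoutExpired(); // UNATTACHED -> PROSPECTIVE, pre-vote at epoch 0
        q.preVotesGranted();        // PROSPECTIVE -> CANDIDATE at epoch 1
        q.votesGranted();           // CANDIDATE -> LEADER
        System.out.println(q.role() + " at epoch " + q.epoch()); // LEADER at epoch 1
    }
}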
context.client.poll(); @@ -2150,7 +2399,7 @@ public void testFetchRequestClusterIdValidation(boolean withKip853Rpc) throws Ex .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // valid cluster id is accepted @@ -2185,7 +2434,7 @@ public void testVoteRequestClusterIdValidation(boolean withKip853Rpc) throws Exc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // valid cluster id is accepted @@ -2194,17 +2443,17 @@ public void testVoteRequestClusterIdValidation(boolean withKip853Rpc) throws Exc context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), false); // null cluster id is accepted - context.deliverRequest(context.voteRequest(null, epoch, otherNodeKey, 0, 0)); + context.deliverRequest(context.voteRequest(null, epoch, otherNodeKey, 0, 0, false)); context.pollUntilResponse(); context.assertSentVoteResponse(Errors.NONE, epoch, OptionalInt.of(localId), false); // empty cluster id is rejected - context.deliverRequest(context.voteRequest("", epoch, otherNodeKey, 0, 0)); + context.deliverRequest(context.voteRequest("", epoch, otherNodeKey, 0, 0, false)); context.pollUntilResponse(); context.assertSentVoteResponse(Errors.INCONSISTENT_CLUSTER_ID); // invalid cluster id is rejected - context.deliverRequest(context.voteRequest("invalid-uuid", epoch, otherNodeKey, 0, 0)); + context.deliverRequest(context.voteRequest("invalid-uuid", epoch, otherNodeKey, 0, 0, false)); context.pollUntilResponse(); context.assertSentVoteResponse(Errors.INCONSISTENT_CLUSTER_ID); } @@ -2219,7 +2468,7 @@ public void testInvalidVoterReplicaVoteRequest() throws Exception { .withKip853Rpc(true) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // invalid voter id is rejected @@ -2230,7 +2479,8 @@ public void testInvalidVoterReplicaVoteRequest() throws Exception { otherNodeKey, ReplicaKey.of(10, Uuid.randomUuid()), epoch, - 100 + 100, + false ) ); context.pollUntilResponse(); @@ -2244,7 +2494,8 @@ public void testInvalidVoterReplicaVoteRequest() throws Exception { otherNodeKey, ReplicaKey.of(0, Uuid.randomUuid()), epoch, - 100 + 100, + false ) ); context.pollUntilResponse(); @@ -2263,7 +2514,7 @@ public void testInvalidVoterReplicaBeginQuorumEpochRequest() throws Exception { .withUnknownLeader(epoch - 1) .withKip853Rpc(true) .build(); - context.assertUnknownLeader(epoch - 1); + context.assertUnknownLeaderAndNoVotedCandidate(epoch - 1); // Leader voter3 sends a begin quorum epoch request with incorrect voter id context.deliverRequest( @@ -2316,7 +2567,7 @@ public void testBeginQuorumEpochRequestClusterIdValidation(boolean withKip853Rpc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // valid cluster id is accepted @@ -2352,7 +2603,7 @@ public void testEndQuorumEpochRequestClusterIdValidation(boolean withKip853Rpc) .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // valid cluster id is accepted @@ -2388,7 +2639,7 @@ public void testLeaderAcceptVoteFromObserver(boolean withKip853Rpc) throws Excep .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); ReplicaKey observerKey = replicaKey(localId + 2, withKip853Rpc); @@ -2458,7 +2709,7 @@ public 
void testPurgatoryFetchTimeout(boolean withKip853Rpc) throws Exception { .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Follower sends a fetch which cannot be satisfied immediately @@ -2486,7 +2737,7 @@ public void testPurgatoryFetchSatisfiedByWrite(boolean withKip853Rpc) throws Exc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Follower sends a fetch which cannot be satisfied immediately @@ -2517,7 +2768,7 @@ public void testPurgatoryFetchCompletedByFollowerTransition(boolean withKip853Rp .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Follower sends a fetch which cannot be satisfied immediately @@ -2542,7 +2793,7 @@ public void testPurgatoryFetchCompletedByFollowerTransition(boolean withKip853Rp @ParameterizedTest @ValueSource(booleans = { true, false }) - public void testFetchResponseIgnoredAfterBecomingCandidate(boolean withKip853Rpc) throws Exception { + public void testFetchResponseIgnoredAfterBecomingProspective(boolean withKip853Rpc) throws Exception { int localId = randomReplicaId(); int otherNodeId = localId + 1; int epoch = 5; @@ -2559,10 +2810,10 @@ public void testFetchResponseIgnoredAfterBecomingCandidate(boolean withKip853Rpc context.pollUntilRequest(); RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(epoch, 0L, 0); - // Now await the fetch timeout and become a candidate + // Now await the fetch timeout and become prospective context.time.sleep(context.fetchTimeoutMs); context.client.poll(); - context.assertVotedCandidate(epoch + 1, localId); + assertTrue(context.client.quorum().isProspective()); // The fetch response from the old leader returns, but it should be ignored Records records = context.buildBatch(0L, 3, Arrays.asList("a", "b")); @@ -2574,7 +2825,7 @@ public void testFetchResponseIgnoredAfterBecomingCandidate(boolean withKip853Rpc context.client.poll(); assertEquals(0, context.log.endOffset().offset()); - context.assertVotedCandidate(epoch + 1, localId); + context.expectAndGrantPreVotes(epoch); } @ParameterizedTest @@ -2628,34 +2879,36 @@ public void testVoteResponseIgnoredAfterBecomingFollower(boolean withKip853Rpc) Set voters = Set.of(localId, voter2, voter3); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) - .withUnknownLeader(epoch - 1) + .withUnknownLeader(epoch) .withKip853Rpc(withKip853Rpc) .build(); - context.assertUnknownLeader(epoch - 1); - - // Sleep a little to ensure that we become a candidate - context.time.sleep(context.electionTimeoutMs() * 2L); + context.assertUnknownLeaderAndNoVotedCandidate(epoch); + context.unattachedToCandidate(); // Wait until the vote requests are inflight context.pollUntilRequest(); - context.assertVotedCandidate(epoch, localId); - List voteRequests = context.collectVoteRequests(epoch, 0, 0); + context.assertVotedCandidate(epoch + 1, localId); + List voteRequests = context.collectVoteRequests(epoch + 1, 0, 0); assertEquals(2, voteRequests.size()); // While the vote requests are still inflight, we receive a BeginEpoch for the same epoch - context.deliverRequest(context.beginEpochRequest(epoch, voter3)); + context.deliverRequest(context.beginEpochRequest(epoch + 1, voter3)); context.client.poll(); - context.assertElectedLeader(epoch, voter3); + context.assertElectedLeaderAndVotedKey( + epoch + 1, + voter3, + 
ReplicaKey.of(localId, ReplicaKey.NO_DIRECTORY_ID) + ); // The vote requests now return and should be ignored - VoteResponseData voteResponse1 = context.voteResponse(false, OptionalInt.empty(), epoch); + VoteResponseData voteResponse1 = context.voteResponse(true, OptionalInt.empty(), epoch + 1); context.deliverResponse( voteRequests.get(0).correlationId(), voteRequests.get(0).destination(), voteResponse1 ); - VoteResponseData voteResponse2 = context.voteResponse(false, OptionalInt.of(voter3), epoch); + VoteResponseData voteResponse2 = context.voteResponse(true, OptionalInt.of(voter3), epoch + 1); context.deliverResponse( voteRequests.get(1).correlationId(), voteRequests.get(1).destination(), @@ -2663,7 +2916,85 @@ public void testVoteResponseIgnoredAfterBecomingFollower(boolean withKip853Rpc) ); context.client.poll(); - context.assertElectedLeader(epoch, voter3); + context.assertElectedLeaderAndVotedKey( + epoch + 1, + voter3, + ReplicaKey.of(localId, ReplicaKey.NO_DIRECTORY_ID) + ); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testFollowerLeaderRediscoveryAfterBrokerNotAvailableError(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int leaderId = localId + 1; + int otherNodeId = localId + 2; + int epoch = 5; + Set voters = Set.of(leaderId, localId, otherNodeId); + List bootstrapServers = voters + .stream() + .map(RaftClientTestContext::mockAddress) + .collect(Collectors.toList()); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withBootstrapServers(Optional.of(bootstrapServers)) + .withKip853Rpc(withKip853Rpc) + .withElectedLeader(epoch, leaderId) + .build(); + + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest1 = context.assertSentFetchRequest(); + assertEquals(leaderId, fetchRequest1.destination().id()); + context.assertFetchRequestData(fetchRequest1, epoch, 0L, 0); + + context.deliverResponse( + fetchRequest1.correlationId(), + fetchRequest1.destination(), + context.fetchResponse(epoch, -1, MemoryRecords.EMPTY, -1, Errors.BROKER_NOT_AVAILABLE) + ); + context.pollUntilRequest(); + + // We should retry the Fetch against the other voter since the original + // voter connection will be backing off. 
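Both follower-rediscovery tests added here encode the same routing rule: keep fetching from the known leader, but once that connection is backing off after an error or a request timeout, send the next Fetch to a bootstrap server so the leader can be rediscovered. A minimal, hypothetical sketch of that choice follows; the names are made up and this is not the KafkaRaftClient request manager.

import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;

final class FetchDestinationChooser {
    // Pick a node id to send the next Fetch to: the leader if its connection
    // is healthy, otherwise any bootstrap server that is not backing off.
    static Optional<Integer> choose(
        Optional<Integer> leaderId,
        List<Integer> bootstrapIds,
        Predicate<Integer> isBackingOff
    ) {
        if (leaderId.isPresent() && !isBackingOff.test(leaderId.get())) {
            return leaderId;
        }
        return bootstrapIds.stream().filter(id -> !isBackingOff.test(id)).findFirst();
    }

    public static void main(String[] args) {
        // Leader 1 is backing off after BROKER_NOT_AVAILABLE or a request timeout,
        // so the follower retries against bootstrap server 2.
        Optional<Integer> dest = choose(Optional.of(1), List.of(2, 3), id -> id == 1);
        System.out.println(dest); // Optional[2]
    }
}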
+ RaftRequest.Outbound fetchRequest2 = context.assertSentFetchRequest(); + assertNotEquals(leaderId, fetchRequest2.destination().id()); + assertTrue(context.bootstrapIds.contains(fetchRequest2.destination().id())); + context.assertFetchRequestData(fetchRequest2, epoch, 0L, 0); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testFollowerLeaderRediscoveryAfterRequestTimeout(boolean withKip853Rpc) throws Exception { + int localId = randomReplicaId(); + int leaderId = localId + 1; + int otherNodeId = localId + 2; + int epoch = 5; + Set voters = Set.of(leaderId, localId, otherNodeId); + List bootstrapServers = voters + .stream() + .map(RaftClientTestContext::mockAddress) + .collect(Collectors.toList()); + + RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .withBootstrapServers(Optional.of(bootstrapServers)) + .withKip853Rpc(withKip853Rpc) + .withElectedLeader(epoch, leaderId) + .build(); + + context.pollUntilRequest(); + RaftRequest.Outbound fetchRequest1 = context.assertSentFetchRequest(); + assertEquals(leaderId, fetchRequest1.destination().id()); + context.assertFetchRequestData(fetchRequest1, epoch, 0L, 0); + + context.time.sleep(context.requestTimeoutMs()); + context.pollUntilRequest(); + + // We should retry the Fetch against the other voter since the original + // voter connection will be backing off. + RaftRequest.Outbound fetchRequest2 = context.assertSentFetchRequest(); + assertNotEquals(leaderId, fetchRequest2.destination().id()); + assertTrue(context.bootstrapIds.contains(fetchRequest2.destination().id())); + context.assertFetchRequestData(fetchRequest2, epoch, 0L, 0); } @ParameterizedTest @@ -2705,12 +3036,10 @@ public void testObserverLeaderRediscoveryAfterBrokerNotAvailableError(boolean wi assertTrue(context.bootstrapIds.contains(fetchRequest2.destination().id())); context.assertFetchRequestData(fetchRequest2, epoch, 0L, 0); - Errors error = fetchRequest2.destination().id() == leaderId ? 
- Errors.NONE : Errors.NOT_LEADER_OR_FOLLOWER; context.deliverResponse( fetchRequest2.correlationId(), fetchRequest2.destination(), - context.fetchResponse(epoch, leaderId, MemoryRecords.EMPTY, 0L, error) + context.fetchResponse(epoch, leaderId, MemoryRecords.EMPTY, 0L, Errors.NOT_LEADER_OR_FOLLOWER) ); context.client.poll(); @@ -2773,7 +3102,7 @@ public void testLeaderGracefulShutdown(boolean withKip853Rpc) throws Exception { .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Now shutdown @@ -2821,7 +3150,7 @@ public void testEndQuorumEpochSentBasedOnFetchOffset(boolean withKip853Rpc) thro .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // The lagging follower fetches first @@ -2904,7 +3233,7 @@ public void testDescribeQuorumWithOnlyStaticVoters(boolean withKip853Rpc) throws .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Describe quorum response will not include directory ids @@ -2955,7 +3284,7 @@ public void testDescribeQuorumWithFollowers(boolean withKip853Rpc, boolean withB } RaftClientTestContext context = builder.build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Describe quorum response before any fetches made @@ -3061,7 +3390,7 @@ public void testDescribeQuorumWithObserver(boolean withKip853Rpc, boolean withBo } RaftClientTestContext context = builder.build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Update HW to non-initial value @@ -3203,7 +3532,7 @@ public void testDescribeQuorumNonMonotonicFollowerFetch(boolean withKip853Rpc, b } RaftClientTestContext context = builder.build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Update HW to non-initial value @@ -3274,7 +3603,7 @@ public void testStaticVotersIgnoredWithBootstrapSnapshot(boolean withKip853Rpc) .withBootstrapSnapshot(Optional.of(voterSet)) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // check describe quorum response has both followers context.deliverRequest(context.describeQuorumRequest()); @@ -3314,7 +3643,7 @@ public void testLeaderGracefulShutdownTimeout(boolean withKip853Rpc) throws Exce .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); // Now shutdown @@ -3380,7 +3709,7 @@ public void testObserverGracefulShutdown(boolean withKip853Rpc) throws Exception .withKip853Rpc(withKip853Rpc) .build(); context.client.poll(); - context.assertUnknownLeader(5); + context.assertUnknownLeaderAndNoVotedCandidate(5); // Observer shutdown should complete immediately even if the // current leader is unknown @@ -3555,7 +3884,7 @@ public void testFetchShouldBeTreatedAsLeaderAcknowledgement(boolean withKip853Rp .withKip853Rpc(withKip853Rpc) .build(); - context.time.sleep(context.electionTimeoutMs()); + context.unattachedToCandidate(); context.expectAndGrantVotes(epoch); context.pollUntilRequest(); @@ -3782,6 +4111,7 @@ public void testClusterAuthorizationFailedInBeginQuorumEpoch(boolean withKip853R .build(); context.time.sleep(context.electionTimeoutMs()); + context.expectAndGrantPreVotes(epoch - 1); context.expectAndGrantVotes(epoch); context.pollUntilRequest(); @@ -3809,8 
+4139,8 @@ public void testClusterAuthorizationFailedInVote(boolean withKip853Rpc) throws E .withKip853Rpc(withKip853Rpc) .build(); - // Sleep a little to ensure that we become a candidate - context.time.sleep(context.electionTimeoutMs() * 2L); + // Become a candidate + context.unattachedToCandidate(); context.pollUntilRequest(); context.assertVotedCandidate(epoch, localId); @@ -3834,7 +4164,7 @@ public void testClusterAuthorizationFailedInEndQuorumEpoch(boolean withKip853Rpc .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); int epoch = context.currentEpoch(); context.client.shutdown(5000); @@ -3861,7 +4191,7 @@ public void testHandleLeaderChangeFiresAfterListenerReachesEpochStartOffsetOnEmp .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); context.client.poll(); int epoch = context.currentEpoch(); @@ -3918,7 +4248,7 @@ public void testHandleLeaderChangeFiresAfterListenerReachesEpochStartOffset( .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); context.client.poll(); // After becoming leader, we expect the `LeaderChange` record to be appended @@ -3984,7 +4314,7 @@ public void testLateRegisteredListenerCatchesUp(boolean withKip853Rpc) throws Ex .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); context.client.poll(); assertEquals(10L, context.log.endOffset().offset()); @@ -4027,7 +4357,7 @@ public void testReregistrationChangesListenerContext(boolean withKip853Rpc) thro .withKip853Rpc(withKip853Rpc) .build(); - context.becomeLeader(); + context.unattachedToLeader(); context.client.poll(); assertEquals(10L, context.log.endOffset().offset()); @@ -4129,7 +4459,7 @@ public void testHandleCommitCallbackFiresInVotedState(boolean withKip853Rpc) thr .build(); // Start off as the leader and receive a fetch to initialize the high watermark - context.becomeLeader(); + context.unattachedToLeader(); context.deliverRequest(context.fetchRequest(epoch, otherNodeKey, 10L, epoch, 500)); context.client.poll(); assertEquals(OptionalLong.of(10L), context.client.highWatermark()); @@ -4176,7 +4506,7 @@ public void testHandleCommitCallbackFiresInCandidateState(boolean withKip853Rpc) .build(); // Start off as the leader and receive a fetch to initialize the high watermark - context.becomeLeader(); + context.unattachedToLeader(); assertEquals(10L, context.log.endOffset().offset()); context.deliverRequest(context.fetchRequest(epoch, otherNodeKey, 10L, epoch, 0)); @@ -4187,12 +4517,12 @@ public void testHandleCommitCallbackFiresInCandidateState(boolean withKip853Rpc) // Now we receive a vote request which transitions us to the 'unattached' state context.deliverRequest(context.voteRequest(epoch + 1, otherNodeKey, epoch, 9L)); context.pollUntilResponse(); - context.assertUnknownLeader(epoch + 1); + context.assertUnknownLeaderAndNoVotedCandidate(epoch + 1); assertEquals(OptionalLong.of(10L), context.client.highWatermark()); - // Timeout the election and become candidate + // Timeout the election and become prospective then candidate + context.unattachedToCandidate(); int candidateEpoch = epoch + 2; - context.time.sleep(context.electionTimeoutMs() * 2L); context.client.poll(); context.assertVotedCandidate(candidateEpoch, localId); @@ -4368,7 +4698,7 @@ static ReplicaKey replicaKey(int id, boolean withDirectoryId) { return ReplicaKey.of(id, directoryId); } - private static int randomReplicaId() { + static int randomReplicaId() { return 
ThreadLocalRandom.current().nextInt(1025); } } diff --git a/raft/src/test/java/org/apache/kafka/raft/LeaderStateTest.java b/raft/src/test/java/org/apache/kafka/raft/LeaderStateTest.java index 696a926f50a5d..ce3cef11ef8fe 100644 --- a/raft/src/test/java/org/apache/kafka/raft/LeaderStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/LeaderStateTest.java @@ -575,15 +575,13 @@ public void testGrantVote(boolean isLogUpToDate) { 1 ); - assertFalse( - state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) - ); - assertFalse( - state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) - ); - assertFalse( - state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) - ); + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true)); + + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); } @ParameterizedTest diff --git a/raft/src/test/java/org/apache/kafka/raft/MockLog.java b/raft/src/test/java/org/apache/kafka/raft/MockLog.java index 29281fa633fc7..a7a8e89a88cfa 100644 --- a/raft/src/test/java/org/apache/kafka/raft/MockLog.java +++ b/raft/src/test/java/org/apache/kafka/raft/MockLog.java @@ -167,7 +167,7 @@ private Optional metadataForOffset(long offset) { } private void assertValidHighWatermarkMetadata(LogOffsetMetadata offsetMetadata) { - if (!offsetMetadata.metadata().isPresent()) { + if (offsetMetadata.metadata().isEmpty()) { return; } @@ -490,6 +490,18 @@ public Optional createNewSnapshot(OffsetAndEpoch snapshotId) ); } + long baseOffset = read(snapshotId.offset(), Isolation.COMMITTED).startOffsetMetadata.offset(); + if (snapshotId.offset() != baseOffset) { + throw new IllegalArgumentException( + String.format( + "Cannot create snapshot at offset (%s) because it is not batch aligned. 
" + + "The batch containing the requested offset has a base offset of (%s)", + snapshotId.offset(), + baseOffset + ) + ); + } + return createNewSnapshotUnchecked(snapshotId); } diff --git a/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java b/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java index 08e19866d9bd1..8306e103258e1 100644 --- a/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java @@ -450,13 +450,23 @@ public void testCreateSnapshotValidation() { // Test snapshot id for the first epoch log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, firstEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, firstEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(1, firstEpoch)).get().close(); // Test snapshot id for the second epoch log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords, secondEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords - 1, secondEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, secondEpoch)).get().close(); + } + + @Test + public void testCreateSnapshotInMiddleOfBatch() { + int numberOfRecords = 10; + int epoch = 1; + + appendBatch(numberOfRecords, epoch); + log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)); + + assertThrows( + IllegalArgumentException.class, + () -> log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, epoch)) + ); } @Test diff --git a/raft/src/test/java/org/apache/kafka/raft/MockNetworkChannel.java b/raft/src/test/java/org/apache/kafka/raft/MockNetworkChannel.java index c8e732e880559..47785fab4be01 100644 --- a/raft/src/test/java/org/apache/kafka/raft/MockNetworkChannel.java +++ b/raft/src/test/java/org/apache/kafka/raft/MockNetworkChannel.java @@ -66,7 +66,7 @@ public List drainSentRequests(Optional apiKeyFilt Iterator iterator = sendQueue.iterator(); while (iterator.hasNext()) { RaftRequest.Outbound request = iterator.next(); - if (!apiKeyFilter.isPresent() || request.data().apiKey() == apiKeyFilter.get().id) { + if (apiKeyFilter.isEmpty() || request.data().apiKey() == apiKeyFilter.get().id) { awaitingResponse.put(request.correlationId(), request); requests.add(request); iterator.remove(); diff --git a/raft/src/test/java/org/apache/kafka/raft/ProspectiveStateTest.java b/raft/src/test/java/org/apache/kafka/raft/ProspectiveStateTest.java new file mode 100644 index 0000000000000..d74eba4ab22e0 --- /dev/null +++ b/raft/src/test/java/org/apache/kafka/raft/ProspectiveStateTest.java @@ -0,0 +1,480 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.Set; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ProspectiveStateTest { + private final ReplicaKey localReplicaKey = ReplicaKey.of(0, Uuid.randomUuid()); + private final Endpoints leaderEndpoints = Endpoints.fromInetSocketAddresses( + Collections.singletonMap( + ListenerName.normalised("CONTROLLER"), + InetSocketAddress.createUnresolved("mock-host-3", 1234) + ) + ); + private final int epoch = 5; + private final MockTime time = new MockTime(); + private final int electionTimeoutMs = 10000; + private final LogContext logContext = new LogContext(); + private final int localId = 0; + private final int votedId = 1; + private final Uuid votedDirectoryId = Uuid.randomUuid(); + private final ReplicaKey votedKeyWithDirectoryId = ReplicaKey.of(votedId, votedDirectoryId); + private final ReplicaKey votedKeyWithoutDirectoryId = ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID); + + private ProspectiveState newProspectiveState( + VoterSet voters, + OptionalInt leaderId, + Optional votedKey + ) { + return new ProspectiveState( + time, + localReplicaKey.id(), + epoch, + leaderId, + leaderId.isPresent() ? 
leaderEndpoints : Endpoints.empty(), + votedKey, + voters, + Optional.empty(), + 1, + electionTimeoutMs, + logContext + ); + } + + private ProspectiveState newProspectiveState(VoterSet voters) { + return new ProspectiveState( + time, + localReplicaKey.id(), + epoch, + OptionalInt.empty(), + Endpoints.empty(), + Optional.empty(), + voters, + Optional.empty(), + 1, + electionTimeoutMs, + logContext + ); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testSingleNodeQuorum(boolean withDirectoryId) { + ProspectiveState state = newProspectiveState(voterSetWithLocal(IntStream.empty(), withDirectoryId)); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testTwoNodeQuorumVoteRejected(boolean withDirectoryId) { + ReplicaKey otherNode = replicaKey(1, withDirectoryId); + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(otherNode), withDirectoryId) + ); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.singleton(otherNode), state.epochElection().unrecordedVoters()); + assertTrue(state.recordRejectedVote(otherNode.id())); + assertFalse(state.epochElection().isVoteGranted()); + assertTrue(state.epochElection().isVoteRejected()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testTwoNodeQuorumVoteGranted(boolean withDirectoryId) { + ReplicaKey otherNode = replicaKey(1, withDirectoryId); + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(otherNode), withDirectoryId) + ); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Collections.singleton(otherNode), state.epochElection().unrecordedVoters()); + assertTrue(state.recordGrantedVote(otherNode.id())); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteRejected()); + assertTrue(state.epochElection().isVoteGranted()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testThreeNodeQuorumVoteGranted(boolean withDirectoryId) { + ReplicaKey node1 = replicaKey(1, withDirectoryId); + ReplicaKey node2 = replicaKey(2, withDirectoryId); + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(node1, node2), withDirectoryId) + ); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Set.of(node1, node2), state.epochElection().unrecordedVoters()); + assertTrue(state.recordGrantedVote(node1.id())); + assertEquals(Collections.singleton(node2), state.epochElection().unrecordedVoters()); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertTrue(state.recordRejectedVote(node2.id())); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testThreeNodeQuorumVoteRejected(boolean withDirectoryId) { + ReplicaKey node1 = replicaKey(1, withDirectoryId); + ReplicaKey node2 = replicaKey(2, withDirectoryId); + 
ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(node1, node2), withDirectoryId) + ); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertEquals(Set.of(node1, node2), state.epochElection().unrecordedVoters()); + assertTrue(state.recordRejectedVote(node1.id())); + assertEquals(Collections.singleton(node2), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertFalse(state.epochElection().isVoteRejected()); + assertTrue(state.recordRejectedVote(node2.id())); + assertEquals(Collections.emptySet(), state.epochElection().unrecordedVoters()); + assertFalse(state.epochElection().isVoteGranted()); + assertTrue(state.epochElection().isVoteRejected()); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testCanChangePreVote(boolean withDirectoryId) { + int voter1 = 1; + int voter2 = 2; + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.of(voter1, voter2), withDirectoryId) + ); + assertTrue(state.recordGrantedVote(voter1)); + assertTrue(state.epochElection().isVoteGranted()); + assertFalse(state.recordRejectedVote(voter1)); + assertFalse(state.epochElection().isVoteGranted()); + + assertTrue(state.recordRejectedVote(voter2)); + assertTrue(state.epochElection().isVoteRejected()); + assertFalse(state.recordGrantedVote(voter2)); + assertFalse(state.epochElection().isVoteRejected()); + } + + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testCannotGrantOrRejectNonVoters(boolean withDirectoryId) { + int nonVoterId = 1; + ProspectiveState state = newProspectiveState(voterSetWithLocal(IntStream.empty(), withDirectoryId)); + assertThrows(IllegalArgumentException.class, () -> state.recordGrantedVote(nonVoterId)); + assertThrows(IllegalArgumentException.class, () -> state.recordRejectedVote(nonVoterId)); + } + + @ParameterizedTest + @ValueSource(booleans = { true, false }) + public void testConsecutiveGrant(boolean withDirectoryId) { + int otherNodeId = 1; + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.of(otherNodeId), withDirectoryId) + ); + assertTrue(state.recordGrantedVote(otherNodeId)); + assertFalse(state.recordGrantedVote(otherNodeId)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testConsecutiveReject(boolean withDirectoryId) { + int otherNodeId = 1; + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.of(otherNodeId), withDirectoryId) + ); + assertTrue(state.recordRejectedVote(otherNodeId)); + assertFalse(state.recordRejectedVote(otherNodeId)); + } + + @ParameterizedTest + @CsvSource({ "true,true", "true,false", "false,true", "false,false" }) + public void testGrantVote(boolean isLogUpToDate, boolean withDirectoryId) { + ReplicaKey node0 = replicaKey(0, withDirectoryId); + ReplicaKey node1 = replicaKey(1, withDirectoryId); + ReplicaKey node2 = replicaKey(2, withDirectoryId); + + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(node1, node2), withDirectoryId) + ); + + assertEquals(isLogUpToDate, state.canGrantVote(node0, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node1, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node2, isLogUpToDate, true)); + + assertEquals(isLogUpToDate, state.canGrantVote(node0, isLogUpToDate, false)); + assertEquals(isLogUpToDate, state.canGrantVote(node1, isLogUpToDate, false)); 
+ assertEquals(isLogUpToDate, state.canGrantVote(node2, isLogUpToDate, false)); + } + + @ParameterizedTest + @CsvSource({ "true,true", "true,false", "false,true", "false,false" }) + public void testGrantVoteWithVotedKey(boolean isLogUpToDate, boolean withDirectoryId) { + ReplicaKey node0 = replicaKey(0, withDirectoryId); + ReplicaKey node1 = replicaKey(1, withDirectoryId); + ReplicaKey node2 = replicaKey(2, withDirectoryId); + + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(node1, node2), withDirectoryId), + OptionalInt.empty(), + Optional.of(node1) + ); + + assertEquals(isLogUpToDate, state.canGrantVote(node0, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node1, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node2, isLogUpToDate, true)); + + assertFalse(state.canGrantVote(node0, isLogUpToDate, false)); + assertTrue(state.canGrantVote(node1, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node2, isLogUpToDate, false)); + } + + @ParameterizedTest + @CsvSource({ "true,true", "true,false", "false,true", "false,false" }) + public void testGrantVoteWithLeader(boolean isLogUpToDate, boolean withDirectoryId) { + ReplicaKey node0 = replicaKey(0, withDirectoryId); + ReplicaKey node1 = replicaKey(1, withDirectoryId); + ReplicaKey node2 = replicaKey(2, withDirectoryId); + + ProspectiveState state = newProspectiveState( + voterSetWithLocal(Stream.of(node1, node2), withDirectoryId), + OptionalInt.of(node1.id()), + Optional.empty() + ); + + assertEquals(isLogUpToDate, state.canGrantVote(node0, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node1, isLogUpToDate, true)); + assertEquals(isLogUpToDate, state.canGrantVote(node2, isLogUpToDate, true)); + + assertFalse(state.canGrantVote(node0, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node1, isLogUpToDate, false)); + assertFalse(state.canGrantVote(node2, isLogUpToDate, false)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testElectionState(boolean withDirectoryId) { + VoterSet voters = voterSetWithLocal(IntStream.of(1, 2, 3), withDirectoryId); + ProspectiveState state = newProspectiveState(voters); + assertEquals( + ElectionState.withUnknownLeader( + epoch, + voters.voterIds() + ), + state.election() + ); + + // with leader + state = newProspectiveState(voters, OptionalInt.of(1), Optional.empty()); + assertEquals( + ElectionState.withElectedLeader( + epoch, + 1, + Optional.empty(), voters.voterIds() + ), + state.election() + ); + + // with voted key + ReplicaKey votedKey = replicaKey(1, withDirectoryId); + state = newProspectiveState(voters, OptionalInt.empty(), Optional.of(votedKey)); + assertEquals( + ElectionState.withVotedCandidate( + epoch, + votedKey, + voters.voterIds() + ), + state.election() + ); + + // with both + state = newProspectiveState(voters, OptionalInt.of(1), Optional.of(votedKey)); + assertEquals( + ElectionState.withElectedLeader( + epoch, + 1, + Optional.of(votedKey), + voters.voterIds() + ), + state.election() + ); + } + + @Test + public void testElectionTimeout() { + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.empty(), true), + OptionalInt.empty(), + Optional.of(votedKeyWithDirectoryId) + ); + + assertEquals(epoch, state.epoch()); + assertEquals(votedKeyWithDirectoryId, state.votedKey().get()); + assertEquals( + ElectionState.withVotedCandidate(epoch, votedKeyWithDirectoryId, Collections.singleton(localId)), + state.election() + ); + 
assertEquals(electionTimeoutMs, state.remainingElectionTimeMs(time.milliseconds())); + assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); + + time.sleep(5000); + assertEquals(electionTimeoutMs - 5000, state.remainingElectionTimeMs(time.milliseconds())); + assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); + + time.sleep(5000); + assertEquals(0, state.remainingElectionTimeMs(time.milliseconds())); + assertTrue(state.hasElectionTimeoutExpired(time.milliseconds())); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testCanGrantVoteWithoutDirectoryId(boolean isLogUpToDate) { + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.empty(), true), + OptionalInt.empty(), + Optional.of(votedKeyWithoutDirectoryId)); + + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertTrue(state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId, Uuid.randomUuid()), isLogUpToDate, true) + ); + assertTrue(state.canGrantVote(ReplicaKey.of(votedId, Uuid.randomUuid()), isLogUpToDate, false)); + + // Can grant PreVote to other replicas even if we have granted a standard vote to another replica + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testCanGrantVoteWithDirectoryId(boolean isLogUpToDate) { + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.empty(), true), + OptionalInt.empty(), + Optional.of(votedKeyWithDirectoryId)); + + // Same voterKey + // We will not grant PreVote for a replica we have already granted a standard vote to if their log is behind + assertEquals( + isLogUpToDate, + state.canGrantVote(votedKeyWithDirectoryId, isLogUpToDate, true) + ); + assertTrue(state.canGrantVote(votedKeyWithDirectoryId, isLogUpToDate, false)); + + // Different directoryId + // We can grant PreVote for a replica we have already granted a standard vote to if their log is up-to-date, + // even if the directoryId is different + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId, Uuid.randomUuid()), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedId, Uuid.randomUuid()), isLogUpToDate, false)); + + // Missing directoryId + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + + // Different voterId + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId + 1, votedDirectoryId), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedId + 1, votedDirectoryId), true, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), true, false)); + } + + @Test + public void testLeaderEndpoints() { + ProspectiveState state = newProspectiveState( + voterSetWithLocal(IntStream.of(1, 2, 3), true), + 
OptionalInt.empty(), + Optional.of(ReplicaKey.of(1, Uuid.randomUuid())) + ); + assertEquals(Endpoints.empty(), state.leaderEndpoints()); + + state = newProspectiveState( + voterSetWithLocal(IntStream.of(1, 2, 3), true), + OptionalInt.of(3), + Optional.of(ReplicaKey.of(1, Uuid.randomUuid())) + ); + assertEquals(leaderEndpoints, state.leaderEndpoints()); + } + + private ReplicaKey replicaKey(int id, boolean withDirectoryId) { + Uuid directoryId = withDirectoryId ? Uuid.randomUuid() : ReplicaKey.NO_DIRECTORY_ID; + return ReplicaKey.of(id, directoryId); + } + + private VoterSet voterSetWithLocal(IntStream remoteVoterIds, boolean withDirectoryId) { + Stream<ReplicaKey> remoteVoterKeys = remoteVoterIds + .boxed() + .map(id -> replicaKey(id, withDirectoryId)); + + return voterSetWithLocal(remoteVoterKeys, withDirectoryId); + } + + private VoterSet voterSetWithLocal(Stream<ReplicaKey> remoteVoterKeys, boolean withDirectoryId) { + ReplicaKey actualLocalVoter = withDirectoryId ? + localReplicaKey : + ReplicaKey.of(localReplicaKey.id(), ReplicaKey.NO_DIRECTORY_ID); + + return VoterSetTest.voterSet( + Stream.concat(Stream.of(actualLocalVoter), remoteVoterKeys) + ); + } +} diff --git a/raft/src/test/java/org/apache/kafka/raft/QuorumStateTest.java b/raft/src/test/java/org/apache/kafka/raft/QuorumStateTest.java index 3038a775074eb..ffe47f66d9c67 100644 --- a/raft/src/test/java/org/apache/kafka/raft/QuorumStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/QuorumStateTest.java @@ -22,7 +22,7 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.raft.internals.BatchAccumulator; import org.apache.kafka.raft.internals.KRaftControlRecordStateMachine; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import org.junit.jupiter.params.ParameterizedTest; @@ -33,6 +33,7 @@ import java.net.InetSocketAddress; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.OptionalLong; @@ -79,7 +80,7 @@ private QuorumState buildQuorumState( localDirectoryId, mockPartitionState, localId.isPresent() ?
voterSet.listeners(localId.getAsInt()) : Endpoints.empty(), - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), electionTimeoutMs, fetchTimeoutMs, store, @@ -97,7 +98,7 @@ private QuorumState initializeEmptyState(VoterSet voters, KRaftVersion kraftVers } private Set persistedVoters(Set voters, KRaftVersion kraftVersion) { - if (kraftVersion.featureLevel() == 1) { + if (kraftVersion.isReconfigSupported()) { return Collections.emptySet(); } @@ -105,7 +106,7 @@ private Set persistedVoters(Set voters, KRaftVersion kraftVers } private ReplicaKey persistedVotedKey(ReplicaKey replicaKey, KRaftVersion kraftVersion) { - if (kraftVersion.featureLevel() == 1) { + if (kraftVersion.isReconfigSupported()) { return replicaKey; } @@ -154,6 +155,163 @@ private ReplicaKey replicaKey(int id, boolean withDirectoryId) { return ReplicaKey.of(id, directoryId); } + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHasRemoteLeader(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + assertFalse(state.hasRemoteLeader()); + + state.transitionToProspective(); + assertFalse(state.hasRemoteLeader()); + state.transitionToCandidate(); + assertFalse(state.hasRemoteLeader()); + + state.candidateStateOrThrow().recordGrantedVote(otherNodeKey.id()); + state.transitionToLeader(0L, accumulator); + assertFalse(state.hasRemoteLeader()); + + state.transitionToUnattached(state.epoch() + 1, OptionalInt.empty()); + assertFalse(state.hasRemoteLeader()); + + state.unattachedAddVotedState(state.epoch(), otherNodeKey); + assertFalse(state.hasRemoteLeader()); + + state.transitionToFollower( + state.epoch(), + otherNodeKey.id(), + voters.listeners(otherNodeKey.id()) + ); + assertTrue(state.hasRemoteLeader()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testHighWatermarkRetained(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToFollower( + 5, + otherNodeKey.id(), + voters.listeners(otherNodeKey.id()) + ); + + FollowerState followerState = state.followerStateOrThrow(); + followerState.updateHighWatermark(OptionalLong.of(10L)); + + Optional highWatermark = Optional.of(new LogOffsetMetadata(10L)); + assertEquals(highWatermark, state.highWatermark()); + + state.transitionToUnattached(6, OptionalInt.empty()); + assertEquals(highWatermark, state.highWatermark()); + + state.unattachedAddVotedState(6, otherNodeKey); + assertEquals(highWatermark, state.highWatermark()); + + state.transitionToProspective(); + assertEquals(highWatermark, state.highWatermark()); + + state.transitionToCandidate(); + assertEquals(highWatermark, state.highWatermark()); + + CandidateState candidateState = state.candidateStateOrThrow(); + candidateState.recordGrantedVote(otherNodeKey.id()); + assertTrue(candidateState.epochElection().isVoteGranted()); + + state.transitionToLeader(10L, accumulator); + assertEquals(Optional.empty(), state.highWatermark()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCanBecomeFollowerOfNonVoter(KRaftVersion kraftVersion) { + int otherNodeId = 1; + ReplicaKey nonVoterKey = 
ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, 4)); + + // Add voted state + state.unattachedAddVotedState(4, nonVoterKey); + assertTrue(state.isUnattachedAndVoted()); + + UnattachedState votedState = state.unattachedStateOrThrow(); + assertEquals(4, votedState.epoch()); + assertEquals(nonVoterKey, votedState.votedKey().get()); + + // Transition to follower + state.transitionToFollower( + 4, + nonVoterKey.id(), + Endpoints.fromInetSocketAddresses( + Collections.singletonMap( + VoterSetTest.DEFAULT_LISTENER_NAME, + InetSocketAddress.createUnresolved("non-voter-host", 1234) + ) + ) + ); + assertEquals( + new LeaderAndEpoch(OptionalInt.of(nonVoterKey.id()), 4), + state.leaderAndEpoch() + ); + assertEquals( + ElectionState.withElectedLeader( + 4, + nonVoterKey.id(), + Optional.of(persistedVotedKey(nonVoterKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCannotFollowSelf(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); + QuorumState state = initializeEmptyState(voters, kraftVersion); + + assertThrows( + IllegalStateException.class, + () -> state.transitionToFollower( + 0, + localId, + voters.listeners(localId) + ) + ); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(0, localVoterKey)); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCannotTransitionToFollowerWithNoLeaderEndpoint(KRaftVersion kraftVersion) { + int leaderId = 1; + int followerId = 2; + int epoch = 5; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leaderId, followerId), kraftVersion); + store.writeElectionState(ElectionState.withUnknownLeader(epoch, voters.voterIds()), kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + + assertThrows( + NullPointerException.class, + () -> state.transitionToFollower(epoch + 1, leaderId, null) + ); + assertThrows( + IllegalArgumentException.class, + () -> state.transitionToFollower(epoch + 2, leaderId, Endpoints.empty()) + ); + } + + /** + * Initialization tests + */ + @ParameterizedTest @EnumSource(value = KRaftVersion.class) public void testInitializePrimordialEpoch(KRaftVersion kraftVersion) { @@ -163,9 +321,10 @@ public void testInitializePrimordialEpoch(KRaftVersion kraftVersion) { QuorumState state = initializeEmptyState(voters, kraftVersion); assertTrue(state.isUnattached()); assertEquals(0, state.epoch()); + state.transitionToProspective(); state.transitionToCandidate(); CandidateState candidateState = state.candidateStateOrThrow(); - assertTrue(candidateState.isVoteGranted()); + assertTrue(candidateState.epochElection().isVoteGranted()); assertEquals(1, candidateState.epoch()); } @@ -200,7 +359,10 @@ public void testInitializeAsFollower(KRaftVersion kraftVersion) { int node2 = 2; int epoch = 5; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - store.writeElectionState(ElectionState.withElectedLeader(epoch, node1, voters.voterIds()), kraftVersion); + store.writeElectionState( + ElectionState.withElectedLeader(epoch, node1, Optional.empty(), 
voters.voterIds()), + kraftVersion + ); QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); @@ -213,6 +375,69 @@ public void testInitializeAsFollower(KRaftVersion kraftVersion) { assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); } + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testInitializeVotedAndLeaderAreSame(KRaftVersion kraftVersion) { + int epoch = 5; + ReplicaKey leaderKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leaderKey.id(), 2), kraftVersion); + store.writeElectionState( + ElectionState.withElectedLeader(epoch, leaderKey.id(), Optional.of(leaderKey), voters.voterIds()), + kraftVersion + ); + + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isFollower()); + assertEquals(epoch, state.epoch()); + + FollowerState followerState = state.followerStateOrThrow(); + assertEquals(epoch, followerState.epoch()); + assertEquals(leaderKey.id(), followerState.leaderId()); + assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); + assertEquals( + ElectionState.withElectedLeader( + epoch, + leaderKey.id(), + Optional.of(persistedVotedKey(leaderKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testInitializeVotedAndLeaderAreDifferent(KRaftVersion kraftVersion) { + int leader = 1; + int epoch = 5; + ReplicaKey votedKey = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leader, votedKey.id()), kraftVersion); + store.writeElectionState( + ElectionState.withElectedLeader(epoch, leader, Optional.of(votedKey), voters.voterIds()), + kraftVersion + ); + + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isFollower()); + assertEquals(epoch, state.epoch()); + + FollowerState followerState = state.followerStateOrThrow(); + assertEquals(epoch, followerState.epoch()); + assertEquals(leader, followerState.leaderId()); + assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); + assertEquals( + ElectionState.withElectedLeader( + epoch, + leader, + Optional.of(persistedVotedKey(votedKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + @ParameterizedTest @EnumSource(value = KRaftVersion.class) public void testInitializeAsUnattachedWhenMissingEndpoints(KRaftVersion kraftVersion) { @@ -220,8 +445,12 @@ public void testInitializeAsUnattachedWhenMissingEndpoints(KRaftVersion kraftVer int node2 = 2; int leader = 3; int epoch = 5; + ReplicaKey votedKey = ReplicaKey.of(leader, Uuid.randomUuid()); VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - store.writeElectionState(ElectionState.withElectedLeader(epoch, leader, voters.voterIds()), kraftVersion); + store.writeElectionState( + ElectionState.withElectedLeader(epoch, leader, Optional.of(votedKey), voters.voterIds()), + kraftVersion + ); QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); @@ -230,11 
+459,20 @@ public void testInitializeAsUnattachedWhenMissingEndpoints(KRaftVersion kraftVer UnattachedState unattachedState = state.unattachedStateOrThrow(); assertEquals(epoch, unattachedState.epoch()); + assertEquals( + ElectionState.withElectedLeader( + epoch, + leader, + Optional.of(persistedVotedKey(votedKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testInitializeAsVoted(KRaftVersion kraftVersion) { + public void testInitializeAsVotedNoLeader(KRaftVersion kraftVersion) { ReplicaKey nodeKey1 = ReplicaKey.of(1, Uuid.randomUuid()); ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid()); @@ -292,9 +530,9 @@ public void testInitializeAsResignedCandidate(KRaftVersion kraftVersion) { ElectionState.withVotedCandidate(epoch, localVoterKey, voters.voterIds()), candidateState.election() ); - assertEquals(Set.of(node1, node2), candidateState.unrecordedVoters()); - assertEquals(Set.of(localId), candidateState.grantingVoters()); - assertEquals(Collections.emptySet(), candidateState.rejectingVoters()); + assertEquals(Set.of(node1, node2), candidateState.epochElection().unrecordedVoters()); + assertEquals(Set.of(localId), candidateState.epochElection().grantingVoters()); + assertEquals(Collections.emptySet(), candidateState.epochElection().rejectingVoters()); assertEquals( electionTimeoutMs + jitterMs, candidateState.remainingElectionTimeMs(time.milliseconds()) @@ -308,7 +546,7 @@ public void testInitializeAsResignedLeader(KRaftVersion kraftVersion) { int node2 = 2; int epoch = 5; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - ElectionState election = ElectionState.withElectedLeader(epoch, localId, voters.voterIds()); + ElectionState election = ElectionState.withElectedLeader(epoch, localId, Optional.empty(), voters.voterIds()); store.writeElectionState(election, kraftVersion); // If we were previously a leader, we will start as resigned in order to ensure @@ -328,369 +566,292 @@ public void testInitializeAsResignedLeader(KRaftVersion kraftVersion) { assertEquals(epoch, resignedState.epoch()); assertEquals(election, resignedState.election()); assertEquals(Set.of(node1, node2), resignedState.unackedVoters()); - assertEquals(electionTimeoutMs + jitterMs, - resignedState.remainingElectionTimeMs(time.milliseconds())); + assertEquals( + electionTimeoutMs + jitterMs, + resignedState.remainingElectionTimeMs(time.milliseconds()) + ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToCandidate(KRaftVersion kraftVersion) { - int node1 = 1; - int node2 = 2; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - assertEquals(Optional.empty(), store.readElectionState()); - - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - assertEquals(1, state.epoch()); - - CandidateState candidate1 = state.candidateStateOrThrow(); - candidate1.recordRejectedVote(node2); - - // Check backoff behavior before transitioning - int backoffMs = 500; - candidate1.startBackingOff(time.milliseconds(), backoffMs); - assertTrue(candidate1.isBackingOff()); - assertFalse(candidate1.isBackoffComplete(time.milliseconds())); + public void testInitializeAsOnlyVoter(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + store.writeElectionState(ElectionState.withUnknownLeader(0, 
voters.voterIds()), kraftVersion); - time.sleep(backoffMs - 1); - assertTrue(candidate1.isBackingOff()); - assertFalse(candidate1.isBackoffComplete(time.milliseconds())); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + // start as unattached, in KafkaRaftClient initialization the local replica then transitions to candidate + assertTrue(state.isUnattached()); + assertEquals(0, state.epoch()); + assertEquals(OptionalInt.empty(), state.leaderId()); + } - time.sleep(1); - assertTrue(candidate1.isBackingOff()); - assertTrue(candidate1.isBackoffComplete(time.milliseconds())); + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testInitializeWithCorruptedStore(KRaftVersion kraftVersion) { + QuorumStateStore stateStore = Mockito.mock(QuorumStateStore.class); + Mockito.doThrow(UncheckedIOException.class).when(stateStore).readElectionState(); - // The election timeout should be reset after we become a candidate again - int jitterMs = 2500; - random.mockNextInt(jitterMs); + QuorumState state = buildQuorumState( + OptionalInt.of(localId), + localStandaloneVoterSet(), + kraftVersion + ); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - CandidateState candidate2 = state.candidateStateOrThrow(); - assertEquals(2, state.epoch()); - assertEquals(Collections.singleton(localId), candidate2.grantingVoters()); - assertEquals(Collections.emptySet(), candidate2.rejectingVoters()); - assertEquals(electionTimeoutMs + jitterMs, - candidate2.remainingElectionTimeMs(time.milliseconds())); + int epoch = 2; + state.initialize(new OffsetAndEpoch(0L, epoch)); + assertEquals(epoch, state.epoch()); + assertTrue(state.isUnattached()); + assertFalse(state.hasLeader()); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToResigned(KRaftVersion kraftVersion) { - int node1 = 1; - int node2 = 2; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - assertEquals(Optional.empty(), store.readElectionState()); + public void testInitializeWithEmptyLocalId(KRaftVersion kraftVersion) { + boolean withDirectoryId = kraftVersion.featureLevel() > 0; + VoterSet voters = VoterSetTest.voterSet( + VoterSetTest.voterMap(IntStream.of(0, 1), withDirectoryId) + ); + QuorumState state = buildQuorumState(OptionalInt.empty(), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, 0)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - assertEquals(1, state.epoch()); + assertTrue(state.isObserver()); + assertFalse(state.isVoter()); + + assertThrows(IllegalStateException.class, state::transitionToProspective); assertThrows( - IllegalStateException.class, () -> - state.transitionToResigned(Collections.emptyList()) + IllegalStateException.class, + () -> state.unattachedAddVotedState(1, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID)) ); - assertTrue(state.isCandidate()); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); + + state.transitionToFollower(1, 1, voters.listeners(1)); + assertTrue(state.isFollower()); + + state.transitionToUnattached(2, OptionalInt.empty()); + assertTrue(state.isUnattached()); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToLeader(KRaftVersion kraftVersion) { - VoterSet voters = localStandaloneVoterSet(); - assertEquals(Optional.empty(), 
store.readElectionState()); + public void testNoLocalIdInitializationFailsIfElectionStateHasVotedCandidate(KRaftVersion kraftVersion) { + boolean withDirectoryId = kraftVersion.featureLevel() > 0; + int epoch = 5; + int votedId = 1; + VoterSet voters = VoterSetTest.voterSet( + VoterSetTest.voterMap(IntStream.of(0, votedId), withDirectoryId) + ); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - assertEquals(1, state.epoch()); + store.writeElectionState( + ElectionState.withVotedCandidate( + epoch, + ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), + voters.voterIds() + ), + kraftVersion + ); - state.transitionToLeader(0L, accumulator); - LeaderState leaderState = state.leaderStateOrThrow(); - assertTrue(state.isLeader()); - assertEquals(1, leaderState.epoch()); - assertEquals(Optional.empty(), leaderState.highWatermark()); + QuorumState state2 = buildQuorumState(OptionalInt.empty(), voters, kraftVersion); + assertThrows(IllegalStateException.class, () -> state2.initialize(new OffsetAndEpoch(0, 0))); } + /** + * Test transitions from Unattached + */ @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToLeaderWithoutGrantedVote(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testUnattachedToUnattached(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - assertFalse(state.candidateStateOrThrow().isVoteGranted()); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); - state.candidateStateOrThrow().recordGrantedVote(otherNodeId); - assertTrue(state.candidateStateOrThrow().isVoteGranted()); - state.transitionToLeader(0L, accumulator); - assertTrue(state.isLeader()); - } + state.transitionToUnattached(5, OptionalInt.empty()); + assertTrue(state.isUnattachedNotVoted()); - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testCandidateToFollower(KRaftVersion kraftVersion) { - int otherNodeId = 1; + long remainingElectionTimeMs = state.unattachedStateOrThrow().remainingElectionTimeMs(time.milliseconds()); + time.sleep(1000); - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); + // cannot transition to unattached in same epoch + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(state.epoch(), OptionalInt.empty())); - state.transitionToFollower(5, otherNodeId, voters.listeners(otherNodeId)); - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.of(otherNodeId), state.leaderId()); + // can transition to unattached in higher epoch + state.transitionToUnattached(6, OptionalInt.empty()); + assertTrue(state.isUnattachedNotVoted()); + UnattachedState unattachedState = state.unattachedStateOrThrow(); + assertEquals(6, unattachedState.epoch()); + + // Verify that the election timer does not get reset assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 5, - otherNodeId, - persistedVoters(voters.voterIds(), 
kraftVersion) - ) - ), - store.readElectionState() + remainingElectionTimeMs - 1000, + unattachedState.remainingElectionTimeMs(time.milliseconds()) ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToUnattached(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testUnattachedCannotAddVotedStateForSelf(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - - state.transitionToUnattached(5); - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.empty(), state.leaderId()); - assertEquals( - Optional.of( - ElectionState.withUnknownLeader( - 5, - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() - ); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(0, localVoterKey)); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCandidateToUnattachedVoted(KRaftVersion kraftVersion) { + public void testUnattachedToUnattachedVotedSameEpoch(KRaftVersion kraftVersion) { ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); + state.transitionToUnattached(5, OptionalInt.empty()); - state.transitionToUnattachedVotedState(5, otherNodeKey); - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.empty(), state.leaderId()); + int jitterMs = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs); + state.unattachedAddVotedState(5, otherNodeKey); UnattachedState votedState = state.unattachedStateOrThrow(); + assertEquals(5, votedState.epoch()); assertEquals(otherNodeKey, votedState.votedKey().get()); assertEquals( - Optional.of( ElectionState.withVotedCandidate( 5, persistedVotedKey(otherNodeKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion)) + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); - } - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testCandidateToAnyStateLowerEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - state.transitionToCandidate(); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4)); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(4, otherNodeKey)); - assertThrows( - IllegalStateException.class, - () -> state.transitionToFollower( - 4, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) - ) - ); - assertEquals(6, state.epoch()); + // Verify election timeout is reset when we vote for a candidate assertEquals( - Optional.of( - ElectionState.withVotedCandidate( - 6, - persistedVotedKey(localVoterKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + 
electionTimeoutMs + jitterMs, + votedState.remainingElectionTimeMs(time.milliseconds()) ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToLeader(KRaftVersion kraftVersion) { - VoterSet voters = localStandaloneVoterSet(); - assertEquals(Optional.empty(), store.readElectionState()); - + public void testUnattachedToUnattachedVotedHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - state.transitionToLeader(0L, accumulator); - assertTrue(state.isLeader()); - assertEquals(1, state.epoch()); + state.transitionToUnattached(5, OptionalInt.empty()); + assertTrue(state.isUnattachedNotVoted()); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); - assertTrue(state.isLeader()); - assertEquals(1, state.epoch()); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(10, otherNodeKey)); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToResigned(KRaftVersion kraftVersion) { - VoterSet voters = localStandaloneVoterSet(); - assertEquals(Optional.empty(), store.readElectionState()); - + public void testUnattachedToFollowerSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - state.transitionToLeader(0L, accumulator); - assertTrue(state.isLeader()); - assertEquals(1, state.epoch()); + state.transitionToUnattached(5, OptionalInt.empty()); - state.transitionToResigned(Collections.singletonList(localVoterKey)); - assertTrue(state.isResigned()); - ResignedState resignedState = state.resignedStateOrThrow(); + state.transitionToFollower( + 5, + otherNodeKey.id(), + voters.listeners(otherNodeKey.id()) + ); + assertTrue(state.isFollower()); + FollowerState followerState = state.followerStateOrThrow(); + assertEquals(5, followerState.epoch()); assertEquals( - ElectionState.withElectedLeader(1, localId, voters.voterIds()), - resignedState.election() + voters.listeners(otherNodeKey.id()), + followerState.leaderEndpoints() ); - assertEquals(1, resignedState.epoch()); - assertEquals(Collections.emptySet(), resignedState.unackedVoters()); + assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToCandidate(KRaftVersion kraftVersion) { - VoterSet voters = localStandaloneVoterSet(); - assertEquals(Optional.empty(), store.readElectionState()); - + public void testUnattachedToFollowerHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - state.transitionToLeader(0L, accumulator); - assertTrue(state.isLeader()); - assertEquals(1, state.epoch()); - - assertThrows(IllegalStateException.class, state::transitionToCandidate); - 
assertTrue(state.isLeader()); - assertEquals(1, state.epoch()); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testLeaderToFollower(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); - - QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToUnattached(5, OptionalInt.empty()); - state.transitionToCandidate(); - state.candidateStateOrThrow().recordGrantedVote(otherNodeId); - state.transitionToLeader(0L, accumulator); - state.transitionToFollower(5, otherNodeId, voters.listeners(otherNodeId)); - - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.of(otherNodeId), state.leaderId()); + state.transitionToFollower( + 8, + otherNodeKey.id(), + voters.listeners(otherNodeKey.id()) + ); + assertTrue(state.isFollower()); + FollowerState followerState = state.followerStateOrThrow(); + assertEquals(8, followerState.epoch()); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 5, - otherNodeId, - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + voters.listeners(otherNodeKey.id()), + followerState.leaderEndpoints() ); + assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToUnattached(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testUnattachedToProspective(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - state.candidateStateOrThrow().recordGrantedVote(otherNodeId); - state.transitionToLeader(0L, accumulator); - state.transitionToUnattached(5); - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.empty(), state.leaderId()); + assertTrue(state.isUnattached()); + assertEquals(logEndEpoch, state.epoch()); + + int jitterMs = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs); + state.transitionToProspective(); + ProspectiveState prospective = state.prospectiveStateOrThrow(); + assertEquals(electionTimeoutMs + jitterMs, + prospective.remainingElectionTimeMs(time.milliseconds())); assertEquals( - Optional.of( - ElectionState.withUnknownLeader( - 5, - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + ElectionState.withUnknownLeader(logEndEpoch, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToUnattachedVoted(KRaftVersion kraftVersion) { + public void testUnattachedToCandidate(KRaftVersion kraftVersion) { ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToCandidate(); - state.candidateStateOrThrow().recordGrantedVote(otherNodeKey.id()); - state.transitionToLeader(0L, accumulator); - state.transitionToUnattachedVotedState(5, otherNodeKey); - - assertEquals(5, state.epoch()); - assertEquals(OptionalInt.empty(), state.leaderId()); - 
- UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(otherNodeKey, votedState.votedKey().get()); + state.transitionToUnattached(5, OptionalInt.empty()); + assertThrows(IllegalStateException.class, () -> state.transitionToCandidate()); + } - assertEquals( - Optional.of( - ElectionState.withVotedCandidate( - 5, - persistedVotedKey(otherNodeKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testUnattachedToLeaderOrResigned(KRaftVersion kraftVersion) { + ReplicaKey leaderKey = ReplicaKey.of(1, Uuid.randomUuid()); + int epoch = 5; + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, leaderKey)); + store.writeElectionState( + ElectionState.withVotedCandidate(epoch, leaderKey, voters.voterIds()), + kraftVersion ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isUnattachedAndVoted()); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testLeaderToAnyStateLowerEpoch(KRaftVersion kraftVersion) { + public void testUnattachedToAnyStateLowerEpoch(KRaftVersion kraftVersion) { ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - state.transitionToCandidate(); - state.candidateStateOrThrow().recordGrantedVote(otherNodeKey.id()); - state.transitionToLeader(0L, accumulator); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4)); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(4, otherNodeKey)); + state.transitionToUnattached(5, OptionalInt.empty()); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4, OptionalInt.empty())); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(4, otherNodeKey)); assertThrows( IllegalStateException.class, () -> state.transitionToFollower( @@ -699,301 +860,74 @@ public void testLeaderToAnyStateLowerEpoch(KRaftVersion kraftVersion) { voters.listeners(otherNodeKey.id()) ) ); - assertEquals(6, state.epoch()); - assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 6, - localId, - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() - ); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testCannotFollowOrVoteForSelf(KRaftVersion kraftVersion) { - VoterSet voters = localStandaloneVoterSet(); - assertEquals(Optional.empty(), store.readElectionState()); - QuorumState state = initializeEmptyState(voters, kraftVersion); - - assertThrows( - IllegalStateException.class, - () -> state.transitionToFollower( - 0, - localId, - voters.listeners(localId) - ) - ); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(0, localVoterKey)); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedToLeaderOrResigned(KRaftVersion kraftVersion) { - ReplicaKey leaderKey = 
ReplicaKey.of(1, Uuid.randomUuid()); - int epoch = 5; - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, leaderKey)); - store.writeElectionState( - ElectionState.withVotedCandidate(epoch, leaderKey, voters.voterIds()), - kraftVersion - ); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - assertTrue(state.isUnattached()); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); - assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedSameEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - - int jitterMs = 2500; - random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToUnattachedVotedState(5, otherNodeKey); - - UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(5, votedState.epoch()); - assertEquals(otherNodeKey, votedState.votedKey().get()); - - assertEquals( - Optional.of( - ElectionState.withVotedCandidate( - 5, - persistedVotedKey(otherNodeKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() - ); - - // Verify election timeout is reset when we vote for a candidate - assertEquals(electionTimeoutMs + jitterMs, - votedState.remainingElectionTimeMs(time.milliseconds())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedHigherEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - assertTrue(state.isUnattachedNotVoted()); - - state.transitionToUnattachedVotedState(8, otherNodeKey); - assertTrue(state.isUnattachedAndVoted()); - - UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(8, votedState.epoch()); - assertEquals(otherNodeKey, votedState.votedKey().get()); - + assertEquals(5, state.epoch()); assertEquals( - Optional.of( - ElectionState.withVotedCandidate( - 8, - persistedVotedKey(otherNodeKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + ElectionState.withUnknownLeader(5, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() ); } + /** + * Tests transitions from Unattached with votedKey + */ @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testUnattachedToCandidate(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + public void testUnattachedVotedToUnattachedSameEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - 
state.transitionToUnattached(5); - - int jitterMs = 2500; - random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToCandidate(); - - assertTrue(state.isCandidate()); - CandidateState candidateState = state.candidateStateOrThrow(); - assertEquals(6, candidateState.epoch()); - assertEquals(electionTimeoutMs + jitterMs, - candidateState.remainingElectionTimeMs(time.milliseconds())); + state.unattachedAddVotedState(logEndEpoch, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(logEndEpoch, OptionalInt.empty())); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testUnattachedToUnattached(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + public void testUnattachedVotedToUnattachedHigherEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - assertTrue(state.isUnattachedNotVoted()); + state.initialize(new OffsetAndEpoch(0L, 5)); + state.unattachedAddVotedState(5, ReplicaKey.of(otherNodeId, ReplicaKey.NO_DIRECTORY_ID)); long remainingElectionTimeMs = state.unattachedStateOrThrow().remainingElectionTimeMs(time.milliseconds()); time.sleep(1000); - state.transitionToUnattached(6); - assertTrue(state.isUnattachedNotVoted()); + state.transitionToUnattached(6, OptionalInt.empty()); UnattachedState unattachedState = state.unattachedStateOrThrow(); assertEquals(6, unattachedState.epoch()); // Verify that the election timer does not get reset - assertEquals(remainingElectionTimeMs - 1000, - unattachedState.remainingElectionTimeMs(time.milliseconds())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedToFollowerSameEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - - state.transitionToFollower( - 5, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) - ); - assertTrue(state.isFollower()); - FollowerState followerState = state.followerStateOrThrow(); - assertEquals(5, followerState.epoch()); assertEquals( - voters.listeners(otherNodeKey.id()), - followerState.leaderEndpoints() - ); - assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedToFollowerHigherEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - - state.transitionToFollower( - 8, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) - ); - assertTrue(state.isFollower()); - FollowerState followerState = state.followerStateOrThrow(); - assertEquals(8, followerState.epoch()); - assertEquals( - 
voters.listeners(otherNodeKey.id()), - followerState.leaderEndpoints() - ); - assertEquals(fetchTimeoutMs, followerState.remainingFetchTimeMs(time.milliseconds())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedToAnyStateLowerEpoch(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4)); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(4, otherNodeKey)); - assertThrows( - IllegalStateException.class, - () -> state.transitionToFollower( - 4, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) - ) - ); - assertEquals(5, state.epoch()); - assertEquals( - Optional.of( - ElectionState.withUnknownLeader( - 5, - persistedVoters(voters.voterIds(), kraftVersion) - ) - ), - store.readElectionState() + remainingElectionTimeMs - 1000, + unattachedState.remainingElectionTimeMs(time.milliseconds()) ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedToInvalidLeaderOrResigned(KRaftVersion kraftVersion) { - int node1 = 1; - int node2 = 2; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0, accumulator)); - assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedToCandidate(KRaftVersion kraftVersion) { - int node1 = 1; - int node2 = 2; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); - - int jitterMs = 2500; - random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - CandidateState candidateState = state.candidateStateOrThrow(); - assertEquals(6, candidateState.epoch()); - assertEquals(electionTimeoutMs + jitterMs, - candidateState.remainingElectionTimeMs(time.milliseconds())); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testObserverFromUnattachedVotedToCandidate(KRaftVersion kraftVersion) { - int voter1 = 1; - int voter2 = 2; - VoterSet voters = withRemoteVoterSet(IntStream.of(voter1, voter2), kraftVersion); - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(voter1, ReplicaKey.NO_DIRECTORY_ID)); - - assertThrows(IllegalStateException.class, () -> state.transitionToCandidate()); - assertTrue(state.isUnattached()); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedToUnattachedVotedSameEpoch(KRaftVersion 
kraftVersion) { + public void testUnattachedVotedToUnattachedVoted(KRaftVersion kraftVersion) { int node1 = 1; int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(5); - state.transitionToUnattachedVotedState(8, ReplicaKey.of(node1, Uuid.randomUuid())); + state.initialize(new OffsetAndEpoch(0L, 8)); + state.unattachedAddVotedState(8, ReplicaKey.of(node1, Uuid.randomUuid())); + // same epoch + assertThrows( + IllegalStateException.class, + () -> state.unattachedAddVotedState(8, ReplicaKey.of(node2, ReplicaKey.NO_DIRECTORY_ID)) + ); + // same votedKey assertThrows( IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(8, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)) + () -> state.unattachedAddVotedState(8, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)) ); + // higher epoch assertThrows( IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(8, ReplicaKey.of(node2, ReplicaKey.NO_DIRECTORY_ID)) + () -> state.unattachedAddVotedState(10, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)) ); } @@ -1001,11 +935,12 @@ public void testUnattachedVotedToUnattachedVotedSameEpoch(KRaftVersion kraftVers @EnumSource(value = KRaftVersion.class) public void testUnattachedVotedToFollowerSameEpoch(KRaftVersion kraftVersion) { int node1 = 1; + ReplicaKey node1Key = ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID); int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); + state.initialize(new OffsetAndEpoch(0L, 5)); + state.unattachedAddVotedState(5, node1Key); state.transitionToFollower( 5, node2, @@ -1019,14 +954,13 @@ public void testUnattachedVotedToFollowerSameEpoch(KRaftVersion kraftVersion) { followerState.leaderEndpoints() ); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 5, - node2, - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withElectedLeader( + 5, + node2, + Optional.of(persistedVotedKey(node1Key, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); } @@ -1037,8 +971,8 @@ public void testUnattachedVotedToFollowerHigherEpoch(KRaftVersion kraftVersion) int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); + state.initialize(new OffsetAndEpoch(0L, 5)); + state.unattachedAddVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); state.transitionToFollower( 8, node2, @@ -1052,49 +986,58 @@ public void testUnattachedVotedToFollowerHigherEpoch(KRaftVersion kraftVersion) followerState.leaderEndpoints() ); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 8, - node2, - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withElectedLeader( + 8, + node2, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() 
); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedToUnattachedSameEpoch(KRaftVersion kraftVersion) { + public void testUnattachedVotedToProspective(KRaftVersion kraftVersion) { int node1 = 1; + Uuid node1DirectoryId = Uuid.randomUuid(); int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(5)); + state.initialize(new OffsetAndEpoch(0L, 5)); + state.unattachedAddVotedState(5, ReplicaKey.of(node1, node1DirectoryId)); + + int jitterMs = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs); + state.transitionToProspective(); + assertTrue(state.isProspective()); + ProspectiveState prospectiveState = state.prospectiveStateOrThrow(); + assertEquals(5, prospectiveState.epoch()); + assertEquals(electionTimeoutMs + jitterMs, + prospectiveState.remainingElectionTimeMs(time.milliseconds())); + assertEquals( + ElectionState.withVotedCandidate( + 5, + persistedVotedKey(ReplicaKey.of(node1, node1DirectoryId), kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testUnattachedVotedToUnattachedHigherEpoch(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testUnattachedVotedToCandidateOrLeaderOrResigned(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, ReplicaKey.of(otherNodeId, ReplicaKey.NO_DIRECTORY_ID)); - - long remainingElectionTimeMs = state.unattachedStateOrThrow().remainingElectionTimeMs(time.milliseconds()); - time.sleep(1000); - - state.transitionToUnattached(6); - UnattachedState unattachedState = state.unattachedStateOrThrow(); - assertEquals(6, unattachedState.epoch()); - - // Verify that the election timer does not get reset - assertEquals(remainingElectionTimeMs - 1000, - unattachedState.remainingElectionTimeMs(time.milliseconds())); - } + state.unattachedAddVotedState(logEndEpoch, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)); + assertThrows(IllegalStateException.class, () -> state.transitionToCandidate()); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); + } @ParameterizedTest @EnumSource(value = KRaftVersion.class) @@ -1102,10 +1045,10 @@ public void testUnattachedVotedToAnyStateLowerEpoch(KRaftVersion kraftVersion) { ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); QuorumState state = initializeEmptyState(voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattachedVotedState(5, otherNodeKey); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4)); - assertThrows(IllegalStateException.class, () -> 
state.transitionToUnattachedVotedState(4, otherNodeKey)); + state.initialize(new OffsetAndEpoch(0L, 5)); + state.unattachedAddVotedState(5, otherNodeKey); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4, OptionalInt.empty())); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(4, otherNodeKey)); assertThrows( IllegalStateException.class, () -> state.transitionToFollower( @@ -1116,52 +1059,160 @@ public void testUnattachedVotedToAnyStateLowerEpoch(KRaftVersion kraftVersion) { ); assertEquals(5, state.epoch()); assertEquals( - Optional.of( - ElectionState.withVotedCandidate( - 5, - persistedVotedKey(otherNodeKey, kraftVersion), - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withVotedCandidate( + 5, + persistedVotedKey(otherNodeKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); } + /** + * Test transitions from Unattached with leader + */ + @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testAllStatesToUnattachedFailInSameEpoch(KRaftVersion kraftVersion) { - ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); - ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + public void testUnattachedWithLeaderToProspective(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + int epoch = 5; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + store.writeElectionState( + ElectionState.withElectedLeader(epoch, node1, Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + + state.transitionToProspective(); + assertTrue(state.isProspective()); + assertEquals( + ElectionState.withElectedLeader( + epoch, + node1, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testUnattachedWithLeaderNoEndpointToAndFromProspective(KRaftVersion kraftVersion) { + int leaderId = 1; + ReplicaKey leaderKey = ReplicaKey.of(leaderId, Uuid.randomUuid()); + int followerId = 2; + ReplicaKey followerKey = ReplicaKey.of(followerId, Uuid.randomUuid()); + int epoch = 5; + Map<Integer, VoterSet.VoterNode> voterMap = new HashMap<>(); + voterMap.put(localId, VoterSetTest.voterNode(localVoterKey)); + voterMap.put(leaderId, VoterSetTest.voterNode(leaderKey, Endpoints.empty())); + voterMap.put(followerId, VoterSetTest.voterNode(followerKey, Endpoints.empty())); + VoterSet voters = VoterSetTest.voterSet(voterMap); + + store.writeElectionState( + ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isUnattached()); + assertTrue(state.hasLeader()); + assertTrue(state.leaderEndpoints().isEmpty()); + + state.transitionToProspective(); + assertTrue(state.isProspective()); + assertTrue(state.leaderEndpoints().isEmpty()); + assertEquals( + ElectionState.withElectedLeader( + 5, + leaderId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + + state.transitionToUnattached(5,
OptionalInt.of(leaderId)); + assertTrue(state.isUnattached()); + assertTrue(state.leaderEndpoints().isEmpty()); + assertEquals( + ElectionState.withElectedLeader( + 5, + leaderId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + /** + * Test transitions from Follower + */ + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testFollowerToUnattachedSameEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + ReplicaKey votedKey = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToFollower( + 8, + node2, + voters.listeners(node2) + ); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(8, OptionalInt.empty())); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(8, votedKey)); + } - // unattached to unattached - state.unattachedStateOrThrow(); - state.transitionToUnattachedVotedState(5, voter1); - // cannot vote for same or different node in same epoch - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(5, voter1)); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(5, voter2)); - // can vote for same or different node in larger epoch - state.transitionToUnattachedVotedState(10, voter1); - state.transitionToUnattachedVotedState(15, voter2); + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testFollowerToUnattachedHigherEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToFollower( + 8, + node2, + voters.listeners(node2) + ); - // follower to unattached - state.transitionToFollower(20, voter1.id(), voters.listeners(voter1.id())); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(state.epoch(), voter1)); - state.transitionToUnattachedVotedState(state.epoch() + 1, voter1); + int jitterMs = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs); + state.transitionToUnattached(9, OptionalInt.empty()); + assertTrue(state.isUnattached()); + UnattachedState unattachedState = state.unattachedStateOrThrow(); + assertEquals(9, unattachedState.epoch()); + assertEquals( + electionTimeoutMs + jitterMs, + unattachedState.remainingElectionTimeMs(time.milliseconds()) + ); + } - // candidate - state.transitionToCandidate(); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(state.epoch(), voter1)); - state.transitionToUnattachedVotedState(state.epoch() + 1, voter1); + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testFollowerToUnattachedVotedHigherEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + ReplicaKey votedKey = ReplicaKey.of(node2, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToFollower( + 8, + node2, + voters.listeners(node2) + ); - // leader - 
state.transitionToCandidate(); - state.candidateStateOrThrow().recordGrantedVote(voter1.id()); - state.transitionToLeader(0L, accumulator); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattachedVotedState(state.epoch(), voter1)); - state.transitionToUnattachedVotedState(state.epoch() + 1, voter1); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(9, votedKey)); } @ParameterizedTest @@ -1201,14 +1252,13 @@ public void testFollowerToFollowerSameEpoch(KRaftVersion kraftVersion) { followerState.leaderEndpoints() ); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 8, - node2, - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withElectedLeader( + 8, + node2, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); } @@ -1270,32 +1320,48 @@ public void testFollowerToFollowerHigherEpoch(KRaftVersion kraftVersion) { followerState.leaderEndpoints() ); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 9, - node1, - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withElectedLeader( + 9, + node1, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToLeaderOrResigned(KRaftVersion kraftVersion) { + public void testFollowerToProspective(KRaftVersion kraftVersion) { int node1 = 1; int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); - QuorumState state = initializeEmptyState(voters, kraftVersion); + store.writeElectionState(ElectionState.withUnknownLeader(logEndEpoch, voters.voterIds()), kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); state.transitionToFollower( 8, node2, voters.listeners(node2) ); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0, accumulator)); - assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); + int jitterMs = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs); + state.transitionToProspective(); + ProspectiveState prospective = state.prospectiveStateOrThrow(); + assertEquals(8, prospective.epoch()); + assertEquals( + electionTimeoutMs + jitterMs, + prospective.remainingElectionTimeMs(time.milliseconds()) + ); + assertEquals( + ElectionState.withElectedLeader( + 8, + node2, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); } @ParameterizedTest @@ -1311,20 +1377,12 @@ public void testFollowerToCandidate(KRaftVersion kraftVersion) { node2, voters.listeners(node2) ); - - int jitterMs = 2500; - random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToCandidate(); - assertTrue(state.isCandidate()); - CandidateState candidateState = state.candidateStateOrThrow(); - assertEquals(9, candidateState.epoch()); - assertEquals(electionTimeoutMs + jitterMs, - candidateState.remainingElectionTimeMs(time.milliseconds())); + assertThrows(IllegalStateException.class, state::transitionToCandidate); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToUnattachedSameEpoch(KRaftVersion kraftVersion) { + public void testFollowerToLeaderOrResigned(KRaftVersion kraftVersion) { int node1 
= 1; int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); @@ -1335,220 +1393,1168 @@ public void testFollowerToUnattachedSameEpoch(KRaftVersion kraftVersion) { node2, voters.listeners(node2) ); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(8)); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToUnattachedHigherEpoch(KRaftVersion kraftVersion) { - int node1 = 1; - int node2 = 2; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + public void testFollowerToAnyStateLowerEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); state.transitionToFollower( - 8, - node2, - voters.listeners(node2) + 5, + otherNodeId, + voters.listeners(otherNodeId) + ); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4, OptionalInt.empty())); + assertThrows( + IllegalStateException.class, + () -> state.transitionToFollower( + 4, + otherNodeId, + voters.listeners(otherNodeId) + ) + ); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(4, accumulator)); + assertEquals(5, state.epoch()); + assertEquals( + ElectionState.withElectedLeader( + 5, + otherNodeId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() ); - - int jitterMs = 2500; - random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToUnattached(9); - assertTrue(state.isUnattached()); - UnattachedState unattachedState = state.unattachedStateOrThrow(); - assertEquals(9, unattachedState.epoch()); - assertEquals(electionTimeoutMs + jitterMs, - unattachedState.remainingElectionTimeMs(time.milliseconds())); } + /** + * Test transitions from Follower with votedKey + */ + @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToUnattachedVotedSameEpoch(KRaftVersion kraftVersion) { + public void testFollowerVotedToUnattachedSameEpoch(KRaftVersion kraftVersion) { int node1 = 1; int node2 = 2; VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveStateOrThrow().recordGrantedVote(node1); state.transitionToFollower( - 8, + state.epoch(), node2, voters.listeners(node2) ); + assertEquals(0, state.epoch()); assertThrows( IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(8, ReplicaKey.of(node1, ReplicaKey.NO_DIRECTORY_ID)) - ); - assertThrows( - IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(8, ReplicaKey.of(localId, ReplicaKey.NO_DIRECTORY_ID)) - ); - assertThrows( - IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(8, ReplicaKey.of(node2, ReplicaKey.NO_DIRECTORY_ID)) + () -> state.transitionToUnattached(0, OptionalInt.empty()) ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToUnattachedVotedHigherEpoch(KRaftVersion kraftVersion) { - ReplicaKey nodeKey1 
= ReplicaKey.of(1, Uuid.randomUuid()); - ReplicaKey nodeKey2 = ReplicaKey.of(2, Uuid.randomUuid()); - - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, nodeKey1, nodeKey2)); + public void testFollowerVotedToUnattachedHigherEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveStateOrThrow().recordGrantedVote(node1); state.transitionToFollower( - 8, - nodeKey2.id(), - voters.listeners(nodeKey2.id()) + state.epoch(), + node2, + voters.listeners(node2) ); + assertEquals(0, state.epoch()); int jitterMs = 2500; random.mockNextInt(electionTimeoutMs, jitterMs); - state.transitionToUnattachedVotedState(9, nodeKey1); - assertTrue(state.isUnattachedAndVoted()); + state.transitionToUnattached(10, OptionalInt.empty()); + assertTrue(state.isUnattachedNotVoted()); - UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(9, votedState.epoch()); - assertEquals(nodeKey1, votedState.votedKey().get()); + UnattachedState unattached = state.unattachedStateOrThrow(); + assertEquals(10, unattached.epoch()); - assertEquals(electionTimeoutMs + jitterMs, - votedState.remainingElectionTimeMs(time.milliseconds())); + assertEquals( + electionTimeoutMs + jitterMs, + unattached.remainingElectionTimeMs(time.milliseconds()) + ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testFollowerToAnyStateLowerEpoch(KRaftVersion kraftVersion) { - int otherNodeId = 1; - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testFollowerVotedToFollowerSameEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + ReplicaKey votedKey = ReplicaKey.of(node1, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveAddVotedState(state.epoch(), votedKey); state.transitionToFollower( - 5, - otherNodeId, - voters.listeners(otherNodeId) + state.epoch(), + node2, + voters.listeners(node2) ); - assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4)); - assertThrows( - IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(4, ReplicaKey.of(otherNodeId, ReplicaKey.NO_DIRECTORY_ID)) + assertEquals( + ElectionState.withElectedLeader( + 0, + node2, + Optional.of(persistedVotedKey(votedKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() ); + assertThrows( IllegalStateException.class, - () -> state.transitionToFollower( - 4, - otherNodeId, - voters.listeners(otherNodeId) - ) + () -> state.transitionToFollower(state.epoch(), node1, voters.listeners(node1)) + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testFollowerVotedToFollowerHigherEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + ReplicaKey votedKey = ReplicaKey.of(node1, Uuid.randomUuid()); + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + 
state.transitionToProspective(); + state.prospectiveAddVotedState(state.epoch(), votedKey); + state.transitionToFollower( + state.epoch(), + node2, + voters.listeners(node2) ); - assertEquals(5, state.epoch()); assertEquals( - Optional.of( - ElectionState.withElectedLeader( - 5, - otherNodeId, - persistedVoters(voters.voterIds(), kraftVersion) - ) + ElectionState.withElectedLeader( + 0, + node2, + Optional.of(persistedVotedKey(votedKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + + state.transitionToFollower(state.epoch() + 1, node1, voters.listeners(node1)); + assertEquals(1, state.epoch()); + assertEquals( + ElectionState.withElectedLeader( + 1, + node1, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) ), - store.readElectionState() + store.readElectionState().get() ); } + /** + * Test transitions from Prospective + */ @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testCanBecomeFollowerOfNonVoter(KRaftVersion kraftVersion) { - int otherNodeId = 1; - ReplicaKey nonVoterKey = ReplicaKey.of(2, Uuid.randomUuid()); - VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + public void testProspectiveToUnattachedInSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); - // Add voted state - state.transitionToUnattachedVotedState(4, nonVoterKey); + state.transitionToUnattached(state.epoch(), OptionalInt.empty()); + assertEquals( + ElectionState.withUnknownLeader(logEndEpoch, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToUnattachedInHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + state.transitionToUnattached(20, OptionalInt.empty()); + assertEquals( + ElectionState.withUnknownLeader(20, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToUnattachedVoted(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + // in same epoch + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(logEndEpoch, voter1)); + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(logEndEpoch, localVoterKey)); + + // in higher epoch + assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(10, voter1)); + 
assertThrows(IllegalStateException.class, () -> state.unattachedAddVotedState(10, localVoterKey)); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToUnattachedWithLeaderInHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + state.transitionToUnattached(10, OptionalInt.of(voter1.id())); + assertEquals( + ElectionState.withElectedLeader( + 10, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToFollowerSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToFollower(state.epoch(), voter1.id(), voters.listeners(voter1.id())); + assertEquals( + ElectionState.withElectedLeader( + 0, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToFollowerHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToFollower(state.epoch() + 1, voter1.id(), voters.listeners(voter1.id())); + assertEquals( + ElectionState.withElectedLeader( + 1, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToProspective(KRaftVersion kraftVersion) { + int leaderId = 1; + int followerId = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leaderId, followerId), kraftVersion); + + store.writeElectionState(ElectionState.withUnknownLeader(logEndEpoch, voters.voterIds()), kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + assertTrue(state.isProspective()); + assertThrows(IllegalStateException.class, state::transitionToProspective); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToCandidate(KRaftVersion kraftVersion) { + int leaderId = 1; + int followerId = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leaderId, followerId), kraftVersion); + + store.writeElectionState(ElectionState.withUnknownLeader(logEndEpoch, voters.voterIds()), kraftVersion); + QuorumState state = 
buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + int jitterMs1 = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs1); + state.transitionToProspective(); + assertFalse(state.hasLeader()); + assertEquals( + electionTimeoutMs + jitterMs1, + state.prospectiveStateOrThrow().remainingElectionTimeMs(time.milliseconds()) + ); + + int jitterMs2 = 3000; + random.mockNextInt(electionTimeoutMs, jitterMs2); + state.transitionToCandidate(); + assertEquals( + electionTimeoutMs + jitterMs2, + state.candidateStateOrThrow().remainingElectionTimeMs(time.milliseconds()) + ); + assertEquals( + ElectionState.withVotedCandidate( + logEndEpoch + 1, + persistedVotedKey(localVoterKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveToLeaderOrResigned(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + // in same epoch + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(logEndEpoch, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); + + // in higher epoch + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(10, accumulator)); + } + + /** + * Transitions from Prospective with votedKey + */ + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveVotedToUnattachedInSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState(ElectionState.withUnknownLeader(logEndEpoch, voters.voterIds()), kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveAddVotedState(logEndEpoch, voter1); + + state.transitionToUnattached(logEndEpoch, OptionalInt.empty()); + assertEquals( + ElectionState.withVotedCandidate( + logEndEpoch, + persistedVotedKey(voter1, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveVotedToAndFromFollowerSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState(ElectionState.withUnknownLeader(logEndEpoch, voters.voterIds()), kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveAddVotedState(logEndEpoch, voter1); + + // transition to follower of voter2 with votedKey voter1 + state.transitionToFollower(state.epoch(), 
voter2.id(), voters.listeners(voter2.id())); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter2.id(), + Optional.of(persistedVotedKey(voter1, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + + // transition back to prospective + state.transitionToProspective(); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter2.id(), + Optional.of(persistedVotedKey(voter1, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveVotedToCandidate(KRaftVersion kraftVersion) { + int node1 = 1; + Uuid node1DirectoryId = Uuid.randomUuid(); + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + store.writeElectionState( + ElectionState.withVotedCandidate( + logEndEpoch, + ReplicaKey.of(node1, node1DirectoryId), + voters.voterIds() + ), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); assertTrue(state.isUnattachedAndVoted()); + int jitterMs1 = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs1); + state.transitionToProspective(); + ProspectiveState prospectiveState = state.prospectiveStateOrThrow(); + assertEquals( + electionTimeoutMs + jitterMs1, + prospectiveState.remainingElectionTimeMs(time.milliseconds()) + ); + assertTrue(prospectiveState.votedKey().isPresent()); - UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(4, votedState.epoch()); - assertEquals(nonVoterKey, votedState.votedKey().get()); + int jitterMs2 = 3000; + random.mockNextInt(electionTimeoutMs, jitterMs2); + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertEquals( + electionTimeoutMs + jitterMs2, + state.candidateStateOrThrow().remainingElectionTimeMs(time.milliseconds()) + ); + assertEquals(logEndEpoch + 1, state.epoch()); + assertEquals( + ElectionState.withVotedCandidate( + logEndEpoch + 1, + persistedVotedKey(localVoterKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } - // Transition to follower - state.transitionToFollower( - 4, - nonVoterKey.id(), - Endpoints.fromInetSocketAddresses( - Collections.singletonMap( - VoterSetTest.DEFAULT_LISTENER_NAME, - InetSocketAddress.createUnresolved("non-voter-host", 1234) - ) - ) + /** + * Test transitions from Prospective with leader + */ + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveWithLeaderToUnattachedInSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, voter1.id(), Optional.empty(), voters.voterIds()), + kraftVersion ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); assertEquals( - new LeaderAndEpoch(OptionalInt.of(nonVoterKey.id()), 4), - state.leaderAndEpoch() + ElectionState.withElectedLeader( + logEndEpoch, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + 
store.readElectionState().get() + ); + + state.transitionToUnattached(state.epoch(), OptionalInt.of(voter1.id())); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveWithLeaderToUnattachedInHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, voter1.id(), Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + + // transition to unattached + state.transitionToUnattached(10, OptionalInt.empty()); + assertEquals( + ElectionState.withUnknownLeader(10, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveWithLeaderToUnattachedWithLeaderInHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, voter1.id(), Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter1.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + + // transition to unattached with different leader state + state.transitionToUnattached(10, OptionalInt.of(voter2.id())); + assertEquals( + ElectionState.withElectedLeader( + 10, + voter2.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveWithLeaderToFollowerSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, voter1.id(), Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + state.transitionToFollower(state.epoch(), voter2.id(), voters.listeners(voter2.id())); + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + voter2.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), 
kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveWithLeaderToFollowerHigherEpoch(KRaftVersion kraftVersion) { + ReplicaKey voter1 = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey voter2 = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, voter1, voter2)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, voter1.id(), Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + + state.transitionToFollower(10, voter2.id(), voters.listeners(voter2.id())); + assertEquals( + ElectionState.withElectedLeader( + 10, + voter2.id(), + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() ); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testObserverCannotBecomeCandidateOrLeader(KRaftVersion kraftVersion) { - boolean withDirectoryId = kraftVersion.featureLevel() > 0; - int otherNodeId = 1; - VoterSet voters = VoterSetTest.voterSet( - VoterSetTest.voterMap(IntStream.of(otherNodeId), withDirectoryId) - ); + public void testProspectiveWithLeaderToCandidate(KRaftVersion kraftVersion) { + int leaderId = 1; + int followerId = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(leaderId, followerId), kraftVersion); + + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, leaderId, Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + assertTrue(state.isProspective()); + assertTrue(state.hasLeader()); + + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertEquals(logEndEpoch + 1, state.epoch()); + assertEquals( + ElectionState.withVotedCandidate( + logEndEpoch + 1, + persistedVotedKey(localVoterKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testProspectiveVotedWithLeaderToUnattachedInSameEpoch(KRaftVersion kraftVersion) { + ReplicaKey leader = ReplicaKey.of(1, Uuid.randomUuid()); + ReplicaKey candidate = ReplicaKey.of(2, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, leader, candidate)); + store.writeElectionState( + ElectionState.withElectedLeader(logEndEpoch, leader.id(), Optional.empty(), voters.voterIds()), + kraftVersion + ); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.prospectiveAddVotedState(logEndEpoch, candidate); + + // transition to unattached with different leader state + state.transitionToUnattached(state.epoch(), OptionalInt.of(candidate.id())); + + assertEquals( + ElectionState.withElectedLeader( + logEndEpoch, + candidate.id(), + Optional.of(persistedVotedKey(candidate, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + /** + * Test transitions from Candidate + */ + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) 
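    // A minimal sketch of the transition flow these Candidate tests exercise, assuming the
    // KIP-996 (PreVote) rules shown by the tests below: a voter reaches Candidate only
    // through Prospective, and the epoch is bumped when the candidate phase begins, e.g.
    //
    //   state.transitionToProspective();   // PreVote phase, epoch unchanged
    //   state.transitionToCandidate();     // epoch is incremented by one
    //
    // while an equal-epoch transition back to Unattached is rejected with IllegalStateException.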
+ public void testCandidateToUnattachedSameEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(state.epoch(), OptionalInt.empty())); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToUnattachedHigherEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + + state.transitionToUnattached(5, OptionalInt.empty()); + assertEquals(5, state.epoch()); + assertEquals(OptionalInt.empty(), state.leaderId()); + assertEquals( + ElectionState.withUnknownLeader(5, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToFollowerSameEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + + state.transitionToFollower(state.epoch(), otherNodeId, voters.listeners(otherNodeId)); + assertEquals(1, state.epoch()); + assertEquals(OptionalInt.of(otherNodeId), state.leaderId()); + assertEquals( + ElectionState.withElectedLeader( + 1, + otherNodeId, + Optional.of(persistedVotedKey(localVoterKey, kraftVersion)), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToFollowerHigherEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + + state.transitionToFollower(5, otherNodeId, voters.listeners(otherNodeId)); + assertEquals(5, state.epoch()); + assertEquals(OptionalInt.of(otherNodeId), state.leaderId()); + assertEquals( + ElectionState.withElectedLeader( + 5, + otherNodeId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToProspective(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToProspective(); + int jitterMs1 = 2500; + random.mockNextInt(electionTimeoutMs, jitterMs1); + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertEquals(1, state.epoch()); + CandidateState candidate = 
state.candidateStateOrThrow(); + assertEquals( + electionTimeoutMs + jitterMs1, + candidate.remainingElectionTimeMs(time.milliseconds()) + ); + + // The election timeout should be reset after we transition to prospective + time.sleep(candidate.remainingElectionTimeMs(time.milliseconds())); + assertEquals(0, candidate.remainingElectionTimeMs(time.milliseconds())); + int jitterMs2 = 3000; + random.mockNextInt(electionTimeoutMs, jitterMs2); + state.transitionToProspective(); + ProspectiveState prospective = state.prospectiveStateOrThrow(); + assertEquals( + electionTimeoutMs + jitterMs2, + prospective.remainingElectionTimeMs(time.milliseconds()) + ); + assertEquals( + ElectionState.withVotedCandidate( + 1, + persistedVotedKey(localVoterKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToCandidate(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToProspective(); + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertThrows(IllegalStateException.class, () -> state.transitionToCandidate()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToLeader(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToProspective(); + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertEquals(1, state.epoch()); + + state.transitionToLeader(0L, accumulator); + LeaderState leaderState = state.leaderStateOrThrow(); + assertTrue(state.isLeader()); + assertEquals(1, leaderState.epoch()); + assertEquals(Optional.empty(), leaderState.highWatermark()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToLeaderWithoutGrantedVote(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + assertFalse(state.candidateStateOrThrow().epochElection().isVoteGranted()); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); + state.candidateStateOrThrow().recordGrantedVote(otherNodeId); + assertTrue(state.candidateStateOrThrow().epochElection().isVoteGranted()); + state.transitionToLeader(0L, accumulator); + assertTrue(state.isLeader()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToResigned(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.transitionToProspective(); + state.transitionToCandidate(); + assertTrue(state.isCandidate()); + assertEquals(1, state.epoch()); + + assertThrows( + IllegalStateException.class, () -> + 
state.transitionToResigned(Collections.emptyList()) + ); + assertTrue(state.isCandidate()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testCandidateToAnyStateLowerEpoch(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToUnattached(5, OptionalInt.empty()); + state.transitionToProspective(); + state.transitionToCandidate(); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4, OptionalInt.empty())); + assertThrows( + IllegalStateException.class, + () -> state.transitionToFollower(4, otherNodeKey.id(), voters.listeners(otherNodeKey.id())) + ); + assertEquals(6, state.epoch()); + assertEquals( + ElectionState.withVotedCandidate( + 6, + persistedVotedKey(localVoterKey, kraftVersion), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + /** + * Test transitions from Leader + */ + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToUnattachedSameEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + state.candidateStateOrThrow().recordGrantedVote(otherNodeId); + state.transitionToLeader(0L, accumulator); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(state.epoch(), OptionalInt.empty())); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToUnattachedHigherEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + state.candidateStateOrThrow().recordGrantedVote(otherNodeId); + state.transitionToLeader(0L, accumulator); + state.transitionToUnattached(5, OptionalInt.empty()); + assertEquals(5, state.epoch()); + assertEquals(OptionalInt.empty(), state.leaderId()); + assertEquals( + ElectionState.withUnknownLeader(5, persistedVoters(voters.voterIds(), kraftVersion)), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToFollowerSameEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + + state.transitionToProspective(); + state.transitionToCandidate(); + state.candidateStateOrThrow().recordGrantedVote(otherNodeId); + state.transitionToLeader(0L, accumulator); + assertThrows( + IllegalStateException.class, + () -> state.transitionToFollower(state.epoch(), otherNodeId, voters.listeners(otherNodeId)) + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToFollowerHigherEpoch(KRaftVersion kraftVersion) { + int otherNodeId = 1; + VoterSet voters = 
localWithRemoteVoterSet(IntStream.of(otherNodeId), kraftVersion); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + + state.transitionToProspective(); + state.transitionToCandidate(); + state.candidateStateOrThrow().recordGrantedVote(otherNodeId); + state.transitionToLeader(0L, accumulator); + state.transitionToFollower(5, otherNodeId, voters.listeners(otherNodeId)); + + assertEquals(5, state.epoch()); + assertEquals(OptionalInt.of(otherNodeId), state.leaderId()); + assertEquals( + ElectionState.withElectedLeader( + 5, + otherNodeId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToProspective(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + state.transitionToLeader(0L, accumulator); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); + + assertThrows(IllegalStateException.class, state::transitionToProspective); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToCandidate(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToProspective(); + state.transitionToCandidate(); + state.transitionToLeader(0L, accumulator); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); + + assertThrows(IllegalStateException.class, state::transitionToCandidate); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToLeader(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); + QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - assertTrue(state.isObserver()); - assertThrows(IllegalStateException.class, state::transitionToCandidate); + state.transitionToProspective(); + state.transitionToCandidate(); + state.transitionToLeader(0L, accumulator); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testObserverWithIdCanVote(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(otherNodeKey)); + public void testLeaderToResigned(KRaftVersion kraftVersion) { + VoterSet voters = localStandaloneVoterSet(); + assertEquals(Optional.empty(), store.readElectionState()); QuorumState state = initializeEmptyState(voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - assertTrue(state.isObserver()); + state.transitionToProspective(); + state.transitionToCandidate(); + state.transitionToLeader(0L, 
accumulator); + assertTrue(state.isLeader()); + assertEquals(1, state.epoch()); - state.transitionToUnattachedVotedState(5, otherNodeKey); - assertTrue(state.isUnattachedAndVoted()); + state.transitionToResigned(Collections.singletonList(localVoterKey)); + assertTrue(state.isResigned()); + ResignedState resignedState = state.resignedStateOrThrow(); + assertEquals( + ElectionState.withElectedLeader(1, localId, Optional.empty(), voters.voterIds()), + resignedState.election() + ); + assertEquals(1, resignedState.epoch()); + assertEquals(Collections.emptySet(), resignedState.unackedVoters()); + } - UnattachedState votedState = state.unattachedStateOrThrow(); - assertEquals(5, votedState.epoch()); - assertEquals(otherNodeKey, votedState.votedKey().get()); + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testLeaderToAnyStateLowerEpoch(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + state.transitionToUnattached(5, OptionalInt.empty()); + state.transitionToProspective(); + state.transitionToCandidate(); + state.candidateStateOrThrow().recordGrantedVote(otherNodeKey.id()); + state.transitionToLeader(0L, accumulator); + assertThrows(IllegalStateException.class, () -> state.transitionToUnattached(4, OptionalInt.empty())); + assertThrows( + IllegalStateException.class, + () -> state.transitionToFollower( + 4, + otherNodeKey.id(), + voters.listeners(otherNodeKey.id()) + ) + ); + assertEquals(6, state.epoch()); + assertEquals( + ElectionState.withElectedLeader( + 6, + localId, + Optional.empty(), + persistedVoters(voters.voterIds(), kraftVersion) + ), + store.readElectionState().get() + ); } + /** + * Test transitions from Resigned + */ + // KAFKA-18379 to fill in the rest of the cases @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testObserverFollowerToUnattached(KRaftVersion kraftVersion) { - boolean withDirectoryId = kraftVersion.featureLevel() > 0; + public void testResignedToFollowerInSameEpoch(KRaftVersion kraftVersion) { int node1 = 1; int node2 = 2; - VoterSet voters = VoterSetTest.voterSet( - VoterSetTest.voterMap(IntStream.of(node1, node2), withDirectoryId) - ); - QuorumState state = initializeEmptyState(voters, kraftVersion); + int epoch = 5; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + ElectionState election = ElectionState.withElectedLeader(epoch, localId, Optional.empty(), voters.voterIds()); + store.writeElectionState(election, kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isResigned()); + assertThrows(IllegalStateException.class, () -> state.transitionToFollower(epoch, localId, voters.listeners(localId))); + // KAFKA-18379 will fix this + state.transitionToFollower(epoch, node1, voters.listeners(node1)); + } + + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testResignedToUnattachedInHigherEpoch(KRaftVersion kraftVersion) { + int node1 = 1; + int node2 = 2; + int epoch = 5; + VoterSet voters = localWithRemoteVoterSet(IntStream.of(node1, node2), kraftVersion); + ElectionState election = ElectionState.withElectedLeader(epoch, localId, Optional.empty(), voters.voterIds()); + 
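    // Persisting an elected-leader entry for the local replica id is what causes initialize()
    // below to restore this voter in the Resigned state, which these Resigned-transition tests
    // rely on.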
store.writeElectionState(election, kraftVersion); + QuorumState state = buildQuorumState(OptionalInt.of(localId), voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isResigned()); + state.transitionToUnattached(epoch + 1, OptionalInt.empty()); + } + + /** + * Test transitions from Observer as Unattached + */ + @ParameterizedTest + @EnumSource(value = KRaftVersion.class) + public void testObserverUnattachedToUnattachedVoted(KRaftVersion kraftVersion) { + ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); + VoterSet voters = VoterSetTest.voterSet(Stream.of(otherNodeKey)); + + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, 5)); assertTrue(state.isObserver()); - state.transitionToFollower( - 2, - node1, - voters.listeners(node1) - ); - state.transitionToUnattached(3); - assertTrue(state.isUnattached()); - UnattachedState unattachedState = state.unattachedStateOrThrow(); - assertEquals(3, unattachedState.epoch()); + state.unattachedAddVotedState(5, otherNodeKey); - // Observers can remain in the unattached state indefinitely until a leader is found - assertEquals(Long.MAX_VALUE, unattachedState.electionTimeoutMs()); + UnattachedState votedState = state.unattachedStateOrThrow(); + assertTrue(state.isUnattachedAndVoted()); + assertTrue(state.isObserver()); + assertEquals(5, votedState.epoch()); + assertEquals(otherNodeKey, votedState.votedKey().get()); } @ParameterizedTest @@ -1564,7 +2570,7 @@ public void testObserverUnattachedToFollower(KRaftVersion kraftVersion) { state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); assertTrue(state.isObserver()); - state.transitionToUnattached(2); + state.transitionToUnattached(2, OptionalInt.empty()); state.transitionToFollower(3, node1, voters.listeners(node1)); assertTrue(state.isFollower()); FollowerState followerState = state.followerStateOrThrow(); @@ -1578,136 +2584,92 @@ public void testObserverUnattachedToFollower(KRaftVersion kraftVersion) { @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testInitializeWithCorruptedStore(KRaftVersion kraftVersion) { - QuorumStateStore stateStore = Mockito.mock(QuorumStateStore.class); - Mockito.doThrow(UncheckedIOException.class).when(stateStore).readElectionState(); - - QuorumState state = buildQuorumState( - OptionalInt.of(localId), - localStandaloneVoterSet(), - kraftVersion - ); - - int epoch = 2; - state.initialize(new OffsetAndEpoch(0L, epoch)); - assertEquals(epoch, state.epoch()); - assertTrue(state.isUnattached()); - assertFalse(state.hasLeader()); - } - - @ParameterizedTest - @EnumSource(value = KRaftVersion.class) - public void testHasRemoteLeader(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - + public void testObserverUnattachedToProspective(KRaftVersion kraftVersion) { + int voter1 = 1; + int voter2 = 2; + VoterSet voters = withRemoteVoterSet(IntStream.of(voter1, voter2), kraftVersion); QuorumState state = initializeEmptyState(voters, kraftVersion); - assertFalse(state.hasRemoteLeader()); - - state.transitionToCandidate(); - assertFalse(state.hasRemoteLeader()); - - state.candidateStateOrThrow().recordGrantedVote(otherNodeKey.id()); - state.transitionToLeader(0L, accumulator); - assertFalse(state.hasRemoteLeader()); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - state.transitionToUnattached(state.epoch() + 1); - 
assertFalse(state.hasRemoteLeader()); + assertTrue(state.isObserver()); + assertTrue(state.isUnattachedNotVoted()); + assertThrows(IllegalStateException.class, state::transitionToProspective); - state.transitionToUnattachedVotedState(state.epoch() + 1, otherNodeKey); - assertFalse(state.hasRemoteLeader()); + state.unattachedAddVotedState(logEndEpoch, ReplicaKey.of(voter1, ReplicaKey.NO_DIRECTORY_ID)); + assertTrue(state.isUnattachedAndVoted()); + assertTrue(state.isObserver()); + assertThrows(IllegalStateException.class, state::transitionToProspective); - state.transitionToFollower( - state.epoch() + 1, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) - ); - assertTrue(state.hasRemoteLeader()); + state.transitionToFollower(logEndEpoch, voter2, voters.listeners(voter2)); + assertTrue(state.isFollower()); + assertTrue(state.isObserver()); + assertTrue(state.votedKey().isPresent()); + assertTrue(state.hasLeader()); + assertThrows(IllegalStateException.class, state::transitionToProspective); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testHighWatermarkRetained(KRaftVersion kraftVersion) { - ReplicaKey otherNodeKey = ReplicaKey.of(1, Uuid.randomUuid()); - VoterSet voters = VoterSetTest.voterSet(Stream.of(localVoterKey, otherNodeKey)); - - QuorumState state = initializeEmptyState(voters, kraftVersion); - state.transitionToFollower( - 5, - otherNodeKey.id(), - voters.listeners(otherNodeKey.id()) + public void testObserverUnattachedToCandidateOrLeaderOrResigned(KRaftVersion kraftVersion) { + boolean withDirectoryId = kraftVersion.featureLevel() > 0; + int otherNodeId = 1; + VoterSet voters = VoterSetTest.voterSet( + VoterSetTest.voterMap(IntStream.of(otherNodeId), withDirectoryId) ); - - FollowerState followerState = state.followerStateOrThrow(); - followerState.updateHighWatermark(OptionalLong.of(10L)); - - Optional highWatermark = Optional.of(new LogOffsetMetadata(10L)); - assertEquals(highWatermark, state.highWatermark()); - - state.transitionToUnattached(6); - assertEquals(highWatermark, state.highWatermark()); - - state.transitionToUnattachedVotedState(7, otherNodeKey); - assertEquals(highWatermark, state.highWatermark()); - - state.transitionToCandidate(); - assertEquals(highWatermark, state.highWatermark()); - - CandidateState candidateState = state.candidateStateOrThrow(); - candidateState.recordGrantedVote(otherNodeKey.id()); - assertTrue(candidateState.isVoteGranted()); - - state.transitionToLeader(10L, accumulator); - assertEquals(Optional.empty(), state.highWatermark()); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); + assertTrue(state.isObserver()); + assertThrows(IllegalStateException.class, state::transitionToCandidate); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); } + /** + * Test transitions from Observer as Follower + */ @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testInitializeWithEmptyLocalId(KRaftVersion kraftVersion) { + public void testObserverFollowerToUnattached(KRaftVersion kraftVersion) { boolean withDirectoryId = kraftVersion.featureLevel() > 0; + int node1 = 1; + int node2 = 2; VoterSet voters = VoterSetTest.voterSet( - VoterSetTest.voterMap(IntStream.of(0, 1), withDirectoryId) + VoterSetTest.voterMap(IntStream.of(node1, node2), withDirectoryId) ); - QuorumState state = 
buildQuorumState(OptionalInt.empty(), voters, kraftVersion); - state.initialize(new OffsetAndEpoch(0L, 0)); - + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); assertTrue(state.isObserver()); - assertFalse(state.isVoter()); - assertThrows(IllegalStateException.class, state::transitionToCandidate); - assertThrows( - IllegalStateException.class, - () -> state.transitionToUnattachedVotedState(1, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID)) + state.transitionToFollower( + 2, + node1, + voters.listeners(node1) ); - assertThrows(IllegalStateException.class, () -> state.transitionToLeader(0L, accumulator)); - - state.transitionToFollower(1, 1, voters.listeners(1)); - assertTrue(state.isFollower()); - - state.transitionToUnattached(2); + state.transitionToUnattached(3, OptionalInt.empty()); assertTrue(state.isUnattached()); + UnattachedState unattachedState = state.unattachedStateOrThrow(); + assertEquals(3, unattachedState.epoch()); + + // Observers can remain in the unattached state indefinitely until a leader is found + assertEquals(Long.MAX_VALUE, unattachedState.electionTimeoutMs()); } @ParameterizedTest @EnumSource(value = KRaftVersion.class) - public void testNoLocalIdInitializationFailsIfElectionStateHasVotedCandidate(KRaftVersion kraftVersion) { - boolean withDirectoryId = kraftVersion.featureLevel() > 0; - int epoch = 5; - int votedId = 1; - VoterSet voters = VoterSetTest.voterSet( - VoterSetTest.voterMap(IntStream.of(0, votedId), withDirectoryId) - ); + public void testObserverFollowerToProspectiveOrCandidateOrLeaderOrResigned(KRaftVersion kraftVersion) { + int voter1 = 1; + int voter2 = 2; + VoterSet voters = withRemoteVoterSet(IntStream.of(voter1, voter2), kraftVersion); + QuorumState state = initializeEmptyState(voters, kraftVersion); + state.initialize(new OffsetAndEpoch(0L, logEndEpoch)); - store.writeElectionState( - ElectionState.withVotedCandidate( - epoch, - ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), - voters.voterIds() - ), - kraftVersion - ); + state.transitionToFollower(logEndEpoch, voter1, voters.listeners(voter1)); + assertTrue(state.isObserver()); + assertEquals(fetchTimeoutMs, state.followerStateOrThrow().remainingFetchTimeMs(time.milliseconds())); - QuorumState state2 = buildQuorumState(OptionalInt.empty(), voters, kraftVersion); - assertThrows(IllegalStateException.class, () -> state2.initialize(new OffsetAndEpoch(0, 0))); + assertThrows(IllegalStateException.class, state::transitionToProspective); + assertThrows(IllegalStateException.class, state::transitionToCandidate); + assertThrows(IllegalStateException.class, () -> state.transitionToLeader(logEndEpoch + 1, accumulator)); + assertThrows(IllegalStateException.class, () -> state.transitionToResigned(Collections.emptyList())); } } diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index dc8e978abfcb7..ee840ff59ed77 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -65,7 +65,7 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.BatchBuilder; import org.apache.kafka.raft.internals.StringSerde; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import 
org.apache.kafka.server.common.serialization.RecordSerde; import org.apache.kafka.snapshot.RecordsSnapshotWriter; @@ -99,6 +99,7 @@ import java.util.stream.Stream; import static org.apache.kafka.raft.LeaderState.CHECK_QUORUM_TIMEOUT_FACTOR; +import static org.apache.kafka.raft.RaftClientTestContext.RaftProtocol.KIP_853_PROTOCOL; import static org.apache.kafka.raft.RaftUtil.hasValidTopicPartition; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -137,7 +138,7 @@ public final class RaftClientTestContext { final VoterSet startingVoters; final Set bootstrapIds; // Used to determine which RPC request and response to construct - final boolean kip853Rpc; + final RaftProtocol raftProtocol; // Used to determine if the local kraft client was configured to always flush final boolean alwaysFlush; @@ -172,7 +173,7 @@ public static final class Builder { private int appendLingerMs = DEFAULT_APPEND_LINGER_MS; private MemoryPool memoryPool = MemoryPool.NONE; private Optional> bootstrapServers = Optional.empty(); - private boolean kip853Rpc = false; + private RaftProtocol raftProtocol = RaftProtocol.KIP_595_PROTOCOL; private boolean alwaysFlush = false; private VoterSet startingVoters = VoterSet.empty(); private Endpoints localListeners = Endpoints.empty(); @@ -199,7 +200,7 @@ public Builder(OptionalInt localId, Uuid localDirectoryId) { Builder withElectedLeader(int epoch, int leaderId) { quorumStateStore.writeElectionState( - ElectionState.withElectedLeader(epoch, leaderId, startingVoters.voterIds()), + ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), startingVoters.voterIds()), kraftVersion ); return this; @@ -292,8 +293,16 @@ Builder withBootstrapServers(Optional> bootstrapServers) return this; } - Builder withKip853Rpc(boolean kip853Rpc) { - this.kip853Rpc = kip853Rpc; + // deprecated, use withRpc instead + Builder withKip853Rpc(boolean withKip853Rpc) { + if (withKip853Rpc) { + this.raftProtocol = KIP_853_PROTOCOL; + } + return this; + } + + Builder withRaftProtocol(RaftProtocol raftProtocol) { + this.raftProtocol = raftProtocol; return this; } @@ -302,6 +311,14 @@ Builder withAlwaysFlush(boolean alwaysFlush) { return this; } + Builder withStartingVoters(VoterSet voters, KRaftVersion kraftVersion) { + if (kraftVersion.isReconfigSupported()) { + return withBootstrapSnapshot(Optional.of(voters)); + } else { + return withStaticVoters(voters.voterIds()); + } + } + Builder withStaticVoters(Set staticVoters) { Map staticVoterAddressMap = staticVoters .stream() @@ -330,7 +347,7 @@ Builder withBootstrapSnapshot(Optional voters) { isStartingVotersStatic = false; if (voters.isPresent()) { - kraftVersion = KRaftVersion.KRAFT_VERSION_1; + kraftVersion = KRaftVersion.LATEST_PRODUCTION; RecordsSnapshotWriter.Builder builder = new RecordsSnapshotWriter.Builder() .setRawSnapshotWriter( @@ -420,7 +437,7 @@ public RaftClientTestContext build() throws IOException { clusterId, computedBootstrapServers, localListeners, - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), logContext, random, quorumConfig @@ -450,7 +467,7 @@ public RaftClientTestContext build() throws IOException { .limit(bootstrapServers.map(List::size).orElse(0)) .boxed() .collect(Collectors.toSet()), - kip853Rpc, + raftProtocol, alwaysFlush, metrics, listener @@ -478,7 +495,7 @@ private RaftClientTestContext( QuorumStateStore quorumStateStore, VoterSet startingVoters, Set bootstrapIds, - boolean kip853Rpc, + 
RaftProtocol raftProtocol, boolean alwaysFlush, Metrics metrics, MockListener listener @@ -495,7 +512,7 @@ private RaftClientTestContext( this.quorumStateStore = quorumStateStore; this.startingVoters = startingVoters; this.bootstrapIds = bootstrapIds; - this.kip853Rpc = kip853Rpc; + this.raftProtocol = raftProtocol; this.alwaysFlush = alwaysFlush; this.metrics = metrics; this.listener = listener; @@ -545,23 +562,14 @@ static MemoryRecords buildBatch( return builder.build(); } - static RaftClientTestContext initializeAsLeader(int localId, Set voters, int epoch) throws Exception { - if (epoch <= 0) { - throw new IllegalArgumentException("Cannot become leader in epoch " + epoch); - } - - RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) - .withUnknownLeader(epoch - 1) - .build(); - - context.assertUnknownLeader(epoch - 1); - context.becomeLeader(); - return context; + public void unattachedToCandidate() throws Exception { + time.sleep(electionTimeoutMs * 2L); + expectAndGrantPreVotes(currentEpoch()); } - public void becomeLeader() throws Exception { + public void unattachedToLeader() throws Exception { int currentEpoch = currentEpoch(); - time.sleep(electionTimeoutMs * 2L); + unattachedToCandidate(); expectAndGrantVotes(currentEpoch + 1); expectBeginEpoch(currentEpoch + 1); } @@ -594,12 +602,38 @@ void expectAndGrantVotes(int epoch) throws Exception { assertElectedLeader(epoch, localIdOrThrow()); } + void expectAndGrantPreVotes(int epoch) throws Exception { + pollUntilRequest(); + + List voteRequests = collectPreVoteRequests( + epoch, + log.lastFetchedEpoch(), + log.endOffset().offset() + ); + + for (RaftRequest.Outbound request : voteRequests) { + if (!raftProtocol.isPreVoteSupported()) { + deliverResponse( + request.correlationId(), + request.destination(), + RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ); + } else { + VoteResponseData voteResponse = voteResponse(true, OptionalInt.empty(), epoch); + deliverResponse(request.correlationId(), request.destination(), voteResponse); + } + } + + client.poll(); + assertTrue(client.quorum().isCandidate()); + } + private int localIdOrThrow() { return localId.orElseThrow(() -> new AssertionError("Required local id is not defined")); } public ReplicaKey localReplicaKey() { - return kip853Rpc ? + return raftProtocol.isReconfigSupported() ? ReplicaKey.of(localIdOrThrow(), localDirectoryId) : ReplicaKey.of(localIdOrThrow(), ReplicaKey.NO_DIRECTORY_ID); } @@ -629,40 +663,67 @@ void pollUntilRequest() throws InterruptedException { } void assertVotedCandidate(int epoch, int candidateId) { + ReplicaKey candidateKey = ReplicaKey.of(candidateId, ReplicaKey.NO_DIRECTORY_ID); + assertVotedCandidate(epoch, candidateKey); + } + + void assertVotedCandidate(int epoch, ReplicaKey candidateKey) { assertEquals( ElectionState.withVotedCandidate( epoch, - ReplicaKey.of(candidateId, ReplicaKey.NO_DIRECTORY_ID), - startingVoters.voterIds() + persistedVotedKey(candidateKey, kraftVersion), + expectedVoters() ), quorumStateStore.readElectionState().get() ); } public void assertElectedLeader(int epoch, int leaderId) { - Set voters = kraftVersion.isReconfigSupported() ? 
- Collections.emptySet() : startingVoters.voterIds(); assertEquals( - ElectionState.withElectedLeader(epoch, leaderId, voters), + ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), expectedVoters()), quorumStateStore.readElectionState().get() ); } - void assertUnknownLeader(int epoch) { + public void assertElectedLeaderAndVotedKey(int epoch, int leaderId, ReplicaKey candidateKey) { assertEquals( - ElectionState.withUnknownLeader(epoch, startingVoters.voterIds()), + ElectionState.withElectedLeader( + epoch, + leaderId, + Optional.of(persistedVotedKey(candidateKey, kraftVersion)), + expectedVoters() + ), quorumStateStore.readElectionState().get() ); } + private static ReplicaKey persistedVotedKey(ReplicaKey replicaKey, KRaftVersion kraftVersion) { + if (kraftVersion.isReconfigSupported()) { + return replicaKey; + } + + return ReplicaKey.of(replicaKey.id(), ReplicaKey.NO_DIRECTORY_ID); + } + + void assertUnknownLeaderAndNoVotedCandidate(int epoch) { + assertEquals( + ElectionState.withUnknownLeader(epoch, expectedVoters()), + quorumStateStore.readElectionState().get()); + } + void assertResignedLeader(int epoch, int leaderId) { assertTrue(client.quorum().isResigned()); assertEquals( - ElectionState.withElectedLeader(epoch, leaderId, startingVoters.voterIds()), + ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), expectedVoters()), quorumStateStore.readElectionState().get() ); } + // Voters are only written to ElectionState in KRaftVersion 0 + private Set expectedVoters() { + return kraftVersion.isReconfigSupported() ? Collections.emptySet() : startingVoters.voterIds(); + } + DescribeQuorumResponseData collectDescribeQuorumResponse() { List sentMessages = drainSentResponses(ApiKeys.DESCRIBE_QUORUM); assertEquals(1, sentMessages.size()); @@ -737,6 +798,12 @@ void assertSentDescribeQuorumResponse( assertEquals(expectedResponse, response); } + RaftRequest.Outbound assertSentPreVoteRequest(int epoch, int lastEpoch, long lastEpochOffset, int numVoteReceivers) { + List voteRequests = collectPreVoteRequests(epoch, lastEpoch, lastEpochOffset); + assertEquals(numVoteReceivers, voteRequests.size()); + return voteRequests.iterator().next(); + } + RaftRequest.Outbound assertSentVoteRequest(int epoch, int lastEpoch, long lastEpochOffset, int numVoteReceivers) { List voteRequests = collectVoteRequests(epoch, lastEpoch, lastEpochOffset); assertEquals(numVoteReceivers, voteRequests.size()); @@ -768,14 +835,14 @@ void assertSentVoteResponse( VoteResponseData.PartitionData partitionResponse = response.topics().get(0).partitions().get(0); - String voterIdDebugLog = "Leader Id: " + leaderId + + String leaderIdDebugLog = "Leader Id: " + leaderId + " Partition response leader Id: " + partitionResponse.leaderId(); - assertEquals(voteGranted, partitionResponse.voteGranted(), voterIdDebugLog); - assertEquals(error, Errors.forCode(partitionResponse.errorCode()), voterIdDebugLog); - assertEquals(leaderId.orElse(-1), partitionResponse.leaderId()); + assertEquals(voteGranted, partitionResponse.voteGranted()); + assertEquals(error, Errors.forCode(partitionResponse.errorCode())); + assertEquals(leaderId.orElse(-1), partitionResponse.leaderId(), leaderIdDebugLog); assertEquals(epoch, partitionResponse.leaderEpoch()); - if (kip853Rpc && leaderId.isPresent()) { + if (raftProtocol.isReconfigSupported() && leaderId.isPresent()) { Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId.getAsInt()); Endpoints responseEndpoints = Endpoints.fromVoteResponse( channel.listenerName(), @@ 
-786,7 +853,7 @@ void assertSentVoteResponse( } } - List collectVoteRequests( + List collectPreVoteRequests( int epoch, int lastEpoch, long lastEpochOffset @@ -797,8 +864,30 @@ List collectVoteRequests( VoteRequestData request = (VoteRequestData) raftMessage.data(); VoteRequestData.PartitionData partitionRequest = unwrap(request); - assertEquals(epoch, partitionRequest.candidateEpoch()); - assertEquals(localIdOrThrow(), partitionRequest.candidateId()); + assertTrue(partitionRequest.preVote()); + assertEquals(epoch, partitionRequest.replicaEpoch()); + assertEquals(localIdOrThrow(), partitionRequest.replicaId()); + assertEquals(lastEpoch, partitionRequest.lastOffsetEpoch()); + assertEquals(lastEpochOffset, partitionRequest.lastOffset()); + voteRequests.add(raftMessage); + } + } + return voteRequests; + } + + List collectVoteRequests( + int epoch, + int lastEpoch, + long lastEpochOffset + ) { + List voteRequests = new ArrayList<>(); + for (RaftRequest.Outbound raftMessage : channel.drainSendQueue()) { + if (raftMessage.data() instanceof VoteRequestData request) { + VoteRequestData.PartitionData partitionRequest = unwrap(request); + + assertFalse(partitionRequest.preVote()); + assertEquals(epoch, partitionRequest.replicaEpoch()); + assertEquals(localIdOrThrow(), partitionRequest.replicaId()); assertEquals(lastEpoch, partitionRequest.lastOffsetEpoch()); assertEquals(lastEpochOffset, partitionRequest.lastOffset()); voteRequests.add(raftMessage); @@ -891,7 +980,7 @@ void assertSentBeginQuorumEpochResponse( .get(0) .partitions() .get(0); - if (kip853Rpc && partitionResponse.leaderId() >= 0) { + if (raftProtocol.isReconfigSupported() && partitionResponse.leaderId() >= 0) { int leaderId = partitionResponse.leaderId(); Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId); Endpoints responseEndpoints = Endpoints.fromBeginQuorumEpochResponse( @@ -928,7 +1017,7 @@ void assertSentBeginQuorumEpochResponse( " Partition response leader Id: " + partitionResponse.leaderId() ); - if (kip853Rpc && leaderId.isPresent()) { + if (raftProtocol.isReconfigSupported() && leaderId.isPresent()) { Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId.getAsInt()); Endpoints responseEndpoints = Endpoints.fromBeginQuorumEpochResponse( channel.listenerName(), @@ -965,7 +1054,7 @@ void assertSentEndQuorumEpochResponse( .get(0) .partitions() .get(0); - if (kip853Rpc && partitionResponse.leaderId() >= 0) { + if (raftProtocol.isReconfigSupported() && partitionResponse.leaderId() >= 0) { int leaderId = partitionResponse.leaderId(); Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId); Endpoints responseEndpoints = Endpoints.fromEndQuorumEpochResponse( @@ -997,7 +1086,7 @@ void assertSentEndQuorumEpochResponse( assertEquals(leaderId.orElse(-1), partitionResponse.leaderId()); assertEquals(partitionError, Errors.forCode(partitionResponse.errorCode())); - if (kip853Rpc && leaderId.isPresent()) { + if (raftProtocol.isReconfigSupported() && leaderId.isPresent()) { Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId.getAsInt()); Endpoints responseEndpoints = Endpoints.fromEndQuorumEpochResponse( channel.listenerName(), @@ -1041,7 +1130,7 @@ FetchResponseData.PartitionData assertSentFetchPartitionResponse() { assertEquals(1, response.responses().get(0).partitions().size()); FetchResponseData.PartitionData partitionResponse = response.responses().get(0).partitions().get(0); - if (kip853Rpc && partitionResponse.currentLeader().leaderId() >= 0) { + if 
(raftProtocol.isReconfigSupported() && partitionResponse.currentLeader().leaderId() >= 0) { int leaderId = partitionResponse.currentLeader().leaderId(); Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId); Endpoints responseEndpoints = Endpoints.fromFetchResponse( @@ -1131,7 +1220,7 @@ Optional assertSentFetchSnapshotRes Optional result = FetchSnapshotResponse.forTopicPartition(response, topicPartition); - if (result.isPresent() && kip853Rpc && result.get().currentLeader().leaderId() >= 0) { + if (result.isPresent() && raftProtocol.isReconfigSupported() && result.get().currentLeader().leaderId() >= 0) { int leaderId = result.get().currentLeader().leaderId(); Endpoints expectedLeaderEndpoints = startingVoters.listeners(leaderId); Endpoints responseEndpoints = Endpoints.fromFetchSnapshotResponse( @@ -1247,8 +1336,7 @@ List collectEndQuorumRequests( .map(list -> list.stream().map(ReplicaKey::id).collect(Collectors.toList())); for (RaftRequest.Outbound raftMessage : channel.drainSendQueue()) { - if (raftMessage.data() instanceof EndQuorumEpochRequestData) { - EndQuorumEpochRequestData request = (EndQuorumEpochRequestData) raftMessage.data(); + if (raftMessage.data() instanceof EndQuorumEpochRequestData request) { EndQuorumEpochRequestData.PartitionData partitionRequest = request.topics().get(0).partitions().get(0); @@ -1369,7 +1457,7 @@ BeginQuorumEpochRequestData beginEpochRequest(int epoch, int leaderId) { } BeginQuorumEpochRequestData beginEpochRequest(int epoch, int leaderId, Endpoints endpoints) { - ReplicaKey localReplicaKey = kip853Rpc ? + ReplicaKey localReplicaKey = raftProtocol.isReconfigSupported() ? ReplicaKey.of(localIdOrThrow(), localDirectoryId) : ReplicaKey.of(-1, ReplicaKey.NO_DIRECTORY_ID); @@ -1377,7 +1465,7 @@ BeginQuorumEpochRequestData beginEpochRequest(int epoch, int leaderId, Endpoints } BeginQuorumEpochRequestData beginEpochRequest(String clusterId, int epoch, int leaderId) { - ReplicaKey localReplicaKey = kip853Rpc ? + ReplicaKey localReplicaKey = raftProtocol.isReconfigSupported() ? ReplicaKey.of(localIdOrThrow(), localDirectoryId) : ReplicaKey.of(-1, ReplicaKey.NO_DIRECTORY_ID); @@ -1440,7 +1528,24 @@ VoteRequestData voteRequest( epoch, candidateKey, lastEpoch, - lastEpochOffset + lastEpochOffset, + false + ); + } + + VoteRequestData preVoteRequest( + int epoch, + ReplicaKey candidateKey, + int lastEpoch, + long lastEpochOffset + ) { + return voteRequest( + clusterId, + epoch, + candidateKey, + lastEpoch, + lastEpochOffset, + true ); } @@ -1449,9 +1554,10 @@ VoteRequestData voteRequest( int epoch, ReplicaKey candidateKey, int lastEpoch, - long lastEpochOffset + long lastEpochOffset, + boolean preVote ) { - ReplicaKey localReplicaKey = kip853Rpc ? + ReplicaKey localReplicaKey = raftProtocol.isReconfigSupported() ? 
ReplicaKey.of(localIdOrThrow(), localDirectoryId) : ReplicaKey.of(-1, ReplicaKey.NO_DIRECTORY_ID); @@ -1461,7 +1567,8 @@ VoteRequestData voteRequest( candidateKey, localReplicaKey, lastEpoch, - lastEpochOffset + lastEpochOffset, + preVote ); } @@ -1471,7 +1578,8 @@ VoteRequestData voteRequest( ReplicaKey candidateKey, ReplicaKey voterKey, int lastEpoch, - long lastEpochOffset + long lastEpochOffset, + boolean preVote ) { return RaftUtil.singletonVoteRequest( metadataPartition, @@ -1480,17 +1588,26 @@ VoteRequestData voteRequest( candidateKey, voterKey, lastEpoch, - lastEpochOffset + lastEpochOffset, + preVote ); } VoteResponseData voteResponse(boolean voteGranted, OptionalInt leaderId, int epoch) { + return voteResponse(Errors.NONE, voteGranted, leaderId, epoch, voteRpcVersion()); + } + + VoteResponseData voteResponse(Errors error, OptionalInt leaderId, int epoch) { + return voteResponse(error, false, leaderId, epoch, voteRpcVersion()); + } + + VoteResponseData voteResponse(Errors error, boolean voteGranted, OptionalInt leaderId, int epoch, short version) { return RaftUtil.singletonVoteResponse( channel.listenerName(), - voteRpcVersion(), + version, Errors.NONE, metadataPartition, - Errors.NONE, + error, epoch, leaderId.orElse(-1), voteGranted, @@ -1618,7 +1735,7 @@ FetchRequestData fetchRequest( .setCurrentLeaderEpoch(epoch) .setLastFetchedEpoch(lastFetchedEpoch) .setFetchOffset(fetchOffset); - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { fetchPartition .setReplicaDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); } @@ -1803,7 +1920,7 @@ UpdateRaftVoterResponseData updateVoterResponse( } private short fetchRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 17; } else { return 16; @@ -1811,15 +1928,17 @@ private short fetchRpcVersion() { } private short fetchSnapshotRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 1; } else { return 0; } } - private short voteRpcVersion() { - if (kip853Rpc) { + short voteRpcVersion() { + if (raftProtocol.isPreVoteSupported()) { + return 2; + } else if (raftProtocol.isReconfigSupported()) { return 1; } else { return 0; @@ -1827,7 +1946,7 @@ private short voteRpcVersion() { } private short beginQuorumEpochRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 1; } else { return 0; @@ -1835,7 +1954,7 @@ private short beginQuorumEpochRpcVersion() { } private short endQuorumEpochRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 1; } else { return 0; @@ -1843,7 +1962,7 @@ private short endQuorumEpochRpcVersion() { } private short describeQuorumRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 2; } else { return 1; @@ -1851,26 +1970,26 @@ private short describeQuorumRpcVersion() { } private short addVoterRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 0; } else { - throw new IllegalStateException("Reconfiguration must be enabled by calling withKip853Rpc(true)"); + throw new IllegalStateException("Reconfiguration must be enabled by calling withRaftProtocol(KIP_853_PROTOCOL)"); } } private short removeVoterRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 0; } else { - throw new IllegalStateException("Reconfiguration must be enabled by calling withKip853Rpc(true)"); + throw new IllegalStateException("Reconfiguration must be enabled by calling withRaftProtocol(KIP_853_PROTOCOL)"); } } 
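    // For reference, a sketch of how RaftProtocol maps to the RPC versions used by this test
    // context (derived from the version helpers in this class): Vote is v2 once PreVote
    // (KIP-996) is supported, v1 with reconfig (KIP-853), otherwise v0; Fetch is v17 with
    // reconfig, otherwise v16; FetchSnapshot, BeginQuorumEpoch and EndQuorumEpoch are v1 vs v0;
    // DescribeQuorum is v2 vs v1; and the AddVoter, RemoveVoter and UpdateVoter RPCs are only
    // available once KIP-853 reconfiguration is enabled.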
private short updateVoterRpcVersion() { - if (kip853Rpc) { + if (raftProtocol.isReconfigSupported()) { return 0; } else { - throw new IllegalStateException("Reconfiguration must be enabled by calling withKip853Rpc(true)"); + throw new IllegalStateException("Reconfiguration must be enabled by calling withRaftProtocol(KIP_853_PROTOCOL)"); } } @@ -2094,4 +2213,33 @@ public void handleLoadSnapshot(SnapshotReader reader) { snapshot = Optional.of(reader); } } + + /** + * Determines what versions of RPCs are in use. Note, these are ordered from oldest to newest, and are + * cumulative. E.g. KIP_996_PROTOCOL includes KIP_853_PROTOCOL and KIP_595_PROTOCOL changes + */ + enum RaftProtocol { + // kraft support + KIP_595_PROTOCOL, + // dynamic quorum reconfiguration support + KIP_853_PROTOCOL, + // preVote support + KIP_996_PROTOCOL; + + boolean isKRaftSupported() { + return isAtLeast(KIP_595_PROTOCOL); + } + + boolean isReconfigSupported() { + return isAtLeast(KIP_853_PROTOCOL); + } + + boolean isPreVoteSupported() { + return isAtLeast(KIP_996_PROTOCOL); + } + + private boolean isAtLeast(RaftProtocol otherRpc) { + return this.compareTo(otherRpc) >= 0; + } + } } diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java b/raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java index 91bb6de70b076..48d08378c4f48 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java @@ -35,7 +35,7 @@ import org.apache.kafka.raft.MockLog.LogBatch; import org.apache.kafka.raft.MockLog.LogEntry; import org.apache.kafka.raft.internals.BatchMemoryPool; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.serialization.RecordSerde; import org.apache.kafka.snapshot.RecordsSnapshotReader; import org.apache.kafka.snapshot.SnapshotReader; @@ -295,6 +295,111 @@ void canMakeProgressIfMajorityIsReachable( scheduler.runUntil(() -> cluster.allReachedHighWatermark(2 * restoredLogEndOffset)); } + @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY) + void leadershipAssignedOnlyOnceWithNetworkPartitionIfThereExistsMajority( + @ForAll int seed, + @ForAll @IntRange(min = 0, max = 3) int numObservers + ) { + int numVoters = 5; + Random random = new Random(seed); + Cluster cluster = new Cluster(numVoters, numObservers, random); + MessageRouter router = new MessageRouter(cluster); + EventScheduler scheduler = schedulerWithDefaultInvariants(cluster); + scheduler.addInvariant(new StableLeadership(cluster)); + + // Create network partition which would result in ping-pong of leadership between nodes 2 and 3 without PreVote + // Scenario explained in detail in KIP-996 + // 0 1 + // | | + // 2 - 3 + // \ / + // 4 + router.filter( + 0, + new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(1, 3, 4))) + ); + router.filter( + 1, + new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(0, 2, 4))) + ); + router.filter(2, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(1)))); + router.filter(3, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(0)))); + router.filter(4, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(0, 1)))); + + // Start cluster + cluster.startAll(); + schedulePolling(scheduler, cluster, 3, 5); + scheduler.schedule(router::deliverAll, 0, 2, 1); + scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 1); + 
scheduler.runUntil(cluster::hasConsistentLeader); + + // Check that leadership remains stable after majority processes some data + int leaderId = cluster.latestLeader().getAsInt(); + // Determine the voters in the majority based on the leader + Set majority = new HashSet<>(Set.of(0, 1, 2, 3, 4)); + switch (leaderId) { + case 2 -> majority.remove(1); + case 3 -> majority.remove(0); + case 4 -> { + majority.remove(0); + majority.remove(1); + } + default -> throw new IllegalStateException("Unexpected leader: " + leaderId); + } + scheduler.runUntil(() -> cluster.allReachedHighWatermark(20, majority)); + } + + @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY) + void leadershipWillNotChangeDuringNetworkPartitionIfMajorityStillReachable( + @ForAll int seed, + @ForAll @IntRange(min = 0, max = 3) int numObservers + ) { + int numVoters = 5; + Random random = new Random(seed); + Cluster cluster = new Cluster(numVoters, numObservers, random); + MessageRouter router = new MessageRouter(cluster); + EventScheduler scheduler = schedulerWithDefaultInvariants(cluster); + scheduler.addInvariant(new StableLeadership(cluster)); + + // Seed the cluster with some data + cluster.startAll(); + schedulePolling(scheduler, cluster, 3, 5); + scheduler.schedule(router::deliverAll, 0, 2, 1); + scheduler.schedule(new SequentialAppendAction(cluster), 0, 2, 1); + scheduler.runUntil(cluster::hasConsistentLeader); + scheduler.runUntil(() -> cluster.allReachedHighWatermark(5)); + + int leaderId = cluster.latestLeader().orElseThrow(() -> + new AssertionError("Failed to find current leader during setup") + ); + + // Create network partition which would result in ping-pong of leadership between nodes C and D without PreVote + // Scenario explained in detail in KIP-996 + // A B + // | | + // C - D (have leader start in position C) + // \ / + // E + int nodeA = (leaderId + 1) % numVoters; + int nodeB = (leaderId + 2) % numVoters; + int nodeD = (leaderId + 3) % numVoters; + int nodeE = (leaderId + 4) % numVoters; + router.filter( + nodeA, + new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(nodeB, nodeD, nodeE))) + ); + router.filter( + nodeB, + new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(nodeA, leaderId, nodeE))) + ); + router.filter(leaderId, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(nodeB)))); + router.filter(nodeD, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(nodeA)))); + router.filter(nodeE, new DropOutboundRequestsTo(cluster.endpointsFromIds(Set.of(nodeA, nodeB)))); + + // Check that leadership remains stable + scheduler.runUntil(() -> cluster.allReachedHighWatermark(20, Set.of(nodeA, leaderId, nodeD, nodeE))); + } + @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY) void canMakeProgressAfterBackToBackLeaderFailures( @ForAll int seed, @@ -651,14 +756,18 @@ boolean hasConsistentLeader() { return false; RaftNode first = iter.next(); - ElectionState election = first.store.readElectionState().get(); - if (!election.hasLeader()) + OptionalInt firstLeaderId = first.store.readElectionState().get().optionalLeaderId(); + int firstEpoch = first.store.readElectionState().get().epoch(); + if (firstLeaderId.isEmpty()) return false; while (iter.hasNext()) { RaftNode next = iter.next(); - if (!election.equals(next.store.readElectionState().get())) + OptionalInt nextLeaderId = next.store.readElectionState().get().optionalLeaderId(); + int nextEpoch = next.store.readElectionState().get().epoch(); + if (!firstLeaderId.equals(nextLeaderId) || firstEpoch != 
nextEpoch) { return false; + } } return true; @@ -793,7 +902,7 @@ void start(int nodeId) { clusterId, Collections.emptyList(), endpointsFromId(nodeId, channel.listenerName()), - Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), logContext, random, quorumConfig @@ -958,8 +1067,7 @@ public boolean acceptInbound(RaftMessage message) { */ @Override public boolean acceptOutbound(RaftMessage message) { - if (message instanceof RaftRequest.Outbound) { - RaftRequest.Outbound request = (RaftRequest.Outbound) message; + if (message instanceof RaftRequest.Outbound request) { InetSocketAddress destination = InetSocketAddress.createUnresolved( request.destination().host(), request.destination().port() @@ -990,7 +1098,7 @@ public void verify() { Integer oldEpoch = nodeEpochs.get(nodeId); Optional electionState = state.store.readElectionState(); - if (!electionState.isPresent()) { + if (electionState.isEmpty()) { continue; } @@ -1057,6 +1165,45 @@ public void verify() { } } + /** + * This invariant currently checks that the leader does not change after the first successful election + * and should only be applied to tests where we expect leadership not to change (e.g. non-impactful + * routing filter changes, no network jitter) + */ + private static class StableLeadership implements Invariant { + final Cluster cluster; + OptionalInt epochWithFirstLeader = OptionalInt.empty(); + OptionalInt firstLeaderId = OptionalInt.empty(); + + private StableLeadership(Cluster cluster) { + this.cluster = cluster; + } + + @Override + public void verify() { + // KAFKA-18439: Currently this just checks the leader is never changed after the first successful election. + // KAFKA-18439 will generalize the invariant so it holds for all tests even if routing filters are changed. + // i.e. 
if the current leader is reachable by majority, we do not expect leadership to change + for (Map.Entry nodeEntry : cluster.nodes.entrySet()) { + PersistentState state = nodeEntry.getValue(); + Optional electionState = state.store.readElectionState(); + + electionState.ifPresent(election -> { + if (election.hasLeader()) { + // verify there were no leaders prior to this one + if (epochWithFirstLeader.isEmpty()) { + epochWithFirstLeader = OptionalInt.of(election.epoch()); + firstLeaderId = OptionalInt.of(election.leaderId()); + } else { + assertEquals(epochWithFirstLeader.getAsInt(), election.epoch()); + assertEquals(firstLeaderId.getAsInt(), election.leaderId()); + } + } + }); + } + } + } + private static class MonotonicHighWatermark implements Invariant { final Cluster cluster; long highWatermark = 0; @@ -1171,7 +1318,7 @@ private void assertCommittedData(RaftNode node) { final MockLog log = node.log; OptionalLong highWatermark = manager.highWatermark(); - if (!highWatermark.isPresent()) { + if (highWatermark.isEmpty()) { // We cannot do validation if the current high watermark is unknown return; } diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java index 5e1d234c279a8..6bfaf7b7ff37f 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java @@ -75,6 +75,8 @@ public class RaftUtilTest { private final ListenerName listenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT); private final InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 9990); private final String clusterId = "I4ZmrWqfT2e-upky_4fdPA"; + private static final Uuid TEST_DIRECTORY_ID1 = Uuid.randomUuid(); + private static final Uuid TEST_DIRECTORY_ID2 = Uuid.randomUuid(); @Test public void testErrorResponse() { @@ -93,13 +95,6 @@ public void testErrorResponse() { private static Stream singletonFetchRequestTestCases() { return Stream.of( - Arguments.of(new FetchRequestTestCase(Uuid.ZERO_UUID, (short) 0, (short) -1, - "{\"replicaId\":-1,\"maxWaitMs\":0,\"minBytes\":0,\"topics\":[{\"topic\":\"topic\"," + - "\"partitions\":[{\"partition\":2,\"fetchOffset\":333,\"partitionMaxBytes\":10}]}]}")), - Arguments.of(new FetchRequestTestCase(Uuid.ZERO_UUID, (short) 3, (short) -1, - "{\"replicaId\":-1,\"maxWaitMs\":0,\"minBytes\":0,\"maxBytes\":2147483647," + - "\"topics\":[{\"topic\":\"topic\",\"partitions\":[{\"partition\":2,\"fetchOffset\":333," + - "\"partitionMaxBytes\":10}]}]}")), Arguments.of(new FetchRequestTestCase(Uuid.ZERO_UUID, (short) 4, (short) -1, "{\"replicaId\":-1,\"maxWaitMs\":0,\"minBytes\":0,\"maxBytes\":2147483647,\"isolationLevel\":0," + "\"topics\":[{\"topic\":\"topic\",\"partitions\":[{\"partition\":2,\"fetchOffset\":333," + @@ -139,12 +134,6 @@ private static Stream singletonFetchRequestTestCases() { private static Stream singletonFetchResponseTestCases() { return Stream.of( - Arguments.of(new FetchResponseTestCase((short) 0, -1, - "{\"responses\":[{\"topic\":\"topic\",\"partitions\":[{\"partitionIndex\":1," + - "\"errorCode\":0,\"highWatermark\":1000,\"records\":\"\"}]}]}")), - Arguments.of(new FetchResponseTestCase((short) 1, -1, - "{\"throttleTimeMs\":0,\"responses\":[{\"topic\":\"topic\",\"partitions\":" + - "[{\"partitionIndex\":1,\"errorCode\":0,\"highWatermark\":1000,\"records\":\"\"}]}]}")), Arguments.of(new FetchResponseTestCase((short) 4, -1, 
"{\"throttleTimeMs\":0,\"responses\":[{\"topic\":\"topic\",\"partitions\":" + "[{\"partitionIndex\":1,\"errorCode\":0,\"highWatermark\":1000,\"lastStableOffset\":900," + @@ -187,13 +176,26 @@ private static Stream voteRequestTestCases() { return Stream.of( Arguments.of((short) 0, "{\"clusterId\":\"I4ZmrWqfT2e-upky_4fdPA\",\"topics\":[{\"topicName\":\"topic\"," + - "\"partitions\":[{\"partitionIndex\":1,\"candidateEpoch\":1,\"candidateId\":1," + + "\"partitions\":[{\"partitionIndex\":1,\"replicaEpoch\":1,\"replicaId\":1," + "\"lastOffsetEpoch\":1000,\"lastOffset\":1000}]}]}"), Arguments.of((short) 1, "{\"clusterId\":\"I4ZmrWqfT2e-upky_4fdPA\",\"voterId\":2,\"topics\":[{" + - "\"topicName\":\"topic\",\"partitions\":[{\"partitionIndex\":1,\"candidateEpoch\":1," + - "\"candidateId\":1,\"candidateDirectoryId\":\"AAAAAAAAAAAAAAAAAAAAAQ\"," + - "\"voterDirectoryId\":\"AAAAAAAAAAAAAAAAAAAAAQ\",\"lastOffsetEpoch\":1000,\"lastOffset\":1000}]}]}") + "\"topicName\":\"topic\",\"partitions\":[{\"partitionIndex\":1,\"replicaEpoch\":1," + + "\"replicaId\":1,\"replicaDirectoryId\":\"" + TEST_DIRECTORY_ID1 + "\"," + + "\"voterDirectoryId\":\"" + TEST_DIRECTORY_ID2 + "\",\"lastOffsetEpoch\":1000," + + "\"lastOffset\":1000}]}]}"), + Arguments.of((short) 2, + "{\"clusterId\":\"I4ZmrWqfT2e-upky_4fdPA\",\"voterId\":2,\"topics\":[{" + + "\"topicName\":\"topic\",\"partitions\":[{\"partitionIndex\":1,\"replicaEpoch\":1," + + "\"replicaId\":1,\"replicaDirectoryId\":\"" + TEST_DIRECTORY_ID1 + "\"," + + "\"voterDirectoryId\":\"" + TEST_DIRECTORY_ID2 + "\",\"lastOffsetEpoch\":1000," + + "\"lastOffset\":1000,\"preVote\":true}]}]}"), + Arguments.of((short) 2, + "{\"clusterId\":\"I4ZmrWqfT2e-upky_4fdPA\",\"voterId\":2,\"topics\":[{" + + "\"topicName\":\"topic\",\"partitions\":[{\"partitionIndex\":1,\"replicaEpoch\":1," + + "\"replicaId\":1,\"replicaDirectoryId\":\"" + TEST_DIRECTORY_ID1 + "\"," + + "\"voterDirectoryId\":\"" + TEST_DIRECTORY_ID2 + "\",\"lastOffsetEpoch\":1000," + + "\"lastOffset\":1000,\"preVote\":true}]}]}") ); } @@ -203,6 +205,10 @@ private static Stream voteResponseTestCases() { "{\"errorCode\":0,\"topics\":[{\"topicName\":\"topic\",\"partitions\":[{" + "\"partitionIndex\":0,\"errorCode\":0,\"leaderId\":1,\"leaderEpoch\":1,\"voteGranted\":true}]}]}"), Arguments.of((short) 1, + "{\"errorCode\":0,\"topics\":[{\"topicName\":\"topic\",\"partitions\":[{" + + "\"partitionIndex\":0,\"errorCode\":0,\"leaderId\":1,\"leaderEpoch\":1,\"voteGranted\":true}]}]," + + "\"nodeEndpoints\":[{\"nodeId\":1,\"host\":\"localhost\",\"port\":9990}]}"), + Arguments.of((short) 2, "{\"errorCode\":0,\"topics\":[{\"topicName\":\"topic\",\"partitions\":[{" + "\"partitionIndex\":0,\"errorCode\":0,\"leaderId\":1,\"leaderEpoch\":1,\"voteGranted\":true}]}]," + "\"nodeEndpoints\":[{\"nodeId\":1,\"host\":\"localhost\",\"port\":9990}]}") @@ -335,6 +341,29 @@ public void testSingletonFetchRequestForAllVersion(final FetchRequestTestCase te assertEquals(testCase.expectedJson, json.toString()); } + // Test that the replicaDirectoryId field introduced in version 17 is ignorable for older versions. + // This is done by setting a FetchPartition's replicaDirectoryId explicitly to a non-zero uuid and + // checking that the FetchRequestData can still be written to an older version specified by + // testCase.version. 
+ @ParameterizedTest + @MethodSource("singletonFetchRequestTestCases") + public void testFetchRequestV17Compatibility(final FetchRequestTestCase testCase) { + FetchRequestData fetchRequestData = RaftUtil.singletonFetchRequest( + topicPartition, + Uuid.ONE_UUID, + partition -> partition + .setPartitionMaxBytes(10) + .setCurrentLeaderEpoch(5) + .setFetchOffset(333) + .setLastFetchedEpoch(testCase.lastFetchedEpoch) + .setPartition(2) + .setReplicaDirectoryId(Uuid.ONE_UUID) + .setLogStartOffset(0) + ); + JsonNode json = FetchRequestDataJsonConverter.write(fetchRequestData, testCase.version); + assertEquals(testCase.expectedJson, json.toString()); + } + @ParameterizedTest @MethodSource("singletonFetchResponseTestCases") public void testSingletonFetchResponseForAllVersion(final FetchResponseTestCase testCase) { @@ -377,18 +406,19 @@ public void testSingletonFetchResponseForAllVersion(final FetchResponseTestCase @ParameterizedTest @MethodSource("voteRequestTestCases") public void testSingletonVoteRequestForAllVersion(final short version, final String expectedJson) { - int candidateEpoch = 1; + int replicaEpoch = 1; int lastEpoch = 1000; long lastEpochOffset = 1000; VoteRequestData voteRequestData = RaftUtil.singletonVoteRequest( - topicPartition, - clusterId, - candidateEpoch, - ReplicaKey.of(1, Uuid.ONE_UUID), - ReplicaKey.of(2, Uuid.ONE_UUID), - lastEpoch, - lastEpochOffset + topicPartition, + clusterId, + replicaEpoch, + ReplicaKey.of(1, TEST_DIRECTORY_ID1), + ReplicaKey.of(2, TEST_DIRECTORY_ID2), + lastEpoch, + lastEpochOffset, + version >= 2 ); JsonNode json = VoteRequestDataJsonConverter.write(voteRequestData, version); assertEquals(expectedJson, json.toString()); @@ -437,6 +467,35 @@ public void testSingletonFetchSnapshotRequestForAllVersion(final short version, assertEquals(expectedJson, json.toString()); } + // Test that the replicaDirectoryId field introduced in version 1 is ignorable for version 0 + // This is done by setting a FetchPartition's replicaDirectoryId explicitly to a non-zero uuid and + // checking that the FetchSnapshotRequestData can still be written to an older version specified by + // testCase.version. 
+ @ParameterizedTest + @MethodSource("fetchSnapshotRequestTestCases") + public void testSingletonFetchSnapshotRequestV1Compatibility( + short version, + Uuid directoryId, + String expectedJson + ) { + int epoch = 1; + int maxBytes = 1000; + int position = 10; + + FetchSnapshotRequestData fetchSnapshotRequestData = RaftUtil.singletonFetchSnapshotRequest( + clusterId, + ReplicaKey.of(1, directoryId), + topicPartition, + epoch, + new OffsetAndEpoch(10, epoch), + maxBytes, + position + ); + fetchSnapshotRequestData.topics().get(0).partitions().get(0).setReplicaDirectoryId(Uuid.ONE_UUID); + JsonNode json = FetchSnapshotRequestDataJsonConverter.write(fetchSnapshotRequestData, version); + assertEquals(expectedJson, json.toString()); + } + @ParameterizedTest @MethodSource("fetchSnapshotResponseTestCases") public void testSingletonFetchSnapshotResponseForAllVersion(final short version, final String expectedJson) { diff --git a/raft/src/test/java/org/apache/kafka/raft/ReplicatedCounter.java b/raft/src/test/java/org/apache/kafka/raft/ReplicatedCounter.java index 4b35d9e7551f5..4bb1b451b3fd5 100644 --- a/raft/src/test/java/org/apache/kafka/raft/ReplicatedCounter.java +++ b/raft/src/test/java/org/apache/kafka/raft/ReplicatedCounter.java @@ -58,7 +58,7 @@ public synchronized boolean isWritable() { } public synchronized void increment() { - if (!claimedEpoch.isPresent()) { + if (claimedEpoch.isEmpty()) { throw new KafkaException("Counter is not currently writable"); } diff --git a/raft/src/test/java/org/apache/kafka/raft/ResignedStateTest.java b/raft/src/test/java/org/apache/kafka/raft/ResignedStateTest.java index a21c7b5b9aa16..f4fc143e2eb2e 100644 --- a/raft/src/test/java/org/apache/kafka/raft/ResignedStateTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/ResignedStateTest.java @@ -25,6 +25,7 @@ import java.net.InetSocketAddress; import java.util.Collections; +import java.util.Optional; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -67,7 +68,7 @@ public void testResignedState() { ResignedState state = newResignedState(voters); - assertEquals(ElectionState.withElectedLeader(epoch, localId, voters), state.election()); + assertEquals(ElectionState.withElectedLeader(epoch, localId, Optional.empty(), voters), state.election()); assertEquals(epoch, state.epoch()); assertEquals(Collections.singleton(remoteId), state.unackedVoters()); @@ -89,9 +90,22 @@ public void testResignedState() { public void testGrantVote(boolean isLogUpToDate) { ResignedState state = newResignedState(Set.of(1, 2, 3)); - assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); } @Test @@ 
-100,7 +114,7 @@ void testNegativeScenarioAcknowledgeResignation() {
        ResignedState state = newResignedState(voters);
-        assertEquals(ElectionState.withElectedLeader(epoch, 0, voters), state.election());
+        assertEquals(ElectionState.withElectedLeader(epoch, 0, Optional.empty(), voters), state.election());
        assertEquals(epoch, state.epoch());
        // try non-existed voter must throw an exception
diff --git a/raft/src/test/java/org/apache/kafka/raft/UnattachedStateTest.java b/raft/src/test/java/org/apache/kafka/raft/UnattachedStateTest.java
index 0870894067e5e..5e9c68d2d053c 100644
--- a/raft/src/test/java/org/apache/kafka/raft/UnattachedStateTest.java
+++ b/raft/src/test/java/org/apache/kafka/raft/UnattachedStateTest.java
@@ -16,11 +16,13 @@
 */
package org.apache.kafka.raft;
+import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
import org.junit.jupiter.params.provider.ValueSource;
import java.util.Optional;
@@ -37,16 +39,19 @@ public class UnattachedStateTest {
    private final LogContext logContext = new LogContext();
    private final int epoch = 5;
    private final int electionTimeoutMs = 10000;
+    private final Set<Integer> voters = Set.of(1, 2, 3);
+    private final ReplicaKey voter1Key = ReplicaKey.of(1, Uuid.randomUuid());
+    private final ReplicaKey votedKey = voter1Key;
    private UnattachedState newUnattachedState(
-        Set<Integer> voters,
-        OptionalInt leaderId
+        OptionalInt leaderId,
+        Optional<ReplicaKey> votedKey
    ) {
        return new UnattachedState(
            time,
            epoch,
            leaderId,
-            Optional.empty(),
+            votedKey,
            voters,
            Optional.empty(),
            electionTimeoutMs,
@@ -54,15 +59,17 @@ private UnattachedState newUnattachedState(
        );
    }
-    @Test
-    public void testElectionTimeout() {
-        Set<Integer> voters = Set.of(1, 2, 3);
-
-        UnattachedState state = newUnattachedState(voters, OptionalInt.empty());
-
-        assertEquals(epoch, state.epoch());
+    @ParameterizedTest
+    @CsvSource({ "true,false", "false,true", "false,false" })
+    public void testElectionStateAndElectionTimeout(boolean hasVotedKey, boolean hasLeaderId) {
+        OptionalInt leader = hasLeaderId ? OptionalInt.of(3) : OptionalInt.empty();
+        Optional<ReplicaKey> votedKey = hasVotedKey ?
Optional.of(this.votedKey) : Optional.empty(); + UnattachedState state = newUnattachedState(leader, votedKey); - assertEquals(ElectionState.withUnknownLeader(epoch, voters), state.election()); + assertEquals( + new ElectionState(epoch, leader, votedKey, voters), + state.election() + ); assertEquals(electionTimeoutMs, state.remainingElectionTimeMs(time.milliseconds())); assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); @@ -77,44 +84,134 @@ public void testElectionTimeout() { @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testGrantVote(boolean isLogUpToDate) { - UnattachedState state = newUnattachedState(Set.of(1, 2, 3), OptionalInt.empty()); + public void testGrantVoteWithoutVotedKey(boolean isLogUpToDate) { + UnattachedState state = newUnattachedState(OptionalInt.empty(), Optional.empty()); + + assertEquals( + isLogUpToDate, + state.canGrantVote(voter1Key, isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(voter1Key, isLogUpToDate, false) + ); assertEquals( isLogUpToDate, - state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) + state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) ); assertEquals( isLogUpToDate, - state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) + state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false) + ); + + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) ); assertEquals( isLogUpToDate, - state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) + state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false) + ); + + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false) ); } - @Test - void testLeaderEndpoints() { - UnattachedState state = newUnattachedState(Set.of(1, 2, 3), OptionalInt.empty()); + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testCanGrantVoteWithVotedKey(boolean isLogUpToDate) { + UnattachedState state = newUnattachedState(OptionalInt.empty(), Optional.of(votedKey)); - assertEquals(Endpoints.empty(), state.leaderEndpoints()); + // Same voterKey + // Local can reject PreVote for a replica that local has already granted a standard vote to if their log is behind + assertEquals( + isLogUpToDate, + state.canGrantVote(votedKey, isLogUpToDate, true) + ); + assertTrue(state.canGrantVote(votedKey, isLogUpToDate, false)); + + // Different directoryId + // Local can grant PreVote for a replica that local has already granted a standard vote to if their log is up-to-date, + // even if the directoryId is different + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedKey.id(), Uuid.randomUuid()), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedKey.id(), Uuid.randomUuid()), isLogUpToDate, false)); + + // Missing directoryId + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(votedKey.id(), ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(votedKey.id(), ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + + // Different voterId + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(2, votedKey.directoryId().get()), 
isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(2, votedKey.directoryId().get()), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + + // Observer + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); } @ParameterizedTest @ValueSource(booleans = {true, false}) - void testUnattachedWithLeader(boolean isLogUpToDate) { + void testGrantVoteWithLeader(boolean isLogUpToDate) { int leaderId = 3; - Set voters = Set.of(1, 2, leaderId); - - UnattachedState state = newUnattachedState(voters, OptionalInt.of(leaderId)); + UnattachedState state = newUnattachedState(OptionalInt.of(leaderId), Optional.empty()); // Check that the leader is persisted if the leader is known - assertEquals(ElectionState.withElectedLeader(epoch, leaderId, voters), state.election()); + assertEquals(ElectionState.withElectedLeader(epoch, leaderId, Optional.empty(), voters), state.election()); - // Check that the replica rejects all votes request if the leader is known - assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); - assertFalse(state.canGrantVote(ReplicaKey.of(3, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate)); + // Check that the replica can grant PreVotes if the log is up-to-date, even if the last leader is known + // This is because nodes in Unattached have not successfully fetched from the leader yet + assertEquals( + isLogUpToDate, + state.canGrantVote(voter1Key, isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(leaderId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + assertEquals( + isLogUpToDate, + state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, true) + ); + + // Check that the replica rejects all standard votes request if the leader is known + assertFalse(state.canGrantVote(ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(leaderId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + assertFalse(state.canGrantVote(ReplicaKey.of(10, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate, false)); + } + + @Test + public void testLeaderEndpoints() { + UnattachedState state = newUnattachedState(OptionalInt.of(3), Optional.of(this.votedKey)); + + assertEquals(Endpoints.empty(), state.leaderEndpoints()); } } diff --git a/raft/src/test/java/org/apache/kafka/raft/UnattachedStateWithVoteTest.java b/raft/src/test/java/org/apache/kafka/raft/UnattachedStateWithVoteTest.java deleted file mode 100644 index 2daffe84e7cab..0000000000000 --- a/raft/src/test/java/org/apache/kafka/raft/UnattachedStateWithVoteTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.raft; - -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.MockTime; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.util.Collections; -import java.util.Optional; -import java.util.OptionalInt; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -class UnattachedStateWithVoteTest { - - private final MockTime time = new MockTime(); - private final LogContext logContext = new LogContext(); - private final int epoch = 5; - private final int votedId = 1; - private final int electionTimeoutMs = 10000; - - private UnattachedState newUnattachedVotedState( - Uuid votedDirectoryId - ) { - return new UnattachedState( - time, - epoch, - OptionalInt.empty(), - Optional.of(ReplicaKey.of(votedId, votedDirectoryId)), - Collections.emptySet(), - Optional.empty(), - electionTimeoutMs, - logContext - ); - } - - @Test - public void testElectionTimeout() { - UnattachedState state = newUnattachedVotedState(ReplicaKey.NO_DIRECTORY_ID); - ReplicaKey votedKey = ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID); - - assertEquals(epoch, state.epoch()); - assertEquals(votedKey, state.votedKey().get()); - assertEquals( - ElectionState.withVotedCandidate(epoch, votedKey, Collections.emptySet()), - state.election() - ); - assertEquals(electionTimeoutMs, state.remainingElectionTimeMs(time.milliseconds())); - assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); - - time.sleep(5000); - assertEquals(electionTimeoutMs - 5000, state.remainingElectionTimeMs(time.milliseconds())); - assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); - - time.sleep(5000); - assertEquals(0, state.remainingElectionTimeMs(time.milliseconds())); - assertTrue(state.hasElectionTimeoutExpired(time.milliseconds())); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void testCanGrantVoteWithoutDirectoryId(boolean isLogUpToDate) { - UnattachedState state = newUnattachedVotedState(ReplicaKey.NO_DIRECTORY_ID); - - assertTrue( - state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) - ); - assertTrue( - state.canGrantVote( - ReplicaKey.of(votedId, Uuid.randomUuid()), - isLogUpToDate - ) - ); - - assertFalse( - state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), isLogUpToDate) - ); - } - - @Test - void testCanGrantVoteWithDirectoryId() { - Uuid votedDirectoryId = Uuid.randomUuid(); - UnattachedState state = newUnattachedVotedState(votedDirectoryId); - - assertTrue(state.canGrantVote(ReplicaKey.of(votedId, 
votedDirectoryId), false)); - - assertFalse( - state.canGrantVote(ReplicaKey.of(votedId, Uuid.randomUuid()), false) - ); - assertFalse(state.canGrantVote(ReplicaKey.of(votedId, ReplicaKey.NO_DIRECTORY_ID), false)); - - assertFalse(state.canGrantVote(ReplicaKey.of(votedId + 1, votedDirectoryId), false)); - assertFalse(state.canGrantVote(ReplicaKey.of(votedId + 1, ReplicaKey.NO_DIRECTORY_ID), false)); - } - - @Test - void testLeaderEndpoints() { - Uuid votedDirectoryId = Uuid.randomUuid(); - UnattachedState state = newUnattachedVotedState(votedDirectoryId); - - assertEquals(Endpoints.empty(), state.leaderEndpoints()); - } -} diff --git a/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java b/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java index 1c1287344f142..f8a0ffc8c35e8 100644 --- a/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java @@ -20,7 +20,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.feature.SupportedVersionRange; import org.apache.kafka.common.network.ListenerName; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -359,7 +359,7 @@ public static VoterSet.VoterNode voterNode(int id, boolean withDirectoryId) { } public static VoterSet.VoterNode voterNode(ReplicaKey replicaKey) { - return new VoterSet.VoterNode( + return voterNode( replicaKey, Endpoints.fromInetSocketAddresses( Collections.singletonMap( @@ -369,8 +369,15 @@ public static VoterSet.VoterNode voterNode(ReplicaKey replicaKey) { 9990 + replicaKey.id() ) ) - ), - Features.KRAFT_VERSION.supportedVersionRange() + ) + ); + } + + public static VoterSet.VoterNode voterNode(ReplicaKey replicaKey, Endpoints endpoints) { + return new VoterSet.VoterNode( + replicaKey, + endpoints, + Feature.KRAFT_VERSION.supportedVersionRange() ); } diff --git a/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java b/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java index ccb28f45477ac..2c4765804a934 100644 --- a/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/internals/BatchBuilderTest.java @@ -112,7 +112,7 @@ public void testHasRoomForUncompressed(int batchSize) { String record = "i am a record"; - while (!builder.bytesNeeded(Collections.singletonList(record), null).isPresent()) { + while (builder.bytesNeeded(Collections.singletonList(record), null).isEmpty()) { builder.appendRecord(record, null); } diff --git a/raft/src/test/java/org/apache/kafka/raft/internals/KafkaRaftMetricsTest.java b/raft/src/test/java/org/apache/kafka/raft/internals/KafkaRaftMetricsTest.java index 86f18a1aefae4..f0c6710cd604a 100644 --- a/raft/src/test/java/org/apache/kafka/raft/internals/KafkaRaftMetricsTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/internals/KafkaRaftMetricsTest.java @@ -29,7 +29,7 @@ import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; import org.apache.kafka.raft.VoterSetTest; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import org.junit.jupiter.api.AfterEach; @@ -86,7 +86,7 @@ private QuorumState buildQuorumState(VoterSet voterSet, KRaftVersion kraftVersio localDirectoryId, mockPartitionState, voterSet.listeners(localId), - 
Features.KRAFT_VERSION.supportedVersionRange(), + Feature.KRAFT_VERSION.supportedVersionRange(), electionTimeoutMs, fetchTimeoutMs, new MockQuorumStateStore(), @@ -116,6 +116,7 @@ private VoterSet localStandaloneVoterSet(KRaftVersion kraftVersion) { public void shouldRecordVoterQuorumState(KRaftVersion kraftVersion) { boolean withDirectoryId = kraftVersion.featureLevel() > 0; Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2), withDirectoryId); + Uuid voter2DirectoryId = voterMap.get(2).voterKey().directoryId().orElse(Uuid.ZERO_UUID); voterMap.put( localId, VoterSetTest.voterNode( @@ -131,9 +132,46 @@ public void shouldRecordVoterQuorumState(KRaftVersion kraftVersion) { state.initialize(new OffsetAndEpoch(0L, 0)); raftMetrics = new KafkaRaftMetrics(metrics, "raft", state); + // unattached assertEquals("unattached", getMetric(metrics, "current-state").metricValue()); - assertEquals((double) -1L, getMetric(metrics, "current-leader").metricValue()); - assertEquals((double) -1L, getMetric(metrics, "current-vote").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-vote").metricValue()); + assertEquals( + Uuid.ZERO_UUID.toString(), + getMetric(metrics, "current-vote-directory-id").metricValue() + ); + assertEquals((double) 0, getMetric(metrics, "current-epoch").metricValue()); + assertEquals((double) -1, getMetric(metrics, "high-watermark").metricValue()); + + // prospective + state.transitionToProspective(); + assertEquals("prospective", getMetric(metrics, "current-state").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-vote").metricValue()); + assertEquals( + Uuid.ZERO_UUID.toString(), + getMetric(metrics, "current-vote-directory-id").metricValue() + ); + assertEquals((double) 0, getMetric(metrics, "current-epoch").metricValue()); + assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); + + // prospective with votedKey + state.prospectiveAddVotedState(0, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID)); + assertEquals("prospective-voted", getMetric(metrics, "current-state").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) 1, getMetric(metrics, "current-vote").metricValue()); + assertEquals( + Uuid.ZERO_UUID.toString(), + getMetric(metrics, "current-vote-directory-id").metricValue() + ); + assertEquals((double) 0, getMetric(metrics, "current-epoch").metricValue()); + assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); + + // follower with votedKey and leader + state.transitionToFollower(0, 2, voters.listeners(2)); + assertEquals("follower", getMetric(metrics, "current-state").metricValue()); + assertEquals((double) 2, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) 1, getMetric(metrics, "current-vote").metricValue()); assertEquals( Uuid.ZERO_UUID.toString(), getMetric(metrics, "current-vote-directory-id").metricValue() @@ -141,17 +179,35 @@ public void shouldRecordVoterQuorumState(KRaftVersion kraftVersion) { assertEquals((double) 0, getMetric(metrics, "current-epoch").metricValue()); assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); + // follower with updated HW + state.followerStateOrThrow().updateHighWatermark(OptionalLong.of(5L)); + assertEquals((double) 5L, getMetric(metrics, 
"high-watermark").metricValue()); + + // prospective with votedKey and leader + state.transitionToProspective(); + assertEquals("prospective-voted", getMetric(metrics, "current-state").metricValue()); + assertEquals((double) 2, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) 1, getMetric(metrics, "current-vote").metricValue()); + assertEquals( + Uuid.ZERO_UUID.toString(), + getMetric(metrics, "current-vote-directory-id").metricValue() + ); + assertEquals((double) 0, getMetric(metrics, "current-epoch").metricValue()); + assertEquals((double) 5L, getMetric(metrics, "high-watermark").metricValue()); + + // candidate state.transitionToCandidate(); assertEquals("candidate", getMetric(metrics, "current-state").metricValue()); - assertEquals((double) -1L, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); assertEquals((double) localId, getMetric(metrics, "current-vote").metricValue()); assertEquals( localDirectoryId.toString(), getMetric(metrics, "current-vote-directory-id").metricValue() ); assertEquals((double) 1, getMetric(metrics, "current-epoch").metricValue()); - assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); + assertEquals((double) 5L, getMetric(metrics, "high-watermark").metricValue()); + // leader state.candidateStateOrThrow().recordGrantedVote(1); state.transitionToLeader(2L, accumulator); assertEquals("leader", getMetric(metrics, "current-state").metricValue()); @@ -162,16 +218,18 @@ public void shouldRecordVoterQuorumState(KRaftVersion kraftVersion) { getMetric(metrics, "current-vote-directory-id").metricValue() ); assertEquals((double) 1, getMetric(metrics, "current-epoch").metricValue()); - assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); + assertEquals((double) -1L, getMetric(metrics, "high-watermark").metricValue()); // todo, bug fix - state.leaderStateOrThrow().updateLocalState(new LogOffsetMetadata(5L), voters); + // leader with updated HW + state.leaderStateOrThrow().updateLocalState(new LogOffsetMetadata(10L), voters); state.leaderStateOrThrow().updateReplicaState( voterMap.get(1).voterKey(), 0, - new LogOffsetMetadata(5L) + new LogOffsetMetadata(10L) ); - assertEquals((double) 5L, getMetric(metrics, "high-watermark").metricValue()); + assertEquals((double) 10L, getMetric(metrics, "high-watermark").metricValue()); + // follower state.transitionToFollower(2, 1, voters.listeners(1)); assertEquals("follower", getMetric(metrics, "current-state").metricValue()); assertEquals((double) 1, getMetric(metrics, "current-leader").metricValue()); @@ -181,25 +239,25 @@ public void shouldRecordVoterQuorumState(KRaftVersion kraftVersion) { getMetric(metrics, "current-vote-directory-id").metricValue() ); assertEquals((double) 2, getMetric(metrics, "current-epoch").metricValue()); - assertEquals((double) 5L, getMetric(metrics, "high-watermark").metricValue()); - - state.followerStateOrThrow().updateHighWatermark(OptionalLong.of(10L)); assertEquals((double) 10L, getMetric(metrics, "high-watermark").metricValue()); - state.transitionToUnattachedVotedState(3, ReplicaKey.of(2, ReplicaKey.NO_DIRECTORY_ID)); - assertEquals("voted", getMetric(metrics, "current-state").metricValue()); + // unattached with votedKey + state.transitionToUnattached(3, OptionalInt.empty()); + state.unattachedAddVotedState(3, ReplicaKey.of(2, voter2DirectoryId)); + assertEquals("unattached-voted", getMetric(metrics, "current-state").metricValue()); 
assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); assertEquals((double) 2, getMetric(metrics, "current-vote").metricValue()); assertEquals( - Uuid.ZERO_UUID.toString(), + voter2DirectoryId.toString(), getMetric(metrics, "current-vote-directory-id").metricValue() ); assertEquals((double) 3, getMetric(metrics, "current-epoch").metricValue()); assertEquals((double) 10L, getMetric(metrics, "high-watermark").metricValue()); - state.transitionToUnattached(4); + // unattached with leader without votedKey + state.transitionToUnattached(4, OptionalInt.of(1)); assertEquals("unattached", getMetric(metrics, "current-state").metricValue()); - assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); + assertEquals((double) 1, getMetric(metrics, "current-leader").metricValue()); assertEquals((double) -1, getMetric(metrics, "current-vote").metricValue()); assertEquals( Uuid.ZERO_UUID.toString(), @@ -244,7 +302,7 @@ public void shouldRecordNonVoterQuorumState(KRaftVersion kraftVersion) { state.followerStateOrThrow().updateHighWatermark(OptionalLong.of(10L)); assertEquals((double) 10L, getMetric(metrics, "high-watermark").metricValue()); - state.transitionToUnattached(4); + state.transitionToUnattached(4, OptionalInt.empty()); assertEquals("unattached", getMetric(metrics, "current-state").metricValue()); assertEquals((double) -1, getMetric(metrics, "current-leader").metricValue()); assertEquals((double) -1, getMetric(metrics, "current-vote").metricValue()); diff --git a/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetHistoryTest.java b/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetHistoryTest.java index 04f8aa8d3658c..302a1da52126d 100644 --- a/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetHistoryTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/internals/VoterSetHistoryTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.raft.internals; +import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.raft.VoterSet; import org.apache.kafka.raft.VoterSetTest; @@ -33,7 +34,7 @@ public final class VoterSetHistoryTest { @Test void testStaticVoterSet() { VoterSet staticVoterSet = VoterSet.fromMap(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true)); - VoterSetHistory votersHistory = new VoterSetHistory(staticVoterSet); + VoterSetHistory votersHistory = voterSetHistory(staticVoterSet); assertEquals(Optional.empty(), votersHistory.valueAtOrBefore(0)); assertEquals(Optional.empty(), votersHistory.valueAtOrBefore(100)); @@ -54,7 +55,7 @@ void testStaticVoterSet() { @Test void TestNoStaticVoterSet() { - VoterSetHistory votersHistory = new VoterSetHistory(VoterSet.empty()); + VoterSetHistory votersHistory = voterSetHistory(VoterSet.empty()); assertEquals(Optional.empty(), votersHistory.valueAtOrBefore(0)); assertEquals(Optional.empty(), votersHistory.valueAtOrBefore(100)); @@ -65,7 +66,7 @@ void TestNoStaticVoterSet() { void testAddAt() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet staticVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new VoterSetHistory(staticVoterSet); + VoterSetHistory votersHistory = voterSetHistory(staticVoterSet); assertThrows( IllegalArgumentException.class, @@ -95,7 +96,7 @@ void testAddAt() { void testBootstrapAddAt() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet bootstrapVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new 
VoterSetHistory(VoterSet.empty()); + VoterSetHistory votersHistory = voterSetHistory(VoterSet.empty()); votersHistory.addAt(-1, bootstrapVoterSet); assertEquals(bootstrapVoterSet, votersHistory.lastValue()); @@ -124,7 +125,7 @@ void testBootstrapAddAt() { @Test void testAddAtNonOverlapping() { - VoterSetHistory votersHistory = new VoterSetHistory(VoterSet.empty()); + VoterSetHistory votersHistory = voterSetHistory(VoterSet.empty()); Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet voterSet = VoterSet.fromMap(new HashMap<>(voterMap)); @@ -132,35 +133,30 @@ void testAddAtNonOverlapping() { // Add a starting voter to the history votersHistory.addAt(100, voterSet); - // Remove voter so that it doesn't overlap - VoterSet nonoverlappingRemovedSet = voterSet + // Assert multiple voters can be removed at a time + VoterSet nonOverlappingRemovedSet = voterSet .removeVoter(voterMap.get(1).voterKey()).get() .removeVoter(voterMap.get(2).voterKey()).get(); - assertThrows( - IllegalArgumentException.class, - () -> votersHistory.addAt(200, nonoverlappingRemovedSet) - ); - assertEquals(voterSet, votersHistory.lastValue()); + votersHistory.addAt(200, nonOverlappingRemovedSet); + assertEquals(nonOverlappingRemovedSet, votersHistory.lastValue()); - // Add voters so that it doesn't overlap - VoterSet nonoverlappingAddSet = voterSet - .addVoter(VoterSetTest.voterNode(4, true)).get() - .addVoter(VoterSetTest.voterNode(5, true)).get(); + // Assert multiple voters can be added at a time + VoterSet nonOverlappingAddSet = nonOverlappingRemovedSet + .addVoter(VoterSetTest.voterNode(1, true)).get() + .addVoter(VoterSetTest.voterNode(2, true)).get(); - assertThrows( - IllegalArgumentException.class, - () -> votersHistory.addAt(200, nonoverlappingAddSet) - ); - assertEquals(voterSet, votersHistory.lastValue()); + votersHistory.addAt(300, nonOverlappingAddSet); + + assertEquals(nonOverlappingAddSet, votersHistory.lastValue()); } @Test void testNonoverlappingFromStaticVoterSet() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet staticVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new VoterSetHistory(VoterSet.empty()); + VoterSetHistory votersHistory = voterSetHistory(VoterSet.empty()); // Remove voter so that it doesn't overlap VoterSet nonoverlappingRemovedSet = staticVoterSet @@ -175,7 +171,7 @@ void testNonoverlappingFromStaticVoterSet() { void testTruncateTo() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet staticVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new VoterSetHistory(staticVoterSet); + VoterSetHistory votersHistory = voterSetHistory(staticVoterSet); // Add voter 4 to the voter set and voter set history voterMap.put(4, VoterSetTest.voterNode(4, true)); @@ -201,7 +197,7 @@ void testTruncateTo() { void testTrimPrefixTo() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet staticVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new VoterSetHistory(staticVoterSet); + VoterSetHistory votersHistory = voterSetHistory(staticVoterSet); // Add voter 4 to the voter set and voter set history voterMap.put(4, VoterSetTest.voterNode(4, true)); @@ -234,7 +230,7 @@ void testTrimPrefixTo() { void testClear() { Map voterMap = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true); VoterSet staticVoterSet = VoterSet.fromMap(new HashMap<>(voterMap)); - VoterSetHistory votersHistory = new 
VoterSetHistory(staticVoterSet); + VoterSetHistory votersHistory = voterSetHistory(staticVoterSet); // Add voter 4 to the voter set and voter set history voterMap.put(4, VoterSetTest.voterNode(4, true)); @@ -250,4 +246,8 @@ void testClear() { assertEquals(staticVoterSet, votersHistory.lastValue()); } + + private VoterSetHistory voterSetHistory(VoterSet staticVoterSet) { + return new VoterSetHistory(staticVoterSet, new LogContext()); + } } diff --git a/raft/src/test/resources/log4j.properties b/raft/src/test/resources/log4j.properties deleted file mode 100644 index 6d90f6dd34884..0000000000000 --- a/raft/src/test/resources/log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=OFF, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka.raft=ERROR -log4j.logger.org.apache.kafka.snapshot=ERROR diff --git a/raft/src/test/resources/log4j2.yaml b/raft/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..50d9e781b8ec1 --- /dev/null +++ b/raft/src/test/resources/log4j2.yaml @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+Configuration:
+  Properties:
+    Property:
+      - name: "logPattern"
+        value: "[%d] %p %m (%c:%L)%n"
+
+  Appenders:
+    Console:
+      name: STDOUT
+      PatternLayout:
+        pattern: "${logPattern}"
+
+  Loggers:
+    Root:
+      level: OFF
+      AppenderRef:
+        - ref: STDOUT
+    Logger:
+      - name: org.apache.kafka.raft
+        level: ERROR
+
+      - name: org.apache.kafka.snapshot
+        level: ERROR
diff --git a/release/templates.py b/release/templates.py
index 89826e1097ca7..06a142e022229 100644
--- a/release/templates.py
+++ b/release/templates.py
@@ -232,9 +232,10 @@ def rc_vote_email_text(release_version, rc, rc_tag, dev_branch, docs_version, ap
* Protocol: https://kafka.apache.org/{docs_version}/protocol.html
-* Successful Jenkins builds for the {dev_branch} branch:
+* Successful CI builds for the {dev_branch} branch:
Unit/integration tests: https://ci-builds.apache.org/job/Kafka/job/kafka/job/{dev_branch}//
-System tests: https://jenkins.confluent.io/job/system-test-kafka/job/{dev_branch}//
+-- Confluent engineers can access the Semaphore build to provide the build number
+System tests: https://confluent-open-source-kafka-system-test-results.s3-us-west-2.amazonaws.com/{dev_branch}//report.html
* Successful Docker Image Github Actions Pipeline for {dev_branch} branch:
diff --git a/server-common/src/main/java/org/apache/kafka/queue/EventQueue.java b/server-common/src/main/java/org/apache/kafka/queue/EventQueue.java
index 8914931448d83..a659fcc873f7b 100644
--- a/server-common/src/main/java/org/apache/kafka/queue/EventQueue.java
+++ b/server-common/src/main/java/org/apache/kafka/queue/EventQueue.java
@@ -97,7 +97,7 @@ public EarliestDeadlineFunction(long newDeadlineNs) {
        @Override
        public OptionalLong apply(OptionalLong prevDeadlineNs) {
-            if (!prevDeadlineNs.isPresent()) {
+            if (prevDeadlineNs.isEmpty()) {
                return OptionalLong.of(newDeadlineNs);
            } else if (prevDeadlineNs.getAsLong() < newDeadlineNs) {
                return prevDeadlineNs;
@@ -116,7 +116,7 @@ public LatestDeadlineFunction(long newDeadlineNs) {
        @Override
        public OptionalLong apply(OptionalLong prevDeadlineNs) {
-            if (!prevDeadlineNs.isPresent()) {
+            if (prevDeadlineNs.isEmpty()) {
                return OptionalLong.of(newDeadlineNs);
            } else if (prevDeadlineNs.getAsLong() > newDeadlineNs) {
                return prevDeadlineNs;
diff --git a/server-common/src/main/java/org/apache/kafka/queue/KafkaEventQueue.java b/server-common/src/main/java/org/apache/kafka/queue/KafkaEventQueue.java
index b32183a214f82..1a70bcd043f97 100644
--- a/server-common/src/main/java/org/apache/kafka/queue/KafkaEventQueue.java
+++ b/server-common/src/main/java/org/apache/kafka/queue/KafkaEventQueue.java
@@ -337,7 +337,7 @@ Exception enqueue(EventContext eventContext,
                    }
                    break;
                case DEFERRED:
-                    if (!deadlineNs.isPresent()) {
+                    if (deadlineNs.isEmpty()) {
                        return new RuntimeException(
                            "You must specify a deadline for deferred events.");
                    }
diff --git a/server-common/src/main/java/org/apache/kafka/security/EncryptingPasswordEncoder.java b/server-common/src/main/java/org/apache/kafka/security/EncryptingPasswordEncoder.java
deleted file mode 100644
index 88a7216acf727..0000000000000
--- a/server-common/src/main/java/org/apache/kafka/security/EncryptingPasswordEncoder.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.security; - -import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.types.Password; -import org.apache.kafka.server.util.Csv; - -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.security.spec.InvalidKeySpecException; -import java.util.HashMap; -import java.util.Map; -import java.util.stream.Collectors; - -import javax.crypto.Cipher; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.PBEKeySpec; -import javax.crypto.spec.SecretKeySpec; - -/** - * Password encoder and decoder implementation. Encoded passwords are persisted as a CSV map - * containing the encoded password in base64 and along with the properties used for encryption. - */ -public class EncryptingPasswordEncoder implements PasswordEncoder { - - private final SecureRandom secureRandom = new SecureRandom(); - - private final Password secret; - private final String keyFactoryAlgorithm; - private final String cipherAlgorithm; - private final int keyLength; - private final int iterations; - private final CipherParamsEncoder cipherParamsEncoder; - - - /** - * @param secret The secret used for encoding and decoding - * @param keyFactoryAlgorithm Key factory algorithm if configured. By default, PBKDF2WithHmacSHA512 is - * used if available, PBKDF2WithHmacSHA1 otherwise. - * @param cipherAlgorithm Cipher algorithm used for encoding. - * @param keyLength Key length used for encoding. This should be valid for the specified algorithms. - * @param iterations Iteration count used for encoding. - * The provided `keyFactoryAlgorithm`, `cipherAlgorithm`, `keyLength` and `iterations` are used for encoding passwords. - * The values used for encoding are stored along with the encoded password and the stored values are used for decoding. 
- */ - public EncryptingPasswordEncoder( - Password secret, - String keyFactoryAlgorithm, - String cipherAlgorithm, - int keyLength, - int iterations) { - this.secret = secret; - this.keyFactoryAlgorithm = keyFactoryAlgorithm; - this.cipherAlgorithm = cipherAlgorithm; - this.keyLength = keyLength; - this.iterations = iterations; - this.cipherParamsEncoder = cipherParamsInstance(cipherAlgorithm); - } - - @Override - public String encode(Password password) throws GeneralSecurityException { - byte[] salt = new byte[256]; - secureRandom.nextBytes(salt); - Cipher cipher = Cipher.getInstance(cipherAlgorithm); - SecretKeyFactory keyFactory = secretKeyFactory(keyFactoryAlgorithm); - SecretKeySpec keySpec = secretKeySpec(keyFactory, cipherAlgorithm, keyLength, salt, iterations); - cipher.init(Cipher.ENCRYPT_MODE, keySpec); - byte[] encryptedPassword = cipher.doFinal(password.value().getBytes(StandardCharsets.UTF_8)); - Map encryptedMap = new HashMap<>(); - encryptedMap.put(PasswordEncoder.KEY_FACTORY_ALGORITHM, keyFactory.getAlgorithm()); - encryptedMap.put(PasswordEncoder.CIPHER_ALGORITHM, cipherAlgorithm); - encryptedMap.put(PasswordEncoder.KEY_LENGTH, String.valueOf(keyLength)); - encryptedMap.put(PasswordEncoder.SALT, PasswordEncoder.base64Encode(salt)); - encryptedMap.put(PasswordEncoder.ITERATIONS, String.valueOf(iterations)); - encryptedMap.put(PasswordEncoder.ENCRYPTED_PASSWORD, PasswordEncoder.base64Encode(encryptedPassword)); - encryptedMap.put(PasswordEncoder.PASSWORD_LENGTH, String.valueOf(password.value().length())); - encryptedMap.putAll(cipherParamsEncoder.toMap(cipher.getParameters())); - - return encryptedMap.entrySet().stream() - .map(entry -> entry.getKey() + ":" + entry.getValue()) - .collect(Collectors.joining(",")); - } - - @Override - public Password decode(String encodedPassword) throws GeneralSecurityException { - Map params = Csv.parseCsvMap(encodedPassword); - String keyFactoryAlg = params.get(PasswordEncoder.KEY_FACTORY_ALGORITHM); - String cipherAlg = params.get(PasswordEncoder.CIPHER_ALGORITHM); - int keyLength = Integer.parseInt(params.get(PasswordEncoder.KEY_LENGTH)); - byte[] salt = PasswordEncoder.base64Decode(params.get(PasswordEncoder.SALT)); - int iterations = Integer.parseInt(params.get(PasswordEncoder.ITERATIONS)); - byte[] encryptedPassword = PasswordEncoder.base64Decode(params.get(PasswordEncoder.ENCRYPTED_PASSWORD)); - int passwordLengthProp = Integer.parseInt(params.get(PasswordEncoder.PASSWORD_LENGTH)); - Cipher cipher = Cipher.getInstance(cipherAlg); - SecretKeyFactory keyFactory = secretKeyFactory(keyFactoryAlg); - SecretKeySpec keySpec = secretKeySpec(keyFactory, cipherAlg, keyLength, salt, iterations); - cipher.init(Cipher.DECRYPT_MODE, keySpec, cipherParamsEncoder.toParameterSpec(params)); - try { - byte[] decrypted = cipher.doFinal(encryptedPassword); - String password = new String(decrypted, StandardCharsets.UTF_8); - if (password.length() != passwordLengthProp) // Sanity check - throw new ConfigException("Password could not be decoded, sanity check of length failed"); - return new Password(password); - } catch (Exception e) { - throw new ConfigException("Password could not be decoded", e); - } - } - - private SecretKeyFactory secretKeyFactory(String keyFactoryAlg) throws NoSuchAlgorithmException { - if (keyFactoryAlg != null) { - return SecretKeyFactory.getInstance(keyFactoryAlg); - } else { - try { - return SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512"); - } catch (NoSuchAlgorithmException nsae) { - return 
SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1"); - } - } - } - - private SecretKeySpec secretKeySpec(SecretKeyFactory keyFactory, - String cipherAlg, - int keyLength, - byte[] salt, - int iterations) throws InvalidKeySpecException { - PBEKeySpec keySpec = new PBEKeySpec(secret.value().toCharArray(), salt, iterations, keyLength); - String algorithm = (cipherAlg.indexOf('/') > 0) ? cipherAlg.substring(0, cipherAlg.indexOf('/')) : cipherAlg; - return new SecretKeySpec(keyFactory.generateSecret(keySpec).getEncoded(), algorithm); - } - - private CipherParamsEncoder cipherParamsInstance(String cipherAlgorithm) { - if (cipherAlgorithm.startsWith("AES/GCM/")) { - return new GcmParamsEncoder(); - } else { - return new IvParamsEncoder(); - } - } -} diff --git a/server-common/src/main/java/org/apache/kafka/security/GcmParamsEncoder.java b/server-common/src/main/java/org/apache/kafka/security/GcmParamsEncoder.java deleted file mode 100644 index d089d27d6887a..0000000000000 --- a/server-common/src/main/java/org/apache/kafka/security/GcmParamsEncoder.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
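
For context on the encoder removed above: a minimal usage sketch, not part of this patch. The class and constructor signature come from the deleted file; all literal values (secret, password, algorithm choices) are made-up examples, and the CSV keys shown follow the PasswordEncoder constants that are also removed in this change.

import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.security.EncryptingPasswordEncoder;

public class PasswordEncoderExample {
    public static void main(String[] args) throws Exception {
        EncryptingPasswordEncoder encoder = new EncryptingPasswordEncoder(
                new Password("broker-encoder-secret"),  // secret used to derive the encryption key
                null,                                   // null selects PBKDF2WithHmacSHA512, falling back to PBKDF2WithHmacSHA1
                "AES/CBC/PKCS5Padding",                 // cipher algorithm
                128,                                    // key length in bits
                4096);                                  // PBKDF2 iteration count
        String encoded = encoder.encode(new Password("listener-keystore-password"));
        // "encoded" is a CSV map, e.g.
        // keyFactoryAlgorithm:PBKDF2WithHmacSHA512,cipherAlgorithm:AES/CBC/PKCS5Padding,keyLength:128,
        // salt:<base64>,iterations:4096,encryptedPassword:<base64>,passwordLength:26,initializationVector:<base64>
        Password decoded = encoder.decode(encoded);
        System.out.println(decoded.value());            // prints the original password
    }
}
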
- */ -package org.apache.kafka.security; - -import java.security.AlgorithmParameters; -import java.security.spec.AlgorithmParameterSpec; -import java.security.spec.InvalidParameterSpecException; -import java.util.HashMap; -import java.util.Map; - -import javax.crypto.spec.GCMParameterSpec; - -public class GcmParamsEncoder implements CipherParamsEncoder { - - private static final String AUTHENTICATION_TAG_LENGTH = "authenticationTagLength"; - - @Override - public Map toMap(AlgorithmParameters cipherParams) throws InvalidParameterSpecException { - if (cipherParams != null) { - GCMParameterSpec spec = cipherParams.getParameterSpec(GCMParameterSpec.class); - Map map = new HashMap<>(); - map.put(PasswordEncoder.INITIALIZATION_VECTOR, PasswordEncoder.base64Encode(spec.getIV())); - map.put(AUTHENTICATION_TAG_LENGTH, String.valueOf(spec.getTLen())); - return map; - } else - throw new IllegalStateException("Could not determine initialization vector for cipher"); - } - - @Override - public AlgorithmParameterSpec toParameterSpec(Map paramMap) { - return new GCMParameterSpec(Integer.parseInt(paramMap.get(AUTHENTICATION_TAG_LENGTH)), - PasswordEncoder.base64Decode(paramMap.get(PasswordEncoder.INITIALIZATION_VECTOR))); - } -} diff --git a/server-common/src/main/java/org/apache/kafka/security/IvParamsEncoder.java b/server-common/src/main/java/org/apache/kafka/security/IvParamsEncoder.java deleted file mode 100644 index 0e38c925991f3..0000000000000 --- a/server-common/src/main/java/org/apache/kafka/security/IvParamsEncoder.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.security; - -import java.security.AlgorithmParameters; -import java.security.spec.AlgorithmParameterSpec; -import java.security.spec.InvalidParameterSpecException; -import java.util.Collections; -import java.util.Map; - -import javax.crypto.spec.IvParameterSpec; - -public class IvParamsEncoder implements CipherParamsEncoder { - - @Override - public Map toMap(AlgorithmParameters cipherParams) throws InvalidParameterSpecException { - if (cipherParams != null) { - IvParameterSpec ivSpec = cipherParams.getParameterSpec(IvParameterSpec.class); - return Collections.singletonMap(PasswordEncoder.INITIALIZATION_VECTOR, PasswordEncoder.base64Encode(ivSpec.getIV())); - } else - throw new IllegalStateException("Could not determine initialization vector for cipher"); - } - - @Override - public AlgorithmParameterSpec toParameterSpec(Map paramMap) { - return new IvParameterSpec(PasswordEncoder.base64Decode(paramMap.get(PasswordEncoder.INITIALIZATION_VECTOR))); - } -} diff --git a/server-common/src/main/java/org/apache/kafka/security/PasswordEncoder.java b/server-common/src/main/java/org/apache/kafka/security/PasswordEncoder.java index 7d7822823b18f..64e11822e5fee 100644 --- a/server-common/src/main/java/org/apache/kafka/security/PasswordEncoder.java +++ b/server-common/src/main/java/org/apache/kafka/security/PasswordEncoder.java @@ -19,19 +19,8 @@ import org.apache.kafka.common.config.types.Password; import java.security.GeneralSecurityException; -import java.util.Base64; public interface PasswordEncoder { - - String KEY_FACTORY_ALGORITHM = "keyFactoryAlgorithm"; - String CIPHER_ALGORITHM = "cipherAlgorithm"; - String INITIALIZATION_VECTOR = "initializationVector"; - String KEY_LENGTH = "keyLength"; - String SALT = "salt"; - String ITERATIONS = "iterations"; - String ENCRYPTED_PASSWORD = "encryptedPassword"; - String PASSWORD_LENGTH = "passwordLength"; - /** * A password encoder that does not modify the given password. This is used in KRaft mode only. */ @@ -48,22 +37,6 @@ public Password decode(String encodedPassword) { } }; - static byte[] base64Decode(String encoded) { - return Base64.getDecoder().decode(encoded); - } - - static String base64Encode(byte[] bytes) { - return Base64.getEncoder().encodeToString(bytes); - } - - static EncryptingPasswordEncoder encrypting(Password secret, - String keyFactoryAlgorithm, - String cipherAlgorithm, - int keyLength, - int iterations) { - return new EncryptingPasswordEncoder(secret, keyFactoryAlgorithm, cipherAlgorithm, keyLength, iterations); - } - String encode(Password password) throws GeneralSecurityException; Password decode(String encodedPassword) throws GeneralSecurityException; } diff --git a/server-common/src/main/java/org/apache/kafka/security/PasswordEncoderConfigs.java b/server-common/src/main/java/org/apache/kafka/security/PasswordEncoderConfigs.java deleted file mode 100644 index ddb724420d73a..0000000000000 --- a/server-common/src/main/java/org/apache/kafka/security/PasswordEncoderConfigs.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
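
A minimal sketch of how the two CipherParamsEncoder implementations above round-trip cipher parameters through the string map stored next to an encoded password. Illustrative only: the key and cipher are generated locally, and the classes are the ones deleted by this patch.

import java.security.spec.AlgorithmParameterSpec;
import java.util.Map;

import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;

import org.apache.kafka.security.IvParamsEncoder;

public class CipherParamsRoundTrip {
    public static void main(String[] args) throws Exception {
        SecretKey key = KeyGenerator.getInstance("AES").generateKey();
        Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        cipher.init(Cipher.ENCRYPT_MODE, key);                       // the provider picks a random IV
        IvParamsEncoder encoder = new IvParamsEncoder();
        Map<String, String> params = encoder.toMap(cipher.getParameters());
        // params holds a single entry: initializationVector -> base64(IV)
        AlgorithmParameterSpec spec = encoder.toParameterSpec(params);
        cipher.init(Cipher.DECRYPT_MODE, key, spec);                 // same IV restored for decryption
        // An AES/GCM/* cipher would go through GcmParamsEncoder instead, which additionally
        // records the authentication tag length under "authenticationTagLength".
    }
}
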
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.security; - -import org.apache.kafka.common.config.ConfigDef; - -import static org.apache.kafka.common.config.ConfigDef.Importance.LOW; -import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM; -import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; -import static org.apache.kafka.common.config.ConfigDef.Type.INT; -import static org.apache.kafka.common.config.ConfigDef.Type.PASSWORD; -import static org.apache.kafka.common.config.ConfigDef.Type.STRING; - -public class PasswordEncoderConfigs { - - public static final String PASSWORD_ENCODER_SECRET_CONFIG = "password.encoder.secret"; - public static final String PASSWORD_ENCODER_SECRET_DOC = "The secret used for encoding dynamically configured passwords for this broker."; - - public static final String PASSWORD_ENCODER_OLD_SECRET_CONFIG = "password.encoder.old.secret"; - public static final String PASSWORD_ENCODER_OLD_SECRET_DOC = "The old secret that was used for encoding dynamically configured passwords. " + - "This is required only when the secret is updated. If specified, all dynamically encoded passwords are " + - "decoded using this old secret and re-encoded using " + PASSWORD_ENCODER_SECRET_CONFIG + " when broker starts up."; - - public static final String PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_CONFIG = "password.encoder.keyfactory.algorithm"; - public static final String PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_DOC = "The SecretKeyFactory algorithm used for encoding dynamically configured passwords. 
" + - "Default is PBKDF2WithHmacSHA512 if available and PBKDF2WithHmacSHA1 otherwise."; - - public static final String PASSWORD_ENCODER_CIPHER_ALGORITHM_CONFIG = "password.encoder.cipher.algorithm"; - public static final String PASSWORD_ENCODER_CIPHER_ALGORITHM_DOC = "The Cipher algorithm used for encoding dynamically configured passwords."; - public static final String PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT = "AES/CBC/PKCS5Padding"; - - public static final String PASSWORD_ENCODER_KEY_LENGTH_CONFIG = "password.encoder.key.length"; - public static final String PASSWORD_ENCODER_KEY_LENGTH_DOC = "The key length used for encoding dynamically configured passwords."; - public static final int PASSWORD_ENCODER_KEY_LENGTH_DEFAULT = 128; - - public static final String PASSWORD_ENCODER_ITERATIONS_CONFIG = "password.encoder.iterations"; - public static final String PASSWORD_ENCODER_ITERATIONS_DOC = "The iteration count used for encoding dynamically configured passwords."; - public static final int PASSWORD_ENCODER_ITERATIONS_DEFAULT = 4096; - public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_CONFIG, PASSWORD, null, MEDIUM, PasswordEncoderConfigs.PASSWORD_ENCODER_SECRET_DOC) - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_CONFIG, PASSWORD, null, MEDIUM, PasswordEncoderConfigs.PASSWORD_ENCODER_OLD_SECRET_DOC) - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_CONFIG, STRING, null, LOW, PasswordEncoderConfigs.PASSWORD_ENCODER_KEYFACTORY_ALGORITHM_DOC) - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_CONFIG, STRING, PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT, LOW, PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DOC) - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_CONFIG, INT, PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_DEFAULT, atLeast(8), LOW, PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_DOC) - .define(PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_CONFIG, INT, PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_DEFAULT, atLeast(1024), LOW, PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_DOC); -} diff --git a/server-common/src/main/java/org/apache/kafka/server/common/CheckpointFile.java b/server-common/src/main/java/org/apache/kafka/server/common/CheckpointFile.java index 6efbaa136e0e9..bd5cced567a54 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/CheckpointFile.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/CheckpointFile.java @@ -163,7 +163,7 @@ public List read() throws IOException { line = reader.readLine(); while (line != null) { Optional maybeEntry = formatter.fromString(line); - if (!maybeEntry.isPresent()) { + if (maybeEntry.isEmpty()) { throw buildMalformedLineException(line); } entries.add(maybeEntry.get()); diff --git a/server-common/src/main/java/org/apache/kafka/server/common/EligibleLeaderReplicasVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/EligibleLeaderReplicasVersion.java new file mode 100644 index 0000000000000..68dabd2594ade --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/common/EligibleLeaderReplicasVersion.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.common; + +import java.util.Collections; +import java.util.Map; + +public enum EligibleLeaderReplicasVersion implements FeatureVersion { + + // Version 0 is the version disable ELR. + ELRV_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + + // Version 1 enables the ELR (KIP-966). + ELRV_1(1, MetadataVersion.IBP_4_0_IV1, Collections.emptyMap()); + + public static final String FEATURE_NAME = "eligible.leader.replicas.version"; + + public static final EligibleLeaderReplicasVersion LATEST_PRODUCTION = ELRV_0; + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + EligibleLeaderReplicasVersion( + int featureLevel, + MetadataVersion bootstrapMetadataVersion, + Map dependencies + ) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + + public boolean isEligibleLeaderReplicasFeatureEnabeld() { + return featureLevel >= ELRV_1.featureLevel; + } + + public static EligibleLeaderReplicasVersion fromFeatureLevel(short version) { + switch (version) { + case 0: + return ELRV_0; + case 1: + return ELRV_1; + default: + throw new RuntimeException("Unknown eligible leader replicas feature level: " + (int) version); + } + } +} diff --git a/server-common/src/main/java/org/apache/kafka/server/common/Feature.java b/server-common/src/main/java/org/apache/kafka/server/common/Feature.java new file mode 100644 index 0000000000000..8a812fe521d8b --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/common/Feature.java @@ -0,0 +1,329 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
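
A minimal sketch of how the new enum above is used: mapping a finalized level of eligible.leader.replicas.version back to an enum constant, for example when reading finalized feature records. Note that LATEST_PRODUCTION is still ELRV_0, so ELR is not enabled by default.

import org.apache.kafka.server.common.EligibleLeaderReplicasVersion;

public class ElrVersionExample {
    public static void main(String[] args) {
        EligibleLeaderReplicasVersion elr = EligibleLeaderReplicasVersion.fromFeatureLevel((short) 1);
        System.out.println(elr);                             // ELRV_1
        System.out.println(elr.featureName());               // eligible.leader.replicas.version
        System.out.println(elr.bootstrapMetadataVersion());  // IBP_4_0_IV1, the MV that bootstraps ELR by default
    }
}
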
+ */ +package org.apache.kafka.server.common; + +import org.apache.kafka.common.feature.SupportedVersionRange; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.kafka.server.common.UnitTestFeatureVersion.FV0.UT_FV0_0; + +/** + * This is enum for the various features implemented for Kafka clusters. + * KIP-584: Versioning Scheme for Features introduced the idea of various features, but only added one feature -- MetadataVersion. + * KIP-1022: Formatting and Updating Features allowed for more features to be added. In order to set and update features, + * they need to be specified via the StorageTool or FeatureCommand tools. + *
              + * Having a unified enum for the features that will use a shared type in the API used to set and update them + * makes it easier to process these features. + */ +public enum Feature { + + /** + * Features defined. If a feature is included in this list, and marked to be used in production they will also be specified when + * formatting a cluster via the StorageTool. MetadataVersion is handled separately, so it is not included here. + * + * See {@link TestFeatureVersion} as an example. See {@link FeatureVersion} when implementing a new feature. + */ + KRAFT_VERSION(KRaftVersion.FEATURE_NAME, KRaftVersion.values(), KRaftVersion.LATEST_PRODUCTION), + TRANSACTION_VERSION(TransactionVersion.FEATURE_NAME, TransactionVersion.values(), TransactionVersion.LATEST_PRODUCTION), + GROUP_VERSION(GroupVersion.FEATURE_NAME, GroupVersion.values(), GroupVersion.LATEST_PRODUCTION), + ELIGIBLE_LEADER_REPLICAS_VERSION(EligibleLeaderReplicasVersion.FEATURE_NAME, EligibleLeaderReplicasVersion.values(), EligibleLeaderReplicasVersion.LATEST_PRODUCTION), + + /** + * Features defined only for unit tests and are not used in production. + */ + TEST_VERSION(TestFeatureVersion.FEATURE_NAME, TestFeatureVersion.values(), TestFeatureVersion.LATEST_PRODUCTION), + UNIT_TEST_VERSION_0(UnitTestFeatureVersion.FV0.FEATURE_NAME, new FeatureVersion[]{UT_FV0_0}, UnitTestFeatureVersion.FV0.LATEST_PRODUCTION), + UNIT_TEST_VERSION_1(UnitTestFeatureVersion.FV1.FEATURE_NAME, UnitTestFeatureVersion.FV1.values(), UnitTestFeatureVersion.FV1.LATEST_PRODUCTION), + UNIT_TEST_VERSION_2(UnitTestFeatureVersion.FV2.FEATURE_NAME, UnitTestFeatureVersion.FV2.values(), UnitTestFeatureVersion.FV2.LATEST_PRODUCTION), + UNIT_TEST_VERSION_3(UnitTestFeatureVersion.FV3.FEATURE_NAME, UnitTestFeatureVersion.FV3.values(), UnitTestFeatureVersion.FV3.LATEST_PRODUCTION), + UNIT_TEST_VERSION_4(UnitTestFeatureVersion.FV4.FEATURE_NAME, UnitTestFeatureVersion.FV4.values(), UnitTestFeatureVersion.FV4.LATEST_PRODUCTION), + UNIT_TEST_VERSION_5(UnitTestFeatureVersion.FV5.FEATURE_NAME, UnitTestFeatureVersion.FV5.values(), UnitTestFeatureVersion.FV5.LATEST_PRODUCTION), + UNIT_TEST_VERSION_6(UnitTestFeatureVersion.FV6.FEATURE_NAME, UnitTestFeatureVersion.FV6.values(), UnitTestFeatureVersion.FV6.LATEST_PRODUCTION), + UNIT_TEST_VERSION_7(UnitTestFeatureVersion.FV7.FEATURE_NAME, UnitTestFeatureVersion.FV7.values(), UnitTestFeatureVersion.FV7.LATEST_PRODUCTION); + + public static final Feature[] FEATURES; + + // The list of features that are not unit test features. + public static final List TEST_AND_PRODUCTION_FEATURES; + + public static final List PRODUCTION_FEATURES; + + public static final List PRODUCTION_FEATURE_NAMES; + private final String name; + private final FeatureVersion[] featureVersions; + + // The latest production version of the feature, owned and updated by the feature owner + // in the respective feature definition. The value should not be smaller than the default + // value calculated with {@link #defaultValue(MetadataVersion)}. + public final FeatureVersion latestProduction; + + Feature(String name, + FeatureVersion[] featureVersions, + FeatureVersion latestProduction) { + this.name = name; + this.featureVersions = featureVersions; + this.latestProduction = latestProduction; + } + + static { + Feature[] enumValues = Feature.values(); + FEATURES = Arrays.copyOf(enumValues, enumValues.length); + + TEST_AND_PRODUCTION_FEATURES = Arrays.stream(FEATURES).filter(feature -> + !feature.name.startsWith("unit." 
+ TestFeatureVersion.FEATURE_NAME) + ).collect(Collectors.toList()); + + PRODUCTION_FEATURES = Arrays.stream(FEATURES).filter(feature -> + !feature.name.equals(TEST_VERSION.featureName()) && + !feature.name.startsWith("unit." + TestFeatureVersion.FEATURE_NAME) + ).collect(Collectors.toList()); + PRODUCTION_FEATURE_NAMES = PRODUCTION_FEATURES.stream().map(feature -> + feature.name).collect(Collectors.toList()); + + validateDefaultValueAndLatestProductionValue(TEST_VERSION); + for (Feature feature : PRODUCTION_FEATURES) { + validateDefaultValueAndLatestProductionValue(feature); + } + } + + public String featureName() { + return name; + } + + public FeatureVersion[] featureVersions() { + return featureVersions; + } + + public short latestProduction() { + return latestProduction.featureLevel(); + } + + public short minimumProduction() { + return featureVersions[0].featureLevel(); + } + + public short latestTesting() { + return featureVersions[featureVersions.length - 1].featureLevel(); + } + + public SupportedVersionRange supportedVersionRange() { + return new SupportedVersionRange( + minimumProduction(), + latestTesting() + ); + } + + /** + * Creates a FeatureVersion from a level. + * + * @param level the level of the feature + * @param allowUnstableFeatureVersions whether unstable versions can be used + * @return the FeatureVersionUtils.FeatureVersion for the feature the enum is based on. + * @throws IllegalArgumentException if the feature is not known. + */ + public FeatureVersion fromFeatureLevel(short level, + boolean allowUnstableFeatureVersions) { + return Arrays.stream(featureVersions).filter(featureVersion -> + featureVersion.featureLevel() == level && (allowUnstableFeatureVersions || level <= latestProduction())).findFirst().orElseThrow( + () -> new IllegalArgumentException("No feature:" + featureName() + " with feature level " + level)); + } + + /** + * A method to validate the feature can be set. If a given feature relies on another feature, the dependencies should be + * captured in {@link FeatureVersion#dependencies()} + *

              + * For example, say feature X level x relies on feature Y level y: + * if feature X >= x then throw an error if feature Y < y. + * + * All feature levels above 0 in kraft require metadata.version=4 (IBP_3_3_IV0) in order to write the feature records to the cluster. + * + * @param feature the feature we are validating + * @param features the feature versions we have (or want to set) + * @throws IllegalArgumentException if the feature is not valid + */ + public static void validateVersion(FeatureVersion feature, Map features) { + Short metadataVersion = features.get(MetadataVersion.FEATURE_NAME); + + if (feature.featureLevel() >= 1 && (metadataVersion == null || metadataVersion < MetadataVersion.IBP_3_3_IV0.featureLevel())) + throw new IllegalArgumentException(feature.featureName() + " could not be set to " + feature.featureLevel() + + " because it depends on metadata.version=4 (" + MetadataVersion.IBP_3_3_IV0 + ")"); + + for (Map.Entry dependency: feature.dependencies().entrySet()) { + Short featureLevel = features.get(dependency.getKey()); + + if (featureLevel == null || featureLevel < dependency.getValue()) { + throw new IllegalArgumentException(feature.featureName() + " could not be set to " + feature.featureLevel() + + " because it depends on " + dependency.getKey() + " level " + dependency.getValue()); + } + } + } + + /** + * A method to return the default (latest production) version of a feature based on the metadata version provided. + * + * Every time a new feature is added, it should create a mapping from metadata version to feature version + * with {@link FeatureVersion#bootstrapMetadataVersion()}. The feature version should be marked as production ready + * before the metadata version is made production ready. + * + * @param metadataVersion the metadata version we want to use to set the default. + * @return the default version given the feature and provided metadata version + */ + public FeatureVersion defaultVersion(MetadataVersion metadataVersion) { + FeatureVersion version = featureVersions[0]; + for (Iterator it = Arrays.stream(featureVersions).iterator(); it.hasNext(); ) { + FeatureVersion feature = it.next(); + if (feature.bootstrapMetadataVersion().isLessThan(metadataVersion) || feature.bootstrapMetadataVersion().equals(metadataVersion)) + version = feature; + else + return version; + } + return version; + } + + public short defaultLevel(MetadataVersion metadataVersion) { + return defaultVersion(metadataVersion).featureLevel(); + } + + public static Feature featureFromName(String featureName) { + for (Feature feature : FEATURES) { + if (feature.name.equals(featureName)) + return feature; + } + throw new IllegalArgumentException("Feature " + featureName + " not found."); + } + + public boolean isProductionReady(short featureVersion) { + return featureVersion <= latestProduction(); + } + + public boolean hasFeatureVersion(FeatureVersion featureVersion) { + for (FeatureVersion v : featureVersions()) { + if (v == featureVersion) { + return true; + } + } + return false; + } + + /** + * The method ensures that the following statements are met: + * 1. The latest production value is one of the feature values. + * 2. The latest production value >= the default value. + * 3. The dependencies of the latest production value <= their latest production values. + * 4. The dependencies of all default values <= their default values. + * 5. If the latest production depends on MetadataVersion, the value should be <= MetadataVersion.LATEST_PRODUCTION. + * 6. 
If any default value depends on MetadataVersion, the value should be <= the default value bootstrap MV. + * + * Suppose we have feature X as the feature being validated. + * Invalid examples: + * - The feature X has default version = XV_10 (dependency = {}), latest production = XV_5 (dependency = {}) + * (Violating rule 2. The latest production value XV_5 is smaller than the default value) + * - The feature X has latest production = XV_11 (dependency = {Y: YV_4}) + * The feature Y has latest production = YV_3 (dependency = {}) + * (Violating rule 3. For latest production XV_11, Y's latest production YV_3 is smaller than the dependency value YV_4) + * - The feature X has default version = XV_10 (dependency = {Y: YV_4}) + * The feature Y has default version = YV_3 (dependency = {}) + * (Violating rule 4. For default version XV_10, Y's default value YV_3 is smaller than the dependency value YV_4) + * - The feature X has latest production = XV_11 (dependency = {MetadataVersion: IBP_4_0_IV1}), MetadataVersion.LATEST_PRODUCTION is IBP_4_0_IV0 + * (Violating rule 5. The dependency MV IBP_4_0_IV1 is behind MV latest production IBP_4_0_IV0) + * - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV1}) and bootstrap MV = IBP_4_0_IV0 + * (Violating rule 6. When MV latest production is IBP_4_0_IV0, feature X will be set to XV_10 by default whereas it depends on MV IBP_4_0_IV1) + * Valid examples: + * - The feature X has default version = XV_10 (dependency = {}), latest production = XV_10 (dependency = {}) + * - The feature X has default version = XV_10 (dependency = {Y: YV_3}), latest production = XV_11 (dependency = {Y: YV_4}) + * The feature Y has default version = YV_3 (dependency = {}), latest production = YV_4 (dependency = {}) + * - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV0}), boostrap MV = IBP_4_0_IV0, + * latest production = XV_11 (dependency = {MetadataVersion: IBP_4_0_IV1}), MV latest production = IBP_4_0_IV1 + * + * @param feature the feature to validate. + * @return true if the feature is valid, false otherwise. + * @throws IllegalArgumentException if the feature violates any of the rules thus is not valid. + */ + public static void validateDefaultValueAndLatestProductionValue( + Feature feature + ) throws IllegalArgumentException { + FeatureVersion defaultVersion = feature.defaultVersion(MetadataVersion.LATEST_PRODUCTION); + FeatureVersion latestProduction = feature.latestProduction; + + if (!feature.hasFeatureVersion(latestProduction)) { + throw new IllegalArgumentException(String.format("Feature %s has latest production version %s " + + "which is not one of its feature versions.", feature.name(), latestProduction)); + } + + if (latestProduction.featureLevel() < defaultVersion.featureLevel()) { + throw new IllegalArgumentException(String.format("Feature %s has latest production value %s " + + "smaller than its default version %s with latest production MV.", + feature.name(), latestProduction, defaultVersion)); + } + + for (Map.Entry dependency: latestProduction.dependencies().entrySet()) { + String dependencyFeatureName = dependency.getKey(); + if (!dependencyFeatureName.equals(MetadataVersion.FEATURE_NAME)) { + Feature dependencyFeature = featureFromName(dependencyFeatureName); + if (!dependencyFeature.isProductionReady(dependency.getValue())) { + throw new IllegalArgumentException(String.format("Feature %s has latest production FeatureVersion %s " + + "with dependency %s that is not production ready. 
(%s latest production: %s)", + feature.name(), latestProduction, dependencyFeature.fromFeatureLevel(dependency.getValue(), true), + dependencyFeature, dependencyFeature.latestProduction)); + } + } else { + if (dependency.getValue() > MetadataVersion.LATEST_PRODUCTION.featureLevel()) { + throw new IllegalArgumentException(String.format("Feature %s has latest production FeatureVersion %s " + + "with MV dependency %s that is not production ready. (MV latest production: %s)", + feature.name(), latestProduction, MetadataVersion.fromFeatureLevel(dependency.getValue()), + MetadataVersion.LATEST_PRODUCTION)); + } + } + } + + for (MetadataVersion metadataVersion: MetadataVersion.values()) { + // Only checking the kraft metadata versions. + if (metadataVersion.compareTo(MetadataVersion.MINIMUM_KRAFT_VERSION) < 0) { + continue; + } + + defaultVersion = feature.defaultVersion(metadataVersion); + for (Map.Entry dependency: defaultVersion.dependencies().entrySet()) { + String dependencyFeatureName = dependency.getKey(); + if (!dependencyFeatureName.equals(MetadataVersion.FEATURE_NAME)) { + Feature dependencyFeature = featureFromName(dependencyFeatureName); + if (dependency.getValue() > dependencyFeature.defaultLevel(metadataVersion)) { + throw new IllegalArgumentException(String.format("Feature %s has default FeatureVersion %s " + + "when MV=%s with dependency %s that is behind its default version %s.", + feature.name(), defaultVersion, metadataVersion, + dependencyFeature.fromFeatureLevel(dependency.getValue(), true), + dependencyFeature.defaultVersion(metadataVersion))); + } + } else { + if (dependency.getValue() > defaultVersion.bootstrapMetadataVersion().featureLevel()) { + throw new IllegalArgumentException(String.format("Feature %s has default FeatureVersion %s " + + "when MV=%s with MV dependency %s that is behind its bootstrap MV %s.", + feature.name(), defaultVersion, metadataVersion, + MetadataVersion.fromFeatureLevel(dependency.getValue()), + defaultVersion.bootstrapMetadataVersion())); + } + } + } + } + } +} diff --git a/server-common/src/main/java/org/apache/kafka/server/common/Features.java b/server-common/src/main/java/org/apache/kafka/server/common/Features.java deleted file mode 100644 index 51f3d78e86877..0000000000000 --- a/server-common/src/main/java/org/apache/kafka/server/common/Features.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.server.common; - -import org.apache.kafka.common.feature.SupportedVersionRange; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * This is enum for the various features implemented for Kafka clusters. 
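
A minimal sketch of the dependency check performed by the new Feature#validateVersion above, using the eligible.leader.replicas.version feature defined earlier (its level 1 has no feature dependencies beyond the metadata.version floor). The contents of the finalized map are illustrative.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.server.common.Feature;
import org.apache.kafka.server.common.FeatureVersion;
import org.apache.kafka.server.common.MetadataVersion;

public class ValidateVersionExample {
    public static void main(String[] args) {
        // allowUnstableFeatureVersions = true because ELRV_1 is above the feature's latest production level
        FeatureVersion elrv1 = Feature.ELIGIBLE_LEADER_REPLICAS_VERSION.fromFeatureLevel((short) 1, true);

        Map<String, Short> finalized = new HashMap<>();
        finalized.put(MetadataVersion.FEATURE_NAME, MetadataVersion.latestProduction().featureLevel());
        Feature.validateVersion(elrv1, finalized);           // passes: metadata.version is at least IBP_3_3_IV0

        try {
            Feature.validateVersion(elrv1, new HashMap<>()); // no metadata.version finalized
        } catch (IllegalArgumentException e) {
            // message: eligible.leader.replicas.version could not be set to 1 because it depends on metadata.version=4 (...)
            System.out.println(e.getMessage());
        }
    }
}
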
- * KIP-584: Versioning Scheme for Features introduced the idea of various features, but only added one feature -- MetadataVersion. - * KIP-1022: Formatting and Updating Features allowed for more features to be added. In order to set and update features, - * they need to be specified via the StorageTool or FeatureCommand tools. - *
              - * Having a unified enum for the features that will use a shared type in the API used to set and update them - * makes it easier to process these features. - */ -public enum Features { - - /** - * Features defined. If a feature is included in this list, and marked to be used in production they will also be specified when - * formatting a cluster via the StorageTool. MetadataVersion is handled separately, so it is not included here. - * - * See {@link TestFeatureVersion} as an example. See {@link FeatureVersion} when implementing a new feature. - */ - TEST_VERSION("test.feature.version", TestFeatureVersion.values()), - KRAFT_VERSION("kraft.version", KRaftVersion.values()), - TRANSACTION_VERSION("transaction.version", TransactionVersion.values()), - GROUP_VERSION("group.version", GroupVersion.values()); - - public static final Features[] FEATURES; - public static final List PRODUCTION_FEATURES; - - public static final List PRODUCTION_FEATURE_NAMES; - private final String name; - private final FeatureVersion[] featureVersions; - - Features(String name, - FeatureVersion[] featureVersions) { - this.name = name; - this.featureVersions = featureVersions; - } - - static { - Features[] enumValues = Features.values(); - FEATURES = Arrays.copyOf(enumValues, enumValues.length); - - PRODUCTION_FEATURES = Arrays.stream(FEATURES).filter(feature -> - !feature.name.equals(TEST_VERSION.featureName())).collect(Collectors.toList()); - PRODUCTION_FEATURE_NAMES = PRODUCTION_FEATURES.stream().map(feature -> - feature.name).collect(Collectors.toList()); - } - - public String featureName() { - return name; - } - - public FeatureVersion[] featureVersions() { - return featureVersions; - } - - public short latestProduction() { - return defaultValue(MetadataVersion.LATEST_PRODUCTION); - } - - public short minimumProduction() { - return featureVersions[0].featureLevel(); - } - - public short latestTesting() { - return featureVersions[featureVersions.length - 1].featureLevel(); - } - - public SupportedVersionRange supportedVersionRange() { - return new SupportedVersionRange( - minimumProduction(), - latestTesting() - ); - } - - /** - * Creates a FeatureVersion from a level. - * - * @param level the level of the feature - * @param allowUnstableFeatureVersions whether unstable versions can be used - * @return the FeatureVersionUtils.FeatureVersion for the feature the enum is based on. - * @throws IllegalArgumentException if the feature is not known. - */ - public FeatureVersion fromFeatureLevel(short level, - boolean allowUnstableFeatureVersions) { - return Arrays.stream(featureVersions).filter(featureVersion -> - featureVersion.featureLevel() == level && (allowUnstableFeatureVersions || level <= latestProduction())).findFirst().orElseThrow( - () -> new IllegalArgumentException("No feature:" + featureName() + " with feature level " + level)); - } - - /** - * A method to validate the feature can be set. If a given feature relies on another feature, the dependencies should be - * captured in {@link FeatureVersion#dependencies()} - *

              - * For example, say feature X level x relies on feature Y level y: - * if feature X >= x then throw an error if feature Y < y. - * - * All feature levels above 0 in kraft require metadata.version=4 (IBP_3_3_IV0) in order to write the feature records to the cluster. - * - * @param feature the feature we are validating - * @param features the feature versions we have (or want to set) - * @throws IllegalArgumentException if the feature is not valid - */ - public static void validateVersion(FeatureVersion feature, Map features) { - Short metadataVersion = features.get(MetadataVersion.FEATURE_NAME); - - if (feature.featureLevel() >= 1 && (metadataVersion == null || metadataVersion < MetadataVersion.IBP_3_3_IV0.featureLevel())) - throw new IllegalArgumentException(feature.featureName() + " could not be set to " + feature.featureLevel() + - " because it depends on metadata.version=4 (" + MetadataVersion.IBP_3_3_IV0 + ")"); - - for (Map.Entry dependency: feature.dependencies().entrySet()) { - Short featureLevel = features.get(dependency.getKey()); - - if (featureLevel == null || featureLevel < dependency.getValue()) { - throw new IllegalArgumentException(feature.featureName() + " could not be set to " + feature.featureLevel() + - " because it depends on " + dependency.getKey() + " level " + dependency.getValue()); - } - } - } - - /** - * A method to return the default (latest production) level of a feature based on the metadata version provided. - * - * Every time a new feature is added, it should create a mapping from metadata version to feature version - * with {@link FeatureVersion#bootstrapMetadataVersion()}. When the feature version is production ready, the metadata - * version should be made production ready as well. - * - * @param metadataVersion the metadata version we want to use to set the default. 
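
The removed Features#defaultValue above is superseded by Feature#defaultVersion / Feature#defaultLevel on the new enum. A minimal sketch of the behaviour, using the eligible leader replicas feature whose level 1 bootstraps at IBP_4_0_IV1:

import org.apache.kafka.server.common.Feature;
import org.apache.kafka.server.common.MetadataVersion;

public class DefaultLevelExample {
    public static void main(String[] args) {
        Feature elr = Feature.ELIGIBLE_LEADER_REPLICAS_VERSION;
        // Below the bootstrap MV of ELRV_1, the default stays at level 0 ...
        System.out.println(elr.defaultLevel(MetadataVersion.IBP_3_3_IV0));  // 0
        // ... and from the bootstrap MV onwards the default advances to level 1.
        System.out.println(elr.defaultLevel(MetadataVersion.IBP_4_0_IV1));  // 1
    }
}
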
- * @return the default version level given the feature and provided metadata version - */ - public short defaultValue(MetadataVersion metadataVersion) { - short level = 0; - for (Iterator it = Arrays.stream(featureVersions).iterator(); it.hasNext(); ) { - FeatureVersion feature = it.next(); - if (feature.bootstrapMetadataVersion().isLessThan(metadataVersion) || feature.bootstrapMetadataVersion().equals(metadataVersion)) - level = feature.featureLevel(); - else - return level; - } - return level; - } - - public static Features featureFromName(String featureName) { - for (Features features : FEATURES) { - if (features.name.equals(featureName)) - return features; - } - throw new IllegalArgumentException("Feature " + featureName + " not found."); - } - - /** - * Utility method to map a list of FeatureVersion to a map of feature name to feature level - */ - public static Map featureImplsToMap(List features) { - return features.stream().collect(Collectors.toMap(FeatureVersion::featureName, FeatureVersion::featureLevel)); - } -} diff --git a/server-common/src/main/java/org/apache/kafka/server/common/GroupVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/GroupVersion.java index 3ddbe4d15f03e..881031e6ecfce 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/GroupVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/GroupVersion.java @@ -29,6 +29,8 @@ public enum GroupVersion implements FeatureVersion { public static final String FEATURE_NAME = "group.version"; + public static final GroupVersion LATEST_PRODUCTION = GV_1; + private final short featureLevel; private final MetadataVersion bootstrapMetadataVersion; private final Map dependencies; diff --git a/server-common/src/main/java/org/apache/kafka/server/common/KRaftVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/KRaftVersion.java index a55dc7318c4a1..734b515b5a835 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/KRaftVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/KRaftVersion.java @@ -28,6 +28,8 @@ public enum KRaftVersion implements FeatureVersion { public static final String FEATURE_NAME = "kraft.version"; + public static final KRaftVersion LATEST_PRODUCTION = KRAFT_VERSION_1; + private final short featureLevel; private final MetadataVersion bootstrapMetadataVersion; diff --git a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java index 43d63c52742ea..310a9242b2345 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.server.common; -import org.apache.kafka.common.record.RecordVersion; import java.util.Arrays; import java.util.HashMap; @@ -45,103 +44,6 @@ */ public enum MetadataVersion { - IBP_0_8_0(-1, "0.8.0", ""), - IBP_0_8_1(-1, "0.8.1", ""), - IBP_0_8_2(-1, "0.8.2", ""), - IBP_0_9_0(-1, "0.9.0", ""), - - // 0.10.0-IV0 is introduced for KIP-31/32 which changes the message format. - IBP_0_10_0_IV0(-1, "0.10.0", "IV0"), - - // 0.10.0-IV1 is introduced for KIP-36(rack awareness) and KIP-43(SASL handshake). - IBP_0_10_0_IV1(-1, "0.10.0", "IV1"), - - // introduced for JoinGroup protocol change in KIP-62 - IBP_0_10_1_IV0(-1, "0.10.1", "IV0"), - - // 0.10.1-IV1 is introduced for KIP-74(fetch response size limit). 
- IBP_0_10_1_IV1(-1, "0.10.1", "IV1"), - - // introduced ListOffsetRequest v1 in KIP-79 - IBP_0_10_1_IV2(-1, "0.10.1", "IV2"), - - // introduced UpdateMetadataRequest v3 in KIP-103 - IBP_0_10_2_IV0(-1, "0.10.2", "IV0"), - - // KIP-98 (idempotent and transactional producer support) - IBP_0_11_0_IV0(-1, "0.11.0", "IV0"), - - // introduced DeleteRecordsRequest v0 and FetchRequest v4 in KIP-107 - IBP_0_11_0_IV1(-1, "0.11.0", "IV1"), - - // Introduced leader epoch fetches to the replica fetcher via KIP-101 - IBP_0_11_0_IV2(-1, "0.11.0", "IV2"), - - // Introduced LeaderAndIsrRequest V1, UpdateMetadataRequest V4 and FetchRequest V6 via KIP-112 - IBP_1_0_IV0(-1, "1.0", "IV0"), - - // Introduced DeleteGroupsRequest V0 via KIP-229, plus KIP-227 incremental fetch requests, - // and KafkaStorageException for fetch requests. - IBP_1_1_IV0(-1, "1.1", "IV0"), - - // Introduced OffsetsForLeaderEpochRequest V1 via KIP-279 (Fix log divergence between leader and follower after fast leader fail over) - IBP_2_0_IV0(-1, "2.0", "IV0"), - - // Several request versions were bumped due to KIP-219 (Improve quota communication) - IBP_2_0_IV1(-1, "2.0", "IV1"), - - // Introduced new schemas for group offset (v2) and group metadata (v2) (KIP-211) - IBP_2_1_IV0(-1, "2.1", "IV0"), - - // New Fetch, OffsetsForLeaderEpoch, and ListOffsets schemas (KIP-320) - IBP_2_1_IV1(-1, "2.1", "IV1"), - - // Support ZStandard Compression Codec (KIP-110) - IBP_2_1_IV2(-1, "2.1", "IV2"), - - // Introduced broker generation (KIP-380), and - // LeaderAndIsrRequest V2, UpdateMetadataRequest V5, StopReplicaRequest V1 - IBP_2_2_IV0(-1, "2.2", "IV0"), - - // New error code for ListOffsets when a new leader is lagging behind former HW (KIP-207) - IBP_2_2_IV1(-1, "2.2", "IV1"), - - // Introduced static membership. - IBP_2_3_IV0(-1, "2.3", "IV0"), - - // Add rack_id to FetchRequest, preferred_read_replica to FetchResponse, and replica_id to OffsetsForLeaderRequest - IBP_2_3_IV1(-1, "2.3", "IV1"), - - // Add adding_replicas and removing_replicas fields to LeaderAndIsrRequest - IBP_2_4_IV0(-1, "2.4", "IV0"), - - // Flexible version support in inter-broker APIs - IBP_2_4_IV1(-1, "2.4", "IV1"), - - // No new APIs, equivalent to 2.4-IV1 - IBP_2_5_IV0(-1, "2.5", "IV0"), - - // Introduced StopReplicaRequest V3 containing the leader epoch for each partition (KIP-570) - IBP_2_6_IV0(-1, "2.6", "IV0"), - - // Introduced feature versioning support (KIP-584) - IBP_2_7_IV0(-1, "2.7", "IV0"), - - // Bup Fetch protocol for Raft protocol (KIP-595) - IBP_2_7_IV1(-1, "2.7", "IV1"), - - // Introduced AlterPartition (KIP-497) - IBP_2_7_IV2(-1, "2.7", "IV2"), - - // Flexible versioning on ListOffsets, WriteTxnMarkers and OffsetsForLeaderEpoch. Also adds topic IDs (KIP-516) - IBP_2_8_IV0(-1, "2.8", "IV0"), - - // Introduced topic IDs to LeaderAndIsr and UpdateMetadata requests/responses (KIP-516) - IBP_2_8_IV1(-1, "2.8", "IV1"), - - // Introduce AllocateProducerIds (KIP-730) - IBP_3_0_IV0(-1, "3.0", "IV0"), - // Introduce ListOffsets V7 which supports listing offsets by max timestamp (KIP-734) // Assume message format version is 3.0 (KIP-724) IBP_3_0_IV1(1, "3.0", "IV1", true), @@ -255,7 +157,7 @@ public enum MetadataVersion { // LATEST_STABLE_METADATA_VERSION version in tests/kafkatest/version.py /** - * An array containing all of the MetadataVersion entries. + * An array containing all the MetadataVersion entries. * * This is essentially a cached copy of MetadataVersion.values. Unlike that function, it doesn't * allocate a new array each time. 
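
With the pre-KRaft IBP branches deleted, the request-version mappings in the hunks that follow bottom out at fixed floors. A minimal sketch of the resulting behaviour, assuming MINIMUM_KRAFT_VERSION remains 3.0-IV1 (below both IBP_3_1_IV0 and IBP_3_5_IV0):

import org.apache.kafka.server.common.MetadataVersion;

public class RequestVersionFloors {
    public static void main(String[] args) {
        MetadataVersion oldest = MetadataVersion.MINIMUM_KRAFT_VERSION;
        System.out.println(oldest.fetchRequestVersion());      // 12: everything below IBP_3_1_IV0 now returns 12
        System.out.println(oldest.listOffsetRequestVersion()); // 7: everything below IBP_3_5_IV0 now returns 7
    }
}
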
@@ -267,10 +169,6 @@ public enum MetadataVersion { private final String ibpVersion; private final boolean didMetadataChange; - MetadataVersion(int featureLevel, String release, String subVersion) { - this(featureLevel, release, subVersion, true); - } - MetadataVersion(int featureLevel, String release, String subVersion, boolean didMetadataChange) { this.featureLevel = (short) featureLevel; this.release = release; @@ -290,34 +188,6 @@ public short featureLevel() { return featureLevel; } - public boolean isSaslInterBrokerHandshakeRequestEnabled() { - return this.isAtLeast(IBP_0_10_0_IV1); - } - - public boolean isOffsetForLeaderEpochSupported() { - return this.isAtLeast(IBP_0_11_0_IV2); - } - - public boolean isFeatureVersioningSupported() { - return this.isAtLeast(IBP_2_7_IV0); - } - - public boolean isTruncationOnFetchSupported() { - return this.isAtLeast(IBP_2_7_IV1); - } - - public boolean isAlterPartitionSupported() { - return this.isAtLeast(IBP_2_7_IV2); - } - - public boolean isTopicIdsSupported() { - return this.isAtLeast(IBP_2_8_IV0); - } - - public boolean isAllocateProducerIdsSupported() { - return this.isAtLeast(IBP_3_0_IV0); - } - public boolean isLeaderRecoverySupported() { return this.isAtLeast(IBP_3_2_IV0); } @@ -326,10 +196,6 @@ public boolean isNoOpRecordSupported() { return this.isAtLeast(IBP_3_3_IV1); } - public boolean isApiForwardingEnabled() { - return this.isAtLeast(IBP_3_4_IV0); - } - public boolean isScramSupported() { return this.isAtLeast(IBP_3_5_IV2); } @@ -358,16 +224,6 @@ public boolean isKRaftSupported() { return this.featureLevel > 0; } - public RecordVersion highestSupportedRecordVersion() { - if (this.isLessThan(IBP_0_10_0_IV0)) { - return RecordVersion.V0; - } else if (this.isLessThan(IBP_0_11_0_IV0)) { - return RecordVersion.V1; - } else { - return RecordVersion.V2; - } - } - public boolean isBrokerRegistrationChangeRecordSupported() { return this.isAtLeast(IBP_3_3_IV2); } @@ -438,43 +294,8 @@ public short fetchRequestVersion() { return 14; } else if (this.isAtLeast(IBP_3_1_IV0)) { return 13; - } else if (this.isAtLeast(IBP_2_7_IV1)) { - return 12; - } else if (this.isAtLeast(IBP_2_3_IV1)) { - return 11; - } else if (this.isAtLeast(IBP_2_1_IV2)) { - return 10; - } else if (this.isAtLeast(IBP_2_0_IV1)) { - return 8; - } else if (this.isAtLeast(IBP_1_1_IV0)) { - return 7; - } else if (this.isAtLeast(IBP_0_11_0_IV1)) { - return 5; - } else if (this.isAtLeast(IBP_0_11_0_IV0)) { - return 4; - } else if (this.isAtLeast(IBP_0_10_1_IV1)) { - return 3; - } else if (this.isAtLeast(IBP_0_10_0_IV0)) { - return 2; - } else if (this.isAtLeast(IBP_0_9_0)) { - return 1; - } else { - return 0; - } - } - - public short offsetForLeaderEpochRequestVersion() { - if (this.isAtLeast(IBP_2_8_IV0)) { - return 4; - } else if (this.isAtLeast(IBP_2_3_IV1)) { - return 3; - } else if (this.isAtLeast(IBP_2_1_IV1)) { - return 2; - } else if (this.isAtLeast(IBP_2_0_IV0)) { - return 1; - } else { - return 0; } + return 12; } public short listOffsetRequestVersion() { @@ -484,48 +305,8 @@ public short listOffsetRequestVersion() { return 9; } else if (this.isAtLeast(IBP_3_5_IV0)) { return 8; - } else if (this.isAtLeast(IBP_3_0_IV1)) { - return 7; - } else if (this.isAtLeast(IBP_2_8_IV0)) { - return 6; - } else if (this.isAtLeast(IBP_2_2_IV1)) { - return 5; - } else if (this.isAtLeast(IBP_2_1_IV1)) { - return 4; - } else if (this.isAtLeast(IBP_2_0_IV1)) { - return 3; - } else if (this.isAtLeast(IBP_0_11_0_IV0)) { - return 2; - } else if (this.isAtLeast(IBP_0_10_1_IV2)) { - return 1; - } else 
{ - return 0; - } - } - - public short groupMetadataValueVersion() { - if (this.isLessThan(IBP_0_10_1_IV0)) { - return 0; - } else if (this.isLessThan(IBP_2_1_IV0)) { - return 1; - } else if (this.isLessThan(IBP_2_3_IV0)) { - return 2; - } else { - // Serialize with the highest supported non-flexible version - // until a tagged field is introduced or the version is bumped. - return 3; - } - } - - public short offsetCommitValueVersion(boolean expireTimestampMs) { - if (isLessThan(MetadataVersion.IBP_2_1_IV0) || expireTimestampMs) { - return 1; - } else if (isLessThan(MetadataVersion.IBP_2_1_IV1)) { - return 2; } else { - // Serialize with the highest supported non-flexible version - // until a tagged field is introduced or the version is bumped. - return 3; + return 7; } } @@ -600,22 +381,6 @@ public static MetadataVersion fromFeatureLevel(short version) { throw new IllegalArgumentException("No MetadataVersion with feature level " + version); } - /** - * Return the minimum `MetadataVersion` that supports `RecordVersion`. - */ - public static MetadataVersion minSupportedFor(RecordVersion recordVersion) { - switch (recordVersion) { - case V0: - return IBP_0_8_0; - case V1: - return IBP_0_10_0_IV0; - case V2: - return IBP_0_11_0_IV0; - default: - throw new IllegalArgumentException("Invalid message format version " + recordVersion); - } - } - // Testing only public static MetadataVersion latestTesting() { return VERSIONS[VERSIONS.length - 1]; @@ -654,14 +419,6 @@ private static boolean checkIfMetadataChangedOrdered(MetadataVersion highVersion return version != lowVersion; } - public short writeTxnMarkersRequestVersion() { - if (isAtLeast(IBP_2_8_IV0)) { - return 1; - } else { - return 0; - } - } - public boolean isAtLeast(MetadataVersion otherVersion) { return this.compareTo(otherVersion) >= 0; } diff --git a/server-common/src/main/java/org/apache/kafka/server/common/TestFeatureVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/TestFeatureVersion.java index 2d929d198977d..e9d54d0f21114 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/TestFeatureVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/TestFeatureVersion.java @@ -23,7 +23,7 @@ public enum TestFeatureVersion implements FeatureVersion { TEST_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), // TEST_1 released right before MV 3.7-IVO was released, and it has no dependencies TEST_1(1, MetadataVersion.IBP_3_7_IV0, Collections.emptyMap()), - // TEST_2 is not yet released and maps to the latest testing version, and it depends on this metadata version + // TEST_2 is not yet set to be the default version and maps to the latest testing version, and it depends on this metadata version TEST_2(2, MetadataVersion.latestTesting(), Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.latestTesting().featureLevel())); private final short featureLevel; @@ -32,6 +32,9 @@ public enum TestFeatureVersion implements FeatureVersion { public static final String FEATURE_NAME = "test.feature.version"; + public static final TestFeatureVersion LATEST_PRODUCTION = + MetadataVersion.latestProduction() == MetadataVersion.latestTesting() ? 
TEST_2 : TEST_1; + TestFeatureVersion(int featureLevel, MetadataVersion metadataVersionMapping, Map dependencies) { this.featureLevel = (short) featureLevel; this.metadataVersionMapping = metadataVersionMapping; diff --git a/server-common/src/main/java/org/apache/kafka/server/common/TopicIdPartition.java b/server-common/src/main/java/org/apache/kafka/server/common/TopicIdPartition.java index 532798921a9ae..74f62dd953c8f 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/TopicIdPartition.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/TopicIdPartition.java @@ -48,8 +48,7 @@ public int partitionId() { @Override public boolean equals(Object o) { - if (!(o instanceof TopicIdPartition)) return false; - TopicIdPartition other = (TopicIdPartition) o; + if (!(o instanceof TopicIdPartition other)) return false; return other.topicId.equals(topicId) && other.partitionId == partitionId; } diff --git a/server-common/src/main/java/org/apache/kafka/server/common/TransactionVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/TransactionVersion.java index 36dadb5cf11d6..fc85f55606f71 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/TransactionVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/TransactionVersion.java @@ -16,6 +16,9 @@ */ package org.apache.kafka.server.common; +import org.apache.kafka.common.requests.AddPartitionsToTxnRequest; +import org.apache.kafka.common.requests.EndTxnRequest; + import java.util.Collections; import java.util.Map; @@ -30,6 +33,8 @@ public enum TransactionVersion implements FeatureVersion { public static final String FEATURE_NAME = "transaction.version"; + public static final TransactionVersion LATEST_PRODUCTION = TV_2; + private final short featureLevel; private final MetadataVersion bootstrapMetadataVersion; private final Map dependencies; @@ -50,7 +55,17 @@ public short featureLevel() { } public static TransactionVersion fromFeatureLevel(short version) { - return (TransactionVersion) Features.TRANSACTION_VERSION.fromFeatureLevel(version, true); + return (TransactionVersion) Feature.TRANSACTION_VERSION.fromFeatureLevel(version, true); + } + + public static TransactionVersion transactionVersionForAddPartitionsToTxn(AddPartitionsToTxnRequest request) { + // If the request is greater than version 3, we know the client supports transaction version 2. + return request.version() > 3 ? TV_2 : TV_0; + } + + public static TransactionVersion transactionVersionForEndTxn(EndTxnRequest request) { + // If the request is greater than version 4, we know the client supports transaction version 2. + return request.version() > 4 ? TV_2 : TV_0; } @Override diff --git a/server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java new file mode 100644 index 0000000000000..ea107998e7611 --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java @@ -0,0 +1,361 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.common; + +import java.util.Collections; +import java.util.Map; + +/** + * Test versions only used for unit test FeatureTest.java. + */ +public class UnitTestFeatureVersion { + /** + * The feature is used for testing latest production is not one of the feature versions. + */ + public enum FV0 implements FeatureVersion { + UT_FV0_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV0_1(1, MetadataVersion.IBP_3_7_IV0, Collections.emptyMap()); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.0"; + + public static final FV0 LATEST_PRODUCTION = UT_FV0_1; + + FV0(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test latest production lags behind the default value. + */ + public enum FV1 implements FeatureVersion { + UT_FV1_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV1_1(1, MetadataVersion.IBP_3_7_IV0, Collections.emptyMap()); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.1"; + + public static final FV1 LATEST_PRODUCTION = UT_FV1_0; + + FV1(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the dependency of the latest production that is not yet production ready. 
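
FV1 above deliberately violates rule 2: its declared latest production level lags the default at the latest production MV. A sketch of how the FeatureTest unit test mentioned in the class javadoc might assert this, assuming JUnit 5:

import static org.junit.jupiter.api.Assertions.assertThrows;

import org.apache.kafka.server.common.Feature;
import org.junit.jupiter.api.Test;

public class LatestProductionLagsDefaultTest {
    @Test
    public void testLatestProductionBehindDefaultIsRejected() {
        // UT_FV1_0 is declared latest production, but the default at the latest production MV is UT_FV1_1.
        assertThrows(IllegalArgumentException.class,
            () -> Feature.validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_1));
    }
}
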
+ */ + public enum FV2 implements FeatureVersion { + UT_FV2_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV2_1(1, MetadataVersion.IBP_3_7_IV0, Collections.emptyMap()); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.2"; + + public static final FV2 LATEST_PRODUCTION = UT_FV2_0; + + FV2(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the dependency of the latest production that is not yet production ready. + */ + public enum FV3 implements FeatureVersion { + UT_FV3_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV3_1(1, MetadataVersion.IBP_3_7_IV0, Collections.singletonMap(FV2.FEATURE_NAME, (short) 1)); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.3"; + + public static final FV3 LATEST_PRODUCTION = UT_FV3_1; + + FV3(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the dependency of the default value that is not yet default ready. + */ + public enum FV4 implements FeatureVersion { + UT_FV4_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV4_1(1, MetadataVersion.latestTesting(), Collections.emptyMap()); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.4"; + + public static final FV4 LATEST_PRODUCTION = UT_FV4_1; + + FV4(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the dependency of the default value that is not yet default ready. 
+ */ + public enum FV5 implements FeatureVersion { + UT_FV5_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV5_1(1, MetadataVersion.IBP_3_7_IV0, Collections.singletonMap(FV4.FEATURE_NAME, (short) 1)); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.5"; + + public static final FV5 LATEST_PRODUCTION = UT_FV5_1; + + FV5(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the latest production has MV dependency that is not yet production ready. + */ + public enum FV6 implements FeatureVersion { + UT_FV6_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.emptyMap()), + UT_FV6_1(1, MetadataVersion.latestTesting(), Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.latestTesting().featureLevel())); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.6"; + + public static final FV6 LATEST_PRODUCTION = UT_FV6_1; + + FV6(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } + + /** + * The feature is used to test the default value has MV dependency that is behind the bootstrap MV. 
+ */ + public enum FV7 implements FeatureVersion { + UT_FV7_0(0, MetadataVersion.MINIMUM_KRAFT_VERSION, Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_7_IV0.featureLevel())), + UT_FV7_1(1, MetadataVersion.IBP_3_8_IV0, Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_8_IV0.featureLevel())); + + private final short featureLevel; + private final MetadataVersion bootstrapMetadataVersion; + private final Map dependencies; + + public static final String FEATURE_NAME = "unit.test.feature.version.7"; + + public static final FV7 LATEST_PRODUCTION = UT_FV7_1; + + FV7(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map dependencies) { + this.featureLevel = (short) featureLevel; + this.bootstrapMetadataVersion = bootstrapMetadataVersion; + this.dependencies = dependencies; + } + + @Override + public short featureLevel() { + return featureLevel; + } + + @Override + public String featureName() { + return FEATURE_NAME; + } + + @Override + public MetadataVersion bootstrapMetadataVersion() { + return bootstrapMetadataVersion; + } + + @Override + public Map dependencies() { + return dependencies; + } + } +} diff --git a/server-common/src/main/java/org/apache/kafka/server/config/QuotaConfig.java b/server-common/src/main/java/org/apache/kafka/server/config/QuotaConfig.java index 6ecd7ffca3be1..4480e9f0c1088 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/QuotaConfig.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/QuotaConfig.java @@ -20,9 +20,7 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.security.scram.internals.ScramMechanism; -import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -115,12 +113,12 @@ public class QuotaConfig { .define(QuotaConfig.ALTER_LOG_DIRS_REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.ALTER_LOG_DIRS_REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.CONTROLLER_QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.CONTROLLER_QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, CLASS, null, LOW, QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_DOC); - private static final Set USER_AND_CLIENT_QUOTA_NAMES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + private static final Set USER_AND_CLIENT_QUOTA_NAMES = Set.of( PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, REQUEST_PERCENTAGE_OVERRIDE_CONFIG, CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG - ))); + ); private static void buildUserClientQuotaConfigDef(ConfigDef configDef) { configDef.define(PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, diff --git a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java index c6e0810262c61..fa7ed93850f85 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/ServerLogConfigs.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.config.TopicConfig; -import static org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1; import static org.apache.kafka.server.config.ServerTopicConfigSynonyms.LOG_PREFIX; /** @@ -70,7 +69,7 @@ public class 
ServerLogConfigs { public static final String LOG_CLEANUP_POLICY_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.CLEANUP_POLICY_CONFIG); public static final String LOG_CLEANUP_POLICY_DEFAULT = TopicConfig.CLEANUP_POLICY_DELETE; - public static final String LOG_CLEANUP_POLICY_DOC = "The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. Valid policies are: \"delete\" and \"compact\""; + public static final String LOG_CLEANUP_POLICY_DOC = "The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies."; public static final String LOG_INDEX_SIZE_MAX_BYTES_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG); public static final int LOG_INDEX_SIZE_MAX_BYTES_DEFAULT = 10 * 1024 * 1024; @@ -106,37 +105,11 @@ public class ServerLogConfigs { public static final String LOG_PRE_ALLOCATE_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.PREALLOCATE_CONFIG); public static final String LOG_PRE_ALLOCATE_ENABLE_DOC = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true."; - /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */ - /** - * @deprecated since "3.0" - */ - @Deprecated - public static final String LOG_MESSAGE_FORMAT_VERSION_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG); - @Deprecated - public static final String LOG_MESSAGE_FORMAT_VERSION_DEFAULT = IBP_3_0_IV1.version(); - public static final String LOG_MESSAGE_FORMAT_VERSION_DOC = "Specify the message format version the broker will use to append messages to the logs. The value should be a valid MetadataVersion. " + - "Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check MetadataVersion for more details. By setting a particular message format version, the " + - "user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly " + - "will cause consumers with older versions to break as they will receive messages with a format that they don't understand."; - public static final String LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG); public static final String LOG_MESSAGE_TIMESTAMP_TYPE_DEFAULT = "CreateTime"; public static final String LOG_MESSAGE_TIMESTAMP_TYPE_DOC = "Define whether the timestamp in the message is message create time or log append time. The value should be either " + "CreateTime or LogAppendTime."; - /* See `TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG` for details */ - /** - * @deprecated since "3.6" - */ - @Deprecated - public static final String LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG); - @Deprecated - public static final long LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DEFAULT = Long.MAX_VALUE; - public static final String LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC = "[DEPRECATED] The maximum difference allowed between the timestamp when a broker receives " + - "a message and the timestamp specified in the message. If log.message.timestamp.type=CreateTime, a message will be rejected " + - "if the difference in timestamp exceeds this threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime." 
+ - "The maximum timestamp difference allowed should be no greater than log.retention.ms to avoid unnecessarily frequent log rolling."; - public static final String LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG); public static final long LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DEFAULT = Long.MAX_VALUE; public static final String LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC = "This configuration sets the allowable timestamp difference between the " + @@ -145,7 +118,7 @@ public class ServerLogConfigs { "If log.message.timestamp.type=CreateTime, the message will be rejected if the difference in timestamps exceeds " + "this specified threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime."; public static final String LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG); - public static final long LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DEFAULT = Long.MAX_VALUE; + public static final long LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DEFAULT = 3600000; // 1 hour public static final String LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC = "This configuration sets the allowable timestamp difference between the " + "message timestamp and the broker's timestamp. The message timestamp can be later than or equal to the broker's " + "timestamp, with the maximum allowable difference determined by the value set in this configuration. " + @@ -153,7 +126,7 @@ public class ServerLogConfigs { "this specified threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime."; public static final String NUM_RECOVERY_THREADS_PER_DATA_DIR_CONFIG = "num.recovery.threads.per.data.dir"; - public static final int NUM_RECOVERY_THREADS_PER_DATA_DIR_DEFAULT = 1; + public static final int NUM_RECOVERY_THREADS_PER_DATA_DIR_DEFAULT = 2; public static final String NUM_RECOVERY_THREADS_PER_DATA_DIR_DOC = "The number of threads per data directory to be used for log recovery at startup and flushing at shutdown"; public static final String AUTO_CREATE_TOPICS_ENABLE_CONFIG = "auto.create.topics.enable"; @@ -178,13 +151,6 @@ public class ServerLogConfigs { public static final String ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG = "alter.config.policy.class.name"; public static final String ALTER_CONFIG_POLICY_CLASS_NAME_DOC = "The alter configs policy class that should be used for validation. The class should " + "implement the org.apache.kafka.server.policy.AlterConfigPolicy interface."; - public static final String LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG); - public static final boolean LOG_MESSAGE_DOWNCONVERSION_ENABLE_DEFAULT = true; - public static final String LOG_MESSAGE_DOWNCONVERSION_ENABLE_DOC = "This configuration controls whether " + - "down-conversion of message formats is enabled to satisfy consume requests. When set to false, " + - "broker will not perform down-conversion for consumers expecting an older message format. The broker responds " + - "with UNSUPPORTED_VERSION error for consume requests from such older clients. 
This configuration" + - "does not apply to any message format conversion that might be required for replication to followers."; public static final String LOG_INITIAL_TASK_DELAY_MS_CONFIG = LOG_PREFIX + "initial.task.delay.ms"; public static final long LOG_INITIAL_TASK_DELAY_MS_DEFAULT = 30 * 1000L; @@ -195,4 +161,4 @@ public class ServerLogConfigs { public static final Long LOG_DIR_FAILURE_TIMEOUT_MS_DEFAULT = 30000L; public static final String LOG_DIR_FAILURE_TIMEOUT_MS_DOC = "If the broker is unable to successfully communicate to the controller that some log " + "directory has failed for longer than this time, the broker will fail and shut down."; -} \ No newline at end of file +} diff --git a/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java b/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java index 66747e74364fb..3fc8c4435b95b 100644 --- a/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java +++ b/server-common/src/main/java/org/apache/kafka/server/config/ServerTopicConfigSynonyms.java @@ -50,7 +50,6 @@ public final class ServerTopicConfigSynonyms { * the first synonym and ignore the second. */ // Topic configs with no mapping to a server config can be found in `LogConfig.CONFIGS_WITH_NO_SERVER_DEFAULTS` - @SuppressWarnings("deprecation") public static final Map> ALL_TOPIC_CONFIG_SYNONYMS = Collections.unmodifiableMap(Utils.mkMap( sameNameWithLogPrefix(TopicConfig.SEGMENT_BYTES_CONFIG), listWithLogPrefix(TopicConfig.SEGMENT_MS_CONFIG, @@ -84,12 +83,9 @@ public final class ServerTopicConfigSynonyms { sameName(TopicConfig.COMPRESSION_LZ4_LEVEL_CONFIG), sameName(TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG), sameNameWithLogPrefix(TopicConfig.PREALLOCATE_CONFIG), - sameNameWithLogPrefix(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG), sameNameWithLogPrefix(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG), - sameNameWithLogPrefix(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG), sameNameWithLogPrefix(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG), sameNameWithLogPrefix(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG), - sameNameWithLogPrefix(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG), sameNameWithLogPrefix(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG), sameNameWithLogPrefix(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG) )); diff --git a/server-common/src/main/java/org/apache/kafka/server/network/BrokerEndPoint.java b/server-common/src/main/java/org/apache/kafka/server/network/BrokerEndPoint.java index 34e44ffd2024c..b06a262a7a81f 100644 --- a/server-common/src/main/java/org/apache/kafka/server/network/BrokerEndPoint.java +++ b/server-common/src/main/java/org/apache/kafka/server/network/BrokerEndPoint.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.server.network; -import java.util.Objects; /** * BrokerEndPoint is used to connect to specific host:port pair. @@ -25,45 +24,4 @@ * Clients should know which security protocol to use from configuration. * This allows us to keep the wire protocol with the clients unchanged where the protocol is not needed. 
*/ -public class BrokerEndPoint { - - private final int id; - private final String host; - private final int port; - - public BrokerEndPoint(int id, String host, int port) { - this.id = id; - this.host = host; - this.port = port; - } - - public int id() { - return id; - } - - public String host() { - return host; - } - - public int port() { - return port; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - BrokerEndPoint that = (BrokerEndPoint) o; - return id == that.id && host.equals(that.host) && port == that.port; - } - - @Override - public int hashCode() { - return Objects.hash(id, host, port); - } - - public String toString() { - return String.format("BrokerEndPoint(id=%s, host=%s:%s)", id, host, port); - } -} +public record BrokerEndPoint(int id, String host, int port) { } diff --git a/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperation.java b/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperation.java index 0ad638240c899..f3c818cb9c6c6 100644 --- a/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperation.java +++ b/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperation.java @@ -42,8 +42,8 @@ public abstract class DelayedOperation extends TimerTask { private final AtomicBoolean completed = new AtomicBoolean(false); - // Visible for testing - final Lock lock; + + protected final Lock lock; public DelayedOperation(long delayMs, Optional lockOpt) { this(delayMs, lockOpt.orElse(new ReentrantLock())); diff --git a/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperationPurgatory.java b/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperationPurgatory.java index 3491aee139e53..380f22c9c8ec8 100644 --- a/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperationPurgatory.java +++ b/server-common/src/main/java/org/apache/kafka/server/purgatory/DelayedOperationPurgatory.java @@ -147,7 +147,10 @@ public boolean tryCompleteElseWatch(T operation, // any exclusive lock. Since DelayedOperationPurgatory.checkAndComplete() completes delayed operations asynchronously, // holding an exclusive lock to make the call is often unnecessary. 
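(Aside on the BrokerEndPoint change above: rewriting the class as a record keeps the accessor names id(), host(), and port() used by callers, while the compiler generates the canonical constructor, component-wise equals/hashCode, and a default toString. A small sketch of the behaviour a record provides, under illustrative names rather than the Kafka class itself:)

// Illustrative sketch only: a record supplies the constructor, accessors,
// equals/hashCode and toString that the hand-written class declared explicitly.
public record EndPointSketch(int id, String host, int port) {
    public static void main(String[] args) {
        EndPointSketch a = new EndPointSketch(1, "localhost", 9092);
        EndPointSketch b = new EndPointSketch(1, "localhost", 9092);
        System.out.println(a.equals(b));               // true: component-wise equality
        System.out.println(a.host() + ":" + a.port()); // localhost:9092
        System.out.println(a);                         // EndPointSketch[id=1, host=localhost, port=9092]
    }
}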
if (operation.safeTryCompleteOrElse(() -> { - watchKeys.forEach(key -> watchForOperation(key, operation)); + watchKeys.forEach(key -> { + if (!operation.isCompleted()) + watchForOperation(key, operation); + }); if (!watchKeys.isEmpty()) estimatedTotalOperations.incrementAndGet(); })) { diff --git a/share/src/main/java/org/apache/kafka/server/share/SharePartitionKey.java b/server-common/src/main/java/org/apache/kafka/server/share/SharePartitionKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/SharePartitionKey.java rename to server-common/src/main/java/org/apache/kafka/server/share/SharePartitionKey.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java similarity index 66% rename from share/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java index 15ecc89f78d2f..3b6db31b65761 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/DefaultStatePersister.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.slf4j.Logger; @@ -31,7 +32,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; /** @@ -68,7 +68,7 @@ public void stop() { * @param request InitializeShareGroupStateParameters * @return A completable future of InitializeShareGroupStateResult */ - public CompletableFuture initializeState(InitializeShareGroupStateParameters request) throws IllegalArgumentException { + public CompletableFuture initializeState(InitializeShareGroupStateParameters request) { throw new RuntimeException("not implemented"); } @@ -79,8 +79,13 @@ public CompletableFuture initializeState(Initia * @param request WriteShareGroupStateParameters * @return A completable future of WriteShareGroupStateResult */ - public CompletableFuture writeState(WriteShareGroupStateParameters request) throws IllegalArgumentException { - validate(request); + public CompletableFuture writeState(WriteShareGroupStateParameters request) { + try { + validate(request); + } catch (Exception e) { + log.error("Unable to validate write state request", e); + return CompletableFuture.failedFuture(e); + } GroupTopicPartitionData gtp = request.groupTopicPartitionData(); String groupId = gtp.groupId(); @@ -119,7 +124,15 @@ stateManager.new WriteStateHandler( return combinedFuture.thenApply(v -> writeResponsesToResult(futureMap)); } - private WriteShareGroupStateResult writeResponsesToResult( + /** + * Takes in a list of COMPLETED futures and combines the results, + * taking care of errors if any, into a single WriteShareGroupStateResult + * + * @param futureMap - HashMap of {topic -> {partition -> future}} + * @return Object representing combined result of type WriteShareGroupStateResult + */ + // visible for testing + WriteShareGroupStateResult writeResponsesToResult( Map>> futureMap ) { List> topicsData = 
futureMap.keySet().stream() @@ -129,14 +142,15 @@ private WriteShareGroupStateResult writeResponsesToResult( int partition = partitionFuture.getKey(); CompletableFuture future = partitionFuture.getValue(); try { - WriteShareGroupStateResponse partitionResponse = future.get(); + // already completed because of allOf application in the caller + WriteShareGroupStateResponse partitionResponse = future.join(); return partitionResponse.data().results().get(0).partitions().stream() .map(partitionResult -> PartitionFactory.newPartitionErrorData( partitionResult.partition(), partitionResult.errorCode(), partitionResult.errorMessage())) .collect(Collectors.toList()); - } catch (InterruptedException | ExecutionException e) { + } catch (Exception e) { log.error("Unexpected exception while writing data to share coordinator", e); return Collections.singletonList(PartitionFactory.newPartitionErrorData( partition, @@ -162,8 +176,13 @@ private WriteShareGroupStateResult writeResponsesToResult( * @param request ReadShareGroupStateParameters * @return A completable future of ReadShareGroupStateResult */ - public CompletableFuture readState(ReadShareGroupStateParameters request) throws IllegalArgumentException { - validate(request); + public CompletableFuture readState(ReadShareGroupStateParameters request) { + try { + validate(request); + } catch (Exception e) { + log.error("Unable to validate read state request", e); + return CompletableFuture.failedFuture(e); + } GroupTopicPartitionData gtp = request.groupTopicPartitionData(); String groupId = gtp.groupId(); Map>> futureMap = new HashMap<>(); @@ -201,7 +220,15 @@ stateManager.new ReadStateHandler( return combinedFuture.thenApply(v -> readResponsesToResult(futureMap)); } - private ReadShareGroupStateResult readResponsesToResult( + /** + * Takes in a list of COMPLETED futures and combines the results, + * taking care of errors if any, into a single ReadShareGroupStateResult + * + * @param futureMap - HashMap of {topic -> {partition -> future}} + * @return Object representing combined result of type ReadShareGroupStateResult + */ + // visible for testing + ReadShareGroupStateResult readResponsesToResult( Map>> futureMap ) { List> topicsData = futureMap.keySet().stream() @@ -211,7 +238,8 @@ private ReadShareGroupStateResult readResponsesToResult( int partition = partitionFuture.getKey(); CompletableFuture future = partitionFuture.getValue(); try { - ReadShareGroupStateResponse partitionResponse = future.get(); + // already completed because of allOf call in the caller + ReadShareGroupStateResponse partitionResponse = future.join(); return partitionResponse.data().results().get(0).partitions().stream() .map(partitionResult -> PartitionFactory.newPartitionAllData( partitionResult.partition(), @@ -222,7 +250,7 @@ private ReadShareGroupStateResult readResponsesToResult( partitionResult.stateBatches().stream().map(PersisterStateBatch::from).collect(Collectors.toList()) )) .collect(Collectors.toList()); - } catch (InterruptedException | ExecutionException e) { + } catch (Exception e) { log.error("Unexpected exception while getting data from share coordinator", e); return Collections.singletonList(PartitionFactory.newPartitionAllData( partition, @@ -251,7 +279,7 @@ private ReadShareGroupStateResult readResponsesToResult( * @param request DeleteShareGroupStateParameters * @return A completable future of DeleteShareGroupStateResult */ - public CompletableFuture deleteState(DeleteShareGroupStateParameters request) throws IllegalArgumentException { + public 
CompletableFuture deleteState(DeleteShareGroupStateParameters request) { throw new RuntimeException("not implemented"); } @@ -262,8 +290,98 @@ public CompletableFuture deleteState(DeleteShareGro * @param request ReadShareGroupStateSummaryParameters * @return A completable future of ReadShareGroupStateSummaryResult */ - public CompletableFuture readSummary(ReadShareGroupStateSummaryParameters request) throws IllegalArgumentException { - throw new RuntimeException("not implemented"); + public CompletableFuture readSummary(ReadShareGroupStateSummaryParameters request) { + try { + validate(request); + } catch (Exception e) { + log.error("Unable to validate read state summary request", e); + return CompletableFuture.failedFuture(e); + } + + GroupTopicPartitionData gtp = request.groupTopicPartitionData(); + String groupId = gtp.groupId(); + Map>> futureMap = new HashMap<>(); + List handlers = new ArrayList<>(); + + gtp.topicsData().forEach(topicData -> { + topicData.partitions().forEach(partitionData -> { + CompletableFuture future = futureMap + .computeIfAbsent(topicData.topicId(), k -> new HashMap<>()) + .computeIfAbsent(partitionData.partition(), k -> new CompletableFuture<>()); + + handlers.add( + stateManager.new ReadStateSummaryHandler( + groupId, + topicData.topicId(), + partitionData.partition(), + partitionData.leaderEpoch(), + future, + null + ) + ); + }); + }); + + for (PersisterStateManager.PersisterStateManagerHandler handler : handlers) { + stateManager.enqueue(handler); + } + + // Combine all futures into a single CompletableFuture + CompletableFuture combinedFuture = CompletableFuture.allOf( + handlers.stream() + .map(PersisterStateManager.ReadStateSummaryHandler::result) + .toArray(CompletableFuture[]::new)); + + // Transform the combined CompletableFuture into CompletableFuture + return combinedFuture.thenApply(v -> readSummaryResponsesToResult(futureMap)); + } + + /** + * Takes in a list of COMPLETED futures and combines the results, + * taking care of errors if any, into a single ReadShareGroupStateSummaryResult + * + * @param futureMap - HashMap of {topic -> {partition -> future}} + * @return Object representing combined result of type ReadShareGroupStateSummaryResult + */ + // visible for testing + ReadShareGroupStateSummaryResult readSummaryResponsesToResult( + Map>> futureMap + ) { + List> topicsData = futureMap.keySet().stream() + .map(topicId -> { + List partitionStateErrorData = futureMap.get(topicId).entrySet().stream() + .map(partitionFuture -> { + int partition = partitionFuture.getKey(); + CompletableFuture future = partitionFuture.getValue(); + try { + // already completed because of allOf call in the caller + ReadShareGroupStateSummaryResponse partitionResponse = future.join(); + return partitionResponse.data().results().get(0).partitions().stream() + .map(partitionResult -> PartitionFactory.newPartitionStateSummaryData( + partitionResult.partition(), + partitionResult.stateEpoch(), + partitionResult.startOffset(), + partitionResult.errorCode(), + partitionResult.errorMessage())) + .collect(Collectors.toList()); + } catch (Exception e) { + log.error("Unexpected exception while getting data from share coordinator", e); + return Collections.singletonList(PartitionFactory.newPartitionStateSummaryData( + partition, + -1, + -1, + Errors.UNKNOWN_SERVER_ERROR.code(), // No specific public error code exists for InterruptedException / ExecutionException + "Error reading state from share coordinator: " + e.getMessage())); + } + }) + .flatMap(List::stream) + 
.collect(Collectors.toList()); + return new TopicData<>(topicId, partitionStateErrorData); + }) + .collect(Collectors.toList()); + return new ReadShareGroupStateSummaryResult.Builder() + .setTopicsData(topicsData) + .build(); } private static void validate(WriteShareGroupStateParameters params) { @@ -290,6 +408,18 @@ private static void validate(ReadShareGroupStateParameters params) { validateGroupTopicPartitionData(prefix, params.groupTopicPartitionData()); } + private static void validate(ReadShareGroupStateSummaryParameters params) { + String prefix = "Read share group summary parameters"; + if (params == null) { + throw new IllegalArgumentException(prefix + " cannot be null."); + } + if (params.groupTopicPartitionData() == null) { + throw new IllegalArgumentException(prefix + " data cannot be null."); + } + + validateGroupTopicPartitionData(prefix, params.groupTopicPartitionData()); + } + private static void validateGroupTopicPartitionData(String prefix, GroupTopicPartitionData data) { String groupId = data.groupId(); if (groupId == null || groupId.isEmpty()) { diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateParameters.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateResult.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/DeleteShareGroupStateResult.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/GroupTopicPartitionData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/GroupTopicPartitionData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/GroupTopicPartitionData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/GroupTopicPartitionData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateParameters.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateResult.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/InitializeShareGroupStateResult.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java 
b/server-common/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java similarity index 98% rename from share/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java index 83d3d7d74a89b..d4b22332be184 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java @@ -86,13 +86,13 @@ public CompletableFuture deleteState(DeleteShareGro @Override public CompletableFuture readSummary(ReadShareGroupStateSummaryParameters request) { GroupTopicPartitionData reqData = request.groupTopicPartitionData(); - List> resultArgs = new ArrayList<>(); + List> resultArgs = new ArrayList<>(); // we will fetch topic and partition info from the request and // return valid but default response (keep partition id and topic from request but initialize other // values as default). for (TopicData topicData : reqData.topicsData()) { resultArgs.add(new TopicData<>(topicData.topicId(), topicData.partitions().stream(). - map(partitionIdData -> PartitionFactory.newPartitionStateErrorData( + map(partitionIdData -> PartitionFactory.newPartitionStateSummaryData( partitionIdData.partition(), PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE)) .collect(Collectors.toList()))); } diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionAllData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionAllData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionAllData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionAllData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java similarity index 99% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java index 61b1d3a621f35..25db77380da7e 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionData.java @@ -26,7 +26,7 @@ */ public class PartitionData implements PartitionIdData, PartitionStateData, PartitionErrorData, PartitionStateErrorData, - PartitionStateBatchData, PartitionIdLeaderEpochData, PartitionAllData { + PartitionStateBatchData, PartitionIdLeaderEpochData, PartitionAllData, PartitionStateSummaryData { private final int partition; private final int stateEpoch; private final long startOffset; diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionErrorData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionErrorData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionErrorData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionErrorData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java 
b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java similarity index 91% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java index abd44a854ee70..009eb9cccc149 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionFactory.java @@ -51,6 +51,10 @@ public static PartitionStateErrorData newPartitionStateErrorData(int partition, return new PartitionData(partition, stateEpoch, startOffset, errorCode, errorMessage, DEFAULT_LEADER_EPOCH, null); } + public static PartitionStateSummaryData newPartitionStateSummaryData(int partition, int stateEpoch, long startOffset, short errorCode, String errorMessage) { + return new PartitionData(partition, stateEpoch, startOffset, errorCode, errorMessage, DEFAULT_LEADER_EPOCH, null); + } + public static PartitionStateBatchData newPartitionStateBatchData(int partition, int stateEpoch, long startOffset, int leaderEpoch, List stateBatches) { return new PartitionData(partition, stateEpoch, startOffset, DEFAULT_ERROR_CODE, DEFAULT_ERR_MESSAGE, leaderEpoch, stateBatches); } diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionIdData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionIdData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionIdData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionIdData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionIdLeaderEpochData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionIdLeaderEpochData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionIdLeaderEpochData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionIdLeaderEpochData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionInfoData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionInfoData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionInfoData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionInfoData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateBatchData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateBatchData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateBatchData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateBatchData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateErrorData.java 
b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateErrorData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PartitionStateErrorData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateErrorData.java diff --git a/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateSummaryData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateSummaryData.java new file mode 100644 index 0000000000000..dc4732a79ae22 --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/PartitionStateSummaryData.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.server.share.persister; + +/** + * This interface is implemented by classes used to contain the data for a partition with state summary and error data (if any) + * in the interface to {@link Persister}. + */ +public interface PartitionStateSummaryData extends PartitionInfoData, PartitionIdData { + int stateEpoch(); + + long startOffset(); + + short errorCode(); + + String errorMessage(); +} diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/Persister.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/Persister.java similarity index 87% rename from share/src/main/java/org/apache/kafka/server/share/persister/Persister.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/Persister.java index 49073c83cd682..fddb01154bd94 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/Persister.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/Persister.java @@ -32,7 +32,7 @@ public interface Persister { * @param request Request parameters * @return A {@link CompletableFuture} that completes with the result. */ - CompletableFuture initializeState(InitializeShareGroupStateParameters request) throws IllegalArgumentException; + CompletableFuture initializeState(InitializeShareGroupStateParameters request); /** * Read share-partition state. @@ -40,7 +40,7 @@ public interface Persister { * @param request Request parameters * @return A {@link CompletableFuture} that completes with the result. */ - CompletableFuture readState(ReadShareGroupStateParameters request) throws IllegalArgumentException; + CompletableFuture readState(ReadShareGroupStateParameters request); /** * Write share-partition state. @@ -48,7 +48,7 @@ public interface Persister { * @param request Request parameters * @return A {@link CompletableFuture} that completes with the result. 
*/ - CompletableFuture writeState(WriteShareGroupStateParameters request) throws IllegalArgumentException; + CompletableFuture writeState(WriteShareGroupStateParameters request); /** * Delete share-partition state. @@ -56,7 +56,7 @@ public interface Persister { * @param request Request parameters * @return A {@link CompletableFuture} that completes with the result. */ - CompletableFuture deleteState(DeleteShareGroupStateParameters request) throws IllegalArgumentException; + CompletableFuture deleteState(DeleteShareGroupStateParameters request); /** * Read the offset information from share-partition state. @@ -64,7 +64,7 @@ public interface Persister { * @param request Request parameters * @return A {@link CompletableFuture} that completes with the result. */ - CompletableFuture readSummary(ReadShareGroupStateSummaryParameters request) throws IllegalArgumentException; + CompletableFuture readSummary(ReadShareGroupStateSummaryParameters request); /** * Perform cleanup and interrupt any threads diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PersisterParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PersisterParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterParameters.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PersisterResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterResult.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PersisterResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterResult.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PersisterStateBatch.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateBatch.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/PersisterStateBatch.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateBatch.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java similarity index 82% rename from share/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java index ea14c4f59b432..5c9028b87e8c3 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/PersisterStateManager.java @@ -28,6 +28,8 @@ import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; @@ -38,6 +40,8 @@ import 
org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.ReadShareGroupStateRequest; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryRequest; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.WriteShareGroupStateRequest; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.apache.kafka.common.utils.ExponentialBackoff; @@ -679,7 +683,7 @@ protected void handleRequestResponse(ClientResponse response) { log.warn("Received retriable error in read state RPC for key {}: {}", partitionKey(), error.message()); if (!readStateBackoff.canAttempt()) { log.error("Exhausted max retries for read state RPC for key {} without success.", partitionKey()); - readStateErrorReponse(error, new Exception("Exhausted max retries to complete read state RPC without success.")); + readStateErrorResponse(error, new Exception("Exhausted max retries to complete read state RPC without success.")); return; } super.resetCoordinatorNode(); @@ -688,7 +692,7 @@ protected void handleRequestResponse(ClientResponse response) { default: log.error("Unable to perform read state RPC for key {}: {}", partitionKey(), error.message()); - readStateErrorReponse(error, null); + readStateErrorResponse(error, null); return; } } @@ -699,19 +703,19 @@ protected void handleRequestResponse(ClientResponse response) { IllegalStateException exception = new IllegalStateException( "Failed to read state for share partition " + partitionKey() ); - readStateErrorReponse(Errors.forException(exception), exception); + readStateErrorResponse(Errors.forException(exception), exception); } - protected void readStateErrorReponse(Errors error, Exception exception) { + protected void readStateErrorResponse(Errors error, Exception exception) { this.result.complete(new ReadShareGroupStateResponse( - ReadShareGroupStateResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in find coordinator. " + + ReadShareGroupStateResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in read state RPC. " + (exception == null ? error.message() : exception.getMessage())))); } @Override protected void findCoordinatorErrorResponse(Errors error, Exception exception) { this.result.complete(new ReadShareGroupStateResponse( - ReadShareGroupStateResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in read state RPC. " + + ReadShareGroupStateResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in find coordinator. " + (exception == null ? 
error.message() : exception.getMessage())))); } @@ -730,6 +734,147 @@ protected RPCType rpcType() { } } + public class ReadStateSummaryHandler extends PersisterStateManagerHandler { + private final int leaderEpoch; + private final CompletableFuture result; + private final BackoffManager readStateSummaryBackoff; + + public ReadStateSummaryHandler( + String groupId, + Uuid topicId, + int partition, + int leaderEpoch, + CompletableFuture result, + long backoffMs, + long backoffMaxMs, + int maxRPCRetryAttempts, + Consumer onCompleteCallback + ) { + super(groupId, topicId, partition, backoffMs, backoffMaxMs, maxRPCRetryAttempts); + this.leaderEpoch = leaderEpoch; + this.result = result; + this.readStateSummaryBackoff = new BackoffManager(maxRPCRetryAttempts, backoffMs, backoffMaxMs); + } + + public ReadStateSummaryHandler( + String groupId, + Uuid topicId, + int partition, + int leaderEpoch, + CompletableFuture result, + Consumer onCompleteCallback + ) { + this( + groupId, + topicId, + partition, + leaderEpoch, + result, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_FIND_COORD_ATTEMPTS, + onCompleteCallback + ); + } + + @Override + protected String name() { + return "ReadStateSummaryHandler"; + } + + @Override + protected AbstractRequest.Builder requestBuilder() { + throw new RuntimeException("Read Summary requests are batchable, hence individual requests not needed."); + } + + @Override + protected boolean isResponseForRequest(ClientResponse response) { + return response.requestHeader().apiKey() == ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY; + } + + @Override + protected void handleRequestResponse(ClientResponse response) { + log.debug("Read state summary response received - {}", response); + readStateSummaryBackoff.incrementAttempt(); + + ReadShareGroupStateSummaryResponse combinedResponse = (ReadShareGroupStateSummaryResponse) response.responseBody(); + for (ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult readStateSummaryResult : combinedResponse.data().results()) { + if (readStateSummaryResult.topicId().equals(partitionKey().topicId())) { + Optional partitionStateData = + readStateSummaryResult.partitions().stream().filter(partitionResult -> partitionResult.partition() == partitionKey().partition()) + .findFirst(); + + if (partitionStateData.isPresent()) { + Errors error = Errors.forCode(partitionStateData.get().errorCode()); + switch (error) { + case NONE: + readStateSummaryBackoff.resetAttempts(); + ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult result = ReadShareGroupStateSummaryResponse.toResponseReadStateSummaryResult( + partitionKey().topicId(), + Collections.singletonList(partitionStateData.get()) + ); + this.result.complete(new ReadShareGroupStateSummaryResponse(new ReadShareGroupStateSummaryResponseData() + .setResults(Collections.singletonList(result)))); + return; + + // check retriable errors + case COORDINATOR_NOT_AVAILABLE: + case COORDINATOR_LOAD_IN_PROGRESS: + case NOT_COORDINATOR: + log.warn("Received retriable error in read state summary RPC for key {}: {}", partitionKey(), error.message()); + if (!readStateSummaryBackoff.canAttempt()) { + log.error("Exhausted max retries for read state summary RPC for key {} without success.", partitionKey()); + readStateSummaryErrorResponse(error, new Exception("Exhausted max retries to complete read state summary RPC without success.")); + return; + } + super.resetCoordinatorNode(); + timer.add(new PersisterTimerTask(readStateSummaryBackoff.backOff(), this)); + return; + + default: + log.error("Unable to 
perform read state summary RPC for key {}: {}", partitionKey(), error.message()); + readStateSummaryErrorResponse(error, null); + return; + } + } + } + } + + // no response found specific topic partition + IllegalStateException exception = new IllegalStateException( + "Failed to read state summary for share partition " + partitionKey() + ); + readStateSummaryErrorResponse(Errors.forException(exception), exception); + } + + protected void readStateSummaryErrorResponse(Errors error, Exception exception) { + this.result.complete(new ReadShareGroupStateSummaryResponse( + ReadShareGroupStateSummaryResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in read state summary RPC. " + + (exception == null ? error.message() : exception.getMessage())))); + } + + @Override + protected void findCoordinatorErrorResponse(Errors error, Exception exception) { + this.result.complete(new ReadShareGroupStateSummaryResponse( + ReadShareGroupStateSummaryResponse.toErrorResponseData(partitionKey().topicId(), partitionKey().partition(), error, "Error in find coordinator. " + + (exception == null ? error.message() : exception.getMessage())))); + } + + protected CompletableFuture result() { + return result; + } + + @Override + protected boolean isBatchable() { + return true; + } + + @Override + protected RPCType rpcType() { + return RPCType.SUMMARY; + } + } + private class SendThread extends InterBrokerSendThread { private final ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue<>(); private final Random random; @@ -912,6 +1057,8 @@ public static AbstractRequest.Builder coalesceRequest return coalesceWrites(groupId, handlers); case READ: return coalesceReads(groupId, handlers); + case SUMMARY: + return coalesceReadSummaries(groupId, handlers); default: throw new RuntimeException("Unknown rpc type: " + rpcType); } @@ -969,5 +1116,29 @@ private static AbstractRequest.Builder coalesceReads( .setPartitions(entry.getValue())) .collect(Collectors.toList()))); } + + private static AbstractRequest.Builder coalesceReadSummaries(String groupId, List handlers) { + Map> partitionData = new HashMap<>(); + handlers.forEach(persisterStateManagerHandler -> { + assert persisterStateManagerHandler instanceof ReadStateSummaryHandler; + ReadStateSummaryHandler handler = (ReadStateSummaryHandler) persisterStateManagerHandler; + partitionData.computeIfAbsent(handler.partitionKey().topicId(), topicId -> new LinkedList<>()) + .add( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(handler.partitionKey().partition()) + .setLeaderEpoch(handler.leaderEpoch) + ); + }); + + return new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData() + .setGroupId(groupId) + .setTopics(partitionData.entrySet().stream() + .map(entry -> new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(entry.getKey()) + .setPartitions(entry.getValue())) + .collect(Collectors.toList())), + true + ); + } } } diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateParameters.java diff --git 
a/share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateResult.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateResult.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryParameters.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java similarity index 87% rename from share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java index 7172c6acef70b..7e0bee13c3806 100644 --- a/share/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java +++ b/server-common/src/main/java/org/apache/kafka/server/share/persister/ReadShareGroupStateSummaryResult.java @@ -26,9 +26,9 @@ * This class contains the result from {@link Persister#readSummary(ReadShareGroupStateSummaryParameters)}. */ public class ReadShareGroupStateSummaryResult implements PersisterResult { - private final List> topicsData; + private final List> topicsData; - private ReadShareGroupStateSummaryResult(List> topicsData) { + private ReadShareGroupStateSummaryResult(List> topicsData) { this.topicsData = topicsData; } @@ -37,7 +37,7 @@ public static ReadShareGroupStateSummaryResult from(ReadShareGroupStateSummaryRe .setTopicsData(data.results().stream() .map(readStateSummaryResult -> new TopicData<>(readStateSummaryResult.topicId(), readStateSummaryResult.partitions().stream() - .map(partitionResult -> PartitionFactory.newPartitionStateErrorData( + .map(partitionResult -> PartitionFactory.newPartitionStateSummaryData( partitionResult.partition(), partitionResult.stateEpoch(), partitionResult.startOffset(), partitionResult.errorCode(), partitionResult.errorMessage())) .collect(Collectors.toList()))) .collect(Collectors.toList())) @@ -45,9 +45,9 @@ public static ReadShareGroupStateSummaryResult from(ReadShareGroupStateSummaryRe } public static class Builder { - private List> topicsData; + private List> topicsData; - public Builder setTopicsData(List> topicsData) { + public Builder setTopicsData(List> topicsData) { this.topicsData = topicsData; return this; } @@ -56,4 +56,8 @@ public ReadShareGroupStateSummaryResult build() { return new ReadShareGroupStateSummaryResult(topicsData); } } + + public List> topicsData() { + return topicsData; + } } diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/ShareCoordinatorMetadataCacheHelper.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/ShareCoordinatorMetadataCacheHelper.java similarity index 100% rename from 
share/src/main/java/org/apache/kafka/server/share/persister/ShareCoordinatorMetadataCacheHelper.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/ShareCoordinatorMetadataCacheHelper.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/TopicData.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/TopicData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/TopicData.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/TopicData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateParameters.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateParameters.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateParameters.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateParameters.java diff --git a/share/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateResult.java b/server-common/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateResult.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateResult.java rename to server-common/src/main/java/org/apache/kafka/server/share/persister/WriteShareGroupStateResult.java diff --git a/server-common/src/main/java/org/apache/kafka/server/storage/log/FetchParams.java b/server-common/src/main/java/org/apache/kafka/server/storage/log/FetchParams.java index 7980ed52f4a08..9829ce76aedda 100644 --- a/server-common/src/main/java/org/apache/kafka/server/storage/log/FetchParams.java +++ b/server-common/src/main/java/org/apache/kafka/server/storage/log/FetchParams.java @@ -81,7 +81,7 @@ public boolean isFromConsumer() { } public boolean fetchOnlyLeader() { - return isFromFollower() || (isFromConsumer() && !clientMetadata.isPresent()) || shareFetchRequest; + return isFromFollower() || (isFromConsumer() && clientMetadata.isEmpty()) || shareFetchRequest; } public boolean hardMaxBytesLimit() { diff --git a/server-common/src/main/java/org/apache/kafka/server/storage/log/UnexpectedAppendOffsetException.java b/server-common/src/main/java/org/apache/kafka/server/storage/log/UnexpectedAppendOffsetException.java new file mode 100644 index 0000000000000..652b6745ca055 --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/storage/log/UnexpectedAppendOffsetException.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.server.storage.log; + +public class UnexpectedAppendOffsetException extends RuntimeException { + + public final long firstOffset; + public final long lastOffset; + + /** + * Indicates the follower or the future replica received records from the leader (or current + * replica) with first offset less than expected next offset. + * @param firstOffset The first offset of the records to append + * @param lastOffset The last offset of the records to append + */ + public UnexpectedAppendOffsetException(String message, long firstOffset, long lastOffset) { + super(message); + this.firstOffset = firstOffset; + this.lastOffset = lastOffset; + } +} diff --git a/server-common/src/main/java/org/apache/kafka/server/util/FutureUtils.java b/server-common/src/main/java/org/apache/kafka/server/util/FutureUtils.java index 14d963d626c7b..30af58b03fdb0 100644 --- a/server-common/src/main/java/org/apache/kafka/server/util/FutureUtils.java +++ b/server-common/src/main/java/org/apache/kafka/server/util/FutureUtils.java @@ -65,8 +65,7 @@ public static T waitWithLogging( timeout.setStackTrace(t.getStackTrace()); throw timeout; } catch (Throwable t) { - if (t instanceof ExecutionException) { - ExecutionException executionException = (ExecutionException) t; + if (t instanceof ExecutionException executionException) { t = executionException.getCause(); } log.error("{}Received a fatal error while waiting for {}", prefix, action, t); diff --git a/server-common/src/main/java/org/apache/kafka/server/util/InterBrokerSendThread.java b/server-common/src/main/java/org/apache/kafka/server/util/InterBrokerSendThread.java index 1ef60a547bcca..093946eb5f085 100644 --- a/server-common/src/main/java/org/apache/kafka/server/util/InterBrokerSendThread.java +++ b/server-common/src/main/java/org/apache/kafka/server/util/InterBrokerSendThread.java @@ -43,13 +43,13 @@ */ public abstract class InterBrokerSendThread extends ShutdownableThread { - protected volatile KafkaClient networkClient; + protected final KafkaClient networkClient; private final int requestTimeoutMs; private final Time time; private final UnsentRequests unsentRequests; - public InterBrokerSendThread( + protected InterBrokerSendThread( String name, KafkaClient networkClient, int requestTimeoutMs, @@ -58,7 +58,7 @@ public InterBrokerSendThread( this(name, networkClient, requestTimeoutMs, time, true); } - public InterBrokerSendThread( + protected InterBrokerSendThread( String name, KafkaClient networkClient, int requestTimeoutMs, diff --git a/server-common/src/main/java/org/apache/kafka/server/util/TranslatedValueMapView.java b/server-common/src/main/java/org/apache/kafka/server/util/TranslatedValueMapView.java index d269550ab8307..639d7d194519b 100644 --- a/server-common/src/main/java/org/apache/kafka/server/util/TranslatedValueMapView.java +++ b/server-common/src/main/java/org/apache/kafka/server/util/TranslatedValueMapView.java @@ -43,8 +43,7 @@ public Iterator> iterator() { @SuppressWarnings("rawtypes") @Override public boolean contains(Object o) { - if (!(o instanceof Entry)) return false; - Entry other = (Entry) o; + if (!(o instanceof Entry other)) return false; if (!underlyingMap.containsKey(other.getKey())) return false; B value = underlyingMap.get(other.getKey()); V translatedValue = valueMapping.apply(value); diff --git a/server-common/src/main/java/org/apache/kafka/timeline/BaseHashTable.java b/server-common/src/main/java/org/apache/kafka/timeline/BaseHashTable.java index 9d41aa65a520c..b8d2c9fc5fb58 100644 --- 
a/server-common/src/main/java/org/apache/kafka/timeline/BaseHashTable.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/BaseHashTable.java @@ -150,8 +150,7 @@ final T baseRemove(Object key) { Object object = elements[slot]; if (object == null) { return null; - } else if (object instanceof Object[]) { - Object[] curArray = (Object[]) object; + } else if (object instanceof Object[] curArray) { for (int i = 0; i < curArray.length; i++) { if (curArray[i].equals(key)) { size--; @@ -191,8 +190,7 @@ private void rehash(int newSize) { Object cur = elements[newSlot]; if (cur == null) { elements[newSlot] = object; - } else if (cur instanceof Object[]) { - Object[] curArray = (Object[]) cur; + } else if (cur instanceof Object[] curArray) { Object[] newArray = new Object[curArray.length + 1]; System.arraycopy(curArray, 0, newArray, 0, curArray.length); newArray[curArray.length] = object; @@ -225,8 +223,7 @@ static int findSlot(Object object, int numElements) { static void unpackSlot(List out, Object[] elements, int slot) { Object value = elements[slot]; if (value != null) { - if (value instanceof Object[]) { - Object[] array = (Object[]) value; + if (value instanceof Object[] array) { for (Object object : array) { out.add((T) object); } @@ -244,8 +241,7 @@ String baseToDebugString() { bld.append(String.format("%n%d: ", i)); if (slotObject == null) { bld.append("null"); - } else if (slotObject instanceof Object[]) { - Object[] array = (Object[]) slotObject; + } else if (slotObject instanceof Object[] array) { String prefix = ""; for (Object object : array) { bld.append(prefix); diff --git a/server-common/src/main/java/org/apache/kafka/timeline/SnapshotRegistry.java b/server-common/src/main/java/org/apache/kafka/timeline/SnapshotRegistry.java index 52ab96ecbcd66..b600b122f9b43 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/SnapshotRegistry.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/SnapshotRegistry.java @@ -21,17 +21,18 @@ import org.slf4j.Logger; +import java.lang.ref.WeakReference; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.stream.Collectors; - /** - * A registry containing snapshots of timeline data structures. - * We generally expect a small number of snapshots-- perhaps 1 or 2 at a time. - * Therefore, we use ArrayLists here rather than a data structure with higher overhead. + * A registry containing snapshots of timeline data structures. All timeline data structures must + * be registered here, so that they can be reverted to the expected state when desired. + * Because the registry only keeps a weak reference to each timeline data structure, it does not + * prevent them from being garbage collected. */ public class SnapshotRegistry { public static final long LATEST_EPOCH = Long.MAX_VALUE; @@ -107,12 +108,39 @@ public Snapshot next() { private final Snapshot head = new Snapshot(Long.MIN_VALUE); /** - * Collection of all Revertable registered with this registry + * A collection of all Revertable objects registered here. Since we store only weak + * references, every time we access a revertable through this list, we must check to + * see if it has been garbage collected. If so, WeakReference.get will return null. + * + * Although the garbage collector handles freeing the underlying Revertables, over + * time slots in the ArrayList will fill up with expired references. 
Therefore, after + * enough registrations, we scrub the ArrayList of the expired references by creating + * a new arraylist. */ - private final List revertables = new ArrayList<>(); + private List> revertables = new ArrayList<>(); + + /** + * The maximum number of registrations to allow before we compact the revertable list. + */ + private final int maxRegistrationsSinceScrub; + + /** + * The number of registrations we have done since removing all expired weak references. + */ + private int numRegistrationsSinceScrub = 0; + + /** + * The number of scrubs that we have done. + */ + private long numScrubs = 0; public SnapshotRegistry(LogContext logContext) { + this(logContext, 10_000); + } + + public SnapshotRegistry(LogContext logContext, int maxRegistrationsSinceScrub) { this.log = logContext.logger(SnapshotRegistry.class); + this.maxRegistrationsSinceScrub = maxRegistrationsSinceScrub; } /** @@ -283,21 +311,60 @@ public long latestEpoch() { return head.prev().epoch(); } + /** + * Return the number of scrub operations that we have done. + */ + public long numScrubs() { + return numScrubs; + } + /** * Associate a revertable with this registry. */ void register(Revertable revertable) { - revertables.add(revertable); + numRegistrationsSinceScrub++; + if (numRegistrationsSinceScrub > maxRegistrationsSinceScrub) { + scrub(); + } + revertables.add(new WeakReference<>(revertable)); + } + + /** + * Remove all expired weak references from the revertable list. + */ + void scrub() { + ArrayList> newRevertables = + new ArrayList<>(revertables.size() / 2); + for (WeakReference ref : revertables) { + if (ref.get() != null) { + newRevertables.add(ref); + } + } + numScrubs++; + this.revertables = newRevertables; + numRegistrationsSinceScrub = 0; } /** - * Delete all snapshots and resets all of the Revertable object registered. + * Delete all snapshots and reset all of the Revertable objects. 
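The registration path above trades strong references for WeakReference entries plus periodic compaction. Below is a minimal standalone sketch of that pattern, not taken from the patch, using a hypothetical Resettable interface in place of Revertable:

    import java.lang.ref.WeakReference;
    import java.util.ArrayList;
    import java.util.List;

    // Sketch of a registry that holds only weak references and compacts
    // expired entries once a registration threshold is crossed.
    interface Resettable {
        void reset();
    }

    final class WeakRegistry {
        private final int maxRegistrationsSinceScrub;
        private int numRegistrationsSinceScrub = 0;
        private List<WeakReference<Resettable>> entries = new ArrayList<>();

        WeakRegistry(int maxRegistrationsSinceScrub) {
            this.maxRegistrationsSinceScrub = maxRegistrationsSinceScrub;
        }

        void register(Resettable r) {
            if (++numRegistrationsSinceScrub > maxRegistrationsSinceScrub) {
                scrub();
            }
            entries.add(new WeakReference<>(r));
        }

        // Drop entries whose referents have already been garbage collected.
        void scrub() {
            List<WeakReference<Resettable>> survivors = new ArrayList<>(entries.size() / 2 + 1);
            for (WeakReference<Resettable> ref : entries) {
                if (ref.get() != null) {
                    survivors.add(ref);
                }
            }
            entries = survivors;
            numRegistrationsSinceScrub = 0;
        }

        // Reset every live referent; expired references are dropped as a side effect.
        void resetAll() {
            List<WeakReference<Resettable>> survivors = new ArrayList<>();
            for (WeakReference<Resettable> ref : entries) {
                Resettable r = ref.get();
                if (r != null) {
                    r.reset();
                    survivors.add(ref);
                }
            }
            entries = survivors;
            numRegistrationsSinceScrub = 0;
        }
    }

The threshold keeps compaction amortized: the list is rebuilt at most once per maxRegistrationsSinceScrub registrations, matching the default of 10_000 chosen above.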
*/ public void reset() { deleteSnapshotsUpTo(LATEST_EPOCH); - for (Revertable revertable : revertables) { - revertable.reset(); + ArrayList> newRevertables = new ArrayList<>(); + for (WeakReference ref : revertables) { + Revertable revertable = ref.get(); + if (revertable != null) { + try { + revertable.reset(); + } catch (Exception e) { + log.error("Error reverting {}", revertable, e); + } + newRevertables.add(ref); + } } + numScrubs++; + this.revertables = newRevertables; + numRegistrationsSinceScrub = 0; } } diff --git a/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashMap.java b/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashMap.java index aaf9126adeee8..019dcb0bebdc2 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashMap.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashMap.java @@ -386,9 +386,8 @@ public int hashCode() { public boolean equals(Object o) { if (o == this) return true; - if (!(o instanceof Map)) + if (!(o instanceof Map m)) return false; - Map m = (Map) o; if (m.size() != size()) return false; try { diff --git a/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashSet.java b/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashSet.java index f4eab55ba3e01..d30b3943880ee 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashSet.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/TimelineHashSet.java @@ -241,9 +241,8 @@ public int hashCode() { public boolean equals(Object o) { if (o == this) return true; - if (!(o instanceof Set)) + if (!(o instanceof Collection c)) return false; - Collection c = (Collection) o; if (c.size() != size()) return false; try { diff --git a/server-common/src/main/java/org/apache/kafka/timeline/TimelineInteger.java b/server-common/src/main/java/org/apache/kafka/timeline/TimelineInteger.java index d26e5dbcd622c..1d93b31c8bb05 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/TimelineInteger.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/TimelineInteger.java @@ -110,8 +110,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof TimelineInteger)) return false; - TimelineInteger other = (TimelineInteger) o; + if (!(o instanceof TimelineInteger other)) return false; return value == other.value; } diff --git a/server-common/src/main/java/org/apache/kafka/timeline/TimelineLong.java b/server-common/src/main/java/org/apache/kafka/timeline/TimelineLong.java index a51d832482b9d..ff376e43c4c03 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/TimelineLong.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/TimelineLong.java @@ -110,8 +110,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof TimelineLong)) return false; - TimelineLong other = (TimelineLong) o; + if (!(o instanceof TimelineLong other)) return false; return value == other.value; } diff --git a/server-common/src/main/java/org/apache/kafka/timeline/TimelineObject.java b/server-common/src/main/java/org/apache/kafka/timeline/TimelineObject.java index d5cf03d2364c4..677936a2022e7 100644 --- a/server-common/src/main/java/org/apache/kafka/timeline/TimelineObject.java +++ b/server-common/src/main/java/org/apache/kafka/timeline/TimelineObject.java @@ -109,8 +109,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof TimelineObject)) return false; - TimelineObject other = 
(TimelineObject) o; + if (!(o instanceof TimelineObject other)) return false; return value.equals(other.value); } diff --git a/server-common/src/test/java/org/apache/kafka/security/PasswordEncoderTest.java b/server-common/src/test/java/org/apache/kafka/security/PasswordEncoderTest.java deleted file mode 100644 index eaa8c3ec87de7..0000000000000 --- a/server-common/src/test/java/org/apache/kafka/security/PasswordEncoderTest.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.security; - -import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.types.Password; -import org.apache.kafka.server.util.Csv; - -import org.junit.jupiter.api.Test; - -import java.security.GeneralSecurityException; -import java.util.Map; - -import javax.crypto.SecretKeyFactory; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - -class PasswordEncoderTest { - - @Test - public void testEncodeDecode() throws GeneralSecurityException { - PasswordEncoder encoder = PasswordEncoder.encrypting(new Password("password-encoder-secret"), - null, - PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT, - PasswordEncoderConfigs.PASSWORD_ENCODER_KEY_LENGTH_DEFAULT, - PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_DEFAULT); - String password = "test-password"; - String encoded = encoder.encode(new Password(password)); - Map encodedMap = Csv.parseCsvMap(encoded); - assertEquals("4096", encodedMap.get(PasswordEncoder.ITERATIONS)); - assertEquals("128", encodedMap.get(PasswordEncoder.KEY_LENGTH)); - String defaultKeyFactoryAlgorithm; - try { - SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512"); - defaultKeyFactoryAlgorithm = "PBKDF2WithHmacSHA512"; - - } catch (Exception e) { - defaultKeyFactoryAlgorithm = "PBKDF2WithHmacSHA1"; - } - assertEquals(defaultKeyFactoryAlgorithm, encodedMap.get(PasswordEncoder.KEY_FACTORY_ALGORITHM)); - assertEquals("AES/CBC/PKCS5Padding", encodedMap.get(PasswordEncoder.CIPHER_ALGORITHM)); - verifyEncodedPassword(encoder, password, encoded); - } - - @Test - public void testEncoderConfigChange() throws GeneralSecurityException { - PasswordEncoder encoder = PasswordEncoder.encrypting(new Password("password-encoder-secret"), - "PBKDF2WithHmacSHA1", - "DES/CBC/PKCS5Padding", - 64, - 1024); - String password = "test-password"; - String encoded = encoder.encode(new Password(password)); - Map encodedMap = Csv.parseCsvMap(encoded); - assertEquals("1024", encodedMap.get(PasswordEncoder.ITERATIONS)); - assertEquals("64", encodedMap.get(PasswordEncoder.KEY_LENGTH)); - assertEquals("PBKDF2WithHmacSHA1", 
encodedMap.get(PasswordEncoder.KEY_FACTORY_ALGORITHM)); - assertEquals("DES/CBC/PKCS5Padding", encodedMap.get(PasswordEncoder.CIPHER_ALGORITHM)); - - // Test that decoding works even if PasswordEncoder algorithm, iterations etc. are altered - PasswordEncoder decoder = PasswordEncoder.encrypting(new Password("password-encoder-secret"), - "PBKDF2WithHmacSHA1", - "AES/CBC/PKCS5Padding", - 128, - 2048); - assertEquals(password, decoder.decode(encoded).value()); - - // Test that decoding fails if secret is altered - PasswordEncoder decoder2 = PasswordEncoder.encrypting(new Password("secret-2"), - "PBKDF2WithHmacSHA1", - "AES/CBC/PKCS5Padding", - 128, - 1024); - assertThrows(ConfigException.class, () -> decoder2.decode(encoded)); - } - - @Test - public void testEncodeDecodeAlgorithms() throws GeneralSecurityException { - verifyEncodeDecode(null, "DES/CBC/PKCS5Padding", 64); - verifyEncodeDecode(null, "DESede/CBC/PKCS5Padding", 192); - verifyEncodeDecode(null, "AES/CBC/PKCS5Padding", 128); - verifyEncodeDecode(null, "AES/CFB/PKCS5Padding", 128); - verifyEncodeDecode(null, "AES/OFB/PKCS5Padding", 128); - verifyEncodeDecode("PBKDF2WithHmacSHA1", PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT, 128); - verifyEncodeDecode(null, "AES/GCM/NoPadding", 128); - verifyEncodeDecode("PBKDF2WithHmacSHA256", PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT, 128); - verifyEncodeDecode("PBKDF2WithHmacSHA512", PasswordEncoderConfigs.PASSWORD_ENCODER_CIPHER_ALGORITHM_DEFAULT, 128); - } - - private void verifyEncodeDecode(String keyFactoryAlg, String cipherAlg, int keyLength) throws GeneralSecurityException { - PasswordEncoder encoder = PasswordEncoder.encrypting(new Password("password-encoder-secret"), - keyFactoryAlg, - cipherAlg, - keyLength, - PasswordEncoderConfigs.PASSWORD_ENCODER_ITERATIONS_DEFAULT); - String password = "test-password"; - String encoded = encoder.encode(new Password(password)); - verifyEncodedPassword(encoder, password, encoded); - } - - private void verifyEncodedPassword(PasswordEncoder encoder, String password, String encoded) throws GeneralSecurityException { - Map encodedMap = Csv.parseCsvMap(encoded); - assertEquals(String.valueOf(password.length()), encodedMap.get(PasswordEncoder.PASSWORD_LENGTH)); - assertNotNull(PasswordEncoder.base64Decode(encodedMap.get("salt")), "Invalid salt"); - assertNotNull(PasswordEncoder.base64Decode(encodedMap.get(PasswordEncoder.INITIALIZATION_VECTOR)), "Invalid encoding parameters"); - assertNotNull(PasswordEncoder.base64Decode(encodedMap.get(PasswordEncoder.ENCRYPTED_PASSWORD)), "Invalid encoded password"); - assertEquals(password, encoder.decode(encoded).value()); - } -} diff --git a/server-common/src/test/java/org/apache/kafka/server/common/FeatureTest.java b/server-common/src/test/java/org/apache/kafka/server/common/FeatureTest.java new file mode 100644 index 0000000000000..31e842435e398 --- /dev/null +++ b/server-common/src/test/java/org/apache/kafka/server/common/FeatureTest.java @@ -0,0 +1,293 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.common; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.apache.kafka.server.common.Feature.validateDefaultValueAndLatestProductionValue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class FeatureTest { + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testV0SupportedInEarliestMV(Feature feature) { + assertTrue(feature.featureVersions().length >= 1); + assertEquals(MetadataVersion.MINIMUM_KRAFT_VERSION, + feature.featureVersions()[0].bootstrapMetadataVersion()); + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testFromFeatureLevelAllFeatures(Feature feature) { + FeatureVersion[] featureImplementations = feature.featureVersions(); + int numFeatures = featureImplementations.length; + short latestProductionLevel = feature.latestProduction(); + + for (short i = 0; i < numFeatures; i++) { + short level = i; + if (latestProductionLevel < i) { + assertEquals(featureImplementations[i], feature.fromFeatureLevel(level, true)); + assertThrows(IllegalArgumentException.class, () -> feature.fromFeatureLevel(level, false)); + } else { + assertEquals(featureImplementations[i], feature.fromFeatureLevel(level, false)); + } + } + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testValidateVersionAllFeatures(Feature feature) { + for (FeatureVersion featureImpl : feature.featureVersions()) { + // Ensure the minimum bootstrap metadata version is included if no metadata version dependency. + Map deps = new HashMap<>(); + deps.putAll(featureImpl.dependencies()); + if (!deps.containsKey(MetadataVersion.FEATURE_NAME)) { + deps.put(MetadataVersion.FEATURE_NAME, MetadataVersion.MINIMUM_BOOTSTRAP_VERSION.featureLevel()); + } + + // Ensure that the feature is valid given the typical metadataVersionMapping and the dependencies. + // Note: Other metadata versions are valid, but this one should always be valid. 
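The validation exercised in this test always supplies a dependency map that contains a metadata.version entry. As an illustration of the kind of check involved, the sketch below uses hypothetical names and is not the production Feature.validateVersion implementation; it simply rejects a candidate version whose declared dependencies are not met by the supplied levels:

    import java.util.Map;

    final class DependencyCheck {
        // Illustrative only: reject a candidate feature level whose declared
        // dependencies are not satisfied by the finalized levels in the cluster.
        static void validate(String featureName,
                             short featureLevel,
                             Map<String, Short> declaredDependencies,
                             Map<String, Short> finalizedLevels) {
            for (Map.Entry<String, Short> dep : declaredDependencies.entrySet()) {
                Short finalized = finalizedLevels.get(dep.getKey());
                if (finalized == null || finalized < dep.getValue()) {
                    throw new IllegalArgumentException(
                        featureName + " level " + featureLevel + " requires " + dep.getKey() +
                        " at level " + dep.getValue() + " or above, but found " + finalized);
                }
            }
        }
    }

Whether the real implementation performs this via declared dependencies or an explicit metadata.version lookup is not shown here; the sketch only captures the dependency-level comparison that the assertions in testInvalidValidateVersion rely on.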
+ Feature.validateVersion(featureImpl, deps); + } + } + + @Test + public void testInvalidValidateVersion() { + // No MetadataVersion is invalid + assertThrows(IllegalArgumentException.class, + () -> Feature.validateVersion( + TestFeatureVersion.TEST_1, + Collections.emptyMap() + ) + ); + + // Using too low of a MetadataVersion is invalid + assertThrows(IllegalArgumentException.class, + () -> Feature.validateVersion( + TestFeatureVersion.TEST_1, + Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_0_IV1.featureLevel()) + ) + ); + + // Using a version that is lower than the dependency will fail. + assertThrows(IllegalArgumentException.class, + () -> Feature.validateVersion( + TestFeatureVersion.TEST_2, + Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_7_IV0.featureLevel()) + ) + ); + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testDefaultLevelAllFeatures(Feature feature) { + for (FeatureVersion featureImpl : feature.featureVersions()) { + // If features have the same bootstrapMetadataVersion, the highest level feature should be chosen. + short defaultLevel = feature.defaultLevel(featureImpl.bootstrapMetadataVersion()); + if (defaultLevel != featureImpl.featureLevel()) { + FeatureVersion otherFeature = feature.fromFeatureLevel(defaultLevel, true); + assertEquals(featureImpl.bootstrapMetadataVersion(), otherFeature.bootstrapMetadataVersion()); + assertTrue(defaultLevel > featureImpl.featureLevel()); + } + } + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testLatestProductionIsOneOfFeatureValues(Feature feature) { + assertTrue(feature.hasFeatureVersion(feature.latestProduction)); + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testLatestProductionIsNotBehindLatestMetadataVersion(Feature feature) { + assertTrue(feature.latestProduction() >= feature.defaultLevel(MetadataVersion.latestProduction())); + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + "UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testLatestProductionDependencyIsProductionReady(Feature feature) { + for (Map.Entry dependency: feature.latestProduction.dependencies().entrySet()) { + String featureName = dependency.getKey(); + if (!featureName.equals(MetadataVersion.FEATURE_NAME)) { + Feature dependencyFeature = Feature.featureFromName(featureName); + assertTrue(dependencyFeature.isProductionReady(dependency.getValue())); + } + } + } + + @ParameterizedTest + @EnumSource(value = Feature.class, names = { + "UNIT_TEST_VERSION_0", + "UNIT_TEST_VERSION_1", + 
"UNIT_TEST_VERSION_2", + "UNIT_TEST_VERSION_3", + "UNIT_TEST_VERSION_4", + "UNIT_TEST_VERSION_5", + "UNIT_TEST_VERSION_6", + "UNIT_TEST_VERSION_7"}, mode = EnumSource.Mode.EXCLUDE) + public void testDefaultVersionDependencyIsDefaultReady(Feature feature) { + for (Map.Entry dependency: feature.defaultVersion(MetadataVersion.LATEST_PRODUCTION).dependencies().entrySet()) { + String featureName = dependency.getKey(); + if (!featureName.equals(MetadataVersion.FEATURE_NAME)) { + Feature dependencyFeature = Feature.featureFromName(featureName); + assertTrue(dependency.getValue() <= dependencyFeature.defaultLevel(MetadataVersion.LATEST_PRODUCTION)); + } + } + } + + @ParameterizedTest + @EnumSource(MetadataVersion.class) + public void testDefaultTestVersion(MetadataVersion metadataVersion) { + short expectedVersion; + if (!metadataVersion.isLessThan(MetadataVersion.latestTesting())) { + expectedVersion = 2; + } else if (!metadataVersion.isLessThan(MetadataVersion.IBP_3_7_IV0)) { + expectedVersion = 1; + } else { + expectedVersion = 0; + } + assertEquals(expectedVersion, Feature.TEST_VERSION.defaultLevel(metadataVersion)); + } + + @Test + public void testUnstableTestVersion() { + // If the latest MetadataVersion is stable, we don't throw an error. In that case, we don't worry about unstable feature + // versions since all feature versions are stable. + if (MetadataVersion.latestProduction().isLessThan(MetadataVersion.latestTesting())) { + assertThrows(IllegalArgumentException.class, () -> + Feature.TEST_VERSION.fromFeatureLevel(Feature.TEST_VERSION.latestTesting(), false)); + } + Feature.TEST_VERSION.fromFeatureLevel(Feature.TEST_VERSION.latestTesting(), true); + } + + @Test + public void testValidateWithNonExistentLatestProduction() { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_0), + "Feature UNIT_TEST_VERSION_0 has latest production version UT_FV0_1 " + + "which is not one of its feature versions."); + } + + @Test + public void testValidateWithLaggingLatestProduction() { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_1), + "Feature UNIT_TEST_VERSION_1 has latest production value UT_FV1_0 " + + "smaller than its default version UT_FV1_1 with latest production MV."); + } + + @Test + public void testValidateWithDependencyNotProductionReady() { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_3), + "Feature UNIT_TEST_VERSION_3 has latest production FeatureVersion UT_FV3_1 with dependency " + + "UT_FV2_1 that is not production ready. 
(UNIT_TEST_VERSION_2 latest production: UT_FV2_0)"); + } + + @Test + public void testValidateWithDefaultValueDependencyAheadOfItsDefaultLevel() { + if (MetadataVersion.latestProduction().isLessThan(MetadataVersion.latestTesting())) { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_5), + "Feature UNIT_TEST_VERSION_5 has default FeatureVersion UT_FV5_1 when MV=3.7-IV0 with " + + "dependency UT_FV4_1 that is behind its default version UT_FV4_0."); + } + } + + @Test + public void testValidateWithMVDependencyNotProductionReady() { + if (MetadataVersion.latestProduction().isLessThan(MetadataVersion.latestTesting())) { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_6), + "Feature UNIT_TEST_VERSION_6 has latest production FeatureVersion UT_FV6_1 with " + + "MV dependency 4.0-IV3 that is not production ready. (MV latest production: 4.0-IV0)"); + } + } + + @Test + public void testValidateWithMVDependencyAheadOfBootstrapMV() { + assertThrows(IllegalArgumentException.class, () -> + validateDefaultValueAndLatestProductionValue(Feature.UNIT_TEST_VERSION_7), + "Feature UNIT_TEST_VERSION_7 has default FeatureVersion UT_FV7_0 when MV=3.0-IV1 with " + + "MV dependency 3.7-IV0 that is behind its bootstrap MV 3.0-IV1."); + } +} diff --git a/server-common/src/test/java/org/apache/kafka/server/common/FeaturesTest.java b/server-common/src/test/java/org/apache/kafka/server/common/FeaturesTest.java deleted file mode 100644 index dd74b6e2d8a9c..0000000000000 --- a/server-common/src/test/java/org/apache/kafka/server/common/FeaturesTest.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.server.common; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class FeaturesTest { - @ParameterizedTest - @EnumSource(Features.class) - public void testV0SupportedInEarliestMV(Features feature) { - assertTrue(feature.featureVersions().length >= 1); - assertEquals(MetadataVersion.MINIMUM_KRAFT_VERSION, - feature.featureVersions()[0].bootstrapMetadataVersion()); - } - - @ParameterizedTest - @EnumSource(Features.class) - public void testFromFeatureLevelAllFeatures(Features feature) { - FeatureVersion[] featureImplementations = feature.featureVersions(); - int numFeatures = featureImplementations.length; - short latestProductionLevel = feature.latestProduction(); - - for (short i = 0; i < numFeatures; i++) { - short level = i; - if (latestProductionLevel < i) { - assertEquals(featureImplementations[i], feature.fromFeatureLevel(level, true)); - assertThrows(IllegalArgumentException.class, () -> feature.fromFeatureLevel(level, false)); - } else { - assertEquals(featureImplementations[i], feature.fromFeatureLevel(level, false)); - } - } - } - - @ParameterizedTest - @EnumSource(Features.class) - public void testValidateVersionAllFeatures(Features feature) { - for (FeatureVersion featureImpl : feature.featureVersions()) { - // Ensure the minimum bootstrap metadata version is included if no metadata version dependency. - Map deps = new HashMap<>(); - deps.putAll(featureImpl.dependencies()); - if (!deps.containsKey(MetadataVersion.FEATURE_NAME)) { - deps.put(MetadataVersion.FEATURE_NAME, MetadataVersion.MINIMUM_BOOTSTRAP_VERSION.featureLevel()); - } - - // Ensure that the feature is valid given the typical metadataVersionMapping and the dependencies. - // Note: Other metadata versions are valid, but this one should always be valid. - Features.validateVersion(featureImpl, deps); - } - } - - @Test - public void testInvalidValidateVersion() { - // No MetadataVersion is invalid - assertThrows(IllegalArgumentException.class, - () -> Features.validateVersion( - TestFeatureVersion.TEST_1, - Collections.emptyMap() - ) - ); - - // Using too low of a MetadataVersion is invalid - assertThrows(IllegalArgumentException.class, - () -> Features.validateVersion( - TestFeatureVersion.TEST_1, - Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_2_8_IV0.featureLevel()) - ) - ); - - // Using a version that is lower than the dependency will fail. - assertThrows(IllegalArgumentException.class, - () -> Features.validateVersion( - TestFeatureVersion.TEST_2, - Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_7_IV0.featureLevel()) - ) - ); - } - - @ParameterizedTest - @EnumSource(Features.class) - public void testDefaultValueAllFeatures(Features feature) { - for (FeatureVersion featureImpl : feature.featureVersions()) { - // If features have the same bootstrapMetadataVersion, the highest level feature should be chosen. 
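The deleted testDefaultValueAllFeatures and its replacement testDefaultLevelAllFeatures appear to assert the same selection rule: the default is the highest feature level whose bootstrap metadata version is at or below the requested one, with ties on bootstrap version resolved toward the higher level. A rough standalone sketch of that rule, with int-encoded versions standing in for MetadataVersion (assumed, not the production code):

    final class DefaultLevelSketch {
        // Illustrative only: pick the highest feature level whose bootstrap
        // version does not exceed the requested one. bootstrapByLevel[i] holds
        // the int-encoded bootstrap version of feature level i, in ascending order.
        static short defaultLevel(int requestedVersion, int[] bootstrapByLevel) {
            short chosen = 0;
            for (short level = 0; level < bootstrapByLevel.length; level++) {
                if (bootstrapByLevel[level] <= requestedVersion) {
                    chosen = level; // later (higher) levels overwrite earlier ones
                }
            }
            return chosen;
        }
    }
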
- short defaultLevel = feature.defaultValue(featureImpl.bootstrapMetadataVersion()); - if (defaultLevel != featureImpl.featureLevel()) { - FeatureVersion otherFeature = feature.fromFeatureLevel(defaultLevel, true); - assertEquals(featureImpl.bootstrapMetadataVersion(), otherFeature.bootstrapMetadataVersion()); - assertTrue(defaultLevel > featureImpl.featureLevel()); - } - } - } - - @ParameterizedTest - @EnumSource(Features.class) - public void testLatestProductionMapsToLatestMetadataVersion(Features features) { - assertEquals(features.latestProduction(), features.defaultValue(MetadataVersion.LATEST_PRODUCTION)); - } - - @ParameterizedTest - @EnumSource(MetadataVersion.class) - public void testDefaultTestVersion(MetadataVersion metadataVersion) { - short expectedVersion; - if (!metadataVersion.isLessThan(MetadataVersion.latestTesting())) { - expectedVersion = 2; - } else if (!metadataVersion.isLessThan(MetadataVersion.IBP_3_7_IV0)) { - expectedVersion = 1; - } else { - expectedVersion = 0; - } - assertEquals(expectedVersion, Features.TEST_VERSION.defaultValue(metadataVersion)); - } - - @Test - public void testUnstableTestVersion() { - // If the latest MetadataVersion is stable, we don't throw an error. In that case, we don't worry about unstable feature - // versions since all feature versions are stable. - if (MetadataVersion.latestProduction().isLessThan(MetadataVersion.latestTesting())) { - assertThrows(IllegalArgumentException.class, () -> - Features.TEST_VERSION.fromFeatureLevel(Features.TEST_VERSION.latestTesting(), false)); - } - Features.TEST_VERSION.fromFeatureLevel(Features.TEST_VERSION.latestTesting(), true); - } -} diff --git a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java index fe8805e52edf0..49f3285b09e66 100644 --- a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.server.common; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.record.RecordVersion; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -27,8 +26,6 @@ import static org.apache.kafka.server.common.MetadataVersion.*; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; class MetadataVersionTest { @@ -50,102 +47,7 @@ public void testKRaftFeatureLevelsAtAndAfter3_0_IV1() { @Test @SuppressWarnings("checkstyle:JavaNCSS") public void testFromVersionString() { - assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0")); - assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.0")); - assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.1")); - // should throw an exception as long as IBP_8_0_IV0 is not defined - assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("8.0")); - - assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1")); - assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.0")); - assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.1")); - - assertEquals(IBP_0_8_2, 
MetadataVersion.fromVersionString("0.8.2")); - assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.0")); - assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.1")); - - assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0")); - assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.0")); - assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.1")); - - assertEquals(IBP_0_10_0_IV0, MetadataVersion.fromVersionString("0.10.0-IV0")); - - assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0")); - assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0")); - assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0-IV0")); - assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.1")); - - assertEquals(IBP_0_10_1_IV0, MetadataVersion.fromVersionString("0.10.1-IV0")); - assertEquals(IBP_0_10_1_IV1, MetadataVersion.fromVersionString("0.10.1-IV1")); - - assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1")); - assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.0")); - assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1-IV2")); - assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.1")); - - assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2")); - assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.0")); - assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2-IV0")); - assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.1")); - - assertEquals(IBP_0_11_0_IV0, MetadataVersion.fromVersionString("0.11.0-IV0")); - assertEquals(IBP_0_11_0_IV1, MetadataVersion.fromVersionString("0.11.0-IV1")); - - assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0")); - assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.0")); - assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0-IV2")); - assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.1")); - - assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0")); - assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0")); - assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0-IV0")); - assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.1")); - assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0")); - assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0")); - assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0-IV0")); - assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0-IV0")); - - assertEquals(IBP_1_1_IV0, MetadataVersion.fromVersionString("1.1-IV0")); - - assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0")); - assertEquals(IBP_2_0_IV0, MetadataVersion.fromVersionString("2.0-IV0")); - assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0-IV1")); - - assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1")); - assertEquals(IBP_2_1_IV0, MetadataVersion.fromVersionString("2.1-IV0")); - assertEquals(IBP_2_1_IV1, MetadataVersion.fromVersionString("2.1-IV1")); - assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1-IV2")); - - assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2")); - assertEquals(IBP_2_2_IV0, MetadataVersion.fromVersionString("2.2-IV0")); - 
assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2-IV1")); - - assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3")); - assertEquals(IBP_2_3_IV0, MetadataVersion.fromVersionString("2.3-IV0")); - assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3-IV1")); - - assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4")); - assertEquals(IBP_2_4_IV0, MetadataVersion.fromVersionString("2.4-IV0")); - assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4-IV1")); - - assertEquals(IBP_2_5_IV0, MetadataVersion.fromVersionString("2.5")); - assertEquals(IBP_2_5_IV0, MetadataVersion.fromVersionString("2.5-IV0")); - - assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6")); - assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6-IV0")); - - // 2.7-IV2 is the latest production version in the 2.7 line - assertEquals(IBP_2_7_IV2, MetadataVersion.fromVersionString("2.7")); - assertEquals(IBP_2_7_IV0, MetadataVersion.fromVersionString("2.7-IV0")); - assertEquals(IBP_2_7_IV1, MetadataVersion.fromVersionString("2.7-IV1")); - assertEquals(IBP_2_7_IV2, MetadataVersion.fromVersionString("2.7-IV2")); - - assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8")); - assertEquals(IBP_2_8_IV0, MetadataVersion.fromVersionString("2.8-IV0")); - assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8-IV1")); - assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0")); - assertEquals(IBP_3_0_IV0, MetadataVersion.fromVersionString("3.0-IV0")); assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0-IV1")); assertEquals(IBP_3_1_IV0, MetadataVersion.fromVersionString("3.1")); @@ -199,44 +101,8 @@ public void testFromVersionString() { assertEquals(IBP_4_0_IV3, MetadataVersion.fromVersionString("4.0-IV3")); } - @Test - public void testMinSupportedVersionFor() { - assertEquals(IBP_0_8_0, MetadataVersion.minSupportedFor(RecordVersion.V0)); - assertEquals(IBP_0_10_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V1)); - assertEquals(IBP_0_11_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V2)); - - // Ensure that all record versions have a defined min version so that we remember to update the method - for (RecordVersion recordVersion : RecordVersion.values()) { - assertNotNull(MetadataVersion.minSupportedFor(recordVersion)); - } - } - @Test public void testShortVersion() { - assertEquals("0.8.0", IBP_0_8_0.shortVersion()); - assertEquals("0.10.0", IBP_0_10_0_IV0.shortVersion()); - assertEquals("0.10.0", IBP_0_10_0_IV1.shortVersion()); - assertEquals("0.11.0", IBP_0_11_0_IV0.shortVersion()); - assertEquals("0.11.0", IBP_0_11_0_IV1.shortVersion()); - assertEquals("0.11.0", IBP_0_11_0_IV2.shortVersion()); - assertEquals("1.0", IBP_1_0_IV0.shortVersion()); - assertEquals("1.1", IBP_1_1_IV0.shortVersion()); - assertEquals("2.0", IBP_2_0_IV0.shortVersion()); - assertEquals("2.0", IBP_2_0_IV1.shortVersion()); - assertEquals("2.1", IBP_2_1_IV0.shortVersion()); - assertEquals("2.1", IBP_2_1_IV1.shortVersion()); - assertEquals("2.1", IBP_2_1_IV2.shortVersion()); - assertEquals("2.2", IBP_2_2_IV0.shortVersion()); - assertEquals("2.2", IBP_2_2_IV1.shortVersion()); - assertEquals("2.3", IBP_2_3_IV0.shortVersion()); - assertEquals("2.3", IBP_2_3_IV1.shortVersion()); - assertEquals("2.4", IBP_2_4_IV0.shortVersion()); - assertEquals("2.5", IBP_2_5_IV0.shortVersion()); - assertEquals("2.6", IBP_2_6_IV0.shortVersion()); - assertEquals("2.7", IBP_2_7_IV2.shortVersion()); - assertEquals("2.8", IBP_2_8_IV0.shortVersion()); - 
assertEquals("2.8", IBP_2_8_IV1.shortVersion()); - assertEquals("3.0", IBP_3_0_IV0.shortVersion()); assertEquals("3.0", IBP_3_0_IV1.shortVersion()); assertEquals("3.1", IBP_3_1_IV0.shortVersion()); assertEquals("3.2", IBP_3_2_IV0.shortVersion()); @@ -266,31 +132,6 @@ public void testShortVersion() { @Test public void testVersion() { - assertEquals("0.8.0", IBP_0_8_0.version()); - assertEquals("0.8.2", IBP_0_8_2.version()); - assertEquals("0.10.0-IV0", IBP_0_10_0_IV0.version()); - assertEquals("0.10.0-IV1", IBP_0_10_0_IV1.version()); - assertEquals("0.11.0-IV0", IBP_0_11_0_IV0.version()); - assertEquals("0.11.0-IV1", IBP_0_11_0_IV1.version()); - assertEquals("0.11.0-IV2", IBP_0_11_0_IV2.version()); - assertEquals("1.0-IV0", IBP_1_0_IV0.version()); - assertEquals("1.1-IV0", IBP_1_1_IV0.version()); - assertEquals("2.0-IV0", IBP_2_0_IV0.version()); - assertEquals("2.0-IV1", IBP_2_0_IV1.version()); - assertEquals("2.1-IV0", IBP_2_1_IV0.version()); - assertEquals("2.1-IV1", IBP_2_1_IV1.version()); - assertEquals("2.1-IV2", IBP_2_1_IV2.version()); - assertEquals("2.2-IV0", IBP_2_2_IV0.version()); - assertEquals("2.2-IV1", IBP_2_2_IV1.version()); - assertEquals("2.3-IV0", IBP_2_3_IV0.version()); - assertEquals("2.3-IV1", IBP_2_3_IV1.version()); - assertEquals("2.4-IV0", IBP_2_4_IV0.version()); - assertEquals("2.5-IV0", IBP_2_5_IV0.version()); - assertEquals("2.6-IV0", IBP_2_6_IV0.version()); - assertEquals("2.7-IV2", IBP_2_7_IV2.version()); - assertEquals("2.8-IV0", IBP_2_8_IV0.version()); - assertEquals("2.8-IV1", IBP_2_8_IV1.version()); - assertEquals("3.0-IV0", IBP_3_0_IV0.version()); assertEquals("3.0-IV1", IBP_3_0_IV1.version()); assertEquals("3.1-IV0", IBP_3_1_IV0.version()); assertEquals("3.2-IV0", IBP_3_2_IV0.version()); @@ -332,13 +173,12 @@ public void testMetadataChanged() { assertFalse(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_2_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_1_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV1)); - assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV0)); - assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_2_8_IV1)); + assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV1)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_3_IV1, IBP_3_3_IV0)); // Check that argument order doesn't matter - assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_0_IV0, IBP_3_2_IV0)); - assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_2_8_IV1, IBP_3_2_IV0)); + assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_1_IV0, IBP_3_2_IV0)); + assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_0_IV1, IBP_3_2_IV0)); } @Test @@ -430,42 +270,6 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { assertEquals(expectedVersion, metadataVersion.registerBrokerRecordVersion()); } - @ParameterizedTest - @EnumSource(value = MetadataVersion.class) - public void testGroupMetadataValueVersion(MetadataVersion metadataVersion) { - final short expectedVersion; - if (metadataVersion.isAtLeast(MetadataVersion.IBP_2_3_IV0)) { - expectedVersion = 3; - } else if (metadataVersion.isAtLeast(IBP_2_1_IV0)) { - expectedVersion = 2; - } else if (metadataVersion.isAtLeast(IBP_0_10_1_IV0)) { - expectedVersion = 1; - } else { - expectedVersion = 0; - } - assertEquals(expectedVersion, metadataVersion.groupMetadataValueVersion()); - } - - @ParameterizedTest - @EnumSource(value = MetadataVersion.class) - public void 
testOffsetCommitValueVersion(MetadataVersion metadataVersion) { - final short expectedVersion; - if (metadataVersion.isAtLeast(MetadataVersion.IBP_2_1_IV1)) { - expectedVersion = 3; - } else if (metadataVersion.isAtLeast(IBP_2_1_IV0)) { - expectedVersion = 2; - } else { - expectedVersion = 1; - } - assertEquals(expectedVersion, metadataVersion.offsetCommitValueVersion(false)); - } - - @ParameterizedTest - @EnumSource(value = MetadataVersion.class) - public void testOffsetCommitValueVersionWithExpiredTimestamp(MetadataVersion metadataVersion) { - assertEquals((short) 1, metadataVersion.offsetCommitValueVersion(true)); - } - @Test public void assertLatestProductionIsLessThanLatest() { assertTrue(LATEST_PRODUCTION.ordinal() < MetadataVersion.latestTesting().ordinal(), @@ -483,8 +287,6 @@ public void testProductionMetadataDontUseUnstableApiVersion() { MetadataVersion mv = MetadataVersion.latestProduction(); assertTrue(mv.listOffsetRequestVersion() <= ApiKeys.LIST_OFFSETS.latestVersion(false)); assertTrue(mv.fetchRequestVersion() <= ApiKeys.FETCH.latestVersion(false)); - assertTrue(mv.offsetForLeaderEpochRequestVersion() <= ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(false)); - assertTrue(mv.writeTxnMarkersRequestVersion() <= ApiKeys.WRITE_TXN_MARKERS.latestVersion(false)); } @Test diff --git a/server-common/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java b/server-common/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java new file mode 100644 index 0000000000000..88c83c88914f2 --- /dev/null +++ b/server-common/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java @@ -0,0 +1,1128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.server.share.persister; + +import org.apache.kafka.clients.KafkaClient; +import org.apache.kafka.clients.MockClient; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.FindCoordinatorResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.WriteShareGroupStateRequestData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.FindCoordinatorRequest; +import org.apache.kafka.common.requests.FindCoordinatorResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateRequest; +import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryRequest; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; +import org.apache.kafka.common.requests.WriteShareGroupStateRequest; +import org.apache.kafka.common.requests.WriteShareGroupStateResponse; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.share.SharePartitionKey; +import org.apache.kafka.server.util.MockTime; +import org.apache.kafka.server.util.timer.MockTimer; +import org.apache.kafka.server.util.timer.Timer; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import static org.apache.kafka.test.TestUtils.assertFutureThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +class DefaultStatePersisterTest { + private static final KafkaClient CLIENT = mock(KafkaClient.class); + private static final Time MOCK_TIME = new MockTime(); + private static final Timer MOCK_TIMER = new MockTimer(); + private static final ShareCoordinatorMetadataCacheHelper CACHE_HELPER = mock(ShareCoordinatorMetadataCacheHelper.class); + + private static final String HOST = "localhost"; + private static final int PORT = 9092; + + private static class DefaultStatePersisterBuilder { + + private KafkaClient client = CLIENT; + private Time time = MOCK_TIME; + private Timer timer = MOCK_TIMER; + private ShareCoordinatorMetadataCacheHelper cacheHelper = CACHE_HELPER; + + private DefaultStatePersisterBuilder withKafkaClient(KafkaClient client) { + this.client = client; + return this; + } + + private DefaultStatePersisterBuilder withCacheHelper(ShareCoordinatorMetadataCacheHelper cacheHelper) { + this.cacheHelper = cacheHelper; + return this; + } + + private DefaultStatePersisterBuilder withTime(Time time) { + this.time = time; + return this; + } + + private DefaultStatePersisterBuilder withTimer(Timer timer) { + this.timer = timer; + return this; + } + + public static DefaultStatePersisterBuilder builder() { + return new DefaultStatePersisterBuilder(); + } + + public DefaultStatePersister build() { + PersisterStateManager persisterStateManager = new 
PersisterStateManager(client, cacheHelper, time, timer); + return new DefaultStatePersister(persisterStateManager); + } + } + + private ShareCoordinatorMetadataCacheHelper getDefaultCacheHelper(Node suppliedNode) { + return new ShareCoordinatorMetadataCacheHelper() { + @Override + public boolean containsTopic(String topic) { + return false; + } + + @Override + public Node getShareCoordinator(SharePartitionKey key, String internalTopicName) { + return Node.noNode(); + } + + @Override + public List getClusterNodes() { + return Collections.singletonList(suppliedNode); + } + }; + } + + @Test + public void testWriteStateValidate() { + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 0; + int incorrectPartition = -1; + + // Request Parameters are null + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + CompletableFuture result = defaultStatePersister.writeState(null); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupTopicPartitionData is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder().setGroupTopicPartitionData(null).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(null).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicsData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.emptyList()).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(null, + Collections.singletonList(PartitionFactory.newPartitionStateBatchData( + partition, 1, 0, 0, null))))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // partitionData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(topicId, Collections.emptyList()))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + 
// partition value is incorrect + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(topicId, + Collections.singletonList(PartitionFactory.newPartitionStateBatchData( + incorrectPartition, 1, 0, 0, null))))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + } + + @Test + public void testReadStateValidate() { + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 0; + int incorrectPartition = -1; + + // Request Parameters are null + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + CompletableFuture result = defaultStatePersister.readState(null); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupTopicPartitionData is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder().setGroupTopicPartitionData(null).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(null).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicsData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.emptyList()).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(null, + Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(partition, 1)))) + ).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // partitionData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(topicId, Collections.emptyList()))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, 
IllegalArgumentException.class); + + // partition value is incorrect + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.singletonList(new TopicData<>(topicId, + Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(incorrectPartition, 1))))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + } + + @Test + public void testReadStateSummaryValidate() { + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 0; + int incorrectPartition = -1; + + // Request Parameters are null + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + CompletableFuture result = defaultStatePersister.readSummary(null); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupTopicPartitionData is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder().setGroupTopicPartitionData(null).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // groupId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(null).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicsData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(Collections.emptyList()).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // topicId is null + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(List.of(new TopicData<>(null, + List.of(PartitionFactory.newPartitionIdLeaderEpochData(partition, 1)))) + ).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + + // partitionData is empty + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(List.of(new TopicData<>(topicId, Collections.emptyList()))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + 
assertFutureThrows(result, IllegalArgumentException.class); + + // partition value is incorrect + defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); + result = defaultStatePersister.readSummary(new ReadShareGroupStateSummaryParameters.Builder() + .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() + .setGroupId(groupId) + .setTopicsData(List.of(new TopicData<>(topicId, + List.of(PartitionFactory.newPartitionIdLeaderEpochData(incorrectPartition, 1))))).build()).build()); + assertTrue(result.isDone()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalArgumentException.class); + } + + @Test + public void testWriteStateSuccess() { + + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId1 = Uuid.randomUuid(); + int partition1 = 10; + + Uuid topicId2 = Uuid.randomUuid(); + int partition2 = 8; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode1 = new Node(5, HOST, PORT); + Node coordinatorNode2 = new Node(6, HOST, PORT); + + String coordinatorKey1 = SharePartitionKey.asCoordinatorKey(groupId, topicId1, partition1); + String coordinatorKey2 = SharePartitionKey.asCoordinatorKey(groupId, topicId2, partition2); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey1), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(Collections.singletonList( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(5) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey2), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(Collections.singletonList( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(6) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom( + body -> { + WriteShareGroupStateRequest request = (WriteShareGroupStateRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId1 && requestPartition == partition1; + }, + new WriteShareGroupStateResponse(WriteShareGroupStateResponse.toResponseData(topicId1, partition1)), + coordinatorNode1); + + client.prepareResponseFrom( + body -> { + WriteShareGroupStateRequest request = (WriteShareGroupStateRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId2 && requestPartition == partition2; + }, + new WriteShareGroupStateResponse(WriteShareGroupStateResponse.toResponseData(topicId2, partition2)), + coordinatorNode2); + + ShareCoordinatorMetadataCacheHelper cacheHelper = 
getDefaultCacheHelper(suppliedNode); + + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder() + .withKafkaClient(client) + .withCacheHelper(cacheHelper) + .build(); + + WriteShareGroupStateParameters request = WriteShareGroupStateParameters.from( + new WriteShareGroupStateRequestData() + .setGroupId(groupId) + .setTopics(Arrays.asList( + new WriteShareGroupStateRequestData.WriteStateData() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList( + new WriteShareGroupStateRequestData.PartitionData() + .setPartition(partition1) + .setStateEpoch(0) + .setLeaderEpoch(1) + .setStartOffset(0) + .setStateBatches(Collections.singletonList(new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(0) + .setLastOffset(10) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0))) + )), + new WriteShareGroupStateRequestData.WriteStateData() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList( + new WriteShareGroupStateRequestData.PartitionData() + .setPartition(partition2) + .setStateEpoch(0) + .setLeaderEpoch(1) + .setStartOffset(0) + .setStateBatches(Arrays.asList( + new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(0) + .setLastOffset(10) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0), + new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(11) + .setLastOffset(20) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0))) + )) + )) + ); + + CompletableFuture resultFuture = defaultStatePersister.writeState(request); + + WriteShareGroupStateResult result = null; + try { + // adding long delay to allow for environment/GC issues + result = resultFuture.get(10L, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Unexpected exception", e); + } + + HashSet resultMap = new HashSet<>(); + result.topicsData().forEach( + topicData -> topicData.partitions().forEach( + partitionData -> resultMap.add((PartitionData) partitionData) + ) + ); + + + HashSet expectedResultMap = new HashSet<>(); + expectedResultMap.add((PartitionData) PartitionFactory.newPartitionErrorData(partition1, Errors.NONE.code(), null)); + + expectedResultMap.add((PartitionData) PartitionFactory.newPartitionErrorData(partition2, Errors.NONE.code(), null)); + + assertEquals(2, result.topicsData().size()); + assertEquals(expectedResultMap, resultMap); + } + + @Test + public void testReadStateSuccess() { + + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId1 = Uuid.randomUuid(); + int partition1 = 10; + + Uuid topicId2 = Uuid.randomUuid(); + int partition2 = 8; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode1 = new Node(5, HOST, PORT); + Node coordinatorNode2 = new Node(6, HOST, PORT); + + String coordinatorKey1 = SharePartitionKey.asCoordinatorKey(groupId, topicId1, partition1); + String coordinatorKey2 = SharePartitionKey.asCoordinatorKey(groupId, topicId2, partition2); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey1), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(Collections.singletonList( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(5) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> body 
instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey2), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(Collections.singletonList( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(6) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom( + body -> { + ReadShareGroupStateRequest request = (ReadShareGroupStateRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId1 && requestPartition == partition1; + }, + new ReadShareGroupStateResponse(ReadShareGroupStateResponse.toResponseData(topicId1, partition1, 0, 1, + Collections.singletonList(new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(0) + .setLastOffset(10) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0)))), + coordinatorNode1); + + client.prepareResponseFrom( + body -> { + ReadShareGroupStateRequest request = (ReadShareGroupStateRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId2 && requestPartition == partition2; + }, + new ReadShareGroupStateResponse(ReadShareGroupStateResponse.toResponseData(topicId2, partition2, 0, 1, + Arrays.asList(new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(0) + .setLastOffset(10) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0), + new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(11) + .setLastOffset(20) + .setDeliveryCount((short) 1) + .setDeliveryState((byte) 0)))), + coordinatorNode2); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); + + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder() + .withKafkaClient(client) + .withCacheHelper(cacheHelper) + .build(); + + ReadShareGroupStateParameters request = ReadShareGroupStateParameters.from( + new ReadShareGroupStateRequestData() + .setGroupId(groupId) + .setTopics(Arrays.asList( + new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateRequestData.PartitionData() + .setPartition(partition1) + .setLeaderEpoch(1) + )), + new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateRequestData.PartitionData() + .setPartition(partition2) + .setLeaderEpoch(1) + )) + )) + ); + + CompletableFuture resultFuture = defaultStatePersister.readState(request); + + ReadShareGroupStateResult result = null; + try { + // adding long delay to allow for environment/GC issues + result = resultFuture.get(10L, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Unexpected exception", e); + } + + HashSet resultMap = new HashSet<>(); + result.topicsData().forEach( + topicData -> topicData.partitions().forEach( + partitionData -> resultMap.add((PartitionData) partitionData) + ) + ); + + HashSet 
expectedResultMap = new HashSet<>(); + expectedResultMap.add( + (PartitionData) PartitionFactory.newPartitionAllData(partition1, 1, 0, Errors.NONE.code(), + null, Collections.singletonList(new PersisterStateBatch(0, 10, (byte) 0, (short) 1) + ))); + + expectedResultMap.add( + (PartitionData) PartitionFactory.newPartitionAllData(partition2, 1, 0, Errors.NONE.code(), + null, Arrays.asList( + new PersisterStateBatch(0, 10, (byte) 0, (short) 1), + new PersisterStateBatch(11, 20, (byte) 0, (short) 1) + ))); + + assertEquals(2, result.topicsData().size()); + assertEquals(expectedResultMap, resultMap); + } + + @Test + public void testReadStateSummarySuccess() { + + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId1 = Uuid.randomUuid(); + int partition1 = 10; + + Uuid topicId2 = Uuid.randomUuid(); + int partition2 = 8; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode1 = new Node(5, HOST, PORT); + Node coordinatorNode2 = new Node(6, HOST, PORT); + + String coordinatorKey1 = SharePartitionKey.asCoordinatorKey(groupId, topicId1, partition1); + String coordinatorKey2 = SharePartitionKey.asCoordinatorKey(groupId, topicId2, partition2); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey1), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(5) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey2), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(6) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom( + body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId1 && requestPartition == partition1; + }, + new ReadShareGroupStateSummaryResponse(ReadShareGroupStateSummaryResponse.toResponseData(topicId1, partition1, 0, 1)), + coordinatorNode1); + + client.prepareResponseFrom( + body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId2 && requestPartition == partition2; + }, + new ReadShareGroupStateSummaryResponse(ReadShareGroupStateSummaryResponse.toResponseData(topicId2, partition2, 0, 1)), + coordinatorNode2); + + ShareCoordinatorMetadataCacheHelper cacheHelper = 
getDefaultCacheHelper(suppliedNode); + + DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder() + .withKafkaClient(client) + .withCacheHelper(cacheHelper) + .build(); + + ReadShareGroupStateSummaryParameters request = ReadShareGroupStateSummaryParameters.from( + new ReadShareGroupStateSummaryRequestData() + .setGroupId(groupId) + .setTopics(Arrays.asList( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId1) + .setPartitions(List.of( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition1) + .setLeaderEpoch(1) + )), + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId2) + .setPartitions(List.of( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition2) + .setLeaderEpoch(1) + )) + )) + ); + + CompletableFuture resultFuture = defaultStatePersister.readSummary(request); + + ReadShareGroupStateSummaryResult result = null; + try { + // adding long delay to allow for environment/GC issues + result = resultFuture.get(10L, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Unexpected exception", e); + } + + HashSet resultMap = new HashSet<>(); + result.topicsData().forEach( + topicData -> topicData.partitions().forEach( + partitionData -> resultMap.add((PartitionData) partitionData) + ) + ); + + HashSet expectedResultMap = new HashSet<>(); + expectedResultMap.add( + (PartitionData) PartitionFactory.newPartitionStateSummaryData(partition1, 1, 0, Errors.NONE.code(), + null + )); + + expectedResultMap.add( + (PartitionData) PartitionFactory.newPartitionStateSummaryData(partition2, 1, 0, Errors.NONE.code(), + null + )); + + assertEquals(2, result.topicsData().size()); + assertEquals(expectedResultMap, resultMap); + } + + @Test + public void testWriteStateResponseToResultPartialResults() { + Map>> futureMap = new HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + .put(tp1.partition(), CompletableFuture.completedFuture( + new WriteShareGroupStateResponse( + WriteShareGroupStateResponse.toResponseData( + tp1.topicId(), + tp1.partition() + ) + ) + ) + ); + + // one entry has error + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.completedFuture( + new WriteShareGroupStateResponse( + WriteShareGroupStateResponse.toErrorResponseData( + tp2.topicId(), + tp2.partition(), + Errors.UNKNOWN_TOPIC_OR_PARTITION, + "unknown tp" + ) + ) + ) + ); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + WriteShareGroupStateResult results = dsp.writeResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + Collections.singletonList(PartitionFactory.newPartitionErrorData(tp1.partition(), Errors.NONE.code(), null)) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + Collections.singletonList(PartitionFactory.newPartitionErrorData(tp2.partition(), Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), "unknown tp")) + ) + ) + ); + } + + @Test + public void testWriteStateResponseToResultFailedFuture() { + Map>> futureMap = new 
HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + .put(tp1.partition(), CompletableFuture.completedFuture( + new WriteShareGroupStateResponse( + WriteShareGroupStateResponse.toResponseData( + tp1.topicId(), + tp1.partition() + ) + ) + ) + ); + + // one entry has failed future + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.failedFuture(new Exception("scary stuff"))); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + WriteShareGroupStateResult results = dsp.writeResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + Collections.singletonList(PartitionFactory.newPartitionErrorData(tp1.partition(), Errors.NONE.code(), null)) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + Collections.singletonList(PartitionFactory.newPartitionErrorData(tp2.partition(), Errors.UNKNOWN_SERVER_ERROR.code(), "Error writing state to share coordinator: java.lang.Exception: scary stuff")) + ) + ) + ); + } + + @Test + public void testReadStateResponseToResultPartialResults() { + Map>> futureMap = new HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + .put(tp1.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateResponse( + ReadShareGroupStateResponse.toResponseData( + tp1.topicId(), + tp1.partition(), + 1L, + 2, + Collections.emptyList() + ) + ) + ) + ); + + // one entry has error + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateResponse( + ReadShareGroupStateResponse.toErrorResponseData( + tp2.topicId(), + tp2.partition(), + Errors.UNKNOWN_TOPIC_OR_PARTITION, + "unknown tp" + ) + ) + ) + ); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + ReadShareGroupStateResult results = dsp.readResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + Collections.singletonList(PartitionFactory.newPartitionAllData(tp1.partition(), 2, 1L, Errors.NONE.code(), null, Collections.emptyList())) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + Collections.singletonList(PartitionFactory.newPartitionAllData(tp2.partition(), 0, 0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), "unknown tp", Collections.emptyList())) + ) + ) + ); + } + + @Test + public void testReadStateResponseToResultFailedFuture() { + Map>> futureMap = new HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + 
.put(tp1.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateResponse( + ReadShareGroupStateResponse.toResponseData( + tp1.topicId(), + tp1.partition(), + 1L, + 2, + Collections.emptyList() + ) + ) + ) + ); + + // one entry has failed future + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.failedFuture(new Exception("scary stuff"))); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + ReadShareGroupStateResult results = dsp.readResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + Collections.singletonList(PartitionFactory.newPartitionAllData(tp1.partition(), 2, 1L, Errors.NONE.code(), null, Collections.emptyList())) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + Collections.singletonList(PartitionFactory.newPartitionAllData(tp2.partition(), -1, -1L, Errors.UNKNOWN_SERVER_ERROR.code(), "Error reading state from share coordinator: java.lang.Exception: scary stuff", Collections.emptyList())) + ) + ) + ); + } + + @Test + public void testReadStateSummaryResponseToResultPartialResults() { + Map>> futureMap = new HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + .put(tp1.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponse( + ReadShareGroupStateSummaryResponse.toResponseData( + tp1.topicId(), + tp1.partition(), + 1L, + 2 + ) + ) + ) + ); + + // one entry has error + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponse( + ReadShareGroupStateSummaryResponse.toErrorResponseData( + tp2.topicId(), + tp2.partition(), + Errors.UNKNOWN_TOPIC_OR_PARTITION, + "unknown tp" + ) + ) + ) + ); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + ReadShareGroupStateSummaryResult results = dsp.readSummaryResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + List.of(PartitionFactory.newPartitionStateSummaryData(tp1.partition(), 2, 1L, Errors.NONE.code(), null)) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + List.of(PartitionFactory.newPartitionStateSummaryData(tp2.partition(), 0, 0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), "unknown tp")) + ) + ) + ); + } + + @Test + public void testReadStateSummaryResponseToResultFailedFuture() { + Map>> futureMap = new HashMap<>(); + TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), 1, null); + + // one entry has valid results + futureMap.computeIfAbsent(tp1.topicId(), k -> new HashMap<>()) + .put(tp1.partition(), CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponse( + ReadShareGroupStateSummaryResponse.toResponseData( + tp1.topicId(), + tp1.partition(), + 1L, + 2 + ) + ) + ) 
+ ); + + // one entry has failed future + futureMap.computeIfAbsent(tp2.topicId(), k -> new HashMap<>()) + .put(tp2.partition(), CompletableFuture.failedFuture(new Exception("scary stuff"))); + + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + + ReadShareGroupStateSummaryResult results = dsp.readSummaryResponsesToResult(futureMap); + + // results should contain partial results + assertEquals(2, results.topicsData().size()); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp1.topicId(), + List.of(PartitionFactory.newPartitionStateSummaryData(tp1.partition(), 2, 1L, Errors.NONE.code(), null)) + ) + ) + ); + assertTrue( + results.topicsData().contains( + new TopicData<>( + tp2.topicId(), + List.of(PartitionFactory.newPartitionStateSummaryData(tp2.partition(), -1, -1L, Errors.UNKNOWN_SERVER_ERROR.code(), "Error reading state from share coordinator: java.lang.Exception: scary stuff")) + ) + ) + ); + } + + @Test + public void testDefaultPersisterClose() { + PersisterStateManager psm = mock(PersisterStateManager.class); + DefaultStatePersister dsp = new DefaultStatePersister(psm); + try { + verify(psm, times(0)).stop(); + + dsp.stop(); + + verify(psm, times(1)).stop(); + } catch (Exception e) { + fail("Unexpected exception", e); + } + } +} diff --git a/share/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java b/server-common/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java similarity index 72% rename from share/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java rename to server-common/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java index e410c2e3588a8..afa9b07aa177f 100644 --- a/share/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/share/persister/PersisterStateManagerTest.java @@ -24,6 +24,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractRequest; @@ -31,6 +32,8 @@ import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.ReadShareGroupStateRequest; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryRequest; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.WriteShareGroupStateRequest; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.apache.kafka.common.utils.Time; @@ -2070,6 +2073,776 @@ public void testReadStateRequestFailureMaxRetriesExhausted() { } } + @Test + public void testReadStateSummaryRequestCoordinatorFoundSuccessfully() { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode = new Node(1, HOST, PORT); + + String coordinatorKey = SharePartitionKey.asCoordinatorKey(groupId, topicId, partition); + + 
client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(1) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture<ReadShareGroupStateSummaryResponse> future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture<ReadShareGroupStateSummaryResponse> resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(1)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + + // Verifying the coordinator node was populated correctly by the FIND_COORDINATOR request + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned is correct + assertEquals(partition, partitionResult.partition()); + assertEquals(Errors.NONE.code(), partitionResult.errorCode()); + assertEquals(1, partitionResult.stateEpoch()); + assertEquals(0, partitionResult.startOffset()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestIllegalStateCoordinatorFoundSuccessfully() { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode = new Node(1, HOST, PORT); + + String coordinatorKey = SharePartitionKey.asCoordinatorKey(groupId, topicId, partition); + + client.prepareResponseFrom(body -> 
body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(1) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(Uuid.randomUuid()) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(500) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture<ReadShareGroupStateSummaryResponse> future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture<ReadShareGroupStateSummaryResponse> resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(1)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + + // Verifying the coordinator node was populated correctly by the FIND_COORDINATOR request + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned is correct + assertEquals(Errors.UNKNOWN_SERVER_ERROR.code(), partitionResult.errorCode()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestRetryWithNotCoordinatorSuccessfulOnRetry() throws ExecutionException, InterruptedException { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode = new Node(1, HOST, PORT); + + String coordinatorKey = SharePartitionKey.asCoordinatorKey(groupId, topicId, partition); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == 
FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setErrorCode(Errors.NOT_COORDINATOR.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(1) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture<ReadShareGroupStateSummaryResponse> future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture<ReadShareGroupStateSummaryResponse> resultFuture = handler.result(); + + TestUtils.waitForCondition(resultFuture::isDone, TestUtils.DEFAULT_MAX_WAIT_MS, 10L, () -> "Failed to get result from future"); + + ReadShareGroupStateSummaryResponse result = resultFuture.get(); + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(2)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + + // Verifying the coordinator node was populated correctly by the FIND_COORDINATOR request + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned is correct + assertEquals(partition, partitionResult.partition()); + assertEquals(Errors.NONE.code(), partitionResult.errorCode()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestCoordinatorFoundOnRetry() { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid 
topicId = Uuid.randomUuid(); + int partition = 10; + + Node suppliedNode = new Node(0, HOST, PORT); + Node coordinatorNode = new Node(1, HOST, PORT); + + String coordinatorKey = SharePartitionKey.asCoordinatorKey(groupId, topicId, partition); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setErrorCode(Errors.NOT_COORDINATOR.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest + && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() + && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey), + new FindCoordinatorResponse( + new FindCoordinatorResponseData() + .setCoordinators(List.of( + new FindCoordinatorResponseData.Coordinator() + .setNodeId(1) + .setHost(HOST) + .setPort(PORT) + .setErrorCode(Errors.NONE.code()) + )) + ), + suppliedNode + ); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture<ReadShareGroupStateSummaryResponse> future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture<ReadShareGroupStateSummaryResponse> resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(2)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + + // Verifying the coordinator node was populated correctly by the FIND_COORDINATOR request + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned is correct + assertEquals(partition, partitionResult.partition()); + 
assertEquals(Errors.NONE.code(), partitionResult.errorCode()); + assertEquals(1, partitionResult.stateEpoch()); + assertEquals(0, partitionResult.startOffset()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestWithCoordinatorNodeLookup() { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node coordinatorNode = new Node(1, HOST, PORT); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getCoordinatorCacheHelper(coordinatorNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture<ReadShareGroupStateSummaryResponse> future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture<ReadShareGroupStateSummaryResponse> resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(0)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + verify(handler, times(1)).onComplete(any()); + + // Verifying the coordinator node was populated correctly by the constructor + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned is correct + assertEquals(partition, partitionResult.partition()); + assertEquals(Errors.NONE.code(), partitionResult.errorCode()); + assertEquals(1, partitionResult.stateEpoch()); + assertEquals(0, partitionResult.startOffset()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestRetryWithCoordinatorNodeLookup() { + MockClient client = new MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node coordinatorNode = new Node(1, HOST, PORT); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = 
(ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.NONE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getCoordinatorCacheHelper(coordinatorNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + MAX_RPC_RETRY_ATTEMPTS, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(0)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + verify(handler, times(2)).onComplete(any()); + + // Verifying the coordinator node was populated correctly by the constructor + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned in correct + assertEquals(partition, partitionResult.partition()); + assertEquals(Errors.NONE.code(), partitionResult.errorCode()); + assertEquals(1, partitionResult.stateEpoch()); + assertEquals(0, partitionResult.startOffset()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { + fail("Failed to stop state manager", e); + } + } + + @Test + public void testReadStateSummaryRequestFailureMaxRetriesExhausted() { + MockClient client = new 
MockClient(MOCK_TIME); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 10; + + Node coordinatorNode = new Node(1, HOST, PORT); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + client.prepareResponseFrom(body -> { + ReadShareGroupStateSummaryRequest request = (ReadShareGroupStateSummaryRequest) body; + String requestGroupId = request.data().groupId(); + Uuid requestTopicId = request.data().topics().get(0).topicId(); + int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); + + return requestGroupId.equals(groupId) && requestTopicId == topicId && requestPartition == partition; + }, new ReadShareGroupStateSummaryResponse( + new ReadShareGroupStateSummaryResponseData() + .setResults(List.of( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(List.of( + new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) + .setErrorMessage("") + .setStateEpoch(1) + .setStartOffset(0) + )) + )) + ), coordinatorNode); + + ShareCoordinatorMetadataCacheHelper cacheHelper = getCoordinatorCacheHelper(coordinatorNode); + + PersisterStateManager stateManager = PersisterStateManagerBuilder.builder() + .withKafkaClient(client) + .withTimer(mockTimer) + .withCacheHelper(cacheHelper) + .build(); + + stateManager.start(); + + CompletableFuture future = new CompletableFuture<>(); + + PersisterStateManager.ReadStateSummaryHandler handler = spy(stateManager.new ReadStateSummaryHandler( + groupId, + topicId, + partition, + 0, + future, + REQUEST_BACKOFF_MS, + REQUEST_BACKOFF_MAX_MS, + 2, + null + )); + + stateManager.enqueue(handler); + + CompletableFuture resultFuture = handler.result(); + + ReadShareGroupStateSummaryResponse result = null; + try { + result = resultFuture.get(); + } catch (Exception e) { + fail("Failed to get result from future", e); + } + + ReadShareGroupStateSummaryResponseData.PartitionResult partitionResult = result.data().results().get(0).partitions().get(0); + + verify(handler, times(0)).findShareCoordinatorBuilder(); + verify(handler, times(0)).requestBuilder(); + verify(handler, times(2)).onComplete(any()); + + // Verifying the coordinator node was populated correctly by the constructor + assertEquals(coordinatorNode, handler.getCoordinatorNode()); + + // Verifying the result returned in correct + assertEquals(partition, partitionResult.partition()); + assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS.code(), partitionResult.errorCode()); + + try { + // Stopping the state manager + stateManager.stop(); + } catch (Exception e) { 
+ fail("Failed to stop state manager", e); + } + } + @Test public void testPersisterStateManagerClose() { KafkaClient client = mock(KafkaClient.class); diff --git a/server-common/src/test/java/org/apache/kafka/timeline/SnapshotRegistryTest.java b/server-common/src/test/java/org/apache/kafka/timeline/SnapshotRegistryTest.java index 264c9231f9c4c..dacc91fa931d4 100644 --- a/server-common/src/test/java/org/apache/kafka/timeline/SnapshotRegistryTest.java +++ b/server-common/src/test/java/org/apache/kafka/timeline/SnapshotRegistryTest.java @@ -94,4 +94,28 @@ public void testCreateSnapshotOfLatest() { assertEquals(latest, duplicate); } + + @Test + public void testScrub() { + SnapshotRegistry registry = new SnapshotRegistry(new LogContext(), 2); + new TimelineInteger(registry).set(123); + new TimelineInteger(registry).set(123); + assertEquals(0, registry.numScrubs()); + new TimelineInteger(registry).set(123); + assertEquals(1, registry.numScrubs()); + new TimelineInteger(registry).set(123); + new TimelineInteger(registry).set(123); + new TimelineInteger(registry).set(123); + assertEquals(2, registry.numScrubs()); + } + + @Test + public void testReset() { + SnapshotRegistry registry = new SnapshotRegistry(new LogContext(), 2); + TimelineInteger integer = new TimelineInteger(registry); + integer.set(123); + registry.reset(); + assertEquals(0, integer.get()); + assertEquals(1, registry.numScrubs()); + } } diff --git a/server-common/src/test/java/org/apache/kafka/timeline/SnapshottableHashTableTest.java b/server-common/src/test/java/org/apache/kafka/timeline/SnapshottableHashTableTest.java index d51ed67098f67..e731960d985e4 100644 --- a/server-common/src/test/java/org/apache/kafka/timeline/SnapshottableHashTableTest.java +++ b/server-common/src/test/java/org/apache/kafka/timeline/SnapshottableHashTableTest.java @@ -74,10 +74,9 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof TestElement)) { + if (!(o instanceof TestElement other)) { return false; } - TestElement other = (TestElement) o; return other.i == i; } diff --git a/server-common/src/test/resources/log4j2.yaml b/server-common/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..be546a18b55e6 --- /dev/null +++ b/server-common/src/test/resources/log4j2.yaml @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: INFO diff --git a/server/src/main/java/org/apache/kafka/network/RequestConvertToJson.java b/server/src/main/java/org/apache/kafka/network/RequestConvertToJson.java index ac744ef7bac2d..d2bda3245f310 100644 --- a/server/src/main/java/org/apache/kafka/network/RequestConvertToJson.java +++ b/server/src/main/java/org/apache/kafka/network/RequestConvertToJson.java @@ -90,6 +90,8 @@ import org.apache.kafka.common.message.DescribeProducersResponseDataJsonConverter; import org.apache.kafka.common.message.DescribeQuorumRequestDataJsonConverter; import org.apache.kafka.common.message.DescribeQuorumResponseDataJsonConverter; +import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestDataJsonConverter; +import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseDataJsonConverter; import org.apache.kafka.common.message.DescribeTopicPartitionsRequestDataJsonConverter; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseDataJsonConverter; import org.apache.kafka.common.message.DescribeTransactionsRequestDataJsonConverter; @@ -175,6 +177,10 @@ import org.apache.kafka.common.message.ShareGroupHeartbeatResponseDataJsonConverter; import org.apache.kafka.common.message.StopReplicaRequestDataJsonConverter; import org.apache.kafka.common.message.StopReplicaResponseDataJsonConverter; +import org.apache.kafka.common.message.StreamsGroupDescribeRequestDataJsonConverter; +import org.apache.kafka.common.message.StreamsGroupDescribeResponseDataJsonConverter; +import org.apache.kafka.common.message.StreamsGroupHeartbeatRequestDataJsonConverter; +import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseDataJsonConverter; import org.apache.kafka.common.message.SyncGroupRequestDataJsonConverter; import org.apache.kafka.common.message.SyncGroupResponseDataJsonConverter; import org.apache.kafka.common.message.TxnOffsetCommitRequestDataJsonConverter; @@ -270,6 +276,8 @@ import org.apache.kafka.common.requests.DescribeProducersResponse; import org.apache.kafka.common.requests.DescribeQuorumRequest; import org.apache.kafka.common.requests.DescribeQuorumResponse; +import org.apache.kafka.common.requests.DescribeShareGroupOffsetsRequest; +import org.apache.kafka.common.requests.DescribeShareGroupOffsetsResponse; import org.apache.kafka.common.requests.DescribeTopicPartitionsRequest; import org.apache.kafka.common.requests.DescribeTopicPartitionsResponse; import org.apache.kafka.common.requests.DescribeTransactionsRequest; @@ -356,6 +364,10 @@ import org.apache.kafka.common.requests.ShareGroupHeartbeatResponse; import org.apache.kafka.common.requests.StopReplicaRequest; import org.apache.kafka.common.requests.StopReplicaResponse; +import org.apache.kafka.common.requests.StreamsGroupDescribeRequest; +import org.apache.kafka.common.requests.StreamsGroupDescribeResponse; +import org.apache.kafka.common.requests.StreamsGroupHeartbeatRequest; +import org.apache.kafka.common.requests.StreamsGroupHeartbeatResponse; import org.apache.kafka.common.requests.SyncGroupRequest; import org.apache.kafka.common.requests.SyncGroupResponse; import org.apache.kafka.common.requests.TxnOffsetCommitRequest; @@ -393,6 +405,8 @@ public static JsonNode request(AbstractRequest request) { return 
AddOffsetsToTxnRequestDataJsonConverter.write(((AddOffsetsToTxnRequest) request).data(), request.version()); case ADD_PARTITIONS_TO_TXN: return AddPartitionsToTxnRequestDataJsonConverter.write(((AddPartitionsToTxnRequest) request).data(), request.version()); + case ADD_RAFT_VOTER: + return AddRaftVoterRequestDataJsonConverter.write(((AddRaftVoterRequest) request).data(), request.version()); case ALLOCATE_PRODUCER_IDS: return AllocateProducerIdsRequestDataJsonConverter.write(((AllocateProducerIdsRequest) request).data(), request.version()); case ALTER_CLIENT_QUOTAS: @@ -461,6 +475,8 @@ public static JsonNode request(AbstractRequest request) { return DescribeProducersRequestDataJsonConverter.write(((DescribeProducersRequest) request).data(), request.version()); case DESCRIBE_QUORUM: return DescribeQuorumRequestDataJsonConverter.write(((DescribeQuorumRequest) request).data(), request.version()); + case DESCRIBE_SHARE_GROUP_OFFSETS: + return DescribeShareGroupOffsetsRequestDataJsonConverter.write(((DescribeShareGroupOffsetsRequest) request).data(), request.version()); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsRequestDataJsonConverter.write(((DescribeTopicPartitionsRequest) request).data(), request.version()); case DESCRIBE_TRANSACTIONS: @@ -527,6 +543,8 @@ public static JsonNode request(AbstractRequest request) { return ReadShareGroupStateRequestDataJsonConverter.write(((ReadShareGroupStateRequest) request).data(), request.version()); case READ_SHARE_GROUP_STATE_SUMMARY: return ReadShareGroupStateSummaryRequestDataJsonConverter.write(((ReadShareGroupStateSummaryRequest) request).data(), request.version()); + case REMOVE_RAFT_VOTER: + return RemoveRaftVoterRequestDataJsonConverter.write(((RemoveRaftVoterRequest) request).data(), request.version()); case RENEW_DELEGATION_TOKEN: return RenewDelegationTokenRequestDataJsonConverter.write(((RenewDelegationTokenRequest) request).data(), request.version()); case SASL_AUTHENTICATE: @@ -541,6 +559,10 @@ public static JsonNode request(AbstractRequest request) { return ShareGroupDescribeRequestDataJsonConverter.write(((ShareGroupDescribeRequest) request).data(), request.version()); case SHARE_GROUP_HEARTBEAT: return ShareGroupHeartbeatRequestDataJsonConverter.write(((ShareGroupHeartbeatRequest) request).data(), request.version()); + case STREAMS_GROUP_DESCRIBE: + return StreamsGroupDescribeRequestDataJsonConverter.write(((StreamsGroupDescribeRequest) request).data(), request.version()); + case STREAMS_GROUP_HEARTBEAT: + return StreamsGroupHeartbeatRequestDataJsonConverter.write(((StreamsGroupHeartbeatRequest) request).data(), request.version()); case STOP_REPLICA: return StopReplicaRequestDataJsonConverter.write(((StopReplicaRequest) request).data(), request.version()); case SYNC_GROUP: @@ -553,18 +575,14 @@ public static JsonNode request(AbstractRequest request) { return UpdateFeaturesRequestDataJsonConverter.write(((UpdateFeaturesRequest) request).data(), request.version()); case UPDATE_METADATA: return UpdateMetadataRequestDataJsonConverter.write(((UpdateMetadataRequest) request).data(), request.version()); + case UPDATE_RAFT_VOTER: + return UpdateRaftVoterRequestDataJsonConverter.write(((UpdateRaftVoterRequest) request).data(), request.version()); case VOTE: return VoteRequestDataJsonConverter.write(((VoteRequest) request).data(), request.version()); case WRITE_SHARE_GROUP_STATE: return WriteShareGroupStateRequestDataJsonConverter.write(((WriteShareGroupStateRequest) request).data(), request.version()); case WRITE_TXN_MARKERS: 
return WriteTxnMarkersRequestDataJsonConverter.write(((WriteTxnMarkersRequest) request).data(), request.version()); - case ADD_RAFT_VOTER: - return AddRaftVoterRequestDataJsonConverter.write(((AddRaftVoterRequest) request).data(), request.version()); - case REMOVE_RAFT_VOTER: - return RemoveRaftVoterRequestDataJsonConverter.write(((RemoveRaftVoterRequest) request).data(), request.version()); - case UPDATE_RAFT_VOTER: - return UpdateRaftVoterRequestDataJsonConverter.write(((UpdateRaftVoterRequest) request).data(), request.version()); default: throw new IllegalStateException("ApiKey " + request.apiKey() + " is not currently handled in `request`, the " + "code should be updated to do so."); @@ -577,6 +595,8 @@ public static JsonNode response(AbstractResponse response, short version) { return AddOffsetsToTxnResponseDataJsonConverter.write(((AddOffsetsToTxnResponse) response).data(), version); case ADD_PARTITIONS_TO_TXN: return AddPartitionsToTxnResponseDataJsonConverter.write(((AddPartitionsToTxnResponse) response).data(), version); + case ADD_RAFT_VOTER: + return AddRaftVoterResponseDataJsonConverter.write(((AddRaftVoterResponse) response).data(), version); case ALLOCATE_PRODUCER_IDS: return AllocateProducerIdsResponseDataJsonConverter.write(((AllocateProducerIdsResponse) response).data(), version); case ALTER_CLIENT_QUOTAS: @@ -645,6 +665,8 @@ public static JsonNode response(AbstractResponse response, short version) { return DescribeProducersResponseDataJsonConverter.write(((DescribeProducersResponse) response).data(), version); case DESCRIBE_QUORUM: return DescribeQuorumResponseDataJsonConverter.write(((DescribeQuorumResponse) response).data(), version); + case DESCRIBE_SHARE_GROUP_OFFSETS: + return DescribeShareGroupOffsetsResponseDataJsonConverter.write(((DescribeShareGroupOffsetsResponse) response).data(), version); case DESCRIBE_TOPIC_PARTITIONS: return DescribeTopicPartitionsResponseDataJsonConverter.write(((DescribeTopicPartitionsResponse) response).data(), version); case DESCRIBE_TRANSACTIONS: @@ -711,6 +733,8 @@ public static JsonNode response(AbstractResponse response, short version) { return ReadShareGroupStateResponseDataJsonConverter.write(((ReadShareGroupStateResponse) response).data(), version); case READ_SHARE_GROUP_STATE_SUMMARY: return ReadShareGroupStateSummaryResponseDataJsonConverter.write(((ReadShareGroupStateSummaryResponse) response).data(), version); + case REMOVE_RAFT_VOTER: + return RemoveRaftVoterResponseDataJsonConverter.write(((RemoveRaftVoterResponse) response).data(), version); case RENEW_DELEGATION_TOKEN: return RenewDelegationTokenResponseDataJsonConverter.write(((RenewDelegationTokenResponse) response).data(), version); case SASL_AUTHENTICATE: @@ -725,6 +749,10 @@ public static JsonNode response(AbstractResponse response, short version) { return ShareGroupDescribeResponseDataJsonConverter.write(((ShareGroupDescribeResponse) response).data(), version); case SHARE_GROUP_HEARTBEAT: return ShareGroupHeartbeatResponseDataJsonConverter.write(((ShareGroupHeartbeatResponse) response).data(), version); + case STREAMS_GROUP_DESCRIBE: + return StreamsGroupDescribeResponseDataJsonConverter.write(((StreamsGroupDescribeResponse) response).data(), version); + case STREAMS_GROUP_HEARTBEAT: + return StreamsGroupHeartbeatResponseDataJsonConverter.write(((StreamsGroupHeartbeatResponse) response).data(), version); case STOP_REPLICA: return StopReplicaResponseDataJsonConverter.write(((StopReplicaResponse) response).data(), version); case SYNC_GROUP: @@ -737,18 +765,14 @@ public 
static JsonNode response(AbstractResponse response, short version) { return UpdateFeaturesResponseDataJsonConverter.write(((UpdateFeaturesResponse) response).data(), version); case UPDATE_METADATA: return UpdateMetadataResponseDataJsonConverter.write(((UpdateMetadataResponse) response).data(), version); + case UPDATE_RAFT_VOTER: + return UpdateRaftVoterResponseDataJsonConverter.write(((UpdateRaftVoterResponse) response).data(), version); case VOTE: return VoteResponseDataJsonConverter.write(((VoteResponse) response).data(), version); case WRITE_SHARE_GROUP_STATE: return WriteShareGroupStateResponseDataJsonConverter.write(((WriteShareGroupStateResponse) response).data(), version); case WRITE_TXN_MARKERS: return WriteTxnMarkersResponseDataJsonConverter.write(((WriteTxnMarkersResponse) response).data(), version); - case ADD_RAFT_VOTER: - return AddRaftVoterResponseDataJsonConverter.write(((AddRaftVoterResponse) response).data(), version); - case REMOVE_RAFT_VOTER: - return RemoveRaftVoterResponseDataJsonConverter.write(((RemoveRaftVoterResponse) response).data(), version); - case UPDATE_RAFT_VOTER: - return UpdateRaftVoterResponseDataJsonConverter.write(((UpdateRaftVoterResponse) response).data(), version); default: throw new IllegalStateException("ApiKey " + response.apiKey() + " is not currently handled in `response`, the " + "code should be updated to do so."); @@ -760,7 +784,7 @@ public static JsonNode requestHeaderNode(RequestHeader header) { header.data(), header.headerVersion(), false ); node.set("requestApiKeyName", new TextNode(header.apiKey().toString())); - if (header.apiKey().isVersionDeprecated(header.apiVersion())) { + if (header.isApiVersionDeprecated()) { node.set("requestApiVersionDeprecated", BooleanNode.TRUE); } return node; diff --git a/server/src/main/java/org/apache/kafka/network/SocketServerConfigs.java b/server/src/main/java/org/apache/kafka/network/SocketServerConfigs.java index d623090ad8309..e7cb901bf19f7 100644 --- a/server/src/main/java/org/apache/kafka/network/SocketServerConfigs.java +++ b/server/src/main/java/org/apache/kafka/network/SocketServerConfigs.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.server.config.ReplicationConfigs; import org.apache.kafka.server.util.Csv; import java.util.ArrayList; @@ -65,21 +64,21 @@ public class SocketServerConfigs { public static final String LISTENERS_CONFIG = "listeners"; public static final String LISTENERS_DEFAULT = "PLAINTEXT://:9092"; - public static final String LISTENERS_DOC = "Listener List - Comma-separated list of URIs we will listen on and the listener names." + - String.format(" If the listener name is not a security protocol, %s must also be set.%n", LISTENER_SECURITY_PROTOCOL_MAP_CONFIG) + - " Listener names and port numbers must be unique unless %n" + - " one listener is an IPv4 address and the other listener is %n" + - " an IPv6 address (for the same port).%n" + - " Specify hostname as 0.0.0.0 to bind to all interfaces.%n" + - " Leave hostname empty to bind to default interface.%n" + - " Examples of legal listener lists:%n" + - " PLAINTEXT://myhost:9092,SSL://:9091%n" + - " CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093%n" + - " PLAINTEXT://127.0.0.1:9092,SSL://[::1]:9092%n"; + public static final String LISTENERS_DOC = String.format("Listener List - Comma-separated list of URIs we will listen on and the listener names." 
+ + " If the listener name is not a security protocol, %s must also be set.%n" + + " Listener names and port numbers must be unique unless one listener is an IPv4 address and the other listener is an IPv6 address (for the same port).%n" + + " Specify hostname as 0.0.0.0 to bind to all interfaces.%n" + + " Leave hostname empty to bind to default interface.%n" + + " Examples of legal listener lists:%n" + + " PLAINTEXT://myhost:9092,SSL://:9091%n" + + " CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093%n" + + " PLAINTEXT://127.0.0.1:9092,SSL://[::1]:9092%n", LISTENER_SECURITY_PROTOCOL_MAP_CONFIG); public static final String ADVERTISED_LISTENERS_CONFIG = "advertised.listeners"; - public static final String ADVERTISED_LISTENERS_DOC = String.format( - "Listeners to publish to ZooKeeper for clients to use, if different than the %s config property." + + public static final String ADVERTISED_LISTENERS_DOC = String.format("Specifies the listener addresses that the Kafka brokers will advertise to clients and other brokers." + + " The config is useful where the actual listener configuration %s does not represent the addresses that clients should" + + " use to connect, such as in cloud environments. In environments using ZooKeeper, these addresses are published to ZooKeeper." + + " In Kraft mode, the address would be published to and managed by kraft controller, the brokers would pull these data from controller as needed." + " In IaaS environments, this may need to be different from the interface to which the broker binds." + " If this is not set, the value for %1$1s will be used." + " Unlike %1$1s, it is not valid to advertise the 0.0.0.0 meta-address.%n" + @@ -87,28 +86,6 @@ public class SocketServerConfigs { " so that one listener can be configured to advertise another listener's address." + " This can be useful in some cases where external load balancers are used.", LISTENERS_CONFIG); - - public static final String CONTROL_PLANE_LISTENER_NAME_CONFIG = "control.plane.listener.name"; - public static final String CONTROL_PLANE_LISTENER_NAME_DOC = String.format( - "Name of listener used for communication between controller and brokers. " + - "A broker will use the %s to locate the endpoint in %s list, to listen for connections from the controller. 
" + - "For example, if a broker's config is:%n" + - "listeners=INTERNAL://192.1.1.8:9092,EXTERNAL://10.1.1.5:9093,CONTROLLER://192.1.1.8:9094%n" + - "listener.security.protocol.map=INTERNAL:PLAINTEXT,EXTERNAL:SSL,CONTROLLER:SSL%n" + - "control.plane.listener.name = CONTROLLER%n" + - "On startup, the broker will start listening on \"192.1.1.8:9094\" with security protocol \"SSL\".%n" + - "On the controller side, when it discovers a broker's published endpoints through ZooKeeper, it will use the %1$1s " + - "to find the endpoint, which it will use to establish connection to the broker.%n" + - "For example, if the broker's published endpoints on ZooKeeper are:%n" + - " \"endpoints\":[\"INTERNAL://broker1.example.com:9092\",\"EXTERNAL://broker1.example.com:9093\",\"CONTROLLER://broker1.example.com:9094\"]%n" + - " and the controller's config is:%n" + - "listener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL%n" + - "control.plane.listener.name = CONTROLLER%n" + - "then the controller will use \"broker1.example.com:9094\" with security protocol \"SSL\" to connect to the broker.%n" + - "If not explicitly configured, the default value will be null and there will be no dedicated endpoints for controller connections.%n" + - "If explicitly configured, the value cannot be the same as the value of %s.", - CONTROL_PLANE_LISTENER_NAME_CONFIG, LISTENERS_CONFIG, ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG); - public static final String SOCKET_SEND_BUFFER_BYTES_CONFIG = "socket.send.buffer.bytes"; public static final int SOCKET_SEND_BUFFER_BYTES_DEFAULT = 100 * 1024; public static final String SOCKET_SEND_BUFFER_BYTES_DOC = "The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used."; @@ -181,7 +158,6 @@ public class SocketServerConfigs { .define(LISTENERS_CONFIG, STRING, LISTENERS_DEFAULT, HIGH, LISTENERS_DOC) .define(ADVERTISED_LISTENERS_CONFIG, STRING, null, HIGH, ADVERTISED_LISTENERS_DOC) .define(LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, STRING, LISTENER_SECURITY_PROTOCOL_MAP_DEFAULT, LOW, LISTENER_SECURITY_PROTOCOL_MAP_DOC) - .define(CONTROL_PLANE_LISTENER_NAME_CONFIG, STRING, null, HIGH, CONTROL_PLANE_LISTENER_NAME_DOC) .define(SOCKET_SEND_BUFFER_BYTES_CONFIG, INT, SOCKET_SEND_BUFFER_BYTES_DEFAULT, HIGH, SOCKET_SEND_BUFFER_BYTES_DOC) .define(SOCKET_RECEIVE_BUFFER_BYTES_CONFIG, INT, SOCKET_RECEIVE_BUFFER_BYTES_DEFAULT, HIGH, SOCKET_RECEIVE_BUFFER_BYTES_DOC) .define(SOCKET_REQUEST_MAX_BYTES_CONFIG, INT, SOCKET_REQUEST_MAX_BYTES_DEFAULT, atLeast(1), HIGH, SOCKET_REQUEST_MAX_BYTES_DOC) diff --git a/server/src/main/java/org/apache/kafka/security/CredentialProvider.java b/server/src/main/java/org/apache/kafka/security/CredentialProvider.java index a8c4d378a1b83..43f21a8616a45 100644 --- a/server/src/main/java/org/apache/kafka/security/CredentialProvider.java +++ b/server/src/main/java/org/apache/kafka/security/CredentialProvider.java @@ -16,14 +16,13 @@ */ package org.apache.kafka.security; +import org.apache.kafka.clients.admin.ScramMechanism; import org.apache.kafka.common.security.authenticator.CredentialCache; import org.apache.kafka.common.security.scram.ScramCredential; import org.apache.kafka.common.security.scram.internals.ScramCredentialUtils; -import org.apache.kafka.common.security.scram.internals.ScramMechanism; import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache; import java.util.Collection; -import java.util.Properties; public class CredentialProvider { public final 
DelegationTokenCache tokenCache; @@ -34,22 +33,8 @@ public CredentialProvider(Collection scramMechanisms, DelegationTokenCac ScramCredentialUtils.createCache(credentialCache, scramMechanisms); } - public void updateCredentials(String username, Properties config) { - for (ScramMechanism mechanism : ScramMechanism.values()) { - CredentialCache.Cache cache = credentialCache.cache(mechanism.mechanismName(), ScramCredential.class); - if (cache != null) { - String c = config.getProperty(mechanism.mechanismName()); - if (c == null) { - cache.remove(username); - } else { - cache.put(username, ScramCredentialUtils.credentialFromString(c)); - } - } - } - } - public void updateCredential( - org.apache.kafka.clients.admin.ScramMechanism mechanism, + ScramMechanism mechanism, String name, ScramCredential credential ) { @@ -58,7 +43,7 @@ public void updateCredential( } public void removeCredentials( - org.apache.kafka.clients.admin.ScramMechanism mechanism, + ScramMechanism mechanism, String name ) { CredentialCache.Cache cache = credentialCache.cache(mechanism.mechanismName(), ScramCredential.class); diff --git a/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java b/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java index ea27ee6ea417b..012be7f55a95d 100644 --- a/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java +++ b/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java @@ -115,7 +115,7 @@ public static Set fromBytes(byte[] bytes) throws IOException { return Collections.emptySet(); Optional jsonValue = Json.parseBytes(bytes); - if (!jsonValue.isPresent()) + if (jsonValue.isEmpty()) return Collections.emptySet(); JsonObject js = jsonValue.get().asJsonObject(); diff --git a/server/src/main/java/org/apache/kafka/server/Assignment.java b/server/src/main/java/org/apache/kafka/server/Assignment.java index 582c638cb7e42..393a0dae1dc14 100644 --- a/server/src/main/java/org/apache/kafka/server/Assignment.java +++ b/server/src/main/java/org/apache/kafka/server/Assignment.java @@ -98,8 +98,7 @@ boolean valid(int nodeId, MetadataImage image) { @Override public boolean equals(Object o) { - if (o == null || (!(o instanceof Assignment))) return false; - Assignment other = (Assignment) o; + if (o == null || (!(o instanceof Assignment other))) return false; return topicIdPartition.equals(other.topicIdPartition) && directoryId.equals(other.directoryId) && submissionTimeNs == other.submissionTimeNs && @@ -125,4 +124,4 @@ public String toString() { bld.append(")"); return bld.toString(); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/apache/kafka/server/AssignmentsManager.java b/server/src/main/java/org/apache/kafka/server/AssignmentsManager.java index 5920fd5563b29..3605a175e08aa 100644 --- a/server/src/main/java/org/apache/kafka/server/AssignmentsManager.java +++ b/server/src/main/java/org/apache/kafka/server/AssignmentsManager.java @@ -429,7 +429,7 @@ int numInFlight() { } static Optional globalResponseError(Optional response) { - if (!response.isPresent()) { + if (response.isEmpty()) { return Optional.of("Timeout"); } if (response.get().authenticationException() != null) { diff --git a/server/src/main/java/org/apache/kafka/server/BrokerFeatures.java b/server/src/main/java/org/apache/kafka/server/BrokerFeatures.java index 95777effe5e81..197ebea427a58 100644 --- a/server/src/main/java/org/apache/kafka/server/BrokerFeatures.java +++ b/server/src/main/java/org/apache/kafka/server/BrokerFeatures.java @@ -29,7 +29,7 
@@ import java.util.Map; import java.util.stream.Collectors; -import static org.apache.kafka.server.common.Features.PRODUCTION_FEATURES; +import static org.apache.kafka.server.common.Feature.PRODUCTION_FEATURES; /** * A class that encapsulates the latest features supported by the Broker and also provides APIs to diff --git a/server/src/main/java/org/apache/kafka/server/ClientMetricsManager.java b/server/src/main/java/org/apache/kafka/server/ClientMetricsManager.java index b7ca1bbd88aa7..fc799d4393830 100644 --- a/server/src/main/java/org/apache/kafka/server/ClientMetricsManager.java +++ b/server/src/main/java/org/apache/kafka/server/ClientMetricsManager.java @@ -62,7 +62,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -85,8 +84,8 @@ public class ClientMetricsManager implements AutoCloseable { public static final String CLIENT_METRICS_REAPER_THREAD_NAME = "client-metrics-reaper"; private static final Logger log = LoggerFactory.getLogger(ClientMetricsManager.class); - private static final List SUPPORTED_COMPRESSION_TYPES = Collections.unmodifiableList( - Arrays.asList(CompressionType.ZSTD.id, CompressionType.LZ4.id, CompressionType.GZIP.id, CompressionType.SNAPPY.id)); + private static final List SUPPORTED_COMPRESSION_TYPES = List.of(CompressionType.ZSTD.id, CompressionType.LZ4.id, + CompressionType.GZIP.id, CompressionType.SNAPPY.id); // Max cache size (16k active client connections per broker) private static final int CACHE_MAX_SIZE = 16384; private static final int DEFAULT_CACHE_EXPIRY_MS = 60 * 1000; diff --git a/server/src/main/java/org/apache/kafka/server/config/AbstractKafkaConfig.java b/server/src/main/java/org/apache/kafka/server/config/AbstractKafkaConfig.java index 537b0869d523b..da9b1cf65fcb7 100644 --- a/server/src/main/java/org/apache/kafka/server/config/AbstractKafkaConfig.java +++ b/server/src/main/java/org/apache/kafka/server/config/AbstractKafkaConfig.java @@ -22,17 +22,17 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig; +import org.apache.kafka.coordinator.share.ShareCoordinatorConfig; import org.apache.kafka.coordinator.transaction.TransactionLogConfig; import org.apache.kafka.coordinator.transaction.TransactionStateManagerConfig; import org.apache.kafka.network.SocketServerConfigs; import org.apache.kafka.raft.QuorumConfig; -import org.apache.kafka.security.PasswordEncoderConfigs; import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig; import org.apache.kafka.server.metrics.MetricConfigs; import org.apache.kafka.storage.internals.log.CleanerConfig; import org.apache.kafka.storage.internals.log.LogConfig; -import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -42,32 +42,31 @@ * For more details check KAFKA-15853 */ public abstract class AbstractKafkaConfig extends AbstractConfig { - @SuppressWarnings("deprecation") - public static final ConfigDef CONFIG_DEF = Utils.mergeConfigs(Arrays.asList( - RemoteLogManagerConfig.configDef(), - ZkConfigs.CONFIG_DEF, - ServerConfigs.CONFIG_DEF, - KRaftConfigs.CONFIG_DEF, - SocketServerConfigs.CONFIG_DEF, - ReplicationConfigs.CONFIG_DEF, - GroupCoordinatorConfig.GROUP_COORDINATOR_CONFIG_DEF, - GroupCoordinatorConfig.NEW_GROUP_CONFIG_DEF, - GroupCoordinatorConfig.OFFSET_MANAGEMENT_CONFIG_DEF, - 
GroupCoordinatorConfig.CONSUMER_GROUP_CONFIG_DEF, - GroupCoordinatorConfig.SHARE_GROUP_CONFIG_DEF, - CleanerConfig.CONFIG_DEF, - LogConfig.SERVER_CONFIG_DEF, - ShareGroupConfig.CONFIG_DEF, - ShareCoordinatorConfig.CONFIG_DEF, - TransactionLogConfig.CONFIG_DEF, - TransactionStateManagerConfig.CONFIG_DEF, - QuorumConfig.CONFIG_DEF, - MetricConfigs.CONFIG_DEF, - QuotaConfig.CONFIG_DEF, - BrokerSecurityConfigs.CONFIG_DEF, - DelegationTokenManagerConfigs.CONFIG_DEF, - PasswordEncoderConfigs.CONFIG_DEF - )); + public static final ConfigDef CONFIG_DEF = Utils.mergeConfigs(List.of( + RemoteLogManagerConfig.configDef(), + ZkConfigs.CONFIG_DEF, + ServerConfigs.CONFIG_DEF, + KRaftConfigs.CONFIG_DEF, + SocketServerConfigs.CONFIG_DEF, + ReplicationConfigs.CONFIG_DEF, + GroupCoordinatorConfig.GROUP_COORDINATOR_CONFIG_DEF, + GroupCoordinatorConfig.NEW_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.OFFSET_MANAGEMENT_CONFIG_DEF, + GroupCoordinatorConfig.CONSUMER_GROUP_CONFIG_DEF, + GroupCoordinatorConfig.SHARE_GROUP_CONFIG_DEF, + CleanerConfig.CONFIG_DEF, + LogConfig.SERVER_CONFIG_DEF, + ShareGroupConfig.CONFIG_DEF, + ShareCoordinatorConfig.CONFIG_DEF, + TransactionLogConfig.CONFIG_DEF, + TransactionStateManagerConfig.CONFIG_DEF, + QuorumConfig.CONFIG_DEF, + MetricConfigs.CONFIG_DEF, + QuotaConfig.CONFIG_DEF, + BrokerSecurityConfigs.CONFIG_DEF, + DelegationTokenManagerConfigs.CONFIG_DEF + )); + public AbstractKafkaConfig(ConfigDef definition, Map originals, Map configProviderProps, boolean doLog) { super(definition, originals, configProviderProps, doLog); } diff --git a/server/src/main/java/org/apache/kafka/server/config/KRaftConfigs.java b/server/src/main/java/org/apache/kafka/server/config/KRaftConfigs.java index a29c71ad4be85..3f07c0b754723 100644 --- a/server/src/main/java/org/apache/kafka/server/config/KRaftConfigs.java +++ b/server/src/main/java/org/apache/kafka/server/config/KRaftConfigs.java @@ -21,14 +21,12 @@ import org.apache.kafka.common.record.Records; import org.apache.kafka.storage.internals.log.LogConfig; -import java.util.Collections; import java.util.concurrent.TimeUnit; import static org.apache.kafka.common.config.ConfigDef.Importance.HIGH; import static org.apache.kafka.common.config.ConfigDef.Importance.LOW; import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; -import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN; import static org.apache.kafka.common.config.ConfigDef.Type.INT; import static org.apache.kafka.common.config.ConfigDef.Type.LIST; import static org.apache.kafka.common.config.ConfigDef.Type.LONG; @@ -115,22 +113,19 @@ public class KRaftConfigs { public static final String SERVER_MAX_STARTUP_TIME_MS_DOC = "The maximum number of milliseconds we will wait for the server to come up. " + "By default there is no limit. 
This should be used for testing only."; - /** ZK to KRaft Migration configs */ - public static final String MIGRATION_ENABLED_CONFIG = "zookeeper.metadata.migration.enable"; - public static final String MIGRATION_ENABLED_DOC = "Enable ZK to KRaft migration"; + public static final String CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS = "controller.performance.sample.period.ms"; + public static final long CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS_DEFAULT = 60000; + public static final String CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS_DOC = "The number of milliseconds between periodic controller event performance log messages."; - public static final String MIGRATION_METADATA_MIN_BATCH_SIZE_CONFIG = "zookeeper.metadata.migration.min.batch.size"; - public static final int MIGRATION_METADATA_MIN_BATCH_SIZE_DEFAULT = 200; - public static final String MIGRATION_METADATA_MIN_BATCH_SIZE_DOC = "Soft minimum batch size to use when migrating metadata from ZooKeeper to KRaft"; + public static final String CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS = "controller.performance.always.log.threshold.ms"; + public static final long CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS_DEFAULT = 2000; + public static final String CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS_DOC = "We will log an error message about controller events that take longer than this threshold."; - /** Enable eligible leader replicas configs */ - public static final String ELR_ENABLED_CONFIG = "eligible.leader.replicas.enable"; - public static final String ELR_ENABLED_DOC = "Enable the Eligible leader replicas"; public static final ConfigDef CONFIG_DEF = new ConfigDef() .define(METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG, LONG, METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES, atLeast(1), HIGH, METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_DOC) .define(METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, LONG, METADATA_SNAPSHOT_MAX_INTERVAL_MS_DEFAULT, atLeast(0), HIGH, METADATA_SNAPSHOT_MAX_INTERVAL_MS_DOC) - .define(PROCESS_ROLES_CONFIG, LIST, Collections.emptyList(), ConfigDef.ValidList.in("broker", "controller"), HIGH, PROCESS_ROLES_DOC) - .define(NODE_ID_CONFIG, INT, EMPTY_NODE_ID, null, HIGH, NODE_ID_DOC) + .define(PROCESS_ROLES_CONFIG, LIST, ConfigDef.NO_DEFAULT_VALUE, ConfigDef.ValidList.in("broker", "controller"), HIGH, PROCESS_ROLES_DOC) + .define(NODE_ID_CONFIG, INT, ConfigDef.NO_DEFAULT_VALUE, atLeast(0), HIGH, NODE_ID_DOC) .define(INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG, INT, INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_DEFAULT, null, MEDIUM, INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_DOC) .define(BROKER_HEARTBEAT_INTERVAL_MS_CONFIG, INT, BROKER_HEARTBEAT_INTERVAL_MS_DEFAULT, null, MEDIUM, BROKER_HEARTBEAT_INTERVAL_MS_DOC) .define(BROKER_SESSION_TIMEOUT_MS_CONFIG, INT, BROKER_SESSION_TIMEOUT_MS_DEFAULT, null, MEDIUM, BROKER_SESSION_TIMEOUT_MS_DOC) @@ -143,9 +138,7 @@ public class KRaftConfigs { .define(METADATA_MAX_RETENTION_BYTES_CONFIG, LONG, METADATA_MAX_RETENTION_BYTES_DEFAULT, null, HIGH, METADATA_MAX_RETENTION_BYTES_DOC) .define(METADATA_MAX_RETENTION_MILLIS_CONFIG, LONG, LogConfig.DEFAULT_RETENTION_MS, null, HIGH, METADATA_MAX_RETENTION_MILLIS_DOC) .define(METADATA_MAX_IDLE_INTERVAL_MS_CONFIG, INT, METADATA_MAX_IDLE_INTERVAL_MS_DEFAULT, atLeast(0), LOW, METADATA_MAX_IDLE_INTERVAL_MS_DOC) - .defineInternal(SERVER_MAX_STARTUP_TIME_MS_CONFIG, LONG, SERVER_MAX_STARTUP_TIME_MS_DEFAULT, atLeast(0), MEDIUM, SERVER_MAX_STARTUP_TIME_MS_DOC) - .define(MIGRATION_ENABLED_CONFIG, BOOLEAN, false, HIGH, MIGRATION_ENABLED_DOC) - .define(ELR_ENABLED_CONFIG, BOOLEAN, false, 
HIGH, ELR_ENABLED_DOC) - .defineInternal(MIGRATION_METADATA_MIN_BATCH_SIZE_CONFIG, INT, MIGRATION_METADATA_MIN_BATCH_SIZE_DEFAULT, atLeast(1), - MEDIUM, MIGRATION_METADATA_MIN_BATCH_SIZE_DOC); + .defineInternal(CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS, LONG, CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS_DEFAULT, atLeast(100), MEDIUM, CONTROLLER_PERFORMANCE_SAMPLE_PERIOD_MS_DOC) + .defineInternal(CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS, LONG, CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS_DEFAULT, atLeast(0), MEDIUM, CONTROLLER_PERFORMANCE_ALWAYS_LOG_THRESHOLD_MS_DOC) + .defineInternal(SERVER_MAX_STARTUP_TIME_MS_CONFIG, LONG, SERVER_MAX_STARTUP_TIME_MS_DEFAULT, atLeast(0), MEDIUM, SERVER_MAX_STARTUP_TIME_MS_DOC); } diff --git a/server/src/main/java/org/apache/kafka/server/config/ReplicationConfigs.java b/server/src/main/java/org/apache/kafka/server/config/ReplicationConfigs.java index 82fdcba8f540c..8f9d3372be03f 100644 --- a/server/src/main/java/org/apache/kafka/server/config/ReplicationConfigs.java +++ b/server/src/main/java/org/apache/kafka/server/config/ReplicationConfigs.java @@ -47,8 +47,8 @@ public class ReplicationConfigs { public static final String REPLICA_LAG_TIME_MAX_MS_CONFIG = "replica.lag.time.max.ms"; public static final long REPLICA_LAG_TIME_MAX_MS_DEFAULT = 30000L; - public static final String REPLICA_LAG_TIME_MAX_MS_DOC = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time," + - " the leader will remove the follower from isr"; + public static final String REPLICA_LAG_TIME_MAX_MS_DOC = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time," + + " the leader will remove the follower from ISR"; public static final String REPLICA_SOCKET_TIMEOUT_MS_CONFIG = "replica.socket.timeout.ms"; public static final int REPLICA_SOCKET_TIMEOUT_MS_DEFAULT = 30 * 1000; @@ -131,15 +131,14 @@ public class ReplicationConfigs { public static final String INTER_BROKER_PROTOCOL_VERSION_DEFAULT = MetadataVersion.latestProduction().version(); public static final String INTER_BROKER_PROTOCOL_VERSION_DOC = "Specify which version of the inter-broker protocol will be used.\n" + "This is typically bumped after all brokers were upgraded to a new version.\n" + - " Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check MetadataVersion for the full list.\n" + + "Check MetadataVersion for the full list.\n" + "This configuration is only applicable in Zookeeper mode."; public static final String INTER_BROKER_SECURITY_PROTOCOL_CONFIG = "security.inter.broker.protocol"; public static final String INTER_BROKER_SECURITY_PROTOCOL_DEFAULT = SecurityProtocol.PLAINTEXT.toString(); public static final String INTER_BROKER_LISTENER_NAME_CONFIG = "inter.broker.listener.name"; - public static final String INTER_BROKER_SECURITY_PROTOCOL_DOC = "Security protocol used to communicate between brokers. Valid values are: " + - String.join(", ", SecurityProtocol.names()) + ". It is an error to set this and " + INTER_BROKER_LISTENER_NAME_CONFIG + - " properties at the same time."; + public static final String INTER_BROKER_SECURITY_PROTOCOL_DOC = "Security protocol used to communicate between brokers. " + + "It is an error to set this and " + INTER_BROKER_LISTENER_NAME_CONFIG + " properties at the same time."; public static final String INTER_BROKER_LISTENER_NAME_DOC = "Name of listener used for communication between brokers. 
If this is unset, the listener name is defined by " + INTER_BROKER_SECURITY_PROTOCOL_CONFIG + ". It is an error to set this and " + INTER_BROKER_SECURITY_PROTOCOL_CONFIG + " properties at the same time."; diff --git a/server/src/main/java/org/apache/kafka/server/config/ServerConfigs.java b/server/src/main/java/org/apache/kafka/server/config/ServerConfigs.java index 883eaf19febf8..cd5da5ae9fd00 100644 --- a/server/src/main/java/org/apache/kafka/server/config/ServerConfigs.java +++ b/server/src/main/java/org/apache/kafka/server/config/ServerConfigs.java @@ -98,14 +98,6 @@ public class ServerConfigs { public static final String BROKER_RACK_DOC = "Rack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: RACK1, us-east-1d"; /** ********* Controlled shutdown configuration ***********/ - public static final String CONTROLLED_SHUTDOWN_MAX_RETRIES_CONFIG = "controlled.shutdown.max.retries"; - public static final int CONTROLLED_SHUTDOWN_MAX_RETRIES_DEFAULT = 3; - public static final String CONTROLLED_SHUTDOWN_MAX_RETRIES_DOC = "Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens"; - - public static final String CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_CONFIG = "controlled.shutdown.retry.backoff.ms"; - public static final int CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_DEFAULT = 5000; - public static final String CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_DOC = "Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying."; - public static final String CONTROLLED_SHUTDOWN_ENABLE_CONFIG = "controlled.shutdown.enable"; public static final boolean CONTROLLED_SHUTDOWN_ENABLE_DEFAULT = true; public static final String CONTROLLED_SHUTDOWN_ENABLE_DOC = "Enable controlled shutdown of the server."; @@ -155,8 +147,6 @@ public class ServerConfigs { /************ Rack Configuration ******************/ .define(BROKER_RACK_CONFIG, STRING, null, MEDIUM, BROKER_RACK_DOC) /** ********* Controlled shutdown configuration ***********/ - .define(CONTROLLED_SHUTDOWN_MAX_RETRIES_CONFIG, INT, CONTROLLED_SHUTDOWN_MAX_RETRIES_DEFAULT, MEDIUM, CONTROLLED_SHUTDOWN_MAX_RETRIES_DOC) - .define(CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_CONFIG, LONG, CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_DEFAULT, MEDIUM, CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS_DOC) .define(CONTROLLED_SHUTDOWN_ENABLE_CONFIG, BOOLEAN, CONTROLLED_SHUTDOWN_ENABLE_DEFAULT, MEDIUM, CONTROLLED_SHUTDOWN_ENABLE_DOC) .define(DELETE_TOPIC_ENABLE_CONFIG, BOOLEAN, DELETE_TOPIC_ENABLE_DEFAULT, HIGH, DELETE_TOPIC_ENABLE_DOC) .define(COMPRESSION_TYPE_CONFIG, STRING, LogConfig.DEFAULT_COMPRESSION_TYPE, ConfigDef.ValidString.in(BrokerCompressionType.names().toArray(new String[0])), HIGH, COMPRESSION_TYPE_DOC) diff --git a/server/src/main/java/org/apache/kafka/server/config/ZkConfigs.java b/server/src/main/java/org/apache/kafka/server/config/ZkConfigs.java index 711ffe8c94aca..0fd251edd160e 100644 --- a/server/src/main/java/org/apache/kafka/server/config/ZkConfigs.java +++ b/server/src/main/java/org/apache/kafka/server/config/ZkConfigs.java @@ -19,8 +19,6 @@ import org.apache.kafka.common.config.ConfigDef; -import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; @@ -98,24 +96,23 @@ public final class ZkConfigs { private static final String ZOOKEEPER_CLIENT_CNXN_SOCKET = 
"zookeeper.clientCnxnSocket"; static { - Map zkSslConfigToSystemPropertyMap = new HashMap<>(); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_CLIENT_ENABLE_CONFIG, SECURE_CLIENT); - zkSslConfigToSystemPropertyMap.put(ZK_CLIENT_CNXN_SOCKET_CONFIG, ZOOKEEPER_CLIENT_CNXN_SOCKET); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_KEY_STORE_LOCATION_CONFIG, "zookeeper.ssl.keyStore.location"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_KEY_STORE_PASSWORD_CONFIG, "zookeeper.ssl.keyStore.password"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_KEY_STORE_TYPE_CONFIG, "zookeeper.ssl.keyStore.type"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_TRUST_STORE_LOCATION_CONFIG, "zookeeper.ssl.trustStore.location"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_TRUST_STORE_PASSWORD_CONFIG, "zookeeper.ssl.trustStore.password"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_TRUST_STORE_TYPE_CONFIG, "zookeeper.ssl.trustStore.type"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_PROTOCOL_CONFIG, "zookeeper.ssl.protocol"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_ENABLED_PROTOCOLS_CONFIG, "zookeeper.ssl.enabledProtocols"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_CIPHER_SUITES_CONFIG, "zookeeper.ssl.ciphersuites"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "zookeeper.ssl.hostnameVerification"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_CRL_ENABLE_CONFIG, "zookeeper.ssl.crl"); - zkSslConfigToSystemPropertyMap.put(ZK_SSL_OCSP_ENABLE_CONFIG, "zookeeper.ssl.ocsp"); - - ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP = Collections.unmodifiableMap(zkSslConfigToSystemPropertyMap); + ZK_SSL_CONFIG_TO_SYSTEM_PROPERTY_MAP = Map.ofEntries( + Map.entry(ZK_SSL_CLIENT_ENABLE_CONFIG, SECURE_CLIENT), + Map.entry(ZK_CLIENT_CNXN_SOCKET_CONFIG, ZOOKEEPER_CLIENT_CNXN_SOCKET), + Map.entry(ZK_SSL_KEY_STORE_LOCATION_CONFIG, "zookeeper.ssl.keyStore.location"), + Map.entry(ZK_SSL_KEY_STORE_PASSWORD_CONFIG, "zookeeper.ssl.keyStore.password"), + Map.entry(ZK_SSL_KEY_STORE_TYPE_CONFIG, "zookeeper.ssl.keyStore.type"), + Map.entry(ZK_SSL_TRUST_STORE_LOCATION_CONFIG, "zookeeper.ssl.trustStore.location"), + Map.entry(ZK_SSL_TRUST_STORE_PASSWORD_CONFIG, "zookeeper.ssl.trustStore.password"), + Map.entry(ZK_SSL_TRUST_STORE_TYPE_CONFIG, "zookeeper.ssl.trustStore.type"), + Map.entry(ZK_SSL_PROTOCOL_CONFIG, "zookeeper.ssl.protocol"), + Map.entry(ZK_SSL_ENABLED_PROTOCOLS_CONFIG, "zookeeper.ssl.enabledProtocols"), + Map.entry(ZK_SSL_CIPHER_SUITES_CONFIG, "zookeeper.ssl.ciphersuites"), + Map.entry(ZK_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "zookeeper.ssl.hostnameVerification"), + Map.entry(ZK_SSL_CRL_ENABLE_CONFIG, "zookeeper.ssl.crl"), + Map.entry(ZK_SSL_OCSP_ENABLE_CONFIG, "zookeeper.ssl.ocsp") + ); ZK_SSL_CLIENT_ENABLE_DOC = "Set client to use TLS when connecting to ZooKeeper." + " An explicit value overrides any value set via the zookeeper.client.secure system property (note the different name)." 
+ diff --git a/share/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java b/server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java rename to server/src/main/java/org/apache/kafka/server/share/CachedSharePartition.java diff --git a/share/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java b/server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java rename to server/src/main/java/org/apache/kafka/server/share/ErroneousAndValidPartitionData.java diff --git a/share/src/main/java/org/apache/kafka/server/share/acknowledge/ShareAcknowledgementBatch.java b/server/src/main/java/org/apache/kafka/server/share/acknowledge/ShareAcknowledgementBatch.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/acknowledge/ShareAcknowledgementBatch.java rename to server/src/main/java/org/apache/kafka/server/share/acknowledge/ShareAcknowledgementBatch.java diff --git a/share/src/main/java/org/apache/kafka/server/share/context/FinalContext.java b/server/src/main/java/org/apache/kafka/server/share/context/FinalContext.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/context/FinalContext.java rename to server/src/main/java/org/apache/kafka/server/share/context/FinalContext.java diff --git a/share/src/main/java/org/apache/kafka/server/share/context/ShareFetchContext.java b/server/src/main/java/org/apache/kafka/server/share/context/ShareFetchContext.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/context/ShareFetchContext.java rename to server/src/main/java/org/apache/kafka/server/share/context/ShareFetchContext.java diff --git a/share/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java b/server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java rename to server/src/main/java/org/apache/kafka/server/share/context/ShareSessionContext.java diff --git a/share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchGroupKey.java b/server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchGroupKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchGroupKey.java rename to server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchGroupKey.java diff --git a/share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKey.java b/server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKey.java rename to server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKey.java diff --git a/share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchPartitionKey.java b/server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchPartitionKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchPartitionKey.java rename to 
server/src/main/java/org/apache/kafka/server/share/fetch/DelayedShareFetchPartitionKey.java diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategy.java b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategy.java new file mode 100644 index 0000000000000..e0600e02842fc --- /dev/null +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategy.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.share.fetch; + +import org.apache.kafka.common.TopicIdPartition; + +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Set; + +/** + * This interface helps identify the max bytes for topic partitions in a share fetch request based on different strategy types. + */ +public interface PartitionMaxBytesStrategy { + + enum StrategyType { + UNIFORM; + + @Override + public String toString() { + return super.toString().toLowerCase(Locale.ROOT); + } + } + + /** + * Returns the partition max bytes for a given partition based on the strategy type. + * + * @param requestMaxBytes - The total max bytes available for the share fetch request + * @param partitions - The topic partitions in the order for which we compute the partition max bytes. + * @param acquiredPartitionsSize - The total partitions that have been acquired. + * @return the partition max bytes for the topic partitions + */ + LinkedHashMap maxBytes(int requestMaxBytes, Set partitions, int acquiredPartitionsSize); + + static PartitionMaxBytesStrategy type(StrategyType type) { + if (type == null) + throw new IllegalArgumentException("Strategy type cannot be null"); + return switch (type) { + case UNIFORM -> PartitionMaxBytesStrategy::uniformPartitionMaxBytes; + }; + } + + + private static LinkedHashMap uniformPartitionMaxBytes(int requestMaxBytes, Set partitions, int acquiredPartitionsSize) { + checkValidArguments(requestMaxBytes, partitions, acquiredPartitionsSize); + LinkedHashMap partitionMaxBytes = new LinkedHashMap<>(); + partitions.forEach(partition -> partitionMaxBytes.put(partition, requestMaxBytes / acquiredPartitionsSize)); + return partitionMaxBytes; + } + + // Visible for testing. 
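+ // Validates the preconditions shared by max-bytes strategies: a non-empty partition set, positive request max bytes and a positive acquired-partition count.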
+ static void checkValidArguments(int requestMaxBytes, Set partitions, int acquiredPartitionsSize) { + if (partitions == null || partitions.isEmpty()) { + throw new IllegalArgumentException("Partitions to generate max bytes is null or empty"); + } + if (requestMaxBytes <= 0) { + throw new IllegalArgumentException("Request max bytes must be greater than 0"); + } + if (acquiredPartitionsSize <= 0) { + throw new IllegalArgumentException("Acquired partitions size must be greater than 0"); + } + } +} diff --git a/share/src/main/java/org/apache/kafka/server/share/fetch/ShareAcquiredRecords.java b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareAcquiredRecords.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/fetch/ShareAcquiredRecords.java rename to server/src/main/java/org/apache/kafka/server/share/fetch/ShareAcquiredRecords.java diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java new file mode 100644 index 0000000000000..9a050575c0d7d --- /dev/null +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/ShareFetch.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.server.share.fetch; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.server.storage.log.FetchParams; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; + +/** + * The ShareFetch class is used to store the fetch parameters for a share fetch request. + */ +public class ShareFetch { + + /** + * The future that will be completed when the fetch is done. + */ + private final CompletableFuture> future; + + /** + * The fetch parameters for the fetch request. + */ + private final FetchParams fetchParams; + /** + * The group id of the share group that is fetching the records. + */ + private final String groupId; + /** + * The member id of the share group that is fetching the records. + */ + private final String memberId; + /** + * The maximum number of bytes that can be fetched for each partition. + */ + private final Map partitionMaxBytes; + /** + * The batch size of the fetch request. + */ + private final int batchSize; + /** + * The maximum number of records that can be fetched for the request. + */ + private final int maxFetchRecords; + /** + * The partitions that had an error during the fetch. 
+ */ + private Map erroneous; + + public ShareFetch( + FetchParams fetchParams, + String groupId, + String memberId, + CompletableFuture> future, + Map partitionMaxBytes, + int batchSize, + int maxFetchRecords + ) { + this.fetchParams = fetchParams; + this.groupId = groupId; + this.memberId = memberId; + this.future = future; + this.partitionMaxBytes = partitionMaxBytes; + this.batchSize = batchSize; + this.maxFetchRecords = maxFetchRecords; + } + + public String groupId() { + return groupId; + } + + public String memberId() { + return memberId; + } + + public Map partitionMaxBytes() { + return partitionMaxBytes; + } + + public FetchParams fetchParams() { + return fetchParams; + } + + public int batchSize() { + return batchSize; + } + + public int maxFetchRecords() { + return maxFetchRecords; + } + + /** + * Add an erroneous partition to the share fetch request. If the erroneous map is null, it will + * be created. + *
              + * The method is synchronized to avoid concurrent modification of the erroneous map, as for + * some partitions the pending initialization can be on some threads and for other partitions + * share fetch request can be processed in purgatory. + * + * @param topicIdPartition The partition that had an error. + * @param throwable The error that occurred. + */ + public synchronized void addErroneous(TopicIdPartition topicIdPartition, Throwable throwable) { + if (erroneous == null) { + erroneous = new HashMap<>(); + } + erroneous.put(topicIdPartition, throwable); + } + + /** + * Check if the share fetch request is completed. + * @return true if the request is completed, false otherwise. + */ + public boolean isCompleted() { + return future.isDone(); + } + + /** + * Check if all the partitions in the request have errored. + * @return true if all the partitions in the request have errored, false otherwise. + */ + public synchronized boolean errorInAllPartitions() { + return erroneous != null && erroneous.size() == partitionMaxBytes().size(); + } + + /** + * May be complete the share fetch request with the given partition data. If the request is already completed, + * this method does nothing. If there are any erroneous partitions, they will be added to the response. + * + * @param partitionData The partition data to complete the fetch with. + */ + public void maybeComplete(Map partitionData) { + if (isCompleted()) { + return; + } + + Map response = new HashMap<>(partitionData); + // Add any erroneous partitions to the response. + addErroneousToResponse(response); + future.complete(response); + } + + /** + * Maybe complete the share fetch request with the given exception for the topicIdPartitions. + * If the request is already completed, this method does nothing. If there are any erroneous partitions, + * they will be added to the response. + * + * @param topicIdPartitions The topic id partitions which errored out. + * @param throwable The exception to complete the fetch with. + */ + public void maybeCompleteWithException(Collection topicIdPartitions, Throwable throwable) { + if (isCompleted()) { + return; + } + Map response = topicIdPartitions.stream().collect( + Collectors.toMap(tp -> tp, tp -> new PartitionData() + .setErrorCode(Errors.forException(throwable).code()) + .setErrorMessage(throwable.getMessage()))); + // Add any erroneous partitions to the response. + addErroneousToResponse(response); + future.complete(response); + } + + /** + * Filter out the erroneous partitions from the given set of topicIdPartitions. The order of + * partitions is important hence the method expects an ordered set as input and returns the ordered + * set as well. + * + * @param topicIdPartitions The topic id partitions to filter. + * @return The topic id partitions without the erroneous partitions. 
+ */ + public synchronized Set filterErroneousTopicPartitions(Set topicIdPartitions) { + if (erroneous != null) { + Set retain = new LinkedHashSet<>(topicIdPartitions); + retain.removeAll(erroneous.keySet()); + return retain; + } + return topicIdPartitions; + } + + private synchronized void addErroneousToResponse(Map response) { + if (erroneous != null) { + erroneous.forEach((topicIdPartition, throwable) -> { + response.put(topicIdPartition, new PartitionData() + .setErrorCode(Errors.forException(throwable).code()) + .setErrorMessage(throwable.getMessage())); + }); + } + } +} diff --git a/share/src/main/java/org/apache/kafka/server/share/session/LastUsedKey.java b/server/src/main/java/org/apache/kafka/server/share/session/LastUsedKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/session/LastUsedKey.java rename to server/src/main/java/org/apache/kafka/server/share/session/LastUsedKey.java diff --git a/share/src/main/java/org/apache/kafka/server/share/session/ShareSession.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/session/ShareSession.java rename to server/src/main/java/org/apache/kafka/server/share/session/ShareSession.java diff --git a/share/src/main/java/org/apache/kafka/server/share/session/ShareSessionCache.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSessionCache.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/session/ShareSessionCache.java rename to server/src/main/java/org/apache/kafka/server/share/session/ShareSessionCache.java diff --git a/share/src/main/java/org/apache/kafka/server/share/session/ShareSessionKey.java b/server/src/main/java/org/apache/kafka/server/share/session/ShareSessionKey.java similarity index 100% rename from share/src/main/java/org/apache/kafka/server/share/session/ShareSessionKey.java rename to server/src/main/java/org/apache/kafka/server/share/session/ShareSessionKey.java diff --git a/server/src/test/java/org/apache/kafka/server/AssignmentsManagerTest.java b/server/src/test/java/org/apache/kafka/server/AssignmentsManagerTest.java index 543e67d03e21b..f426c355ceb50 100644 --- a/server/src/test/java/org/apache/kafka/server/AssignmentsManagerTest.java +++ b/server/src/test/java/org/apache/kafka/server/AssignmentsManagerTest.java @@ -493,11 +493,11 @@ void testBuildRequestData() { setPartitions(Collections.singletonList( new AssignReplicasToDirsRequestData.PartitionData(). setPartitionIndex(2))), - new AssignReplicasToDirsRequestData.TopicData(). - setTopicId(TOPIC_2). - setPartitions(Collections.singletonList( - new AssignReplicasToDirsRequestData.PartitionData(). - setPartitionIndex(5))))), + new AssignReplicasToDirsRequestData.TopicData(). + setTopicId(TOPIC_2). + setPartitions(Collections.singletonList( + new AssignReplicasToDirsRequestData.PartitionData(). + setPartitionIndex(5))))), new AssignReplicasToDirsRequestData.DirectoryData(). setId(DIR_3). 
setTopics(Collections.singletonList( diff --git a/server/src/test/java/org/apache/kafka/server/BrokerFeaturesTest.java b/server/src/test/java/org/apache/kafka/server/BrokerFeaturesTest.java index 6ce2b3a7e65bc..4bf9934457128 100644 --- a/server/src/test/java/org/apache/kafka/server/BrokerFeaturesTest.java +++ b/server/src/test/java/org/apache/kafka/server/BrokerFeaturesTest.java @@ -27,8 +27,9 @@ import java.util.HashMap; import java.util.Map; -import static org.apache.kafka.server.common.Features.GROUP_VERSION; -import static org.apache.kafka.server.common.Features.TRANSACTION_VERSION; +import static org.apache.kafka.server.common.Feature.ELIGIBLE_LEADER_REPLICAS_VERSION; +import static org.apache.kafka.server.common.Feature.GROUP_VERSION; +import static org.apache.kafka.server.common.Feature.TRANSACTION_VERSION; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -111,6 +112,7 @@ public void testDefaultFinalizedFeatures() { expectedFeatures.put(MetadataVersion.FEATURE_NAME, MetadataVersion.latestTesting().featureLevel()); expectedFeatures.put(TRANSACTION_VERSION.featureName(), TRANSACTION_VERSION.latestTesting()); expectedFeatures.put(GROUP_VERSION.featureName(), GROUP_VERSION.latestTesting()); + expectedFeatures.put(ELIGIBLE_LEADER_REPLICAS_VERSION.featureName(), ELIGIBLE_LEADER_REPLICAS_VERSION.latestTesting()); expectedFeatures.put("kraft.version", (short) 0); expectedFeatures.put("test_feature_1", (short) 4); expectedFeatures.put("test_feature_2", (short) 3); diff --git a/server/src/test/java/org/apache/kafka/server/ClientMetricsManagerTest.java b/server/src/test/java/org/apache/kafka/server/ClientMetricsManagerTest.java index 57027a2bc4fff..0c928bbaa8234 100644 --- a/server/src/test/java/org/apache/kafka/server/ClientMetricsManagerTest.java +++ b/server/src/test/java/org/apache/kafka/server/ClientMetricsManagerTest.java @@ -1313,7 +1313,7 @@ private KafkaMetric getMetric(Metrics kafkaMetrics, String name) throws Exceptio Optional> metric = kafkaMetrics.metrics().entrySet().stream() .filter(entry -> entry.getKey().name().equals(name)) .findFirst(); - if (!metric.isPresent()) + if (metric.isEmpty()) throw new Exception(String.format("Could not find metric called %s", name)); return metric.get().getValue(); diff --git a/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java b/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java index f7be7157120aa..0f96468569fa2 100644 --- a/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java +++ b/server/src/test/java/org/apache/kafka/server/metrics/ClientMetricsTestUtils.java @@ -32,8 +32,6 @@ import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.Properties; @@ -43,10 +41,10 @@ public class ClientMetricsTestUtils { public static final String DEFAULT_METRICS = "org.apache.kafka.client.producer.partition.queue.,org.apache.kafka.client.producer.partition.latency"; public static final int DEFAULT_INTERVAL_MS = 30 * 1000; // 30 seconds - public static final List DEFAULT_MATCH = Collections.unmodifiableList(Arrays.asList( + public static final List DEFAULT_MATCH = List.of( ClientMetricsConfigs.CLIENT_SOFTWARE_NAME + "=apache-kafka-java", 
ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION + "=3.5.*" - )); + ); public static final int CLIENT_PORT = 56078; public static Properties defaultProperties() { diff --git a/server/src/test/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollectorTest.java b/server/src/test/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollectorTest.java index ab2760e14db4d..8c96c5c5c10ee 100644 --- a/server/src/test/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollectorTest.java +++ b/server/src/test/java/org/apache/kafka/server/metrics/LinuxIoMetricsCollectorTest.java @@ -25,7 +25,6 @@ import java.io.File; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -87,7 +86,7 @@ void writeProcFile(long readBytes, long writeBytes) throws IOException { "read_bytes: " + readBytes + "\n" + "write_bytes: " + writeBytes + "\n" + "cancelled_write_bytes: 0\n"; - Files.write(selfDir.resolve("io"), bld.getBytes(StandardCharsets.UTF_8)); + Files.writeString(selfDir.resolve("io"), bld); } } } diff --git a/share/src/test/java/org/apache/kafka/server/share/CachedSharePartitionTest.java b/server/src/test/java/org/apache/kafka/server/share/CachedSharePartitionTest.java similarity index 100% rename from share/src/test/java/org/apache/kafka/server/share/CachedSharePartitionTest.java rename to server/src/test/java/org/apache/kafka/server/share/CachedSharePartitionTest.java diff --git a/share/src/test/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKeyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKeyTest.java similarity index 100% rename from share/src/test/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKeyTest.java rename to server/src/test/java/org/apache/kafka/server/share/fetch/DelayedShareFetchKeyTest.java diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategyTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategyTest.java new file mode 100644 index 0000000000000..073386738322e --- /dev/null +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/PartitionMaxBytesStrategyTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.server.share.fetch; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy.StrategyType; + +import org.junit.jupiter.api.Test; + +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class PartitionMaxBytesStrategyTest { + + @Test + public void testConstructor() { + assertThrows(IllegalArgumentException.class, () -> PartitionMaxBytesStrategy.type(null)); + assertDoesNotThrow(() -> PartitionMaxBytesStrategy.type(StrategyType.UNIFORM)); + } + + @Test + public void testCheckValidArguments() { + TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 0)); + TopicIdPartition topicIdPartition2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 1)); + TopicIdPartition topicIdPartition3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic2", 0)); + Set partitions = new LinkedHashSet<>(); + partitions.add(topicIdPartition1); + partitions.add(topicIdPartition2); + partitions.add(topicIdPartition3); + + // acquired partitions size is 0. + assertThrows(IllegalArgumentException.class, () -> PartitionMaxBytesStrategy.checkValidArguments( + 100, partitions, 0)); + // empty partitions set. + assertThrows(IllegalArgumentException.class, () -> PartitionMaxBytesStrategy.checkValidArguments( + 100, Set.of(), 20)); + // partitions is null. + assertThrows(IllegalArgumentException.class, () -> PartitionMaxBytesStrategy.checkValidArguments( + 100, null, 20)); + // request max bytes is 0. + assertThrows(IllegalArgumentException.class, () -> PartitionMaxBytesStrategy.checkValidArguments( + 0, partitions, 20)); + + // Valid arguments. 
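+ // Note that acquiredPartitionsSize (20) may exceed the number of partitions supplied (3); only positivity is validated.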
+ assertDoesNotThrow(() -> PartitionMaxBytesStrategy.checkValidArguments(100, partitions, 20)); + } + + @Test + public void testUniformStrategy() { + PartitionMaxBytesStrategy partitionMaxBytesStrategy = PartitionMaxBytesStrategy.type(StrategyType.UNIFORM); + TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 0)); + TopicIdPartition topicIdPartition2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 1)); + TopicIdPartition topicIdPartition3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic2", 0)); + Set partitions = new LinkedHashSet<>(); + partitions.add(topicIdPartition1); + partitions.add(topicIdPartition2); + partitions.add(topicIdPartition3); + + LinkedHashMap result = partitionMaxBytesStrategy.maxBytes( + 100, partitions, 3); + assertEquals(result.values().stream().toList(), List.of(33, 33, 33)); + + result = partitionMaxBytesStrategy.maxBytes( + 100, partitions, 5); + assertEquals(result.values().stream().toList(), List.of(20, 20, 20)); + } +} diff --git a/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java new file mode 100644 index 0000000000000..c86facef7a439 --- /dev/null +++ b/server/src/test/java/org/apache/kafka/server/share/fetch/ShareFetchTest.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.server.share.fetch; + +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.server.storage.log.FetchParams; + +import org.junit.jupiter.api.Test; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +public class ShareFetchTest { + + private static final String GROUP_ID = "groupId"; + private static final String MEMBER_ID = "memberId"; + private static final int BATCH_SIZE = 500; + + @Test + public void testErrorInAllPartitions() { + TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), + Map.of(topicIdPartition, 10), BATCH_SIZE, 100); + assertFalse(shareFetch.errorInAllPartitions()); + + shareFetch.addErroneous(topicIdPartition, new RuntimeException()); + assertTrue(shareFetch.errorInAllPartitions()); + } + + @Test + public void testErrorInAllPartitionsWithMultipleTopicIdPartitions() { + TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), + Map.of(topicIdPartition0, 10, topicIdPartition1, 10), BATCH_SIZE, 100); + assertFalse(shareFetch.errorInAllPartitions()); + + shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); + assertFalse(shareFetch.errorInAllPartitions()); + + shareFetch.addErroneous(topicIdPartition1, new RuntimeException()); + assertTrue(shareFetch.errorInAllPartitions()); + } + + @Test + public void testFilterErroneousTopicPartitions() { + TopicIdPartition topicIdPartition0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + TopicIdPartition topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + ShareFetch shareFetch = new ShareFetch(mock(FetchParams.class), GROUP_ID, MEMBER_ID, new CompletableFuture<>(), + Map.of(topicIdPartition0, 10, topicIdPartition1, 10), BATCH_SIZE, 100); + Set result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); + // No erroneous partitions, hence all partitions should be returned. + assertEquals(2, result.size()); + assertTrue(result.contains(topicIdPartition0)); + assertTrue(result.contains(topicIdPartition1)); + + // Add an erroneous partition and verify that it is filtered out. + shareFetch.addErroneous(topicIdPartition0, new RuntimeException()); + result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); + assertEquals(1, result.size()); + assertTrue(result.contains(topicIdPartition1)); + + // Add another erroneous partition and verify that it is filtered out. 
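+ // With both partitions now erroneous, the filtered set becomes empty.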
+ shareFetch.addErroneous(topicIdPartition1, new RuntimeException()); + result = shareFetch.filterErroneousTopicPartitions(Set.of(topicIdPartition0, topicIdPartition1)); + assertTrue(result.isEmpty()); + } + +} diff --git a/share/src/test/java/org/apache/kafka/server/share/session/ShareSessionCacheTest.java b/server/src/test/java/org/apache/kafka/server/share/session/ShareSessionCacheTest.java similarity index 100% rename from share/src/test/java/org/apache/kafka/server/share/session/ShareSessionCacheTest.java rename to server/src/test/java/org/apache/kafka/server/share/session/ShareSessionCacheTest.java diff --git a/share/src/test/java/org/apache/kafka/server/share/session/ShareSessionTest.java b/server/src/test/java/org/apache/kafka/server/share/session/ShareSessionTest.java similarity index 100% rename from share/src/test/java/org/apache/kafka/server/share/session/ShareSessionTest.java rename to server/src/test/java/org/apache/kafka/server/share/session/ShareSessionTest.java diff --git a/settings.gradle b/settings.gradle index 22db097254205..dd76b7690251a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -19,24 +19,20 @@ plugins { } def isGithubActions = System.getenv('GITHUB_ACTIONS') != null -def isJenkins = System.getenv('JENKINS_URL') != null -def isCI = isGithubActions || isJenkins def currentJvm = JavaVersion.current() develocity { server = "https://ge.apache.org" projectId = "kafka" buildScan { - uploadInBackground = !isCI + uploadInBackground = !isGithubActions publishing.onlyIf { it.authenticated } obfuscation { // This obfuscates the IP addresses of the build machine in the build scan. // Alternatively, the build scan will provide the hostname for troubleshooting host-specific issues. ipAddresses { addresses -> addresses.collect { address -> "0.0.0.0"} } } - if (isJenkins) { - tag "jenkins" - } else if (isGithubActions) { + if (isGithubActions) { tag "github" } else { tag "local" @@ -49,7 +45,7 @@ buildCache { local { // This allows the build cache to be used locally or on GitHub Actions. 
// Using the cache on GH should be safe since each job is run on a new VM - enabled = !isJenkins + enabled = true } remote(develocity.buildCache) { @@ -88,9 +84,6 @@ include 'clients', 'streams:integration-tests', 'streams:streams-scala', 'streams:test-utils', - 'streams:upgrade-system-tests-0100', - 'streams:upgrade-system-tests-0101', - 'streams:upgrade-system-tests-0102', 'streams:upgrade-system-tests-0110', 'streams:upgrade-system-tests-10', 'streams:upgrade-system-tests-11', @@ -112,12 +105,14 @@ include 'clients', 'streams:upgrade-system-tests-36', 'streams:upgrade-system-tests-37', 'streams:upgrade-system-tests-38', + 'streams:upgrade-system-tests-39', 'tools', 'tools:tools-api', 'transaction-coordinator', 'trogdor', 'test-common', - 'test-common:test-common-api' + 'test-common:test-common-api', + 'test-common:test-common-runtime' project(":storage:api").name = "storage-api" rootProject.name = 'kafka' diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/PersisterStateBatchCombiner.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/PersisterStateBatchCombiner.java index a2010c761f50f..9078c5fee650b 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/PersisterStateBatchCombiner.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/PersisterStateBatchCombiner.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.TreeSet; @@ -179,7 +180,7 @@ private MergeCandidatePair getMergeCandidatePair() { } Iterator iter = sortedBatches.iterator(); PersisterStateBatch prev = iter.next(); - List nonOverlapping = new ArrayList<>(sortedBatches.size()); + List nonOverlapping = new LinkedList<>(); while (iter.hasNext()) { PersisterStateBatch candidate = iter.next(); if (candidate.firstOffset() <= prev.lastOffset() || // overlap @@ -389,8 +390,7 @@ public PersisterStateBatch candidate() { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof MergeCandidatePair)) return false; - MergeCandidatePair that = (MergeCandidatePair) o; + if (!(o instanceof MergeCandidatePair that)) return false; return Objects.equals(prev, that.prev) && Objects.equals(candidate, that.candidate); } diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinator.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinator.java index dd56503dbafe5..72427ac870559 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinator.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinator.java @@ -19,6 +19,8 @@ import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.requests.RequestContext; @@ -32,11 +34,6 @@ import java.util.function.IntSupplier; public interface ShareCoordinator { - short SHARE_SNAPSHOT_RECORD_KEY_VERSION = 0; - short SHARE_SNAPSHOT_RECORD_VALUE_VERSION = 0; - short SHARE_UPDATE_RECORD_KEY_VERSION = 1; - short 
SHARE_UPDATE_RECORD_VALUE_VERSION = 1; - /** * Return the partition index for the given key. * @@ -81,6 +78,14 @@ public interface ShareCoordinator { */ CompletableFuture readState(RequestContext context, ReadShareGroupStateRequestData request); + /** + * Handle read share state summary call + * @param context - represents the incoming read summary request context + * @param request - actual RPC request object + * @return completable future comprising ReadShareGroupStateSummaryRequestData + */ + CompletableFuture readStateSummary(RequestContext context, ReadShareGroupStateSummaryRequestData request); + /** * Called when new coordinator is elected * @param partitionIndex - The partition index (internal topic) diff --git a/server/src/main/java/org/apache/kafka/server/config/ShareCoordinatorConfig.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfig.java similarity index 89% rename from server/src/main/java/org/apache/kafka/server/config/ShareCoordinatorConfig.java rename to share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfig.java index a27a19914c954..f4a11abea8eef 100644 --- a/server/src/main/java/org/apache/kafka/server/config/ShareCoordinatorConfig.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfig.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.server.config; +package org.apache.kafka.coordinator.share; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; @@ -24,6 +24,7 @@ import java.util.Optional; import static org.apache.kafka.common.config.ConfigDef.Importance.HIGH; +import static org.apache.kafka.common.config.ConfigDef.Importance.LOW; import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Type.INT; @@ -68,9 +69,13 @@ public class ShareCoordinatorConfig { public static final String STATE_TOPIC_COMPRESSION_CODEC_DOC = "Compression codec for the share-group state topic."; public static final String APPEND_LINGER_MS_CONFIG = "share.coordinator.append.linger.ms"; - public static final int APPEND_LINGER_MS_DEFAULT = 10; + public static final int APPEND_LINGER_MS_DEFAULT = 5; public static final String APPEND_LINGER_MS_DOC = "The duration in milliseconds that the share coordinator will wait for writes to accumulate before flushing them to disk."; + public static final String STATE_TOPIC_PRUNE_INTERVAL_MS_CONFIG = "share.coordinator.state.topic.prune.interval.ms"; + public static final int STATE_TOPIC_PRUNE_INTERVAL_MS_DEFAULT = 5 * 60 * 1000; // 5 minutes + public static final String STATE_TOPIC_PRUNE_INTERVAL_MS_DOC = "The duration in milliseconds that the share coordinator will wait between pruning eligible records in share-group state topic."; + public static final ConfigDef CONFIG_DEF = new ConfigDef() .define(STATE_TOPIC_NUM_PARTITIONS_CONFIG, INT, STATE_TOPIC_NUM_PARTITIONS_DEFAULT, atLeast(1), HIGH, STATE_TOPIC_NUM_PARTITIONS_DOC) .define(STATE_TOPIC_REPLICATION_FACTOR_CONFIG, SHORT, STATE_TOPIC_REPLICATION_FACTOR_DEFAULT, atLeast(1), HIGH, STATE_TOPIC_REPLICATION_FACTOR_DOC) @@ -81,7 +86,8 @@ public class ShareCoordinatorConfig { .define(LOAD_BUFFER_SIZE_CONFIG, INT, LOAD_BUFFER_SIZE_DEFAULT, atLeast(1), HIGH, LOAD_BUFFER_SIZE_DOC) 
.define(STATE_TOPIC_COMPRESSION_CODEC_CONFIG, INT, (int) STATE_TOPIC_COMPRESSION_CODEC_DEFAULT.id, HIGH, STATE_TOPIC_COMPRESSION_CODEC_DOC) .define(APPEND_LINGER_MS_CONFIG, INT, APPEND_LINGER_MS_DEFAULT, atLeast(0), MEDIUM, APPEND_LINGER_MS_DOC) - .define(WRITE_TIMEOUT_MS_CONFIG, INT, WRITE_TIMEOUT_MS_DEFAULT, atLeast(1), HIGH, WRITE_TIMEOUT_MS_DOC); + .define(WRITE_TIMEOUT_MS_CONFIG, INT, WRITE_TIMEOUT_MS_DEFAULT, atLeast(1), HIGH, WRITE_TIMEOUT_MS_DOC) + .defineInternal(STATE_TOPIC_PRUNE_INTERVAL_MS_CONFIG, INT, STATE_TOPIC_PRUNE_INTERVAL_MS_DEFAULT, atLeast(1), LOW, STATE_TOPIC_PRUNE_INTERVAL_MS_DOC); private final int stateTopicNumPartitions; private final short stateTopicReplicationFactor; @@ -93,6 +99,7 @@ public class ShareCoordinatorConfig { private final int loadBufferSize; private final CompressionType compressionType; private final int appendLingerMs; + private final int pruneIntervalMs; public ShareCoordinatorConfig(AbstractConfig config) { @@ -108,6 +115,7 @@ public ShareCoordinatorConfig(AbstractConfig config) { .map(CompressionType::forId) .orElse(null); appendLingerMs = config.getInt(APPEND_LINGER_MS_CONFIG); + pruneIntervalMs = config.getInt(STATE_TOPIC_PRUNE_INTERVAL_MS_CONFIG); validate(); } @@ -151,6 +159,10 @@ public CompressionType shareCoordinatorStateTopicCompressionType() { return compressionType; } + public int shareCoordinatorTopicPruneIntervalMs() { + return pruneIntervalMs; + } + private void validate() { Utils.require(snapshotUpdateRecordsPerSnapshot >= 0 && snapshotUpdateRecordsPerSnapshot <= 500, String.format("%s must be between [0, 500]", SNAPSHOT_UPDATE_RECORDS_PER_SNAPSHOT_CONFIG)); diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManager.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManager.java new file mode 100644 index 0000000000000..69070f65e936f --- /dev/null +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManager.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.coordinator.share; + +import org.apache.kafka.server.share.SharePartitionKey; +import org.apache.kafka.timeline.SnapshotRegistry; +import org.apache.kafka.timeline.TimelineHashMap; +import org.apache.kafka.timeline.TimelineLong; + +import java.util.Objects; +import java.util.Optional; + +/** + * Util class to track the offsets written into the internal topic + * per share partition key. + * It calculates the minimum offset globally up to which the records + * in the internal partition are redundant i.e. they have been overridden + * by newer records. 
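+ * For example, if two share partition keys last wrote at offsets 7 and 4, every record below offset 4 is redundant.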
+ */ +public class ShareCoordinatorOffsetsManager { + + // Map to store share partition key => current partition offset + // being written. + private final TimelineHashMap offsets; + + // Minimum offset representing the smallest necessary offset + // across the internal partition (offsets below this are redundant). + // We are using timeline object here because the offsets which are passed into + // updateState might not be committed yet. In case of retry, these offsets would + // be invalidated via the snapshot registry. Hence, using timeline object + // the values would automatically revert in accordance with the last committed offset. + private final TimelineLong lastRedundantOffset; + + public ShareCoordinatorOffsetsManager(SnapshotRegistry snapshotRegistry) { + Objects.requireNonNull(snapshotRegistry); + offsets = new TimelineHashMap<>(snapshotRegistry, 0); + lastRedundantOffset = new TimelineLong(snapshotRegistry); + lastRedundantOffset.set(Long.MAX_VALUE); // For easy application of Math.min. + } + + /** + * Method updates internal state with the supplied offset for the provided + * share partition key. It then calculates the minimum offset, if possible, + * below which all offsets are redundant. + * + * @param key - represents {@link SharePartitionKey} whose offset needs updating + * @param offset - represents the latest partition offset for provided key + */ + public void updateState(SharePartitionKey key, long offset) { + lastRedundantOffset.set(Math.min(lastRedundantOffset.get(), offset)); + offsets.put(key, offset); + + Optional redundantOffset = findRedundantOffset(); + redundantOffset.ifPresent(lastRedundantOffset::set); + } + + private Optional findRedundantOffset() { + if (offsets.isEmpty()) { + return Optional.empty(); + } + + long soFar = Long.MAX_VALUE; + + for (long offset : offsets.values()) { + // Get min offset among latest offsets + // for all share keys in the internal partition. + soFar = Math.min(soFar, offset); + + // lastRedundantOffset represents the smallest necessary offset + // and if soFar equals it, we cannot proceed. This can happen + // if a share partition key hasn't had records written for a while. + // For example, + //
              + // key1:1 + // key2:2 4 6 + // key3:3 5 7 + //
              + // We can see in above that offsets 2, 4, 3, 5 are redundant, + // but we do not have a contiguous prefix starting at lastRedundantOffset + // and we cannot proceed. + if (soFar == lastRedundantOffset.get()) { + return Optional.of(soFar); + } + } + + return Optional.of(soFar); + } + + /** + * Most recent last redundant offset. This method is to be used + * when the caller wants to query the value of such offset. + * @return Optional of type Long representing the offset or empty for invalid offset values + */ + public Optional lastRedundantOffset() { + long value = lastRedundantOffset.get(); + if (value <= 0 || value == Long.MAX_VALUE) { + return Optional.empty(); + } + + return Optional.of(value); + } + + // visible for testing + TimelineHashMap curState() { + return offsets; + } +} diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpers.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpers.java index bd4bd57a34a34..abde3f442b008 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpers.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpers.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; +import org.apache.kafka.coordinator.share.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey; import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue; import org.apache.kafka.coordinator.share.generated.ShareUpdateKey; @@ -33,7 +34,7 @@ public static CoordinatorRecord newShareSnapshotRecord(String groupId, Uuid topi .setGroupId(groupId) .setTopicId(topicId) .setPartition(partitionId), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION), + CoordinatorRecordType.SHARE_SNAPSHOT.id()), new ApiMessageAndVersion(new ShareSnapshotValue() .setSnapshotEpoch(offsetData.snapshotEpoch()) .setStateEpoch(offsetData.stateEpoch()) @@ -46,7 +47,7 @@ public static CoordinatorRecord newShareSnapshotRecord(String groupId, Uuid topi .setDeliveryCount(batch.deliveryCount()) .setDeliveryState(batch.deliveryState())) .collect(Collectors.toList())), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION) + (short) 0) ); } @@ -56,7 +57,7 @@ public static CoordinatorRecord newShareSnapshotUpdateRecord(String groupId, Uui .setGroupId(groupId) .setTopicId(topicId) .setPartition(partitionId), - ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION), + CoordinatorRecordType.SHARE_UPDATE.id()), new ApiMessageAndVersion(new ShareUpdateValue() .setSnapshotEpoch(offsetData.snapshotEpoch()) .setLeaderEpoch(offsetData.leaderEpoch()) @@ -68,7 +69,7 @@ public static CoordinatorRecord newShareSnapshotUpdateRecord(String groupId, Uui .setDeliveryCount(batch.deliveryCount()) .setDeliveryState(batch.deliveryState())) .collect(Collectors.toList())), - ShareCoordinator.SHARE_UPDATE_RECORD_VALUE_VERSION) + (short) 0) ); } } diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerde.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerde.java index a620289d17ae4..1fbfabb98f201 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerde.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerde.java @@ 
-17,36 +17,28 @@ package org.apache.kafka.coordinator.share; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde; -import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey; -import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue; -import org.apache.kafka.coordinator.share.generated.ShareUpdateKey; -import org.apache.kafka.coordinator.share.generated.ShareUpdateValue; +import org.apache.kafka.coordinator.share.generated.CoordinatorRecordType; public class ShareCoordinatorRecordSerde extends CoordinatorRecordSerde { @Override - protected ApiMessage apiMessageKeyFor(short recordVersion) { - switch (recordVersion) { - case ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION: - return new ShareSnapshotKey(); - case ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION: - return new ShareUpdateKey(); - default: - throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); + protected ApiMessage apiMessageKeyFor(short recordType) { + try { + return CoordinatorRecordType.fromId(recordType).newRecordKey(); + } catch (UnsupportedVersionException ex) { + throw new CoordinatorLoader.UnknownRecordTypeException(recordType); } } @Override protected ApiMessage apiMessageValueFor(short recordVersion) { - switch (recordVersion) { - case ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION: - return new ShareSnapshotValue(); - case ShareCoordinator.SHARE_UPDATE_RECORD_VALUE_VERSION: - return new ShareUpdateValue(); - default: - throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); + try { + return CoordinatorRecordType.fromId(recordVersion).newRecordValue(); + } catch (UnsupportedVersionException ex) { + throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion); } } } diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorService.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorService.java index 05999bd6d2587..a006edd7e6479 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorService.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorService.java @@ -24,10 +24,13 @@ import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.RequestContext; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.apache.kafka.common.utils.LogContext; @@ -44,10 +47,10 @@ import org.apache.kafka.coordinator.share.metrics.ShareCoordinatorMetrics; import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; -import org.apache.kafka.server.config.ShareCoordinatorConfig; import 
org.apache.kafka.server.record.BrokerCompressionType; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.slf4j.Logger; @@ -60,6 +63,8 @@ import java.util.OptionalInt; import java.util.Properties; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.IntSupplier; import java.util.stream.Collectors; @@ -74,6 +79,9 @@ public class ShareCoordinatorService implements ShareCoordinator { private final ShareCoordinatorMetrics shareCoordinatorMetrics; private volatile int numPartitions = -1; // Number of partitions for __share_group_state. Provided when component is started. private final Time time; + private final Timer timer; + private final PartitionWriter writer; + private final Map lastPrunedOffsets; public static class Builder { private final int nodeId; @@ -175,6 +183,7 @@ public ShareCoordinatorService build() { .withSerializer(new ShareCoordinatorRecordSerde()) .withCompression(Compression.of(config.shareCoordinatorStateTopicCompressionType()).build()) .withAppendLingerMs(config.shareCoordinatorAppendLingerMs()) + .withExecutorService(Executors.newSingleThreadExecutor()) .build(); return new ShareCoordinatorService( @@ -182,7 +191,9 @@ public ShareCoordinatorService build() { config, runtime, coordinatorMetrics, - time + time, + timer, + writer ); } } @@ -192,12 +203,18 @@ public ShareCoordinatorService( ShareCoordinatorConfig config, CoordinatorRuntime runtime, ShareCoordinatorMetrics shareCoordinatorMetrics, - Time time) { + Time time, + Timer timer, + PartitionWriter writer + ) { this.log = logContext.logger(ShareCoordinatorService.class); this.config = config; this.runtime = runtime; this.shareCoordinatorMetrics = shareCoordinatorMetrics; this.time = time; + this.timer = timer; + this.writer = writer; + this.lastPrunedOffsets = new ConcurrentHashMap<>(); } @Override @@ -238,9 +255,82 @@ public void startup( log.info("Starting up."); numPartitions = shareGroupTopicPartitionCount.getAsInt(); + setupRecordPruning(); log.info("Startup complete."); } + private void setupRecordPruning() { + log.info("Scheduling share-group state topic prune job."); + timer.add(new TimerTask(config.shareCoordinatorTopicPruneIntervalMs()) { + @Override + public void run() { + List> futures = new ArrayList<>(); + runtime.activeTopicPartitions().forEach(tp -> futures.add(performRecordPruning(tp))); + + CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})) + .whenComplete((res, exp) -> { + if (exp != null) { + log.error("Received error in share-group state topic prune.", exp); + } + // Perpetual recursion, failure or not. + setupRecordPruning(); + }); + } + }); + } + + private CompletableFuture performRecordPruning(TopicPartition tp) { + // This future will always be completed normally, exception or not. + CompletableFuture fut = new CompletableFuture<>(); + + runtime.scheduleWriteOperation( + "write-state-record-prune", + tp, + Duration.ofMillis(config.shareCoordinatorWriteTimeoutMs()), + ShareCoordinatorShard::lastRedundantOffset + ).whenComplete((result, exception) -> { + if (exception != null) { + log.debug("Last redundant offset for tp {} lookup threw an error.", tp, exception); + Errors error = Errors.forException(exception); + // These errors might result from partition metadata not loaded + // or shard re-election. 
Will cause unnecessary noise, hence not logging + if (!(error.equals(Errors.COORDINATOR_LOAD_IN_PROGRESS) || error.equals(Errors.NOT_COORDINATOR))) { + log.error("Last redundant offset lookup for tp {} threw an error.", tp, exception); + fut.completeExceptionally(exception); + return; + } + fut.complete(null); + return; + } + if (result.isPresent()) { + Long off = result.get(); + Long lastPrunedOffset = lastPrunedOffsets.get(tp); + if (lastPrunedOffset != null && lastPrunedOffset.longValue() == off) { + log.debug("{} already pruned till offset {}", tp, off); + fut.complete(null); + return; + } + + log.info("Pruning records in {} till offset {}.", tp, off); + writer.deleteRecords(tp, off) + .whenComplete((res, exp) -> { + if (exp != null) { + log.debug("Exception while deleting records in {} till offset {}.", tp, off, exp); + fut.completeExceptionally(exp); + return; + } + fut.complete(null); + // Best effort prevention of issuing duplicate delete calls. + lastPrunedOffsets.put(tp, off); + }); + } else { + log.debug("No offset value for tp {} found.", tp); + fut.complete(null); + } + }); + return fut; + } + @Override public void shutdown() { if (!isActive.compareAndSet(true, false)) { @@ -336,7 +426,7 @@ public CompletableFuture writeState(RequestCon }); }); - // Combine all futures into a single CompletableFuture + // Combine all futures into a single CompletableFuture. CompletableFuture combinedFuture = CompletableFuture.allOf(futureMap.values().stream() .flatMap(partMap -> partMap.values().stream()).toArray(CompletableFuture[]::new)); @@ -347,7 +437,7 @@ public CompletableFuture writeState(RequestCon (topicId, topicEntry) -> { List partitionResults = new ArrayList<>(topicEntry.size()); topicEntry.forEach( - // map of partition id -> responses from api + // Map of partition id -> responses from api. (partitionId, responseFut) -> { // This is the future returned by runtime.scheduleWriteOperation which returns when the // operation has completed including error information. When this line executes, the future @@ -361,8 +451,8 @@ public CompletableFuture writeState(RequestCon } ); - // time taken for write - // at this point all futures are completed written above. + // Time taken for write. + // At this point all futures are completed written above. shareCoordinatorMetrics.record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_LATENCY_SENSOR_NAME, time.hiResClockMs() - startTimeMs); @@ -377,7 +467,7 @@ public CompletableFuture readState(RequestConte // A map to store the futures for each topicId and partition. Map>> futureMap = new HashMap<>(); - // Send an empty response if topic data is empty + // Send an empty response if topic data is empty. if (isEmpty(request.topics())) { log.error("Topic Data is empty: {}", request); return CompletableFuture.completedFuture( @@ -385,7 +475,7 @@ public CompletableFuture readState(RequestConte ); } - // Send an empty response if partition data is empty for any topic + // Send an empty response if partition data is empty for any topic. for (ReadShareGroupStateRequestData.ReadStateData topicData : request.topics()) { if (isEmpty(topicData.partitions())) { log.error("Partition Data for topic {} is empty: {}", topicData.topicId(), request); @@ -395,7 +485,7 @@ public CompletableFuture readState(RequestConte } } - // Send an empty response if groupId is invalid + // Send an empty response if groupId is invalid. 
if (isGroupIdEmpty(groupId)) { log.error("Group id must be specified and non-empty: {}", request); return CompletableFuture.completedFuture( @@ -403,7 +493,7 @@ public CompletableFuture readState(RequestConte ); } - // Send an empty response if the coordinator is not active + // Send an empty response if the coordinator is not active. if (!isActive.get()) { return CompletableFuture.completedFuture( generateErrorReadStateResponse( @@ -419,43 +509,55 @@ public CompletableFuture readState(RequestConte // be looping over the keys below and constructing new ReadShareGroupStateRequestData objects to pass // onto the shard method. - request.topics().forEach(topicData -> { + // It is possible that a read state request contains a leaderEpoch which is the higher than seen so + // far, for a specific share partition. Hence, for each read request - we must check for this + // and update the state appropriately. + + for (ReadShareGroupStateRequestData.ReadStateData topicData : request.topics()) { Uuid topicId = topicData.topicId(); - topicData.partitions().forEach(partitionData -> { - // Request object containing information of a single topic partition + for (ReadShareGroupStateRequestData.PartitionData partitionData : topicData.partitions()) { + SharePartitionKey coordinatorKey = SharePartitionKey.getInstance(request.groupId(), topicId, partitionData.partition()); + ReadShareGroupStateRequestData requestForCurrentPartition = new ReadShareGroupStateRequestData() .setGroupId(groupId) .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() .setTopicId(topicId) .setPartitions(Collections.singletonList(partitionData)))); - SharePartitionKey coordinatorKey = SharePartitionKey.getInstance(request.groupId(), topicId, partitionData.partition()); - // Scheduling a runtime read operation to read share partition state from the coordinator in memory state - CompletableFuture future = runtime.scheduleReadOperation( - "read-share-group-state", + + // We are issuing a scheduleWriteOperation even though the request is of read type since + // we might want to update the leader epoch, if it is the highest seen so far for the specific + // share partition. In that case, we require the strong consistency offered by scheduleWriteOperation. + // At the time of writing, read after write consistency for the readState and writeState requests + // is not guaranteed. 
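+ // readStateAndMaybeUpdateLeaderEpoch performs the read and applies any leader epoch bump within the same write operation.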
+ CompletableFuture readFuture = runtime.scheduleWriteOperation( + "read-update-leader-epoch-state", topicPartitionFor(coordinatorKey), - (coordinator, offset) -> coordinator.readState(requestForCurrentPartition, offset) - ).exceptionally(exception -> handleOperationException( - "read-share-group-state", - request, - exception, - (error, message) -> ReadShareGroupStateResponse.toErrorResponseData( - topicData.topicId(), - partitionData.partition(), - error, - "Unable to read share group state: " + exception.getMessage() - ), - log - )); + Duration.ofMillis(config.shareCoordinatorWriteTimeoutMs()), + coordinator -> coordinator.readStateAndMaybeUpdateLeaderEpoch(requestForCurrentPartition) + ).exceptionally(readException -> + handleOperationException( + "read-update-leader-epoch-state", + request, + readException, + (error, message) -> ReadShareGroupStateResponse.toErrorResponseData( + topicData.topicId(), + partitionData.partition(), + error, + "Unable to read share group state: " + readException.getMessage() + ), + log + )); + futureMap.computeIfAbsent(topicId, k -> new HashMap<>()) - .put(partitionData.partition(), future); - }); - }); + .put(partitionData.partition(), readFuture); + } + } - // Combine all futures into a single CompletableFuture + // Combine all futures into a single CompletableFuture. CompletableFuture combinedFuture = CompletableFuture.allOf(futureMap.values().stream() .flatMap(map -> map.values().stream()).toArray(CompletableFuture[]::new)); - // Transform the combined CompletableFuture into CompletableFuture + // Transform the combined CompletableFuture into CompletableFuture. return combinedFuture.thenApply(v -> { List readStateResult = new ArrayList<>(futureMap.size()); futureMap.forEach( @@ -463,7 +565,7 @@ public CompletableFuture readState(RequestConte List partitionResults = new ArrayList<>(topicEntry.size()); topicEntry.forEach( (partitionId, responseFut) -> { - // responseFut would already be completed by now since we have used + // ResponseFut would already be completed by now since we have used // CompletableFuture::allOf to create a combined future from the future map. partitionResults.add( responseFut.getNow(null).results().get(0).partitions().get(0) @@ -478,6 +580,116 @@ public CompletableFuture readState(RequestConte }); } + @Override + public CompletableFuture readStateSummary(RequestContext context, ReadShareGroupStateSummaryRequestData request) { + // Send an empty response if the coordinator is not active. + if (!isActive.get()) { + return CompletableFuture.completedFuture( + generateErrorReadStateSummaryResponse( + request, + Errors.COORDINATOR_NOT_AVAILABLE, + "Share coordinator is not available." + ) + ); + } + + String groupId = request.groupId(); + // Send an empty response if groupId is invalid. + if (isGroupIdEmpty(groupId)) { + log.error("Group id must be specified and non-empty: {}", request); + return CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponseData() + ); + } + + // Send an empty response if topic data is empty. + if (isEmpty(request.topics())) { + log.error("Topic Data is empty: {}", request); + return CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponseData() + ); + } + + // Send an empty response if partition data is empty for any topic. 
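Aside: the readState fan-out above and the readStateSummary fan-out below share the same shape — one future per share partition, combined with CompletableFuture.allOf so that getNow() is safe when per-partition results are collected. A generic sketch of that pattern follows; the helper and names are illustrative and not part of the service.

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class FanOutSketch {
    // Combine per-partition futures into a single future. By the time the combined
    // future completes, every entry is done, so getNow(null) never observes an
    // incomplete future when the results are gathered.
    static <K, V> CompletableFuture<List<V>> combine(Map<K, CompletableFuture<V>> futures) {
        return CompletableFuture.allOf(futures.values().toArray(new CompletableFuture[0]))
            .thenApply(v -> futures.values().stream()
                .map(f -> f.getNow(null))
                .collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        Map<Integer, CompletableFuture<String>> perPartition = Map.of(
            0, CompletableFuture.completedFuture("ok-0"),
            1, CompletableFuture.completedFuture("ok-1"));
        System.out.println(combine(perPartition).join());
    }
}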
+ for (ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData : request.topics()) { + if (isEmpty(topicData.partitions())) { + log.error("Partition Data for topic {} is empty: {}", topicData.topicId(), request); + return CompletableFuture.completedFuture( + new ReadShareGroupStateSummaryResponseData() + ); + } + } + + // A map to store the futures for each topicId and partition. + Map>> futureMap = new HashMap<>(); + + // The request received here could have multiple keys of structure group:topic:partition. However, + // the readStateSummary method in ShareCoordinatorShard expects a single key in the request. Hence, we will + // be looping over the keys below and constructing new ReadShareGroupStateSummaryRequestData objects to pass + // onto the shard method. + + for (ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData : request.topics()) { + Uuid topicId = topicData.topicId(); + for (ReadShareGroupStateSummaryRequestData.PartitionData partitionData : topicData.partitions()) { + SharePartitionKey coordinatorKey = SharePartitionKey.getInstance(request.groupId(), topicId, partitionData.partition()); + + ReadShareGroupStateSummaryRequestData requestForCurrentPartition = new ReadShareGroupStateSummaryRequestData() + .setGroupId(groupId) + .setTopics(List.of(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId) + .setPartitions(List.of(partitionData)))); + + CompletableFuture readFuture = runtime.scheduleWriteOperation( + "read-share-group-state-summary", + topicPartitionFor(coordinatorKey), + Duration.ofMillis(config.shareCoordinatorWriteTimeoutMs()), + coordinator -> coordinator.readStateSummary(requestForCurrentPartition) + ).exceptionally(readException -> + handleOperationException( + "read-share-group-state-summary", + request, + readException, + (error, message) -> ReadShareGroupStateSummaryResponse.toErrorResponseData( + topicData.topicId(), + partitionData.partition(), + error, + "Unable to read share group state summary: " + readException.getMessage() + ), + log + )); + + futureMap.computeIfAbsent(topicId, k -> new HashMap<>()) + .put(partitionData.partition(), readFuture); + } + } + + // Combine all futures into a single CompletableFuture. + CompletableFuture combinedFuture = CompletableFuture.allOf(futureMap.values().stream() + .flatMap(map -> map.values().stream()).toArray(CompletableFuture[]::new)); + + // Transform the combined CompletableFuture into CompletableFuture. + return combinedFuture.thenApply(v -> { + List readStateSummaryResult = new ArrayList<>(futureMap.size()); + futureMap.forEach( + (topicId, topicEntry) -> { + List partitionResults = new ArrayList<>(topicEntry.size()); + topicEntry.forEach( + (partitionId, responseFuture) -> { + // ResponseFut would already be completed by now since we have used + // CompletableFuture::allOf to create a combined future from the future map. 
+ partitionResults.add( + responseFuture.getNow(null).results().get(0).partitions().get(0) + ); + } + ); + readStateSummaryResult.add(ReadShareGroupStateSummaryResponse.toResponseReadStateSummaryResult(topicId, partitionResults)); + } + ); + return new ReadShareGroupStateSummaryResponseData() + .setResults(readStateSummaryResult); + }); + } + private ReadShareGroupStateResponseData generateErrorReadStateResponse( ReadShareGroupStateRequestData request, Errors error, @@ -495,6 +707,23 @@ private ReadShareGroupStateResponseData generateErrorReadStateResponse( }).collect(Collectors.toList())); } + private ReadShareGroupStateSummaryResponseData generateErrorReadStateSummaryResponse( + ReadShareGroupStateSummaryRequestData request, + Errors error, + String errorMessage + ) { + return new ReadShareGroupStateSummaryResponseData().setResults(request.topics().stream() + .map(topicData -> { + ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult resultData = new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult(); + resultData.setTopicId(topicData.topicId()); + resultData.setPartitions(topicData.partitions().stream() + .map(partitionData -> ReadShareGroupStateSummaryResponse.toErrorResponsePartitionResult( + partitionData.partition(), error, errorMessage + )).collect(Collectors.toList())); + return resultData; + }).collect(Collectors.toList())); + } + private WriteShareGroupStateResponseData generateErrorWriteStateResponse( WriteShareGroupStateRequestData request, Errors error, @@ -529,8 +758,10 @@ public void onElection(int partitionIndex, int partitionLeaderEpoch) { @Override public void onResignation(int partitionIndex, OptionalInt partitionLeaderEpoch) { throwIfNotActive(); + TopicPartition tp = new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, partitionIndex); + lastPrunedOffsets.remove(tp); runtime.scheduleUnloadOperation( - new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, partitionIndex), + tp, partitionLeaderEpoch ); } diff --git a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorShard.java b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorShard.java index a08f85a8108f4..bf674c82e1c8a 100644 --- a/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorShard.java +++ b/share-coordinator/src/main/java/org/apache/kafka/coordinator/share/ShareCoordinatorShard.java @@ -19,17 +19,22 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; +import 
org.apache.kafka.coordinator.common.runtime.CoordinatorExecutor; import org.apache.kafka.coordinator.common.runtime.CoordinatorMetrics; import org.apache.kafka.coordinator.common.runtime.CoordinatorMetricsShard; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; @@ -37,6 +42,7 @@ import org.apache.kafka.coordinator.common.runtime.CoordinatorShard; import org.apache.kafka.coordinator.common.runtime.CoordinatorShardBuilder; import org.apache.kafka.coordinator.common.runtime.CoordinatorTimer; +import org.apache.kafka.coordinator.share.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey; import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue; import org.apache.kafka.coordinator.share.generated.ShareUpdateKey; @@ -46,7 +52,6 @@ import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.config.ShareCoordinatorConfig; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.persister.PartitionFactory; import org.apache.kafka.server.share.persister.PersisterStateBatch; @@ -72,6 +77,7 @@ public class ShareCoordinatorShard implements CoordinatorShard snapshotUpdateCount; private final TimelineHashMap stateEpochMap; private MetadataImage metadataImage; + private final ShareCoordinatorOffsetsManager offsetsManager; public static final Exception NULL_TOPIC_ID = new Exception("The topic id cannot be null."); public static final Exception NEGATIVE_PARTITION_ID = new Exception("The partition id cannot be a negative number."); @@ -111,6 +117,12 @@ public CoordinatorShardBuilder withTim return this; } + @Override + public CoordinatorShardBuilder withExecutor(CoordinatorExecutor executor) { + // method is required due to interface + return this; + } + @Override public CoordinatorShardBuilder withCoordinatorMetrics(CoordinatorMetrics coordinatorMetrics) { this.coordinatorMetrics = coordinatorMetrics; @@ -155,6 +167,17 @@ public ShareCoordinatorShard build() { CoordinatorMetrics coordinatorMetrics, CoordinatorMetricsShard metricsShard, SnapshotRegistry snapshotRegistry + ) { + this(logContext, config, coordinatorMetrics, metricsShard, snapshotRegistry, new ShareCoordinatorOffsetsManager(snapshotRegistry)); + } + + ShareCoordinatorShard( + LogContext logContext, + ShareCoordinatorConfig config, + CoordinatorMetrics coordinatorMetrics, + CoordinatorMetricsShard metricsShard, + SnapshotRegistry snapshotRegistry, + ShareCoordinatorOffsetsManager offsetsManager ) { this.log = logContext.logger(ShareCoordinatorShard.class); this.config = config; @@ -164,6 +187,7 @@ public ShareCoordinatorShard build() { this.leaderEpochMap = new TimelineHashMap<>(snapshotRegistry, 0); this.snapshotUpdateCount = new TimelineHashMap<>(snapshotRegistry, 0); this.stateEpochMap = new TimelineHashMap<>(snapshotRegistry, 0); + this.offsetsManager = offsetsManager; } @Override @@ -186,19 +210,23 @@ public void replay(long offset, long producerId, short producerEpoch, Coordinato ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); - switch (key.version()) { - case ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION: // ShareSnapshot - handleShareSnapshot((ShareSnapshotKey) key.message(), (ShareSnapshotValue) messageOrNull(value)); - break; - case ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION: // ShareUpdate - handleShareUpdate((ShareUpdateKey) key.message(), 
(ShareUpdateValue) messageOrNull(value)); - break; - default: - // noop + try { + switch (CoordinatorRecordType.fromId(key.version())) { + case SHARE_SNAPSHOT: + handleShareSnapshot((ShareSnapshotKey) key.message(), (ShareSnapshotValue) messageOrNull(value), offset); + break; + case SHARE_UPDATE: + handleShareUpdate((ShareUpdateKey) key.message(), (ShareUpdateValue) messageOrNull(value)); + break; + default: + // Noop + } + } catch (UnsupportedVersionException ex) { + // Ignore } } - private void handleShareSnapshot(ShareSnapshotKey key, ShareSnapshotValue value) { + private void handleShareSnapshot(ShareSnapshotKey key, ShareSnapshotValue value, long offset) { SharePartitionKey mapKey = SharePartitionKey.getInstance(key.groupId(), key.topicId(), key.partition()); maybeUpdateLeaderEpochMap(mapKey, value.leaderEpoch()); maybeUpdateStateEpochMap(mapKey, value.stateEpoch()); @@ -212,17 +240,19 @@ private void handleShareSnapshot(ShareSnapshotKey key, ShareSnapshotValue value) snapshotUpdateCount.put(mapKey, 0); } } + + offsetsManager.updateState(mapKey, offset); } private void handleShareUpdate(ShareUpdateKey key, ShareUpdateValue value) { SharePartitionKey mapKey = SharePartitionKey.getInstance(key.groupId(), key.topicId(), key.partition()); maybeUpdateLeaderEpochMap(mapKey, value.leaderEpoch()); - // share update does not hold state epoch information. + // Share update does not hold state epoch information. ShareGroupOffset offsetRecord = ShareGroupOffset.fromRecord(value); - // this is an incremental snapshot - // so, we need to apply it to our current soft state + // This is an incremental snapshot, + // so we need to apply it to our current soft state. shareStateMap.compute(mapKey, (k, v) -> v == null ? offsetRecord : merge(v, value)); snapshotUpdateCount.compute(mapKey, (k, v) -> v == null ? 0 : v + 1); } @@ -261,8 +291,8 @@ public void replayEndTransactionMarker(long producerId, short producerEpoch, Tra public CoordinatorResult writeState( WriteShareGroupStateRequestData request ) { - // records to write (with both key and value of snapshot type), response to caller - // only one key will be there in the request by design + // Records to write (with both key and value of snapshot type), response to caller + // only one key will be there in the request by design. metricsShard.record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME); Optional> error = maybeGetWriteStateError(request); if (error.isPresent()) { @@ -289,15 +319,171 @@ public CoordinatorResult wr return new CoordinatorResult<>(Collections.singletonList(record), responseData); } + /** + * Method reads data from the soft state and if needed updates the leader epoch. + * It can happen that a read state call for a share partition has a higher leaderEpoch + * value than seen so far. + * In case an update is not required, empty record list will be generated along with a success response. + * + * @param request - represents ReadShareGroupStateRequestData + * @return CoordinatorResult object + */ + public CoordinatorResult readStateAndMaybeUpdateLeaderEpoch( + ReadShareGroupStateRequestData request + ) { + // Only one key will be there in the request by design. 
+        Optional<ReadShareGroupStateResponseData> error = maybeGetReadStateError(request);
+        if (error.isPresent()) {
+            return new CoordinatorResult<>(Collections.emptyList(), error.get());
+        }
+
+        ReadShareGroupStateRequestData.ReadStateData topicData = request.topics().get(0);
+        ReadShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0);
+
+        Uuid topicId = topicData.topicId();
+        int partitionId = partitionData.partition();
+        int leaderEpoch = partitionData.leaderEpoch();
+        SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicId, partitionId);
+
+        ReadShareGroupStateResponseData responseData = null;
+
+        if (!shareStateMap.containsKey(key)) {
+            // Leader epoch update might be needed.
+            responseData = ReadShareGroupStateResponse.toResponseData(
+                topicId,
+                partitionId,
+                PartitionFactory.UNINITIALIZED_START_OFFSET,
+                PartitionFactory.DEFAULT_STATE_EPOCH,
+                Collections.emptyList()
+            );
+        } else {
+            // Leader epoch update might be needed.
+            ShareGroupOffset offsetValue = shareStateMap.get(key);
+            List<ReadShareGroupStateResponseData.StateBatch> stateBatches = (offsetValue.stateBatches() != null && !offsetValue.stateBatches().isEmpty()) ?
+                offsetValue.stateBatches().stream()
+                    .map(
+                        stateBatch -> new ReadShareGroupStateResponseData.StateBatch()
+                            .setFirstOffset(stateBatch.firstOffset())
+                            .setLastOffset(stateBatch.lastOffset())
+                            .setDeliveryState(stateBatch.deliveryState())
+                            .setDeliveryCount(stateBatch.deliveryCount())
+                    ).collect(Collectors.toList()) : Collections.emptyList();
+
+            responseData = ReadShareGroupStateResponse.toResponseData(
+                topicId,
+                partitionId,
+                offsetValue.startOffset(),
+                offsetValue.stateEpoch(),
+                stateBatches
+            );
+        }
+
+        // Optimization in case a leaderEpoch update is not required.
+        if (leaderEpoch == -1 ||
+            (leaderEpochMap.get(key) != null && leaderEpochMap.get(key) == leaderEpoch)) {
+            return new CoordinatorResult<>(Collections.emptyList(), responseData);
+        }
+
+        // It is OK to log at INFO level here since reaching this code path should be quite infrequent.
+        log.info("Read with leader epoch update call for key {} having new leader epoch {}.", key, leaderEpoch);
+
+        // Recording the sensor here because reaching this point means a record will be produced;
+        // the early return above produces none.
+        metricsShard.record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME);
+
+        // Generate record with leaderEpoch info.
+        WriteShareGroupStateRequestData.PartitionData writePartitionData = new WriteShareGroupStateRequestData.PartitionData()
+            .setPartition(partitionId)
+            .setLeaderEpoch(leaderEpoch)
+            .setStateBatches(Collections.emptyList())
+            .setStartOffset(responseData.results().get(0).partitions().get(0).startOffset())
+            .setStateEpoch(responseData.results().get(0).partitions().get(0).stateEpoch());
+
+        CoordinatorRecord record = generateShareStateRecord(writePartitionData, key);
+        return new CoordinatorResult<>(Collections.singletonList(record), responseData);
+    }
+
+    /**
+     * This method finds the ShareSnapshotValue record corresponding to the requested topic partition from the
+     * in-memory state of coordinator shard, the shareStateMap.
+     *

              + * This method as called by the ShareCoordinatorService will be provided with + * the request data which covers only key i.e. group1:topic1:partition1. The implementation + * below was done keeping this in mind. + * + * @param request - ReadShareGroupStateSummaryRequestData for a single key + * @return CoordinatorResult(records, response) + */ + + public CoordinatorResult readStateSummary( + ReadShareGroupStateSummaryRequestData request + ) { + // Only one key will be there in the request by design. + Optional error = maybeGetReadStateSummaryError(request); + if (error.isPresent()) { + return new CoordinatorResult<>(List.of(), error.get()); + } + + ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData = request.topics().get(0); + ReadShareGroupStateSummaryRequestData.PartitionData partitionData = topicData.partitions().get(0); + + Uuid topicId = topicData.topicId(); + int partitionId = partitionData.partition(); + SharePartitionKey key = SharePartitionKey.getInstance(request.groupId(), topicId, partitionId); + + ReadShareGroupStateSummaryResponseData responseData = null; + + if (!shareStateMap.containsKey(key)) { + responseData = ReadShareGroupStateSummaryResponse.toResponseData( + topicId, + partitionId, + PartitionFactory.UNINITIALIZED_START_OFFSET, + PartitionFactory.DEFAULT_STATE_EPOCH + ); + } else { + ShareGroupOffset offsetValue = shareStateMap.get(key); + if (offsetValue == null) { + log.error("Data not found for topic {}, partition {} for group {}, in the in-memory state of share coordinator", topicId, partitionId, request.groupId()); + responseData = ReadShareGroupStateSummaryResponse.toErrorResponseData( + topicId, + partitionId, + Errors.UNKNOWN_SERVER_ERROR, + "Data not found for the topics " + topicId + ", partition " + partitionId + " for group " + request.groupId() + ", in the in-memory state of share coordinator" + ); + } else { + responseData = ReadShareGroupStateSummaryResponse.toResponseData( + topicId, + partitionId, + offsetValue.startOffset(), + offsetValue.stateEpoch() + ); + } + } + + return new CoordinatorResult<>(Collections.emptyList(), responseData); + } + + /** + * Method which returns the last known redundant offset from the partition + * led by this shard. + * + * @return CoordinatorResult containing empty record list and an Optional representing the offset. + */ + public CoordinatorResult, CoordinatorRecord> lastRedundantOffset() { + return new CoordinatorResult<>( + Collections.emptyList(), + this.offsetsManager.lastRedundantOffset() + ); + } + /** * Util method to generate a ShareSnapshot or ShareUpdate type record for a key, based on various conditions. *

              - * if no snapshot has been created for the key => create a new ShareSnapshot record + * If no snapshot has been created for the key => create a new ShareSnapshot record * else if number of ShareUpdate records for key >= max allowed per snapshot per key => create a new ShareSnapshot record * else create a new ShareUpdate record * * @param partitionData - Represents the data which should be written into the share state record. - * @param key - The {@link SharePartitionKey} object. + * @param key - The {@link SharePartitionKey} object. * @return {@link CoordinatorRecord} representing ShareSnapshot or ShareUpdate */ private CoordinatorRecord generateShareStateRecord( @@ -328,14 +514,14 @@ private CoordinatorRecord generateShareStateRecord( return ShareCoordinatorRecordHelpers.newShareSnapshotRecord( key.groupId(), key.topicId(), partitionData.partition(), new ShareGroupOffset.Builder() - .setSnapshotEpoch(currentState.snapshotEpoch() + 1) // we must increment snapshot epoch as this is new snapshot + .setSnapshotEpoch(currentState.snapshotEpoch() + 1) // We must increment snapshot epoch as this is new snapshot. .setStartOffset(newStartOffset) .setLeaderEpoch(newLeaderEpoch) .setStateEpoch(newStateEpoch) .setStateBatches(mergeBatches(currentState.stateBatches(), partitionData, newStartOffset)) .build()); } else { - ShareGroupOffset currentState = shareStateMap.get(key); // shareStateMap will have the entry as containsKey is true + ShareGroupOffset currentState = shareStateMap.get(key); // shareStateMap will have the entry as containsKey is true. // Share snapshot is present and number of share snapshot update records < snapshotUpdateRecordsPerSnapshot // so create a share update record. @@ -343,7 +529,7 @@ private CoordinatorRecord generateShareStateRecord( return ShareCoordinatorRecordHelpers.newShareSnapshotUpdateRecord( key.groupId(), key.topicId(), partitionData.partition(), new ShareGroupOffset.Builder() - .setSnapshotEpoch(currentState.snapshotEpoch()) // use same snapshotEpoch as last share snapshot + .setSnapshotEpoch(currentState.snapshotEpoch()) // Use same snapshotEpoch as last share snapshot. .setStartOffset(partitionData.startOffset()) .setLeaderEpoch(partitionData.leaderEpoch()) .setStateBatches(mergeBatches(Collections.emptyList(), partitionData)) @@ -371,70 +557,6 @@ private List mergeBatches( .combineStateBatches(); } - /** - * This method finds the ShareSnapshotValue record corresponding to the requested topic partition from the - * in-memory state of coordinator shard, the shareStateMap. - *

              - * This method as called by the ShareCoordinatorService will be provided with - * the request data which covers only key i.e. group1:topic1:partition1. The implementation - * below was done keeping this in mind. - * - * @param request - WriteShareGroupStateRequestData for a single key - * @param offset - offset to read from the __share_group_state topic partition - * @return CoordinatorResult(records, response) - */ - public ReadShareGroupStateResponseData readState(ReadShareGroupStateRequestData request, Long offset) { - // records to read (with the key of snapshot type), response to caller - // only one key will be there in the request by design - Optional error = maybeGetReadStateError(request, offset); - if (error.isPresent()) { - return error.get(); - } - - Uuid topicId = request.topics().get(0).topicId(); - int partition = request.topics().get(0).partitions().get(0).partition(); - int leaderEpoch = request.topics().get(0).partitions().get(0).leaderEpoch(); - - SharePartitionKey coordinatorKey = SharePartitionKey.getInstance(request.groupId(), topicId, partition); - - if (!shareStateMap.containsKey(coordinatorKey)) { - return ReadShareGroupStateResponse.toResponseData( - topicId, - partition, - PartitionFactory.UNINITIALIZED_START_OFFSET, - PartitionFactory.DEFAULT_STATE_EPOCH, - Collections.emptyList() - ); - } - - ShareGroupOffset offsetValue = shareStateMap.get(coordinatorKey, offset); - - if (offsetValue == null) { - // Returning an error response as the snapshot value was not found - return ReadShareGroupStateResponse.toErrorResponseData( - topicId, - partition, - Errors.UNKNOWN_SERVER_ERROR, - "Data not found for topic {}, partition {} for group {}, in the in-memory state of share coordinator" - ); - } - - List stateBatches = (offsetValue.stateBatches() != null && !offsetValue.stateBatches().isEmpty()) ? 
- offsetValue.stateBatches().stream().map( - stateBatch -> new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(stateBatch.firstOffset()) - .setLastOffset(stateBatch.lastOffset()) - .setDeliveryState(stateBatch.deliveryState()) - .setDeliveryCount(stateBatch.deliveryCount()) - ).collect(java.util.stream.Collectors.toList()) : Collections.emptyList(); - - // Updating the leader map with the new leader epoch - leaderEpochMap.put(coordinatorKey, leaderEpoch); - - // Returning the successfully retrieved snapshot value - return ReadShareGroupStateResponse.toResponseData(topicId, partition, offsetValue.startOffset(), offsetValue.stateEpoch(), stateBatches); - } - private Optional> maybeGetWriteStateError( WriteShareGroupStateRequestData request ) { @@ -454,11 +576,11 @@ private Optional partitionData.leaderEpoch()) { + if (partitionData.leaderEpoch() != -1 && leaderEpochMap.containsKey(mapKey) && leaderEpochMap.get(mapKey) > partitionData.leaderEpoch()) { log.error("Request leader epoch smaller than last recorded."); return Optional.of(getWriteErrorResponse(Errors.FENCED_LEADER_EPOCH, null, topicId, partitionId)); } - if (stateEpochMap.containsKey(mapKey) && stateEpochMap.get(mapKey) > partitionData.stateEpoch()) { + if (partitionData.stateEpoch() != -1 && stateEpochMap.containsKey(mapKey) && stateEpochMap.get(mapKey) > partitionData.stateEpoch()) { log.error("Request state epoch smaller than last recorded."); return Optional.of(getWriteErrorResponse(Errors.FENCED_STATE_EPOCH, null, topicId, partitionId)); } @@ -475,7 +597,7 @@ private Optional maybeGetReadStateError(ReadShareGroupStateRequestData request, Long offset) { + private Optional maybeGetReadStateError(ReadShareGroupStateRequestData request) { String groupId = request.groupId(); ReadShareGroupStateRequestData.ReadStateData topicData = request.topics().get(0); ReadShareGroupStateRequestData.PartitionData partitionData = topicData.partitions().get(0); @@ -496,7 +618,7 @@ private Optional maybeGetReadStateError(ReadSha } SharePartitionKey mapKey = SharePartitionKey.getInstance(groupId, topicId, partitionId); - if (leaderEpochMap.containsKey(mapKey, offset) && leaderEpochMap.get(mapKey, offset) > partitionData.leaderEpoch()) { + if (leaderEpochMap.containsKey(mapKey) && leaderEpochMap.get(mapKey) > partitionData.leaderEpoch()) { log.error("Request leader epoch id is smaller than last recorded."); return Optional.of(ReadShareGroupStateResponse.toErrorResponseData(topicId, partitionId, Errors.FENCED_LEADER_EPOCH, Errors.FENCED_LEADER_EPOCH.message())); } @@ -515,6 +637,39 @@ private Optional maybeGetReadStateError(ReadSha return Optional.empty(); } + private Optional maybeGetReadStateSummaryError(ReadShareGroupStateSummaryRequestData request) { + ReadShareGroupStateSummaryRequestData.ReadStateSummaryData topicData = request.topics().get(0); + ReadShareGroupStateSummaryRequestData.PartitionData partitionData = topicData.partitions().get(0); + + Uuid topicId = topicData.topicId(); + int partitionId = partitionData.partition(); + + if (topicId == null) { + log.error("Request topic id is null."); + return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData( + null, partitionId, Errors.INVALID_REQUEST, NULL_TOPIC_ID.getMessage())); + } + + if (partitionId < 0) { + log.error("Request partition id is negative."); + return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData( + topicId, partitionId, Errors.INVALID_REQUEST, NEGATIVE_PARTITION_ID.getMessage())); + } + + if (metadataImage == null) { + 
log.error("Metadata image is null"); + return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message())); + } + + if (metadataImage.topics().getTopic(topicId) == null || + metadataImage.topics().getPartition(topicId, partitionId) == null) { + log.error("Topic/TopicPartition not found in metadata image."); + return Optional.of(ReadShareGroupStateSummaryResponse.toErrorResponseData(topicId, partitionId, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message())); + } + + return Optional.empty(); + } + private CoordinatorResult getWriteErrorResponse( Errors error, Exception exception, @@ -547,7 +702,7 @@ CoordinatorMetricsShard getMetricsShard() { } private static ShareGroupOffset merge(ShareGroupOffset soFar, ShareUpdateValue newData) { - // snapshot epoch should be same as last share snapshot + // Snapshot epoch should be same as last share snapshot. // state epoch is not present List currentBatches = soFar.stateBatches(); long newStartOffset = newData.startOffset() == -1 ? soFar.startOffset() : newData.startOffset(); diff --git a/share-coordinator/src/main/resources/common/message/ShareSnapshotKey.json b/share-coordinator/src/main/resources/common/message/ShareSnapshotKey.json index f8de8a237feed..128f39a0182f8 100644 --- a/share-coordinator/src/main/resources/common/message/ShareSnapshotKey.json +++ b/share-coordinator/src/main/resources/common/message/ShareSnapshotKey.json @@ -16,7 +16,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 0, + "type": "coordinator-key", "name": "ShareSnapshotKey", "validVersions": "0", "flexibleVersions": "none", diff --git a/share-coordinator/src/main/resources/common/message/ShareSnapshotValue.json b/share-coordinator/src/main/resources/common/message/ShareSnapshotValue.json index 9d49b2a735fd7..4b375381c3b5c 100644 --- a/share-coordinator/src/main/resources/common/message/ShareSnapshotValue.json +++ b/share-coordinator/src/main/resources/common/message/ShareSnapshotValue.json @@ -16,7 +16,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 0, + "type": "coordinator-value", "name": "ShareSnapshotValue", "validVersions": "0", "flexibleVersions": "0+", @@ -29,13 +30,14 @@ "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0", "about": "The share-partition start offset." }, - { "name": "StateBatches", "type": "[]StateBatch", "versions": "0", "fields": [ + { "name": "StateBatches", "type": "[]StateBatch", "versions": "0", + "about": "The state batches.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0", "about": "The first offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0", - "about": "The delivery state - 0:Available,2:Acked,4:Archived" }, + "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0", "about": "The delivery count." 
} ]} diff --git a/share-coordinator/src/main/resources/common/message/ShareUpdateKey.json b/share-coordinator/src/main/resources/common/message/ShareUpdateKey.json index 923ed5cf6f6af..ba919dc6b1c08 100644 --- a/share-coordinator/src/main/resources/common/message/ShareUpdateKey.json +++ b/share-coordinator/src/main/resources/common/message/ShareUpdateKey.json @@ -16,16 +16,17 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 1, + "type": "coordinator-key", "name": "ShareUpdateKey", - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "none", "fields": [ - { "name": "GroupId", "type": "string", "versions": "1", + { "name": "GroupId", "type": "string", "versions": "0", "about": "The group id." }, - { "name": "TopicId", "type": "uuid", "versions": "1", + { "name": "TopicId", "type": "uuid", "versions": "0", "about": "The topic id." }, - { "name": "Partition", "type": "int32", "versions": "1", + { "name": "Partition", "type": "int32", "versions": "0", "about": "The partition index." } ] } diff --git a/share-coordinator/src/main/resources/common/message/ShareUpdateValue.json b/share-coordinator/src/main/resources/common/message/ShareUpdateValue.json index 6dd52f3c55e2d..389cf688c8709 100644 --- a/share-coordinator/src/main/resources/common/message/ShareUpdateValue.json +++ b/share-coordinator/src/main/resources/common/message/ShareUpdateValue.json @@ -16,7 +16,8 @@ // KIP-932 is in development. This schema is subject to non-backwards-compatible changes. { - "type": "data", + "apiKey": 1, + "type": "coordinator-value", "name": "ShareUpdateValue", "validVersions": "0", "flexibleVersions": "0+", @@ -27,13 +28,14 @@ "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0", "about": "The share-partition start offset, or -1 if the start offset is not being updated." }, - { "name": "StateBatches", "type": "[]StateBatch", "versions": "0", "fields": [ + { "name": "StateBatches", "type": "[]StateBatch", "versions": "0", + "about": "The state batches that have been updated.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0", "about": "The first offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0", - "about": "The delivery state - 0:Available,2:Acked,4:Archived" }, + "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0", "about": "The delivery count." } ]} diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManagerTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManagerTest.java new file mode 100644 index 0000000000000..262f166be192d --- /dev/null +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorOffsetsManagerTest.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.coordinator.share; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.server.share.SharePartitionKey; +import org.apache.kafka.timeline.SnapshotRegistry; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.Optional; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ShareCoordinatorOffsetsManagerTest { + + private ShareCoordinatorOffsetsManager manager; + private static final SharePartitionKey KEY1 = SharePartitionKey.getInstance("gs1", Uuid.randomUuid(), 0); + private static final SharePartitionKey KEY2 = SharePartitionKey.getInstance("gs2", Uuid.randomUuid(), 0); + private static final SharePartitionKey KEY3 = SharePartitionKey.getInstance("gs1", Uuid.randomUuid(), 1); + private static final SharePartitionKey KEY4 = SharePartitionKey.getInstance("gs1", Uuid.randomUuid(), 7); + + @BeforeEach + public void setUp() { + manager = new ShareCoordinatorOffsetsManager(new SnapshotRegistry(new LogContext())); + } + + @Test + public void testUpdateStateAddsToInternalState() { + manager.updateState(KEY1, 0L); + assertEquals(Optional.empty(), manager.lastRedundantOffset()); + + manager.updateState(KEY1, 10L); + assertEquals(Optional.of(10L), manager.lastRedundantOffset()); // [0-9] offsets are redundant. + + manager.updateState(KEY2, 15L); + assertEquals(Optional.of(10L), manager.lastRedundantOffset()); // No update to last redundant after adding 15L so, still 10L. 
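Aside: a compact sketch of the behaviour these assertions pin down. The real ShareCoordinatorOffsetsManager is backed by timeline/snapshot-aware structures; this toy version only mirrors the externally visible rule exercised by the tests, namely that the last redundant offset is the minimum of the latest snapshot offsets seen per key. The guard that suppresses a result while that minimum is 0 is an assumption inferred from the first assertion above.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class RedundantOffsetSketch {
    // Latest ShareSnapshot record offset seen per share-partition key.
    private final Map<String, Long> latestOffsetPerKey = new HashMap<>();

    void updateState(String key, long offset) {
        latestOffsetPerKey.put(key, offset);
    }

    // Everything strictly below the minimum of the per-key latest offsets is redundant,
    // so the minimum itself is reported as the last redundant offset.
    Optional<Long> lastRedundantOffset() {
        return latestOffsetPerKey.values().stream()
            .min(Long::compare)
            .filter(min -> min > 0); // Assumption: no result reported while the minimum is 0.
    }

    public static void main(String[] args) {
        RedundantOffsetSketch sketch = new RedundantOffsetSketch();
        sketch.updateState("gs1:t1:0", 10L);
        sketch.updateState("gs2:t2:0", 15L);
        System.out.println(sketch.lastRedundantOffset()); // Optional[10]
    }
}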
+ + assertEquals(10L, manager.curState().get(KEY1)); + assertEquals(15L, manager.curState().get(KEY2)); + } + + private static class ShareOffsetTestHolder { + static class TestTuple { + final SharePartitionKey key; + final long offset; + final Optional expectedOffset; + + private TestTuple(SharePartitionKey key, long offset, Optional expectedOffset) { + this.key = key; + this.offset = offset; + this.expectedOffset = expectedOffset; + } + + static TestTuple instance(SharePartitionKey key, long offset, Optional expectedOffset) { + return new TestTuple(key, offset, expectedOffset); + } + } + + private final String testName; + private final List tuples; + private final boolean shouldRun; + + ShareOffsetTestHolder(String testName, List tuples) { + this(testName, tuples, true); + } + + ShareOffsetTestHolder(String testName, List tuples, boolean shouldRun) { + this.testName = testName; + this.tuples = tuples; + this.shouldRun = shouldRun; + } + } + + static Stream generateNoRedundantStateCases() { + return Stream.of( + new ShareOffsetTestHolder( + "no redundant state single key", + List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)) + ) + ), + + new ShareOffsetTestHolder( + "no redundant state multiple keys", + List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY4, 11L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 13L, Optional.of(10L)) + ) + ) + ); + } + + static Stream generateRedundantStateCases() { + return Stream.of( + new ShareOffsetTestHolder( + "redundant state single key", + List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY1, 11L, Optional.of(11L)), + ShareOffsetTestHolder.TestTuple.instance(KEY1, 15L, Optional.of(15L)) + ) + ), + + new ShareOffsetTestHolder( + "redundant state multiple keys", + // KEY1: 10 17 + // KEY2: 11 16 + // KEY3: 15 + List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 11L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 15L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 16L, Optional.of(10L)), // KEY2 11 redundant but should not be returned + ShareOffsetTestHolder.TestTuple.instance(KEY1, 17L, Optional.of(15L)) + ) + ) + ); + + } + + static Stream generateComplexCases() { + return Stream.of( + new ShareOffsetTestHolder( + "redundant state reverse key order", + // Requests come in order KEY1, KEY2, KEY3, KEY3, KEY2, KEY1. 
+ List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 11L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 15L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 18L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 20L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY1, 25L, Optional.of(18L)) + ) + ), + + new ShareOffsetTestHolder( + "redundant state infrequently written partition.", + List.of( + ShareOffsetTestHolder.TestTuple.instance(KEY1, 10L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 11L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 15L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 18L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 20L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 22L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 25L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY2, 27L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY3, 28L, Optional.of(10L)), + ShareOffsetTestHolder.TestTuple.instance(KEY1, 30L, Optional.of(27L)) + ) + ) + ); + } + + @ParameterizedTest + @MethodSource("generateNoRedundantStateCases") + public void testUpdateStateNoRedundantState(ShareOffsetTestHolder holder) { + if (holder.shouldRun) { + holder.tuples.forEach(tuple -> { + manager.updateState(tuple.key, tuple.offset); + assertEquals(tuple.expectedOffset, manager.lastRedundantOffset(), holder.testName); + }); + } + } + + @ParameterizedTest + @MethodSource("generateRedundantStateCases") + public void testUpdateStateRedundantState(ShareOffsetTestHolder holder) { + if (holder.shouldRun) { + holder.tuples.forEach(tuple -> { + manager.updateState(tuple.key, tuple.offset); + assertEquals(tuple.expectedOffset, manager.lastRedundantOffset(), holder.testName); + }); + } + } + + @ParameterizedTest + @MethodSource("generateComplexCases") + public void testUpdateStateComplexCases(ShareOffsetTestHolder holder) { + if (holder.shouldRun) { + holder.tuples.forEach(tuple -> { + manager.updateState(tuple.key, tuple.offset); + assertEquals(tuple.expectedOffset, manager.lastRedundantOffset(), holder.testName); + }); + } + } +} diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpersTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpersTest.java index 1f47cf9c166c4..6b33727ac19ab 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpersTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordHelpersTest.java @@ -57,7 +57,7 @@ public void testNewShareSnapshotRecord() { .setGroupId(groupId) .setTopicId(topicId) .setPartition(partitionId), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION), + (short) 0), new ApiMessageAndVersion( new ShareSnapshotValue() .setSnapshotEpoch(0) @@ -70,7 +70,7 @@ public void testNewShareSnapshotRecord() { .setLastOffset(10L) .setDeliveryState((byte) 0) .setDeliveryCount((short) 1))), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION)); + (short) 0)); assertEquals(expectedRecord, record); } @@ -100,7 +100,7 @@ public void testNewShareUpdateRecord() { .setGroupId(groupId) .setTopicId(topicId) .setPartition(partitionId), - 
ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION), + (short) 1), new ApiMessageAndVersion( new ShareUpdateValue() .setSnapshotEpoch(0) @@ -112,7 +112,7 @@ public void testNewShareUpdateRecord() { .setLastOffset(10L) .setDeliveryState((byte) 0) .setDeliveryCount((short) 1))), - ShareCoordinator.SHARE_UPDATE_RECORD_VALUE_VERSION)); + (short) 0)); assertEquals(expectedRecord, record); } diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerdeTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerdeTest.java index c62abdb13dc99..3f40924028bc3 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerdeTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorRecordSerdeTest.java @@ -21,10 +21,9 @@ import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; +import org.apache.kafka.coordinator.share.generated.CoordinatorRecordType; import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey; import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue; -import org.apache.kafka.coordinator.share.generated.ShareUpdateKey; -import org.apache.kafka.coordinator.share.generated.ShareUpdateValue; import org.apache.kafka.server.common.ApiMessageAndVersion; import org.junit.jupiter.api.BeforeEach; @@ -75,7 +74,7 @@ public void testSerializeNullValue() { .setGroupId("group") .setTopicId(Uuid.randomUuid()) .setPartition(1), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION + CoordinatorRecordType.SHARE_SNAPSHOT.id() ), null ); @@ -104,7 +103,7 @@ public void testDeserializeWithTombstoneForValue() { .setGroupId("groupId") .setTopicId(Uuid.randomUuid()) .setPartition(1), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION + CoordinatorRecordType.SHARE_SNAPSHOT.id() ); ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message()); @@ -145,7 +144,7 @@ public void testDeserializeWithValueEmptyBuffer() { .setGroupId("foo") .setTopicId(Uuid.randomUuid()) .setPartition(1), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION + CoordinatorRecordType.SHARE_SNAPSHOT.id() ); ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message()); @@ -181,7 +180,7 @@ public void testDeserializeWithInvalidValueBytes() { .setGroupId("foo") .setTopicId(Uuid.randomUuid()) .setPartition(1), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION + CoordinatorRecordType.SHARE_SNAPSHOT.id() ); ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message()); @@ -198,8 +197,9 @@ public void testDeserializeWithInvalidValueBytes() { @Test public void testDeserializeAllRecordTypes() { - roundTrip((short) 0, new ShareSnapshotKey(), new ShareSnapshotValue()); - roundTrip((short) 1, new ShareUpdateKey(), new ShareUpdateValue()); + for (CoordinatorRecordType record : CoordinatorRecordType.values()) { + roundTrip(record.id(), record.newRecordKey(), record.newRecordValue()); + } } private void roundTrip( @@ -228,7 +228,7 @@ private static CoordinatorRecord getShareSnapshotRecord(String groupId, Uuid top .setGroupId(groupId) .setTopicId(topicId) .setPartition(partitionId), - ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION + CoordinatorRecordType.SHARE_SNAPSHOT.id() ), new ApiMessageAndVersion( new ShareSnapshotValue() 
diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java index c641447dd6b7f..e7ace5fd2b923 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java @@ -25,6 +25,8 @@ import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.metrics.Metrics; @@ -35,17 +37,22 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord; import org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime; +import org.apache.kafka.coordinator.common.runtime.PartitionWriter; import org.apache.kafka.coordinator.share.metrics.ShareCoordinatorMetrics; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.util.FutureUtils; +import org.apache.kafka.server.util.MockTime; +import org.apache.kafka.server.util.timer.MockTimer; +import org.apache.kafka.server.util.timer.Timer; import org.junit.jupiter.api.Test; -import org.mockito.ArgumentMatchers; import java.time.Duration; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -57,7 +64,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -66,7 +76,10 @@ class ShareCoordinatorServiceTest { @SuppressWarnings("unchecked") private CoordinatorRuntime mockRuntime() { - return (CoordinatorRuntime) mock(CoordinatorRuntime.class); + CoordinatorRuntime runtime = mock(CoordinatorRuntime.class); + when(runtime.activeTopicPartitions()) + .thenReturn(Collections.singletonList(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0))); + return runtime; } @Test @@ -74,10 +87,12 @@ public void testStartupShutdown() throws Exception { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + new MockTimer(), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -95,10 +110,12 @@ public void testWriteStateSuccess() throws ExecutionException, InterruptedExcept when(time.hiResClockMs()).thenReturn(0L).thenReturn(100L).thenReturn(150L); 
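Aside: in the write-latency test below, consecutive thenReturn values stubbed on time.hiResClockMs() stand in for a ticking clock so that latency metrics can be computed deterministically. The following is a stripped-down illustration of that stubbing pattern only; the arithmetic and printed values belong to this sketch, not to the real test's assertions.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.kafka.common.utils.Time;

public class MockClockSketch {
    public static void main(String[] args) {
        // Each call to hiResClockMs() returns the next stubbed value, simulating elapsed time.
        Time time = mock(Time.class);
        when(time.hiResClockMs()).thenReturn(0L).thenReturn(100L).thenReturn(150L);

        long start = time.hiResClockMs();
        long afterFirstWrite = time.hiResClockMs();
        long afterSecondWrite = time.hiResClockMs();

        System.out.println(afterFirstWrite - start);             // 100
        System.out.println(afterSecondWrite - afterFirstWrite);  // 50
    }
}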
ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, coordinatorMetrics, - time + time, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -163,10 +180,10 @@ public void testWriteStateSuccess() throws ExecutionException, InterruptedExcept )); when(runtime.scheduleWriteOperation( - ArgumentMatchers.eq("write-share-group-state"), - ArgumentMatchers.eq(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0)), - ArgumentMatchers.eq(Duration.ofMillis(5000)), - ArgumentMatchers.any() + eq("write-share-group-state"), + eq(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0)), + eq(Duration.ofMillis(5000)), + any() )) .thenReturn(CompletableFuture.completedFuture(response1)) .thenReturn(CompletableFuture.completedFuture(response2)); @@ -203,10 +220,12 @@ public void testReadStateSuccess() throws ExecutionException, InterruptedExcepti CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -274,10 +293,11 @@ public void testReadStateSuccess() throws ExecutionException, InterruptedExcepti ))) ); - when(runtime.scheduleReadOperation( - ArgumentMatchers.eq("read-share-group-state"), - ArgumentMatchers.eq(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0)), - ArgumentMatchers.any() + when(runtime.scheduleWriteOperation( + eq("read-update-leader-epoch-state"), + eq(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0)), + any(), + any() )) .thenReturn(CompletableFuture.completedFuture(new ReadShareGroupStateResponseData() .setResults(Collections.singletonList(topicData1)))) @@ -297,15 +317,101 @@ public void testReadStateSuccess() throws ExecutionException, InterruptedExcepti assertEquals(expectedResult, result); } + @Test + public void testReadStateSummarySuccess() throws ExecutionException, InterruptedException, TimeoutException { + CoordinatorRuntime runtime = mockRuntime(); + ShareCoordinatorService service = new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.createConfig(ShareCoordinatorTestConfig.testConfigMap()), + runtime, + new ShareCoordinatorMetrics(), + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) + ); + + service.startup(() -> 1); + + String groupId = "group1"; + Uuid topicId1 = Uuid.randomUuid(); + int partition1 = 0; + + Uuid topicId2 = Uuid.randomUuid(); + int partition2 = 1; + + ReadShareGroupStateSummaryRequestData request = new ReadShareGroupStateSummaryRequestData() + .setGroupId(groupId) + .setTopics(Arrays.asList( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition1) + .setLeaderEpoch(1) + )), + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition2) + .setLeaderEpoch(1) + )) + ) + ); + + ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult 
topicData1 = new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition1) + .setErrorCode(Errors.NONE.code()) + .setStateEpoch(1) + .setStartOffset(0) + )); + + ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult topicData2 = new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition2) + .setErrorCode(Errors.NONE.code()) + .setStateEpoch(1) + .setStartOffset(0) + )); + + when(runtime.scheduleWriteOperation( + eq("read-share-group-state-summary"), + eq(new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0)), + any(), + any() + )) + .thenReturn(CompletableFuture.completedFuture(new ReadShareGroupStateSummaryResponseData() + .setResults(Collections.singletonList(topicData1)))) + .thenReturn(CompletableFuture.completedFuture(new ReadShareGroupStateSummaryResponseData() + .setResults(Collections.singletonList(topicData2)))); + + CompletableFuture future = service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + request + ); + + HashSet result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); + + HashSet expectedResult = new HashSet<>(Arrays.asList( + topicData1, + topicData2)); + assertEquals(expectedResult, result); + } + @Test public void testWriteStateValidationsError() throws ExecutionException, InterruptedException, TimeoutException { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -347,10 +453,12 @@ public void testReadStateValidationsError() throws ExecutionException, Interrupt CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -387,15 +495,64 @@ public void testReadStateValidationsError() throws ExecutionException, Interrupt ); } + @Test + public void testReadStateSummaryValidationsError() throws ExecutionException, InterruptedException, TimeoutException { + CoordinatorRuntime runtime = mockRuntime(); + ShareCoordinatorService service = new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.createConfig(ShareCoordinatorTestConfig.testConfigMap()), + runtime, + new ShareCoordinatorMetrics(), + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) + ); + + service.startup(() -> 1); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 0; + + // 1. Empty topicsData + assertEquals(new ReadShareGroupStateSummaryResponseData(), + service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + new ReadShareGroupStateSummaryRequestData().setGroupId(groupId) + ).get(5, TimeUnit.SECONDS) + ); + + // 2. 
Empty partitionsData + assertEquals(new ReadShareGroupStateSummaryResponseData(), + service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + new ReadShareGroupStateSummaryRequestData().setGroupId(groupId).setTopics(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData().setTopicId(topicId))) + ).get(5, TimeUnit.SECONDS) + ); + + // 3. Invalid groupId + assertEquals(new ReadShareGroupStateSummaryResponseData(), + service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + new ReadShareGroupStateSummaryRequestData().setGroupId(null).setTopics(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData().setTopicId(topicId).setPartitions(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.PartitionData().setPartition(partition))))) + ).get(5, TimeUnit.SECONDS) + ); + } + @Test public void testWriteStateWhenNotStarted() throws ExecutionException, InterruptedException, TimeoutException { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); String groupId = "group1"; @@ -469,10 +626,12 @@ public void testReadStateWhenNotStarted() throws ExecutionException, Interrupted CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); String groupId = "group1"; @@ -525,15 +684,80 @@ public void testReadStateWhenNotStarted() throws ExecutionException, Interrupted assertEquals(expectedResult, result); } + @Test + public void testReadStateSummaryWhenNotStarted() throws ExecutionException, InterruptedException, TimeoutException { + CoordinatorRuntime runtime = mockRuntime(); + ShareCoordinatorService service = new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.createConfig(ShareCoordinatorTestConfig.testConfigMap()), + runtime, + new ShareCoordinatorMetrics(), + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) + ); + + String groupId = "group1"; + Uuid topicId1 = Uuid.randomUuid(); + int partition1 = 0; + + Uuid topicId2 = Uuid.randomUuid(); + int partition2 = 1; + + ReadShareGroupStateSummaryRequestData request = new ReadShareGroupStateSummaryRequestData() + .setGroupId(groupId) + .setTopics(Arrays.asList( + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition1) + .setLeaderEpoch(1) + )), + new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition2) + .setLeaderEpoch(1) + )) + ) + ); + + CompletableFuture future = service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + request + ); + + HashSet result = new HashSet<>(future.get(5, 
TimeUnit.SECONDS).results()); + + HashSet expectedResult = new HashSet<>(Arrays.asList( + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId2) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition2) + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setErrorMessage("Share coordinator is not available."))), + new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId1) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition1) + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setErrorMessage("Share coordinator is not available."))))); + assertEquals(expectedResult, result); + } + @Test public void testWriteFutureReturnsError() throws ExecutionException, InterruptedException, TimeoutException { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -578,10 +802,12 @@ public void testReadFutureReturnsError() throws ExecutionException, InterruptedE CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); service.startup(() -> 1); @@ -590,7 +816,7 @@ public void testReadFutureReturnsError() throws ExecutionException, InterruptedE Uuid topicId = Uuid.randomUuid(); int partition = 0; - when(runtime.scheduleReadOperation(any(), any(), any())) + when(runtime.scheduleWriteOperation(any(), any(), any(), any())) .thenReturn(FutureUtils.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception())); assertEquals(new ReadShareGroupStateResponseData() @@ -614,18 +840,63 @@ public void testReadFutureReturnsError() throws ExecutionException, InterruptedE ); } + @Test + public void testReadSummaryFutureReturnsError() throws ExecutionException, InterruptedException, TimeoutException { + CoordinatorRuntime runtime = mockRuntime(); + ShareCoordinatorService service = new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.createConfig(ShareCoordinatorTestConfig.testConfigMap()), + runtime, + new ShareCoordinatorMetrics(), + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) + ); + + service.startup(() -> 1); + + String groupId = "group1"; + Uuid topicId = Uuid.randomUuid(); + int partition = 0; + + when(runtime.scheduleWriteOperation(any(), any(), any(), any())) + .thenReturn(FutureUtils.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception())); + + assertEquals(new ReadShareGroupStateSummaryResponseData() + .setResults(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicId) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partition) + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) + .setErrorMessage("Unable to read share group state summary: The server 
experienced an unexpected error when processing the request."))))), + service.readStateSummary( + requestContext(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY), + new ReadShareGroupStateSummaryRequestData().setGroupId(groupId) + .setTopics(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(topicId) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition) + .setLeaderEpoch(1) + )) + )) + ).get(5, TimeUnit.SECONDS) + ); + } + @Test public void testTopicPartitionFor() { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), + ShareCoordinatorTestConfig.testConfig(), runtime, new ShareCoordinatorMetrics(), - Time.SYSTEM + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); - service.startup(() -> 50); + service.startup(() -> 1); String groupId = "group1"; Uuid topicId = Uuid.randomUuid(); @@ -635,7 +906,7 @@ public void testTopicPartitionFor() { assertEquals(Topic.SHARE_GROUP_STATE_TOPIC_NAME, tp.topic()); int expectedPartition = tp.partition(); - // The presence of a topic name should not affect the choice of partition + // The presence of a topic name should not affect the choice of partition. tp = service.topicPartitionFor(new SharePartitionKey(groupId, new TopicIdPartition(topicId, partition, "whatever"))); assertEquals(Topic.SHARE_GROUP_STATE_TOPIC_NAME, tp.topic()); assertEquals(expectedPartition, tp.partition()); @@ -645,33 +916,451 @@ public void testTopicPartitionFor() { public void testPartitionFor() { CoordinatorRuntime runtime = mockRuntime(); ShareCoordinatorService service = new ShareCoordinatorService( - new LogContext(), - ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap()), - runtime, - new ShareCoordinatorMetrics(), - Time.SYSTEM + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + Time.SYSTEM, + mock(Timer.class), + mock(PartitionWriter.class) ); String groupId = "group1"; Uuid topicId = Uuid.randomUuid(); int partition = 0; - // inactive shard should throw exception + // Inactive shard should throw exception. assertThrows(CoordinatorNotAvailableException.class, () -> service.partitionFor(SharePartitionKey.getInstance(groupId, topicId, partition))); - final int numPartitions = 50; + final int numPartitions = 1; service.startup(() -> numPartitions); final SharePartitionKey key1 = SharePartitionKey.getInstance(groupId, new TopicIdPartition(topicId, partition, null)); - int sharePartitionKey = service.partitionFor(key1); - assertEquals(Utils.abs(key1.asCoordinatorKey().hashCode()) % numPartitions, sharePartitionKey); + assertEquals(Utils.abs(key1.asCoordinatorKey().hashCode()) % numPartitions, service.partitionFor(key1)); - // The presence of a topic name should not affect the choice of partition + // The presence of a topic name should not affect the choice of partition. 
final SharePartitionKey key2 = new SharePartitionKey(groupId, new TopicIdPartition(topicId, partition, "whatever")); - sharePartitionKey = service.partitionFor(key2); - assertEquals(Utils.abs(key2.asCoordinatorKey().hashCode()) % numPartitions, sharePartitionKey); + assertEquals(Utils.abs(key2.asCoordinatorKey().hashCode()) % numPartitions, service.partitionFor(key2)); - // asCoordinatorKey does not discriminate on topic name + // asCoordinatorKey does not discriminate on topic name. assertEquals(key1.asCoordinatorKey(), key2.asCoordinatorKey()); } + + @Test + public void testRecordPruningTaskPeriodicityWithAllSuccess() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + + when(writer.deleteRecords( + any(), + eq(10L) + )).thenReturn( + CompletableFuture.completedFuture(null) + ); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ).thenReturn( + CompletableFuture.completedFuture(Optional.of(11L)) + ); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(2)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(2)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskPeriodicityWithSomeFailures() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + TopicPartition tp1 = new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 0); + TopicPartition tp2 = new TopicPartition(Topic.SHARE_GROUP_STATE_TOPIC_NAME, 1); + + when(runtime.activeTopicPartitions()) + .thenReturn(List.of(tp1, tp2)); + + when(writer.deleteRecords( + any(), + eq(10L) + )).thenReturn( + CompletableFuture.completedFuture(null) + ); + + when(writer.deleteRecords( + any(), + eq(20L) + )).thenReturn( + CompletableFuture.failedFuture(new Exception("bad stuff")) + ); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + eq(tp1), + any(), + any() + )).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ).thenReturn( + CompletableFuture.completedFuture(Optional.of(11L)) + ); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + eq(tp2), + any(), + any() + )).thenReturn( + CompletableFuture.completedFuture(Optional.of(20L)) + ).thenReturn( + CompletableFuture.completedFuture(Optional.of(21L)) + ); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 2); + verify(runtime, times(0)) + .scheduleWriteOperation( + 
eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // Prune should be called. + verify(runtime, times(2)) // For 2 topic partitions. + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // Prune should be called as future completes exceptionally. + verify(runtime, times(4)) // Second prune with 2 topic partitions. + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(4)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskException() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn(CompletableFuture.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception())); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(0)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskSuccess() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn(CompletableFuture.completedFuture(Optional.of(20L))); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(1)) + .deleteRecords(any(), eq(20L)); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskEmptyOffsetReturned() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn(CompletableFuture.completedFuture(Optional.empty())); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should 
be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(0)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskRepeatedSameOffsetForTopic() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + + when(writer.deleteRecords( + any(), + eq(10L) + )).thenReturn( + CompletableFuture.completedFuture(null) + ); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(2)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(1)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } + + @Test + public void testRecordPruningTaskRetriesRepeatedSameOffsetForTopic() throws Exception { + CoordinatorRuntime runtime = mockRuntime(); + MockTime time = new MockTime(); + MockTimer timer = new MockTimer(time); + PartitionWriter writer = mock(PartitionWriter.class); + CompletableFuture fut1 = new CompletableFuture<>(); + fut1.completeExceptionally(new Exception("bad stuff")); + + when(writer.deleteRecords( + any(), + eq(10L) + )).thenReturn( + fut1 + ).thenReturn( + CompletableFuture.completedFuture(null) + ); + + when(runtime.scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any() + )).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ).thenReturn( + CompletableFuture.completedFuture(Optional.of(10L)) + ); + + ShareCoordinatorService service = spy(new ShareCoordinatorService( + new LogContext(), + ShareCoordinatorTestConfig.testConfig(), + runtime, + new ShareCoordinatorMetrics(), + time, + timer, + writer + )); + + service.startup(() -> 1); + verify(runtime, times(0)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(1)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + timer.advanceClock(30005L); // prune should be called + verify(runtime, times(2)) + .scheduleWriteOperation( + eq("write-state-record-prune"), + any(), + any(), + any()); + + verify(writer, times(2)) + .deleteRecords(any(), anyLong()); + service.shutdown(); + } } diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorShardTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorShardTest.java index a197d76368a33..4dca0a25bc37f 100644 --- 
a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorShardTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorShardTest.java @@ -20,11 +20,14 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; +import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ReadShareGroupStateResponse; +import org.apache.kafka.common.requests.ReadShareGroupStateSummaryResponse; import org.apache.kafka.common.requests.WriteShareGroupStateResponse; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.coordinator.common.runtime.CoordinatorMetrics; @@ -39,8 +42,8 @@ import org.apache.kafka.image.TopicImage; import org.apache.kafka.metadata.PartitionRegistration; import org.apache.kafka.server.common.ApiMessageAndVersion; -import org.apache.kafka.server.config.ShareCoordinatorConfig; import org.apache.kafka.server.share.SharePartitionKey; +import org.apache.kafka.server.share.persister.PartitionFactory; import org.apache.kafka.server.share.persister.PersisterStateBatch; import org.apache.kafka.timeline.SnapshotRegistry; @@ -51,9 +54,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.RETURNS_DEEP_STUBS; @@ -71,16 +77,17 @@ class ShareCoordinatorShardTest { public static class ShareCoordinatorShardBuilder { private final LogContext logContext = new LogContext(); private ShareCoordinatorConfig config = null; - private CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); - private CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); + private final CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); + private final CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); private final SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext); private MetadataImage metadataImage = null; private Map configOverrides = new HashMap<>(); + ShareCoordinatorOffsetsManager offsetsManager = mock(ShareCoordinatorOffsetsManager.class); ShareCoordinatorShard build() { if (metadataImage == null) metadataImage = mock(MetadataImage.class, RETURNS_DEEP_STUBS); if (config == null) { - config = ShareCoordinatorConfigTest.createConfig(ShareCoordinatorConfigTest.testConfigMap(configOverrides)); + config = ShareCoordinatorTestConfig.createConfig(ShareCoordinatorTestConfig.testConfigMap(configOverrides)); } ShareCoordinatorShard shard = new ShareCoordinatorShard( @@ -88,7 +95,8 @@ ShareCoordinatorShard build() { config, coordinatorMetrics, metricsShard, - snapshotRegistry + snapshotRegistry, + offsetsManager ); 
when(metadataImage.topics().getTopic((Uuid) any())).thenReturn(mock(TopicImage.class)); when(metadataImage.topics().getPartition(any(), anyInt())).thenReturn(mock(PartitionRegistration.class)); @@ -100,6 +108,11 @@ public ShareCoordinatorShardBuilder setConfigOverrides(Map confi this.configOverrides = configOverrides; return this; } + + public ShareCoordinatorShardBuilder setOffsetsManager(ShareCoordinatorOffsetsManager offsetsManager) { + this.offsetsManager = offsetsManager; + return this; + } } private void writeAndReplayDefaultRecord(ShareCoordinatorShard shard) { @@ -186,7 +199,7 @@ public void testReplayWithShareSnapshot() { ) ); - // First replay should populate values in otherwise empty shareStateMap and leaderMap + // First replay should populate values in otherwise empty shareStateMap and leaderMap. shard.replay(offset, producerId, producerEpoch, record1); assertEquals(groupOffset(record1.value().message()), @@ -194,7 +207,7 @@ public void testReplayWithShareSnapshot() { assertEquals(leaderEpoch, shard.getLeaderMapValue(shareCoordinatorKey)); - // Second replay should update the existing values in shareStateMap and leaderMap + // Second replay should update the existing values in shareStateMap and leaderMap. shard.replay(offset + 1, producerId, producerEpoch, record2); assertEquals(groupOffset(record2.value().message()), shard.getShareStateMapValue(shareCoordinatorKey)); @@ -298,8 +311,8 @@ public void testSubsequentWriteStateSnapshotEpochUpdatesSuccessfully() { shard.replay(0L, 0L, (short) 0, result.records().get(0)); expectedData = WriteShareGroupStateResponse.toResponseData(TOPIC_ID, PARTITION); - // the snapshot epoch here will be 1 since this is a snapshot update record, - // and it refers to parent share snapshot + // The snapshot epoch here will be 1 since this is a snapshot update record, + // and it refers to parent share snapshot. expectedRecords = Collections.singletonList(ShareCoordinatorRecordHelpers.newShareSnapshotUpdateRecord( GROUP_ID, TOPIC_ID, PARTITION, ShareGroupOffset.fromRequest(request2.topics().get(0).partitions().get(0), 0) )); @@ -312,7 +325,7 @@ public void testSubsequentWriteStateSnapshotEpochUpdatesSuccessfully() { assertEquals(incrementalUpdate.snapshotEpoch(), combinedState.snapshotEpoch()); assertEquals(incrementalUpdate.leaderEpoch(), combinedState.leaderEpoch()); assertEquals(incrementalUpdate.startOffset(), combinedState.startOffset()); - // the batches should have combined to 1 since same state + // The batches should have combined to 1 since same state. assertEquals(Collections.singletonList(new PersisterStateBatch(0, 20, (byte) 0, (short) 1)), combinedState.stateBatches()); assertEquals(0, shard.getLeaderMapValue(shareCoordinatorKey)); @@ -418,7 +431,7 @@ public void testWriteStateFencedLeaderEpochError() { .setPartition(PARTITION) .setStartOffset(0) .setStateEpoch(0) - .setLeaderEpoch(3) // lower leader epoch in the second request + .setLeaderEpoch(3) // Lower leader epoch in the second request. .setStateBatches(Collections.singletonList(new WriteShareGroupStateRequestData.StateBatch() .setFirstOffset(11) .setLastOffset(20) @@ -443,7 +456,7 @@ public void testWriteStateFencedLeaderEpochError() { result = shard.writeState(request2); - // Since the leader epoch in the second request was lower than the one in the first request, FENCED_LEADER_EPOCH error is expected + // Since the leader epoch in the second request was lower than the one in the first request, FENCED_LEADER_EPOCH error is expected. 
expectedData = WriteShareGroupStateResponse.toErrorResponseData( TOPIC_ID, PARTITION, Errors.FENCED_LEADER_EPOCH, Errors.FENCED_LEADER_EPOCH.message()); expectedRecords = Collections.emptyList(); @@ -451,7 +464,7 @@ public void testWriteStateFencedLeaderEpochError() { assertEquals(expectedData, result.response()); assertEquals(expectedRecords, result.records()); - // No changes to the leaderMap + // No changes to the leaderMap. assertEquals(5, shard.getLeaderMapValue(shareCoordinatorKey)); } @@ -483,7 +496,7 @@ public void testWriteStateFencedStateEpochError() { .setPartitions(Collections.singletonList(new WriteShareGroupStateRequestData.PartitionData() .setPartition(PARTITION) .setStartOffset(0) - .setStateEpoch(0) // lower state epoch in the second request + .setStateEpoch(0) // Lower state epoch in the second request. .setLeaderEpoch(5) .setStateBatches(Collections.singletonList(new WriteShareGroupStateRequestData.StateBatch() .setFirstOffset(11) @@ -509,7 +522,7 @@ public void testWriteStateFencedStateEpochError() { result = shard.writeState(request2); - // Since the leader epoch in the second request was lower than the one in the first request, FENCED_LEADER_EPOCH error is expected + // Since the state epoch in the second request was lower than the one in the first request, FENCED_STATE_EPOCH error is expected. expectedData = WriteShareGroupStateResponse.toErrorResponseData( TOPIC_ID, PARTITION, Errors.FENCED_STATE_EPOCH, Errors.FENCED_STATE_EPOCH.message()); expectedRecords = Collections.emptyList(); @@ -517,7 +530,7 @@ public void testWriteStateFencedStateEpochError() { assertEquals(expectedData, result.response()); assertEquals(expectedRecords, result.records()); - // No changes to the stateEpochMap + // No changes to the stateEpochMap. assertEquals(1, shard.getStateEpochMapValue(shareCoordinatorKey)); } @@ -537,7 +550,7 @@ public void testReadStateSuccess() { .setPartition(PARTITION) .setLeaderEpoch(1))))); - ReadShareGroupStateResponseData result = shard.readState(request, 0L); + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request); assertEquals(ReadShareGroupStateResponse.toResponseData( TOPIC_ID, @@ -550,9 +563,37 @@ public void testReadStateSuccess() { .setDeliveryCount((short) 1) .setDeliveryState((byte) 0) ) - ), result); + ), result.response()); - assertEquals(1, shard.getLeaderMapValue(coordinatorKey)); + assertEquals(0, shard.getLeaderMapValue(coordinatorKey)); + } + + @Test + public void testReadStateSummarySuccess() { + ShareCoordinatorShard shard = new ShareCoordinatorShardBuilder().build(); + + SharePartitionKey coordinatorKey = SharePartitionKey.getInstance(GROUP_ID, TOPIC_ID, PARTITION); + + writeAndReplayDefaultRecord(shard); + + ReadShareGroupStateSummaryRequestData request = new ReadShareGroupStateSummaryRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(PARTITION) + .setLeaderEpoch(1))))); + + CoordinatorResult result = shard.readStateSummary(request); + + assertEquals(ReadShareGroupStateSummaryResponse.toResponseData( + TOPIC_ID, + PARTITION, + 0, + 0 + ), result.response()); + + assertEquals(0, shard.getLeaderMapValue(coordinatorKey)); } @Test @@ -573,14 +614,43 @@ public void testReadStateInvalidRequestData() { .setPartition(partition) .setLeaderEpoch(5))))); - ReadShareGroupStateResponseData result =
shard.readState(request, 0L); + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request); ReadShareGroupStateResponseData expectedData = ReadShareGroupStateResponse.toErrorResponseData( TOPIC_ID, partition, Errors.INVALID_REQUEST, ShareCoordinatorShard.NEGATIVE_PARTITION_ID.getMessage()); - assertEquals(expectedData, result); + assertEquals(expectedData, result.response()); + + // Leader epoch should not be changed because the request failed. + assertEquals(0, shard.getLeaderMapValue(shareCoordinatorKey)); + } + + @Test + public void testReadStateSummaryInvalidRequestData() { + ShareCoordinatorShard shard = new ShareCoordinatorShardBuilder().build(); + + int partition = -1; + + writeAndReplayDefaultRecord(shard); + + SharePartitionKey shareCoordinatorKey = SharePartitionKey.getInstance(GROUP_ID, TOPIC_ID, PARTITION); + + ReadShareGroupStateSummaryRequestData request = new ReadShareGroupStateSummaryRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(partition) + .setLeaderEpoch(5))))); + + CoordinatorResult result = shard.readStateSummary(request); + + ReadShareGroupStateSummaryResponseData expectedData = ReadShareGroupStateSummaryResponse.toErrorResponseData( + TOPIC_ID, partition, Errors.INVALID_REQUEST, ShareCoordinatorShard.NEGATIVE_PARTITION_ID.getMessage()); + + assertEquals(expectedData, result.response()); - // Leader epoch should not be changed because the request failed + // Leader epoch should not be changed because the request failed. assertEquals(0, shard.getLeaderMapValue(shareCoordinatorKey)); } @@ -602,14 +672,14 @@ public void testReadNullMetadataImage() { .setPartition(0) .setLeaderEpoch(5))))); - ReadShareGroupStateResponseData result = shard.readState(request, 0L); + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request); ReadShareGroupStateResponseData expectedData = ReadShareGroupStateResponse.toErrorResponseData( TOPIC_ID, 0, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION.message()); - assertEquals(expectedData, result); + assertEquals(expectedData, result.response()); - // Leader epoch should not be changed because the request failed + // Leader epoch should not be changed because the request failed. assertEquals(0, shard.getLeaderMapValue(shareCoordinatorKey)); } @@ -619,7 +689,7 @@ public void testReadStateFencedLeaderEpochError() { int leaderEpoch = 5; - writeAndReplayRecord(shard, leaderEpoch); // leaderEpoch in the leaderMap will be 5 + writeAndReplayRecord(shard, leaderEpoch); // leaderEpoch in the leaderMap will be 5. SharePartitionKey shareCoordinatorKey = SharePartitionKey.getInstance(GROUP_ID, TOPIC_ID, PARTITION); @@ -629,9 +699,9 @@ public void testReadStateFencedLeaderEpochError() { .setTopicId(TOPIC_ID) .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() .setPartition(PARTITION) - .setLeaderEpoch(3))))); // lower leaderEpoch than the one stored in leaderMap + .setLeaderEpoch(3))))); // Lower leaderEpoch than the one stored in leaderMap. 
- ReadShareGroupStateResponseData result = shard.readState(request, 0L); + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request); ReadShareGroupStateResponseData expectedData = ReadShareGroupStateResponse.toErrorResponseData( TOPIC_ID, @@ -639,7 +709,7 @@ public void testReadStateFencedLeaderEpochError() { Errors.FENCED_LEADER_EPOCH, Errors.FENCED_LEADER_EPOCH.message()); - assertEquals(expectedData, result); + assertEquals(expectedData, result.response()); assertEquals(leaderEpoch, shard.getLeaderMapValue(shareCoordinatorKey)); } @@ -677,7 +747,7 @@ public void testNonSequentialBatchUpdates() { SharePartitionKey shareCoordinatorKey = SharePartitionKey.getInstance(GROUP_ID, TOPIC_ID, PARTITION); - // set initial state + // Set initial state. WriteShareGroupStateRequestData request = new WriteShareGroupStateRequestData() .setGroupId(GROUP_ID) .setTopics(Collections.singletonList(new WriteShareGroupStateRequestData.WriteStateData() @@ -724,7 +794,7 @@ public void testNonSequentialBatchUpdates() { assertEquals(0, shard.getLeaderMapValue(shareCoordinatorKey)); verify(shard.getMetricsShard()).record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME); - // acknowledge b1 + // Acknowledge b1. WriteShareGroupStateRequestData requestUpdateB1 = new WriteShareGroupStateRequestData() .setGroupId(GROUP_ID) .setTopics(Collections.singletonList(new WriteShareGroupStateRequestData.WriteStateData() @@ -739,14 +809,14 @@ public void testNonSequentialBatchUpdates() { .setFirstOffset(100) .setLastOffset(109) .setDeliveryCount((short) 1) - .setDeliveryState((byte) 2))) // acked + .setDeliveryState((byte) 2))) // Acked )) )); result = shard.writeState(requestUpdateB1); shard.replay(0L, 0L, (short) 0, result.records().get(0)); - // ack batch 3 and move start offset + // Ack batch 3 and move start offset. 
WriteShareGroupStateRequestData requestUpdateStartOffsetAndB3 = new WriteShareGroupStateRequestData() .setGroupId(GROUP_ID) .setTopics(Collections.singletonList(new WriteShareGroupStateRequestData.WriteStateData() @@ -761,7 +831,7 @@ public void testNonSequentialBatchUpdates() { .setFirstOffset(120) .setLastOffset(129) .setDeliveryCount((short) 1) - .setDeliveryState((byte) 2))) //acked + .setDeliveryState((byte) 2))) //Acked )) )); @@ -775,7 +845,7 @@ public void testNonSequentialBatchUpdates() { .setStateEpoch(0) .setSnapshotEpoch(2) // since 2nd share snapshot .setStateBatches(Arrays.asList( - new PersisterStateBatch(110, 119, (byte) 1, (short) 2), // b2 not lost + new PersisterStateBatch(110, 119, (byte) 1, (short) 2), // b2 not lost new PersisterStateBatch(120, 129, (byte) 2, (short) 1) )) .build(); @@ -793,6 +863,106 @@ public void testNonSequentialBatchUpdates() { verify(shard.getMetricsShard(), times(3)).record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME); } + @Test + public void testLastRedundantOffset() { + ShareCoordinatorOffsetsManager manager = mock(ShareCoordinatorOffsetsManager.class); + ShareCoordinatorShard shard = new ShareCoordinatorShardBuilder() + .setOffsetsManager(manager) + .build(); + + when(manager.lastRedundantOffset()).thenReturn(Optional.of(10L)); + assertEquals(new CoordinatorResult<>(Collections.emptyList(), Optional.of(10L)), shard.lastRedundantOffset()); + } + + @Test + public void testReadStateLeaderEpochUpdateSuccess() { + ShareCoordinatorShard shard = new ShareCoordinatorShardBuilder().build(); + + SharePartitionKey shareCoordinatorKey = SharePartitionKey.getInstance(GROUP_ID, TOPIC_ID, PARTITION); + + ReadShareGroupStateRequestData request = new ReadShareGroupStateRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(PARTITION) + .setLeaderEpoch(2) + )))); + + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request); + + shard.replay(0L, 0L, (short) 0, result.records().get(0)); + + ReadShareGroupStateResponseData expectedData = ReadShareGroupStateResponse.toResponseData( + TOPIC_ID, PARTITION, + PartitionFactory.UNINITIALIZED_START_OFFSET, + PartitionFactory.DEFAULT_STATE_EPOCH, + Collections.emptyList()); + List expectedRecords = Collections.singletonList(ShareCoordinatorRecordHelpers.newShareSnapshotRecord( + GROUP_ID, TOPIC_ID, PARTITION, new ShareGroupOffset.Builder() + .setStartOffset(PartitionFactory.UNINITIALIZED_START_OFFSET) + .setLeaderEpoch(2) + .setStateBatches(Collections.emptyList()) + .setSnapshotEpoch(0) + .setStateEpoch(PartitionFactory.DEFAULT_STATE_EPOCH) + .build() + )); + + assertEquals(expectedData, result.response()); + assertEquals(expectedRecords, result.records()); + + assertEquals(groupOffset(expectedRecords.get(0).value().message()), shard.getShareStateMapValue(shareCoordinatorKey)); + assertEquals(2, shard.getLeaderMapValue(shareCoordinatorKey)); + verify(shard.getMetricsShard()).record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME); + } + + @Test + public void testReadStateLeaderEpochUpdateNoUpdate() { + ShareCoordinatorShard shard = new ShareCoordinatorShardBuilder().build(); + + ReadShareGroupStateRequestData request1 = new ReadShareGroupStateRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() 
+ .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(PARTITION) + .setLeaderEpoch(2) + )))); + + CoordinatorResult result = shard.readStateAndMaybeUpdateLeaderEpoch(request1); + assertFalse(result.records().isEmpty()); // Record generated. + + // Apply record to update soft state. + shard.replay(0L, 0L, (short) 0, result.records().get(0)); + + ReadShareGroupStateRequestData request2 = new ReadShareGroupStateRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(PARTITION) + .setLeaderEpoch(-1) + )))); + + CoordinatorResult result2 = shard.readStateAndMaybeUpdateLeaderEpoch(request2); + + assertTrue(result2.records().isEmpty()); // Leader epoch -1 - no update. + + ReadShareGroupStateRequestData request3 = new ReadShareGroupStateRequestData() + .setGroupId(GROUP_ID) + .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(TOPIC_ID) + .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(PARTITION) + .setLeaderEpoch(-1) + )))); + + CoordinatorResult result3 = shard.readStateAndMaybeUpdateLeaderEpoch(request3); + + assertTrue(result3.records().isEmpty()); // Same leader epoch - no update. + verify(shard.getMetricsShard()).record(ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME); + } + private static ShareGroupOffset groupOffset(ApiMessage record) { if (record instanceof ShareSnapshotValue) { return ShareGroupOffset.fromRecord((ShareSnapshotValue) record); diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfigTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorTestConfig.java similarity index 95% rename from share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfigTest.java rename to share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorTestConfig.java index 5f8c37fc1e6e0..75916187b28a0 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorConfigTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorTestConfig.java @@ -21,14 +21,13 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.server.config.ShareCoordinatorConfig; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -public class ShareCoordinatorConfigTest { +public class ShareCoordinatorTestConfig { private static final List CONFIG_DEF_LIST = Collections.singletonList( ShareCoordinatorConfig.CONFIG_DEF @@ -50,6 +49,7 @@ private static Map testConfigMapRaw() { configs.put(ShareCoordinatorConfig.LOAD_BUFFER_SIZE_CONFIG, "555"); configs.put(ShareCoordinatorConfig.APPEND_LINGER_MS_CONFIG, "10"); configs.put(ShareCoordinatorConfig.STATE_TOPIC_COMPRESSION_CODEC_CONFIG, String.valueOf(CompressionType.NONE.id)); + configs.put(ShareCoordinatorConfig.STATE_TOPIC_PRUNE_INTERVAL_MS_CONFIG, "30000"); // 30 seconds return configs; } diff --git a/share/src/main/java/org/apache/kafka/server/share/fetch/ShareFetchData.java 
b/share/src/main/java/org/apache/kafka/server/share/fetch/ShareFetchData.java deleted file mode 100644 index c32b32800177f..0000000000000 --- a/share/src/main/java/org/apache/kafka/server/share/fetch/ShareFetchData.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.server.share.fetch; - -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; -import org.apache.kafka.server.storage.log.FetchParams; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -/** - * The ShareFetchData class is used to store the fetch parameters for a share fetch request. - */ -public class ShareFetchData { - - private final FetchParams fetchParams; - private final String groupId; - private final String memberId; - private final CompletableFuture> future; - private final Map partitionMaxBytes; - private final int maxFetchRecords; - - public ShareFetchData( - FetchParams fetchParams, - String groupId, - String memberId, - CompletableFuture> future, - Map partitionMaxBytes, - int maxFetchRecords - ) { - this.fetchParams = fetchParams; - this.groupId = groupId; - this.memberId = memberId; - this.future = future; - this.partitionMaxBytes = partitionMaxBytes; - this.maxFetchRecords = maxFetchRecords; - } - - public String groupId() { - return groupId; - } - - public String memberId() { - return memberId; - } - - public CompletableFuture> future() { - return future; - } - - public Map partitionMaxBytes() { - return partitionMaxBytes; - } - - public FetchParams fetchParams() { - return fetchParams; - } - - public int maxFetchRecords() { - return maxFetchRecords; - } -} diff --git a/share/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java b/share/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java deleted file mode 100644 index f2cca9c4b3fc3..0000000000000 --- a/share/src/test/java/org/apache/kafka/server/share/persister/DefaultStatePersisterTest.java +++ /dev/null @@ -1,581 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.server.share.persister; - -import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.MockClient; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.FindCoordinatorResponseData; -import org.apache.kafka.common.message.ReadShareGroupStateRequestData; -import org.apache.kafka.common.message.ReadShareGroupStateResponseData; -import org.apache.kafka.common.message.WriteShareGroupStateRequestData; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.requests.FindCoordinatorRequest; -import org.apache.kafka.common.requests.FindCoordinatorResponse; -import org.apache.kafka.common.requests.ReadShareGroupStateRequest; -import org.apache.kafka.common.requests.ReadShareGroupStateResponse; -import org.apache.kafka.common.requests.WriteShareGroupStateRequest; -import org.apache.kafka.common.requests.WriteShareGroupStateResponse; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.share.SharePartitionKey; -import org.apache.kafka.server.util.MockTime; -import org.apache.kafka.server.util.timer.MockTimer; -import org.apache.kafka.server.util.timer.Timer; - -import org.junit.jupiter.api.Test; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -class DefaultStatePersisterTest { - private static final KafkaClient CLIENT = mock(KafkaClient.class); - private static final Time MOCK_TIME = new MockTime(); - private static final Timer MOCK_TIMER = new MockTimer(); - private static final ShareCoordinatorMetadataCacheHelper CACHE_HELPER = mock(ShareCoordinatorMetadataCacheHelper.class); - - private static final String HOST = "localhost"; - private static final int PORT = 9092; - - private static class DefaultStatePersisterBuilder { - - private KafkaClient client = CLIENT; - private Time time = MOCK_TIME; - private Timer timer = MOCK_TIMER; - private ShareCoordinatorMetadataCacheHelper cacheHelper = CACHE_HELPER; - - private DefaultStatePersisterBuilder withKafkaClient(KafkaClient client) { - this.client = client; - return this; - } - - private DefaultStatePersisterBuilder withCacheHelper(ShareCoordinatorMetadataCacheHelper cacheHelper) { - this.cacheHelper = cacheHelper; - return this; - } - - private DefaultStatePersisterBuilder withTime(Time time) { - this.time = time; - return this; - } - - private DefaultStatePersisterBuilder withTimer(Timer timer) { - this.timer = timer; - return this; - } - - public static DefaultStatePersisterBuilder builder() { - return new DefaultStatePersisterBuilder(); - } - - public DefaultStatePersister build() { - PersisterStateManager persisterStateManager = new PersisterStateManager(client, 
cacheHelper, time, timer); - return new DefaultStatePersister(persisterStateManager); - } - } - - private ShareCoordinatorMetadataCacheHelper getDefaultCacheHelper(Node suppliedNode) { - return new ShareCoordinatorMetadataCacheHelper() { - @Override - public boolean containsTopic(String topic) { - return false; - } - - @Override - public Node getShareCoordinator(SharePartitionKey key, String internalTopicName) { - return Node.noNode(); - } - - @Override - public List getClusterNodes() { - return Collections.singletonList(suppliedNode); - } - }; - } - - @Test - public void testWriteStateValidate() { - - String groupId = "group1"; - Uuid topicId = Uuid.randomUuid(); - int partition = 0; - int incorrectPartition = -1; - - // Request Parameters are null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(null); - }); - - // groupTopicPartitionData is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder().setGroupTopicPartitionData(null).build()); - }); - - // groupId is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(null).build()).build()); - }); - - // topicsData is empty - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.emptyList()).build()).build()); - }); - - // topicId is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(null, - Collections.singletonList(PartitionFactory.newPartitionStateBatchData( - partition, 1, 0, 0, null)))) - ).build()).build()); - }); - - // partitionData is empty - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(topicId, - Collections.emptyList())) - ).build()).build()); - }); - - // partition value is incorrect - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.writeState(new WriteShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(topicId, - 
Collections.singletonList(PartitionFactory.newPartitionStateBatchData( - incorrectPartition, 1, 0, 0, null)))) - ).build()).build()); - }); - } - - @Test - public void testReadStateValidate() { - - String groupId = "group1"; - Uuid topicId = Uuid.randomUuid(); - int partition = 0; - int incorrectPartition = -1; - - // Request Parameters are null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(null); - }); - - // groupTopicPartitionData is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder().setGroupTopicPartitionData(null).build()); - }); - - // groupId is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(null).build()).build()); - }); - - // topicsData is empty - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.emptyList()).build()).build()); - }); - - // topicId is null - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(null, - Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData( - partition, 1)))) - ).build()).build()); - }); - - // partitionData is empty - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(topicId, - Collections.emptyList())) - ).build()).build()); - }); - - // partition value is incorrect - assertThrows(IllegalArgumentException.class, () -> { - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder().build(); - defaultStatePersister.readState(new ReadShareGroupStateParameters.Builder() - .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() - .setGroupId(groupId) - .setTopicsData(Collections.singletonList(new TopicData<>(topicId, - Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData( - incorrectPartition, 1)))) - ).build()).build()); - }); - } - - @Test - public void testWriteStateSuccess() { - - MockClient client = new MockClient(MOCK_TIME); - - String groupId = "group1"; - Uuid topicId1 = Uuid.randomUuid(); - int partition1 = 10; - - Uuid topicId2 = Uuid.randomUuid(); - int partition2 = 8; - - Node suppliedNode = new Node(0, HOST, PORT); - Node coordinatorNode1 = new Node(5, 
HOST, PORT); - Node coordinatorNode2 = new Node(6, HOST, PORT); - - String coordinatorKey1 = SharePartitionKey.asCoordinatorKey(groupId, topicId1, partition1); - String coordinatorKey2 = SharePartitionKey.asCoordinatorKey(groupId, topicId2, partition2); - - client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest - && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() - && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey1), - new FindCoordinatorResponse( - new FindCoordinatorResponseData() - .setCoordinators(Collections.singletonList( - new FindCoordinatorResponseData.Coordinator() - .setNodeId(5) - .setHost(HOST) - .setPort(PORT) - .setErrorCode(Errors.NONE.code()) - )) - ), - suppliedNode - ); - - client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest - && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() - && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey2), - new FindCoordinatorResponse( - new FindCoordinatorResponseData() - .setCoordinators(Collections.singletonList( - new FindCoordinatorResponseData.Coordinator() - .setNodeId(6) - .setHost(HOST) - .setPort(PORT) - .setErrorCode(Errors.NONE.code()) - )) - ), - suppliedNode - ); - - client.prepareResponseFrom( - body -> { - WriteShareGroupStateRequest request = (WriteShareGroupStateRequest) body; - String requestGroupId = request.data().groupId(); - Uuid requestTopicId = request.data().topics().get(0).topicId(); - int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); - - return requestGroupId.equals(groupId) && requestTopicId == topicId1 && requestPartition == partition1; - }, - new WriteShareGroupStateResponse(WriteShareGroupStateResponse.toResponseData(topicId1, partition1)), - coordinatorNode1); - - client.prepareResponseFrom( - body -> { - WriteShareGroupStateRequest request = (WriteShareGroupStateRequest) body; - String requestGroupId = request.data().groupId(); - Uuid requestTopicId = request.data().topics().get(0).topicId(); - int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); - - return requestGroupId.equals(groupId) && requestTopicId == topicId2 && requestPartition == partition2; - }, - new WriteShareGroupStateResponse(WriteShareGroupStateResponse.toResponseData(topicId2, partition2)), - coordinatorNode2); - - ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); - - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder() - .withKafkaClient(client) - .withCacheHelper(cacheHelper) - .build(); - - WriteShareGroupStateParameters request = WriteShareGroupStateParameters.from( - new WriteShareGroupStateRequestData() - .setGroupId(groupId) - .setTopics(Arrays.asList( - new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(topicId1) - .setPartitions(Collections.singletonList( - new WriteShareGroupStateRequestData.PartitionData() - .setPartition(partition1) - .setStateEpoch(0) - .setLeaderEpoch(1) - .setStartOffset(0) - .setStateBatches(Collections.singletonList(new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(0) - .setLastOffset(10) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0))) - )), - new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(topicId2) - .setPartitions(Collections.singletonList( - new 
WriteShareGroupStateRequestData.PartitionData() - .setPartition(partition2) - .setStateEpoch(0) - .setLeaderEpoch(1) - .setStartOffset(0) - .setStateBatches(Arrays.asList( - new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(0) - .setLastOffset(10) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0), - new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(11) - .setLastOffset(20) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0))) - )) - )) - ); - - CompletableFuture resultFuture = defaultStatePersister.writeState(request); - - WriteShareGroupStateResult result = null; - try { - result = resultFuture.get(); - } catch (Exception e) { - fail("Unexpected exception", e); - } - - HashSet resultMap = new HashSet<>(); - result.topicsData().forEach( - topicData -> topicData.partitions().forEach( - partitionData -> resultMap.add((PartitionData) partitionData) - ) - ); - - - HashSet expectedResultMap = new HashSet<>(); - expectedResultMap.add((PartitionData) PartitionFactory.newPartitionErrorData(partition1, Errors.NONE.code(), null)); - - expectedResultMap.add((PartitionData) PartitionFactory.newPartitionErrorData(partition2, Errors.NONE.code(), null)); - - assertEquals(2, result.topicsData().size()); - assertEquals(expectedResultMap, resultMap); - } - - @Test - public void testReadStateSuccess() { - - MockClient client = new MockClient(MOCK_TIME); - - String groupId = "group1"; - Uuid topicId1 = Uuid.randomUuid(); - int partition1 = 10; - - Uuid topicId2 = Uuid.randomUuid(); - int partition2 = 8; - - Node suppliedNode = new Node(0, HOST, PORT); - Node coordinatorNode1 = new Node(5, HOST, PORT); - Node coordinatorNode2 = new Node(6, HOST, PORT); - - String coordinatorKey1 = SharePartitionKey.asCoordinatorKey(groupId, topicId1, partition1); - String coordinatorKey2 = SharePartitionKey.asCoordinatorKey(groupId, topicId2, partition2); - - client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest - && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() - && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey1), - new FindCoordinatorResponse( - new FindCoordinatorResponseData() - .setCoordinators(Collections.singletonList( - new FindCoordinatorResponseData.Coordinator() - .setNodeId(5) - .setHost(HOST) - .setPort(PORT) - .setErrorCode(Errors.NONE.code()) - )) - ), - suppliedNode - ); - - client.prepareResponseFrom(body -> body instanceof FindCoordinatorRequest - && ((FindCoordinatorRequest) body).data().keyType() == FindCoordinatorRequest.CoordinatorType.SHARE.id() - && ((FindCoordinatorRequest) body).data().coordinatorKeys().get(0).equals(coordinatorKey2), - new FindCoordinatorResponse( - new FindCoordinatorResponseData() - .setCoordinators(Collections.singletonList( - new FindCoordinatorResponseData.Coordinator() - .setNodeId(6) - .setHost(HOST) - .setPort(PORT) - .setErrorCode(Errors.NONE.code()) - )) - ), - suppliedNode - ); - - client.prepareResponseFrom( - body -> { - ReadShareGroupStateRequest request = (ReadShareGroupStateRequest) body; - String requestGroupId = request.data().groupId(); - Uuid requestTopicId = request.data().topics().get(0).topicId(); - int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); - - return requestGroupId.equals(groupId) && requestTopicId == topicId1 && requestPartition == partition1; - }, - new ReadShareGroupStateResponse(ReadShareGroupStateResponse.toResponseData(topicId1, partition1, 0, 
1, - Collections.singletonList(new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(0) - .setLastOffset(10) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0)))), - coordinatorNode1); - - client.prepareResponseFrom( - body -> { - ReadShareGroupStateRequest request = (ReadShareGroupStateRequest) body; - String requestGroupId = request.data().groupId(); - Uuid requestTopicId = request.data().topics().get(0).topicId(); - int requestPartition = request.data().topics().get(0).partitions().get(0).partition(); - - return requestGroupId.equals(groupId) && requestTopicId == topicId2 && requestPartition == partition2; - }, - new ReadShareGroupStateResponse(ReadShareGroupStateResponse.toResponseData(topicId2, partition2, 0, 1, - Arrays.asList(new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(0) - .setLastOffset(10) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0), - new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(11) - .setLastOffset(20) - .setDeliveryCount((short) 1) - .setDeliveryState((byte) 0)))), - coordinatorNode2); - - ShareCoordinatorMetadataCacheHelper cacheHelper = getDefaultCacheHelper(suppliedNode); - - DefaultStatePersister defaultStatePersister = DefaultStatePersisterBuilder.builder() - .withKafkaClient(client) - .withCacheHelper(cacheHelper) - .build(); - - ReadShareGroupStateParameters request = ReadShareGroupStateParameters.from( - new ReadShareGroupStateRequestData() - .setGroupId(groupId) - .setTopics(Arrays.asList( - new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(topicId1) - .setPartitions(Collections.singletonList( - new ReadShareGroupStateRequestData.PartitionData() - .setPartition(partition1) - .setLeaderEpoch(1) - )), - new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(topicId2) - .setPartitions(Collections.singletonList( - new ReadShareGroupStateRequestData.PartitionData() - .setPartition(partition2) - .setLeaderEpoch(1) - )) - )) - ); - - CompletableFuture resultFuture = defaultStatePersister.readState(request); - - ReadShareGroupStateResult result = null; - try { - result = resultFuture.get(); - } catch (Exception e) { - fail("Unexpected exception", e); - } - - HashSet resultMap = new HashSet<>(); - result.topicsData().forEach( - topicData -> topicData.partitions().forEach( - partitionData -> resultMap.add((PartitionData) partitionData) - ) - ); - - HashSet expectedResultMap = new HashSet<>(); - expectedResultMap.add( - (PartitionData) PartitionFactory.newPartitionAllData(partition1, 1, 0, Errors.NONE.code(), - null, Collections.singletonList(new PersisterStateBatch(0, 10, (byte) 0, (short) 1) - ))); - - expectedResultMap.add( - (PartitionData) PartitionFactory.newPartitionAllData(partition2, 1, 0, Errors.NONE.code(), - null, Arrays.asList( - new PersisterStateBatch(0, 10, (byte) 0, (short) 1), - new PersisterStateBatch(11, 20, (byte) 0, (short) 1) - ))); - - assertEquals(2, result.topicsData().size()); - assertEquals(expectedResultMap, resultMap); - } - - @Test - public void testDefaultPersisterClose() { - PersisterStateManager psm = mock(PersisterStateManager.class); - DefaultStatePersister dsp = new DefaultStatePersister(psm); - try { - verify(psm, times(0)).stop(); - - dsp.stop(); - - verify(psm, times(1)).stop(); - } catch (Exception e) { - fail("Unexpected exception", e); - } - } -} diff --git a/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java index 
527a157f68738..57005db2f07f0 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/CatCommandHandler.java @@ -121,8 +121,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof CatCommandHandler)) return false; - CatCommandHandler o = (CatCommandHandler) other; + if (!(other instanceof CatCommandHandler o)) return false; return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java index 3c441c2f0d31a..544320747c048 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/CdCommandHandler.java @@ -112,8 +112,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof CdCommandHandler)) return false; - CdCommandHandler o = (CdCommandHandler) other; + if (!(other instanceof CdCommandHandler o)) return false; return o.target.equals(target); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java index e8b8096f8b2d4..f3f5ac406227a 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/ErroneousCommandHandler.java @@ -50,8 +50,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof ErroneousCommandHandler)) return false; - ErroneousCommandHandler o = (ErroneousCommandHandler) other; + if (!(other instanceof ErroneousCommandHandler o)) return false; return Objects.equals(o.message, message); } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java index 46db3685f2579..8179203d0b1d9 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/FindCommandHandler.java @@ -127,8 +127,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof FindCommandHandler)) return false; - FindCommandHandler o = (FindCommandHandler) other; + if (!(other instanceof FindCommandHandler o)) return false; return Objects.equals(o.paths, paths); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/HistoryCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/HistoryCommandHandler.java index d47755977cf5f..99a13d4d17590 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/HistoryCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/HistoryCommandHandler.java @@ -93,7 +93,7 @@ public void run( PrintWriter writer, MetadataShellState state ) throws Exception { - if (!shell.isPresent()) { + if (shell.isEmpty()) { throw new RuntimeException("The history command requires a shell."); } Iterator> iter = shell.get().history(numEntriesToShow); @@ -110,8 +110,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof HistoryCommandHandler)) return false; - HistoryCommandHandler o = (HistoryCommandHandler) other; + if (!(other instanceof HistoryCommandHandler o)) return false; return o.numEntriesToShow == numEntriesToShow; } } 
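The equals() rewrites in these shell command handlers all follow the same shape: Java 16+ pattern matching for instanceof folds the type test and the cast into a single pattern variable. A minimal sketch of the before/after form, using a hypothetical Handler class (not one of the handlers touched in this patch):

import java.util.Objects;

public class Handler {
    private final String target;

    public Handler(String target) {
        this.target = target;
    }

    // Pre-Java 16 style: explicit test followed by a separate cast.
    // if (!(other instanceof Handler)) return false;
    // Handler o = (Handler) other;
    // return Objects.equals(o.target, target);

    // Java 16+ style: the pattern variable 'o' is bound only when the test succeeds.
    @Override
    public boolean equals(Object other) {
        if (!(other instanceof Handler o)) return false;
        return Objects.equals(o.target, target);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(target);
    }
}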
diff --git a/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java index f05c2bfc3b492..86ecb5fe8438a 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/LsCommandHandler.java @@ -191,7 +191,7 @@ static void printEntries(PrintWriter writer, static ColumnSchema calculateColumnSchema(OptionalInt screenWidth, List entries) { - if (!screenWidth.isPresent()) { + if (screenWidth.isEmpty()) { return new ColumnSchema(1, entries.size()); } int maxColumns = screenWidth.getAsInt() / 4; @@ -268,8 +268,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof ColumnSchema)) return false; - ColumnSchema other = (ColumnSchema) o; + if (!(o instanceof ColumnSchema other)) return false; if (entriesPerColumn != other.entriesPerColumn) return false; return Arrays.equals(columnWidths, other.columnWidths); } @@ -295,8 +294,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof LsCommandHandler)) return false; - LsCommandHandler o = (LsCommandHandler) other; + if (!(other instanceof LsCommandHandler o)) return false; return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java index d94b2f062d660..6915577b7433d 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/ManCommandHandler.java @@ -110,8 +110,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof ManCommandHandler)) return false; - ManCommandHandler o = (ManCommandHandler) other; + if (!(other instanceof ManCommandHandler o)) return false; return o.cmd.equals(cmd); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java b/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java index e2b5a49fa7790..df331e8c68cf3 100644 --- a/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java +++ b/shell/src/main/java/org/apache/kafka/shell/command/TreeCommandHandler.java @@ -117,8 +117,7 @@ public int hashCode() { @Override public boolean equals(Object other) { - if (!(other instanceof TreeCommandHandler)) return false; - TreeCommandHandler o = (TreeCommandHandler) other; + if (!(other instanceof TreeCommandHandler o)) return false; return Objects.equals(o.targets, targets); } } diff --git a/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java b/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java index 6af3011d55946..1baac384f8b17 100644 --- a/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java +++ b/shell/src/main/java/org/apache/kafka/shell/glob/GlobVisitor.java @@ -73,8 +73,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - if (!(o instanceof MetadataNodeInfo)) return false; - MetadataNodeInfo other = (MetadataNodeInfo) o; + if (!(o instanceof MetadataNodeInfo other)) return false; if (!Arrays.equals(path, other.path)) return false; return node.equals(other.node); } diff --git a/shell/src/main/java/org/apache/kafka/shell/node/printer/ShellNodePrinter.java b/shell/src/main/java/org/apache/kafka/shell/node/printer/ShellNodePrinter.java index de412781442bf..e3e646241982f 100644 --- 
a/shell/src/main/java/org/apache/kafka/shell/node/printer/ShellNodePrinter.java +++ b/shell/src/main/java/org/apache/kafka/shell/node/printer/ShellNodePrinter.java @@ -35,13 +35,7 @@ public ShellNodePrinter(PrintWriter writer) { } String indentationString() { - StringBuilder bld = new StringBuilder(); - for (int i = 0; i < indentationLevel; i++) { - for (int j = 0; j < 2; j++) { - bld.append(" "); - } - } - return bld.toString(); + return " ".repeat(2).repeat(Math.max(0, indentationLevel)); } @Override diff --git a/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java b/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java index 0d96a113fedce..a97774cb87399 100644 --- a/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java +++ b/shell/src/test/java/org/apache/kafka/shell/glob/GlobVisitorTest.java @@ -105,7 +105,7 @@ static class InfoConsumer implements Consumer> { @Override public void accept(Optional info) { - if (!infos.isPresent()) { + if (infos.isEmpty()) { if (info.isPresent()) { infos = Optional.of(new ArrayList<>()); infos.get().add(info.get()); diff --git a/server-common/src/test/resources/log4j.properties b/shell/src/test/resources/log4j2.yaml similarity index 61% rename from server-common/src/test/resources/log4j.properties rename to shell/src/test/resources/log4j2.yaml index be36f90299a77..c229cbce316d1 100644 --- a/server-common/src/test/resources/log4j.properties +++ b/shell/src/test/resources/log4j2.yaml @@ -1,9 +1,9 @@ # Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with +# contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # @@ -12,10 +12,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -log4j.rootLogger=INFO, stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" -log4j.logger.org.apache.kafka=INFO + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: DEBUG + AppenderRef: + - ref: STDOUT diff --git a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.java b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.java index 2280aa51132f7..efc37128ab27b 100644 --- a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.java +++ b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogMetadataManager.java @@ -230,4 +230,14 @@ default Optional nextSegmentWithTxnIndex(TopicIdPartit long offset) throws RemoteStorageException { return remoteLogSegmentMetadata(topicIdPartition, epoch, offset); } + + /** + * Denotes whether the partition metadata is ready to serve. 
+ * + * @param topicIdPartition topic partition + * @return True if the partition is ready to serve for remote storage operations. + */ + default boolean isReady(TopicIdPartition topicIdPartition) { + return true; + } } \ No newline at end of file diff --git a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.java b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.java index 1170c7e688696..7922d88d831a5 100644 --- a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.java +++ b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageMetrics.java @@ -20,8 +20,6 @@ import com.yammer.metrics.core.MetricName; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -54,8 +52,7 @@ public class RemoteStorageMetrics { private static final String REMOTE_LOG_READER_TASK_QUEUE_SIZE = REMOTE_LOG_READER_METRICS_NAME_PREFIX + TASK_QUEUE_SIZE; private static final String REMOTE_LOG_READER_AVG_IDLE_PERCENT = REMOTE_LOG_READER_METRICS_NAME_PREFIX + AVG_IDLE_PERCENT; private static final String REMOTE_LOG_READER_FETCH_RATE_AND_TIME_MS = REMOTE_LOG_READER_METRICS_NAME_PREFIX + "FetchRateAndTimeMs"; - public static final Set REMOTE_STORAGE_THREAD_POOL_METRICS = Collections.unmodifiableSet( - new HashSet<>(Arrays.asList(REMOTE_LOG_READER_TASK_QUEUE_SIZE, REMOTE_LOG_READER_AVG_IDLE_PERCENT))); + public static final Set REMOTE_STORAGE_THREAD_POOL_METRICS = Set.of(REMOTE_LOG_READER_TASK_QUEUE_SIZE, REMOTE_LOG_READER_AVG_IDLE_PERCENT); public static final MetricName REMOTE_COPY_BYTES_PER_SEC_METRIC = getMetricName( "kafka.server", "BrokerTopicMetrics", REMOTE_COPY_BYTES_PER_SEC); diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/ClassLoaderAwareRemoteLogMetadataManager.java b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/ClassLoaderAwareRemoteLogMetadataManager.java index 1abcbbc20ce07..5d5cba2ca11b7 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/ClassLoaderAwareRemoteLogMetadataManager.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/ClassLoaderAwareRemoteLogMetadataManager.java @@ -111,6 +111,11 @@ public Optional nextSegmentWithTxnIndex(TopicIdPartiti return withClassLoader(() -> delegate.nextSegmentWithTxnIndex(topicIdPartition, epoch, offset)); } + @Override + public boolean isReady(TopicIdPartition topicIdPartition) { + return withClassLoader(() -> delegate.isReady(topicIdPartition)); + } + @Override public void configure(Map configs) { withClassLoader(() -> { diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogSegmentMetadataSnapshot.java b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogSegmentMetadataSnapshot.java index 64540a7fabd8b..7c67adc5ba366 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogSegmentMetadataSnapshot.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogSegmentMetadataSnapshot.java @@ -242,8 +242,7 @@ public TopicIdPartition topicIdPartition() { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof RemoteLogSegmentMetadataSnapshot)) return false; - RemoteLogSegmentMetadataSnapshot that = (RemoteLogSegmentMetadataSnapshot) o; + if (!(o instanceof 
RemoteLogSegmentMetadataSnapshot that)) return false; return startOffset == that.startOffset && endOffset == that.endOffset && maxTimestampMs == that.maxTimestampMs diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManager.java b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManager.java index a5db1ea38ef75..58d571630d212 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManager.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManager.java @@ -396,6 +396,11 @@ public void configure(Map configs) { } } + @Override + public boolean isReady(TopicIdPartition topicIdPartition) { + return remotePartitionMetadataStore.isInitialized(topicIdPartition); + } + private void initializeResources() { log.info("Initializing topic-based RLMM resources"); final NewTopic remoteLogMetadataTopicRequest = createRemoteLogMetadataTopicRequest(); diff --git a/core/src/main/java/kafka/log/remote/CustomMetadataSizeLimitExceededException.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/CustomMetadataSizeLimitExceededException.java similarity index 86% rename from core/src/main/java/kafka/log/remote/CustomMetadataSizeLimitExceededException.java rename to storage/src/main/java/org/apache/kafka/server/log/remote/storage/CustomMetadataSizeLimitExceededException.java index c893f3488de30..98462d93b0913 100644 --- a/core/src/main/java/kafka/log/remote/CustomMetadataSizeLimitExceededException.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/CustomMetadataSizeLimitExceededException.java @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package kafka.log.remote; +package org.apache.kafka.server.log.remote.storage; -class CustomMetadataSizeLimitExceededException extends Exception { +public class CustomMetadataSizeLimitExceededException extends Exception { } diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java index 4f75130fcbaed..d6cf56eff441e 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.config.ConfigException; import java.util.Collections; import java.util.Map; @@ -54,8 +53,8 @@ public final class RemoteLogManagerConfig { public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX = "rlmm.config."; public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = "remote.log.storage.system.enable"; - public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether to enable tiered storage functionality in a broker or not. Valid values " + - "are `true` or `false` and the default value is false. When it is true broker starts all the services required for the tiered storage functionality."; + public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether to enable tiered storage functionality in a broker or not. 
" + + "When it is true broker starts all the services required for the tiered storage functionality."; public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = false; public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = "remote.log.storage.manager.class.name"; @@ -94,28 +93,19 @@ public final class RemoteLogManagerConfig { public static final long DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L; public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = "remote.log.manager.thread.pool.size"; - public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Deprecated. Size of the thread pool used in scheduling tasks to copy " + - "segments, fetch remote log indexes and clean up remote log segments."; - public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10; - - private static final String REMOTE_LOG_MANAGER_THREAD_POOL_FALLBACK = "The default value of -1 means that this will be set to the configured value of " + - REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP + ", if available; otherwise, it defaults to " + DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE + "."; - private static final ConfigDef.Validator REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_VALIDATOR = ConfigDef.LambdaValidator.with( - (name, value) -> { - if ((int) value < -1 || (int) value == 0) throw new ConfigException(name, value, "Value can be -1 or greater than 0"); - }, - () -> "[-1,1,...]" - ); + public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in scheduling follower tasks to read " + + "the highest-uploaded remote-offset for follower partitions."; + public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 2; public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP = "remote.log.manager.copier.thread.pool.size"; public static final String REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in scheduling tasks " + - "to copy segments. " + REMOTE_LOG_MANAGER_THREAD_POOL_FALLBACK; - public static final int DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE = -1; + "to copy segments."; + public static final int DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE = 10; public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP = "remote.log.manager.expiration.thread.pool.size"; public static final String REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC = "Size of the thread pool used in scheduling tasks " + - "to clean up remote log segments. 
" + REMOTE_LOG_MANAGER_THREAD_POOL_FALLBACK; - public static final int DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE = -1; + "to clean up the expired remote log segments."; + public static final int DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE = 10; public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = "remote.log.manager.task.interval.ms"; public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = "Interval at which remote log manager runs the scheduled tasks like copy " + @@ -270,13 +260,13 @@ public static ConfigDef configDef() { .define(REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, INT, DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, - REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_VALIDATOR, + atLeast(1), MEDIUM, REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_DOC) .define(REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, INT, DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, - REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_VALIDATOR, + atLeast(1), MEDIUM, REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_DOC) .define(REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/AsyncOffsetReadFutureHolder.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/AsyncOffsetReadFutureHolder.java new file mode 100644 index 0000000000000..990a5ef67ddf0 --- /dev/null +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/AsyncOffsetReadFutureHolder.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.storage.internals.log; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; + +/** + * A remote log offset read task future holder. It contains two futures: + *

+ * 1. JobFuture - Use this future to cancel the running job.
+ * 2. TaskFuture - Use this future to get the result of the job/computation.
              + */ +public record AsyncOffsetReadFutureHolder(Future jobFuture, CompletableFuture taskFuture) { +} diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java index f8da9552ed9d9..c78075595aec8 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java @@ -175,8 +175,7 @@ public T get() throws IOException { try { if (indexWrapper instanceof IndexValue) return ((IndexValue) indexWrapper).index; - else if (indexWrapper instanceof IndexFile) { - IndexFile indexFile = (IndexFile) indexWrapper; + else if (indexWrapper instanceof IndexFile indexFile) { IndexValue indexValue = new IndexValue<>(loadIndex(indexFile.file)); indexWrapper = indexValue; return indexValue.index; diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LocalLog.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LocalLog.java index d2cd71addad64..ab40f19b0dbfb 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LocalLog.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LocalLog.java @@ -24,7 +24,6 @@ import org.apache.kafka.common.record.FileLogInputStream; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; @@ -221,13 +220,7 @@ public boolean renameDir(String name) { * @param newConfig the new configuration to be updated to */ public void updateConfig(LogConfig newConfig) { - LogConfig oldConfig = config; config = newConfig; - RecordVersion oldRecordVersion = oldConfig.recordVersion(); - RecordVersion newRecordVersion = newConfig.recordVersion(); - if (newRecordVersion.precedes(oldRecordVersion)) { - logger.warn("Record format version has been downgraded from {} to {}.", oldRecordVersion, newRecordVersion); - } } public void checkIfMemoryMappedBufferClosed() { @@ -364,7 +357,7 @@ public void deleteEmptyDir() { */ public List deleteAllSegments() { return maybeHandleIOException( - () -> "Error while deleting all segments for $topicPartition in dir ${dir.getParent}", + () -> String.format("Error while deleting all segments for %s in dir %s", topicPartition, dir.getParent()), () -> { List deletableSegments = new ArrayList<>(segments.values()); removeAndDeleteSegments( @@ -476,8 +469,8 @@ public FetchDataInfo read(long startOffset, return maybeHandleIOException( () -> "Exception while reading from " + topicPartition + " in dir " + dir.getParent(), () -> { - logger.trace("Reading maximum $maxLength bytes at offset {} from log with total length {} bytes", - startOffset, segments.sizeInBytes()); + logger.trace("Reading maximum {} bytes at offset {} from log with total length {} bytes", + maxLength, startOffset, segments.sizeInBytes()); LogOffsetMetadata endOffsetMetadata = nextOffsetMetadata; long endOffset = endOffsetMetadata.messageOffset; @@ -531,8 +524,8 @@ else if (segment.baseOffset() == maxOffsetMetadata.segmentBaseOffset && !maxOffs ); } - public void append(long lastOffset, long largestTimestamp, long shallowOffsetOfMaxTimestamp, MemoryRecords records) throws IOException { - segments.activeSegment().append(lastOffset, largestTimestamp, shallowOffsetOfMaxTimestamp, records); + 
public void append(long lastOffset, MemoryRecords records) throws IOException { + segments.activeSegment().append(lastOffset, records); updateLogEndOffset(lastOffset + 1); } @@ -949,7 +942,7 @@ public static SplitSegmentResult splitOverflowedSegment(LogSegment segment, throw new IllegalStateException("Inconsistent segment sizes after split before: " + segment.log().sizeInBytes() + " after: " + totalSizeOfNewSegments); } // replace old segment with new ones - LOG.info("{}Replacing overflowed segment $segment with split segments {}", logPrefix, newSegments); + LOG.info("{}Replacing overflowed segment {} with split segments {}", logPrefix, segment, newSegments); List deletedSegments = replaceSegments(existingSegments, newSegments, singletonList(segment), dir, topicPartition, config, scheduler, logDirFailureChannel, logPrefix, false); return new SplitSegmentResult(deletedSegments, newSegments); diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogAppendInfo.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogAppendInfo.java index 05e162a3042f1..63a8a5108189c 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogAppendInfo.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogAppendInfo.java @@ -31,13 +31,12 @@ public class LogAppendInfo { public static final LogAppendInfo UNKNOWN_LOG_APPEND_INFO = new LogAppendInfo(-1, -1, OptionalInt.empty(), - RecordBatch.NO_TIMESTAMP, -1L, RecordBatch.NO_TIMESTAMP, -1L, + RecordBatch.NO_TIMESTAMP, RecordBatch.NO_TIMESTAMP, -1L, RecordValidationStats.EMPTY, CompressionType.NONE, -1, -1L); private long firstOffset; private long lastOffset; private long maxTimestamp; - private long shallowOffsetOfMaxTimestamp; private long logAppendTime; private long logStartOffset; private RecordValidationStats recordValidationStats; @@ -52,31 +51,29 @@ public class LogAppendInfo { /** * Creates an instance with the given params. * - * @param firstOffset The first offset in the message set unless the message format is less than V2 and we are appending - * to the follower. - * @param lastOffset The last offset in the message set - * @param lastLeaderEpoch The partition leader epoch corresponding to the last offset, if available. - * @param maxTimestamp The maximum timestamp of the message set. - * @param shallowOffsetOfMaxTimestamp The last offset of the batch with the maximum timestamp. - * @param logAppendTime The log append time (if used) of the message set, otherwise Message.NoTimestamp - * @param logStartOffset The start offset of the log at the time of this append. - * @param recordValidationStats Statistics collected during record processing, `null` if `assignOffsets` is `false` - * @param sourceCompression The source codec used in the message set (send by the producer) - * @param validBytes The number of valid bytes - * @param lastOffsetOfFirstBatch The last offset of the first batch + * @param firstOffset The first offset in the message set unless the message format is less than V2 and we are appending + * to the follower. + * @param lastOffset The last offset in the message set + * @param lastLeaderEpoch The partition leader epoch corresponding to the last offset, if available. + * @param maxTimestamp The maximum timestamp of the message set. + * @param logAppendTime The log append time (if used) of the message set, otherwise Message.NoTimestamp + * @param logStartOffset The start offset of the log at the time of this append. 
+ * @param recordValidationStats Statistics collected during record processing, `null` if `assignOffsets` is `false` + * @param sourceCompression The source codec used in the message set (send by the producer) + * @param validBytes The number of valid bytes + * @param lastOffsetOfFirstBatch The last offset of the first batch */ public LogAppendInfo(long firstOffset, long lastOffset, OptionalInt lastLeaderEpoch, long maxTimestamp, - long shallowOffsetOfMaxTimestamp, long logAppendTime, long logStartOffset, RecordValidationStats recordValidationStats, CompressionType sourceCompression, int validBytes, long lastOffsetOfFirstBatch) { - this(firstOffset, lastOffset, lastLeaderEpoch, maxTimestamp, shallowOffsetOfMaxTimestamp, logAppendTime, logStartOffset, + this(firstOffset, lastOffset, lastLeaderEpoch, maxTimestamp, logAppendTime, logStartOffset, recordValidationStats, sourceCompression, validBytes, lastOffsetOfFirstBatch, Collections.emptyList(), LeaderHwChange.NONE); } @@ -84,27 +81,25 @@ public LogAppendInfo(long firstOffset, /** * Creates an instance with the given params. * - * @param firstOffset The first offset in the message set unless the message format is less than V2 and we are appending - * to the follower. - * @param lastOffset The last offset in the message set - * @param lastLeaderEpoch The partition leader epoch corresponding to the last offset, if available. - * @param maxTimestamp The maximum timestamp of the message set. - * @param shallowOffsetOfMaxTimestamp The last offset of the batch with the maximum timestamp. - * @param logAppendTime The log append time (if used) of the message set, otherwise Message.NoTimestamp - * @param logStartOffset The start offset of the log at the time of this append. - * @param recordValidationStats Statistics collected during record processing, `null` if `assignOffsets` is `false` - * @param sourceCompression The source codec used in the message set (send by the producer) - * @param validBytes The number of valid bytes - * @param lastOffsetOfFirstBatch The last offset of the first batch - * @param recordErrors List of record errors that caused the respective batch to be dropped - * @param leaderHwChange Incremental if the high watermark needs to be increased after appending record - * Same if high watermark is not changed. None is the default value and it means append failed + * @param firstOffset The first offset in the message set unless the message format is less than V2 and we are appending + * to the follower. + * @param lastOffset The last offset in the message set + * @param lastLeaderEpoch The partition leader epoch corresponding to the last offset, if available. + * @param maxTimestamp The maximum timestamp of the message set. + * @param logAppendTime The log append time (if used) of the message set, otherwise Message.NoTimestamp + * @param logStartOffset The start offset of the log at the time of this append. + * @param recordValidationStats Statistics collected during record processing, `null` if `assignOffsets` is `false` + * @param sourceCompression The source codec used in the message set (send by the producer) + * @param validBytes The number of valid bytes + * @param lastOffsetOfFirstBatch The last offset of the first batch + * @param recordErrors List of record errors that caused the respective batch to be dropped + * @param leaderHwChange Incremental if the high watermark needs to be increased after appending record + * Same if high watermark is not changed. 
None is the default value and it means append failed */ public LogAppendInfo(long firstOffset, long lastOffset, OptionalInt lastLeaderEpoch, long maxTimestamp, - long shallowOffsetOfMaxTimestamp, long logAppendTime, long logStartOffset, RecordValidationStats recordValidationStats, @@ -117,7 +112,6 @@ public LogAppendInfo(long firstOffset, this.lastOffset = lastOffset; this.lastLeaderEpoch = lastLeaderEpoch; this.maxTimestamp = maxTimestamp; - this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; this.logAppendTime = logAppendTime; this.logStartOffset = logStartOffset; this.recordValidationStats = recordValidationStats; @@ -156,14 +150,6 @@ public void setMaxTimestamp(long maxTimestamp) { this.maxTimestamp = maxTimestamp; } - public long shallowOffsetOfMaxTimestamp() { - return shallowOffsetOfMaxTimestamp; - } - - public void setShallowOffsetOfMaxTimestamp(long shallowOffsetOfMaxTimestamp) { - this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; - } - public long logAppendTime() { return logAppendTime; } @@ -233,12 +219,12 @@ public long numMessages() { * @return a new instance with the given LeaderHwChange */ public LogAppendInfo copy(LeaderHwChange newLeaderHwChange) { - return new LogAppendInfo(firstOffset, lastOffset, lastLeaderEpoch, maxTimestamp, shallowOffsetOfMaxTimestamp, logAppendTime, logStartOffset, recordValidationStats, + return new LogAppendInfo(firstOffset, lastOffset, lastLeaderEpoch, maxTimestamp, logAppendTime, logStartOffset, recordValidationStats, sourceCompression, validBytes, lastOffsetOfFirstBatch, recordErrors, newLeaderHwChange); } public static LogAppendInfo unknownLogAppendInfoWithLogStartOffset(long logStartOffset) { - return new LogAppendInfo(-1, -1, OptionalInt.empty(), RecordBatch.NO_TIMESTAMP, -1L, RecordBatch.NO_TIMESTAMP, logStartOffset, + return new LogAppendInfo(-1, -1, OptionalInt.empty(), RecordBatch.NO_TIMESTAMP, RecordBatch.NO_TIMESTAMP, logStartOffset, RecordValidationStats.EMPTY, CompressionType.NONE, -1, -1L); } @@ -248,7 +234,7 @@ public static LogAppendInfo unknownLogAppendInfoWithLogStartOffset(long logStart * in unknownLogAppendInfoWithLogStartOffset, but with additional fields recordErrors */ public static LogAppendInfo unknownLogAppendInfoWithAdditionalInfo(long logStartOffset, List recordErrors) { - return new LogAppendInfo(-1, -1, OptionalInt.empty(), RecordBatch.NO_TIMESTAMP, -1L, RecordBatch.NO_TIMESTAMP, logStartOffset, + return new LogAppendInfo(-1, -1, OptionalInt.empty(), RecordBatch.NO_TIMESTAMP, RecordBatch.NO_TIMESTAMP, logStartOffset, RecordValidationStats.EMPTY, CompressionType.NONE, -1, -1L, recordErrors, LeaderHwChange.NONE); } @@ -259,7 +245,6 @@ public String toString() { ", lastOffset=" + lastOffset + ", lastLeaderEpoch=" + lastLeaderEpoch + ", maxTimestamp=" + maxTimestamp + - ", shallowOffsetOfMaxTimestamp=" + shallowOffsetOfMaxTimestamp + ", logAppendTime=" + logAppendTime + ", logStartOffset=" + logStartOffset + ", recordConversionStats=" + recordValidationStats + diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/package-info.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogCleaningAbortedException.java similarity index 81% rename from clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/package-info.java rename to storage/src/main/java/org/apache/kafka/storage/internals/log/LogCleaningAbortedException.java index 1d91c7d2f3362..1c1f8d9075291 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/secured/package-info.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogCleaningAbortedException.java @@ -14,8 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.kafka.storage.internals.log; + /** - * This package is deprecated. - * See {@link org.apache.kafka.common.security.oauthbearer} + * Thrown when a log cleaning task is requested to be aborted. */ -package org.apache.kafka.common.security.oauthbearer.secured; \ No newline at end of file +public class LogCleaningAbortedException extends RuntimeException { +} diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java index 52574021d0ccb..f4294329f2505 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogConfig.java @@ -27,13 +27,11 @@ import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.LegacyRecord; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.record.Records; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.ConfigUtils; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.common.MetadataVersion; -import org.apache.kafka.server.common.MetadataVersionValidator; import org.apache.kafka.server.config.QuotaConfig; import org.apache.kafka.server.config.ServerLogConfigs; import org.apache.kafka.server.config.ServerTopicConfigSynonyms; @@ -69,45 +67,6 @@ public class LogConfig extends AbstractConfig { - public static class MessageFormatVersion { - private final String messageFormatVersionString; - private final String interBrokerProtocolVersionString; - private final MetadataVersion messageFormatVersion; - private final MetadataVersion interBrokerProtocolVersion; - - public MessageFormatVersion(String messageFormatVersionString, String interBrokerProtocolVersionString) { - this.messageFormatVersionString = messageFormatVersionString; - this.interBrokerProtocolVersionString = interBrokerProtocolVersionString; - this.messageFormatVersion = MetadataVersion.fromVersionString(messageFormatVersionString); - this.interBrokerProtocolVersion = MetadataVersion.fromVersionString(interBrokerProtocolVersionString); - } - - public MetadataVersion messageFormatVersion() { - return messageFormatVersion; - } - - public MetadataVersion interBrokerProtocolVersion() { - return interBrokerProtocolVersion; - } - - public boolean shouldIgnore() { - return shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion); - } - - public boolean shouldWarn() { - return interBrokerProtocolVersion.isAtLeast(IBP_3_0_IV1) - && messageFormatVersion.highestSupportedRecordVersion().precedes(RecordVersion.V2); - } - - @SuppressWarnings("deprecation") - public String topicWarningMessage(String topicName) { - return "Topic configuration " + TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG + " with value `" - + messageFormatVersionString + "` is ignored for `" + topicName + "` because the " - + "inter-broker protocol version `" + interBrokerProtocolVersionString + "` is greater or " - + "equal than 3.0. 
This configuration is deprecated and it will be removed in Apache Kafka 4.0."; - } - } - private static class RemoteLogConfig { private final boolean remoteStorageEnable; @@ -180,8 +139,6 @@ public Optional serverConfigName(String configName) { public static final boolean DEFAULT_UNCLEAN_LEADER_ELECTION_ENABLE = false; public static final String DEFAULT_COMPRESSION_TYPE = BrokerCompressionType.PRODUCER.name; public static final boolean DEFAULT_PREALLOCATE = false; - @Deprecated - public static final long DEFAULT_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS = ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DEFAULT; public static final boolean DEFAULT_REMOTE_STORAGE_ENABLE = false; public static final boolean DEFAULT_REMOTE_LOG_COPY_DISABLE_CONFIG = false; @@ -189,21 +146,6 @@ public Optional serverConfigName(String configName) { public static final long DEFAULT_LOCAL_RETENTION_BYTES = -2; // It indicates the value to be derived from RetentionBytes public static final long DEFAULT_LOCAL_RETENTION_MS = -2; // It indicates the value to be derived from RetentionMs - /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details - * Keep DEFAULT_MESSAGE_FORMAT_VERSION as a way to handle the deprecated value */ - @Deprecated - public static final String DEFAULT_MESSAGE_FORMAT_VERSION = ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_DEFAULT; - - /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */ - @SuppressWarnings("deprecation") - private static final String MESSAGE_FORMAT_VERSION_CONFIG = TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG; - - @SuppressWarnings("deprecation") - private static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG; - - @SuppressWarnings("deprecation") - private static final String MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC = TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC; - // Visible for testing public static final Set CONFIGS_WITH_NO_SERVER_DEFAULTS = Set.of( TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, @@ -213,15 +155,11 @@ public Optional serverConfigName(String configName) { QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG ); - @SuppressWarnings("deprecation") - private static final String MESSAGE_FORMAT_VERSION_DOC = TopicConfig.MESSAGE_FORMAT_VERSION_DOC; - - @SuppressWarnings("deprecation") public static final ConfigDef SERVER_CONFIG_DEF = new ConfigDef() .define(ServerLogConfigs.NUM_PARTITIONS_CONFIG, INT, ServerLogConfigs.NUM_PARTITIONS_DEFAULT, atLeast(1), MEDIUM, ServerLogConfigs.NUM_PARTITIONS_DOC) .define(ServerLogConfigs.LOG_DIR_CONFIG, STRING, ServerLogConfigs.LOG_DIR_DEFAULT, HIGH, ServerLogConfigs.LOG_DIR_DOC) .define(ServerLogConfigs.LOG_DIRS_CONFIG, STRING, null, HIGH, ServerLogConfigs.LOG_DIRS_DOC) - .define(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, INT, DEFAULT_SEGMENT_BYTES, atLeast(LegacyRecord.RECORD_OVERHEAD_V0), HIGH, ServerLogConfigs.LOG_SEGMENT_BYTES_DOC) + .define(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, INT, DEFAULT_SEGMENT_BYTES, atLeast(1024 * 1024), HIGH, ServerLogConfigs.LOG_SEGMENT_BYTES_DOC) .define(ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG, LONG, null, HIGH, ServerLogConfigs.LOG_ROLL_TIME_MILLIS_DOC) .define(ServerLogConfigs.LOG_ROLL_TIME_HOURS_CONFIG, INT, (int) TimeUnit.MILLISECONDS.toHours(DEFAULT_SEGMENT_MS), atLeast(1), HIGH, ServerLogConfigs.LOG_ROLL_TIME_HOURS_DOC) @@ -248,14 +186,11 @@ public Optional serverConfigName(String configName) { .define(ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_CONFIG, INT, 
ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_DEFAULT, atLeast(1), HIGH, ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_DOC) .define(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, BOOLEAN, ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_DEFAULT, HIGH, ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_DOC) .define(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, INT, ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT, atLeast(1), HIGH, ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DOC) - .define(ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_CONFIG, STRING, ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_DEFAULT, new MetadataVersionValidator(), MEDIUM, ServerLogConfigs.LOG_MESSAGE_FORMAT_VERSION_DOC) .define(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG, STRING, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_DEFAULT, ConfigDef.ValidString.in("CreateTime", "LogAppendTime"), MEDIUM, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_DOC) - .define(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DEFAULT, atLeast(0), MEDIUM, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC) .define(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DEFAULT, atLeast(0), MEDIUM, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC) .define(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DEFAULT, atLeast(0), MEDIUM, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DOC) .define(ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, CLASS, null, LOW, ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_DOC) .define(ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, CLASS, null, LOW, ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_DOC) - .define(ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, BOOLEAN, ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_DEFAULT, LOW, ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_DOC) .define(ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_CONFIG, LONG, ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_DEFAULT, atLeast(1), LOW, ServerLogConfigs.LOG_DIR_FAILURE_TIMEOUT_MS_DOC) .defineInternal(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, LONG, ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT, atLeast(0), LOW, ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DOC); @@ -307,12 +242,8 @@ public Optional serverConfigName(String configName) { .define(TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG, INT, CompressionType.ZSTD.defaultLevel(), CompressionType.ZSTD.levelValidator(), MEDIUM, TopicConfig.COMPRESSION_ZSTD_LEVEL_DOC) .define(TopicConfig.PREALLOCATE_CONFIG, BOOLEAN, DEFAULT_PREALLOCATE, MEDIUM, TopicConfig.PREALLOCATE_DOC) - .define(MESSAGE_FORMAT_VERSION_CONFIG, STRING, DEFAULT_MESSAGE_FORMAT_VERSION, new MetadataVersionValidator(), MEDIUM, - MESSAGE_FORMAT_VERSION_DOC) .define(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, STRING, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_DEFAULT, in("CreateTime", "LogAppendTime"), MEDIUM, TopicConfig.MESSAGE_TIMESTAMP_TYPE_DOC) - .define(MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, LONG, DEFAULT_MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS, - atLeast(0), MEDIUM, MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_DOC) .define(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DEFAULT, atLeast(0), MEDIUM, TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_DOC) .define(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, 
LONG, ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_AFTER_MAX_MS_DEFAULT, @@ -321,8 +252,6 @@ public Optional serverConfigName(String configName) { ThrottledReplicaListValidator.INSTANCE, MEDIUM, QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_DOC) .define(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG, LIST, QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_DEFAULT, ThrottledReplicaListValidator.INSTANCE, MEDIUM, QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_DOC) - .define(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, BOOLEAN, ServerLogConfigs.LOG_MESSAGE_DOWNCONVERSION_ENABLE_DEFAULT, LOW, - TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_DOC) .define(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, BOOLEAN, DEFAULT_REMOTE_STORAGE_ENABLE, null, MEDIUM, TopicConfig.REMOTE_LOG_STORAGE_ENABLE_DOC) .define(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, LONG, DEFAULT_LOCAL_RETENTION_MS, atLeast(-2), MEDIUM, @@ -361,20 +290,12 @@ public Optional serverConfigName(String configName) { public final Optional compression; public final boolean preallocate; - /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details regarding the deprecation */ - @Deprecated - public final MetadataVersion messageFormatVersion; - public final TimestampType messageTimestampType; - /* See `TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG` for details regarding the deprecation */ - @Deprecated - public final long messageTimestampDifferenceMaxMs; public final long messageTimestampBeforeMaxMs; public final long messageTimestampAfterMaxMs; public final List leaderReplicationThrottledReplicas; public final List followerReplicationThrottledReplicas; - public final boolean messageDownConversionEnable; private final RemoteLogConfig remoteLogConfig; private final int maxMessageSize; @@ -384,7 +305,7 @@ public LogConfig(Map props) { this(props, Collections.emptySet()); } - @SuppressWarnings({"deprecation", "this-escape"}) + @SuppressWarnings({"this-escape"}) public LogConfig(Map props, Set overriddenConfigs) { super(CONFIG, props, false); this.props = Collections.unmodifiableMap(props); @@ -418,14 +339,11 @@ public LogConfig(Map props, Set overriddenConfigs) { this.compressionType = BrokerCompressionType.forName(getString(TopicConfig.COMPRESSION_TYPE_CONFIG)); this.compression = getCompression(); this.preallocate = getBoolean(TopicConfig.PREALLOCATE_CONFIG); - this.messageFormatVersion = MetadataVersion.fromVersionString(getString(TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG)); this.messageTimestampType = TimestampType.forName(getString(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG)); - this.messageTimestampDifferenceMaxMs = getLong(TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG); - this.messageTimestampBeforeMaxMs = getMessageTimestampBeforeMaxMs(); - this.messageTimestampAfterMaxMs = getMessageTimestampAfterMaxMs(); + this.messageTimestampBeforeMaxMs = getLong(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG); + this.messageTimestampAfterMaxMs = getLong(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG); this.leaderReplicationThrottledReplicas = Collections.unmodifiableList(getList(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG)); this.followerReplicationThrottledReplicas = Collections.unmodifiableList(getList(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG)); - this.messageDownConversionEnable = getBoolean(TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG); remoteLogConfig = new RemoteLogConfig(this); } @@ -455,32 +373,6 @@ private Optional getCompression() { } } - //In the transition 
period before messageTimestampDifferenceMaxMs is removed, to maintain backward compatibility, - // we are using its value if messageTimestampBeforeMaxMs default value hasn't changed. - private long getMessageTimestampBeforeMaxMs() { - final Long messageTimestampBeforeMaxMs = getLong(TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG); - if (!messageTimestampBeforeMaxMs.equals(Long.MAX_VALUE)) { - return messageTimestampBeforeMaxMs; - } else { - return messageTimestampDifferenceMaxMs; - } - } - - //In the transition period before messageTimestampDifferenceMaxMs is removed, to maintain backward compatibility, - // we are using its value if messageTimestampAfterMaxMs default value hasn't changed. - private long getMessageTimestampAfterMaxMs() { - final Long messageTimestampAfterMaxMs = getLong(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG); - if (!messageTimestampAfterMaxMs.equals(Long.MAX_VALUE)) { - return messageTimestampAfterMaxMs; - } else { - return messageTimestampDifferenceMaxMs; - } - } - - public RecordVersion recordVersion() { - return messageFormatVersion.highestSupportedRecordVersion(); - } - // Exposed as a method so it can be mocked public int maxMessageSize() { return maxMessageSize; @@ -781,12 +673,9 @@ public String toString() { ", minInSyncReplicas=" + minInSyncReplicas + ", compressionType='" + compressionType + '\'' + ", preallocate=" + preallocate + - ", messageFormatVersion=" + messageFormatVersion + ", messageTimestampType=" + messageTimestampType + - ", messageTimestampDifferenceMaxMs=" + messageTimestampDifferenceMaxMs + ", leaderReplicationThrottledReplicas=" + leaderReplicationThrottledReplicas + ", followerReplicationThrottledReplicas=" + followerReplicationThrottledReplicas + - ", messageDownConversionEnable=" + messageDownConversionEnable + ", remoteLogConfig=" + remoteLogConfig + ", maxMessageSize=" + maxMessageSize + '}'; diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogLoader.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogLoader.java index db631b25cf9b6..89780686995fb 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogLoader.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogLoader.java @@ -56,7 +56,7 @@ public class LogLoader { private final LogSegments segments; private final long logStartOffsetCheckpoint; private final long recoveryPointCheckpoint; - private final Optional leaderEpochCache; + private final LeaderEpochFileCache leaderEpochCache; private final ProducerStateManager producerStateManager; private final ConcurrentMap numRemainingSegments; private final boolean isRemoteLogEnabled; @@ -74,7 +74,7 @@ public class LogLoader { * @param segments The {@link LogSegments} instance into which segments recovered from disk will be populated * @param logStartOffsetCheckpoint The checkpoint of the log start offset * @param recoveryPointCheckpoint The checkpoint of the offset at which to begin the recovery - * @param leaderEpochCache An optional {@link LeaderEpochFileCache} instance to be updated during recovery + * @param leaderEpochCache A {@link LeaderEpochFileCache} instance to be updated during recovery * @param producerStateManager The {@link ProducerStateManager} instance to be updated during recovery * @param numRemainingSegments The remaining segments to be recovered in this log keyed by recovery thread name * @param isRemoteLogEnabled Boolean flag to indicate whether the remote storage is enabled or not @@ -90,7 +90,7 @@ public LogLoader( 
LogSegments segments, long logStartOffsetCheckpoint, long recoveryPointCheckpoint, - Optional leaderEpochCache, + LeaderEpochFileCache leaderEpochCache, ProducerStateManager producerStateManager, ConcurrentMap numRemainingSegments, boolean isRemoteLogEnabled) { @@ -215,13 +215,13 @@ public LoadedLogOffsets load() throws IOException { recoveryOffsets = new RecoveryOffsets(0L, 0L); } - leaderEpochCache.ifPresent(lec -> lec.truncateFromEndAsyncFlush(recoveryOffsets.nextOffset)); + leaderEpochCache.truncateFromEndAsyncFlush(recoveryOffsets.nextOffset); long newLogStartOffset = isRemoteLogEnabled ? logStartOffsetCheckpoint : Math.max(logStartOffsetCheckpoint, segments.firstSegment().get().baseOffset()); // The earliest leader epoch may not be flushed during a hard failure. Recover it here. - leaderEpochCache.ifPresent(lec -> lec.truncateFromStartAsyncFlush(logStartOffsetCheckpoint)); + leaderEpochCache.truncateFromStartAsyncFlush(logStartOffsetCheckpoint); // Any segment loading or recovery code must not use producerStateManager, so that we can build the full state here // from scratch. @@ -238,7 +238,6 @@ public LoadedLogOffsets load() throws IOException { segments, newLogStartOffset, recoveryOffsets.nextOffset, - config.recordVersion(), time, hadCleanShutdown, logPrefix); @@ -408,7 +407,6 @@ private int recoverSegment(LogSegment segment) throws IOException { segments, logStartOffsetCheckpoint, segment.baseOffset(), - config.recordVersion(), time, false, logPrefix); @@ -430,7 +428,7 @@ private Optional deleteSegmentsIfLogStartGreaterThanLogEnd() throws IOExce "is smaller than logStartOffset {}. " + "This could happen if segment files were deleted from the file system.", logEndOffset, logStartOffsetCheckpoint); removeAndDeleteSegmentsAsync(segments.values()); - leaderEpochCache.ifPresent(LeaderEpochFileCache::clearAndFlush); + leaderEpochCache.clearAndFlush(); producerStateManager.truncateFullyAndStartAt(logStartOffsetCheckpoint); return Optional.empty(); } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogSegment.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogSegment.java index faf0ece60675f..12054273b7f45 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogSegment.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogSegment.java @@ -177,8 +177,8 @@ public void resizeIndexes(int size) throws IOException { public void sanityCheck(boolean timeIndexFileNewlyCreated) throws IOException { if (offsetIndexFile().exists()) { // Resize the time index file to 0 if it is newly created. - if (timeIndexFileNewlyCreated) - timeIndex().resize(0); + if (timeIndexFileNewlyCreated) + timeIndex().resize(0); // Sanity checks for time index and offset index are skipped because // we will recover the segments above the recovery point in recoverLog() // in any case so sanity checking them here is redundant. @@ -232,38 +232,38 @@ private boolean canConvertToRelativeOffset(long offset) throws IOException { * It is assumed this method is being called from within a lock, it is not thread-safe otherwise. * * @param largestOffset The last offset in the message set - * @param largestTimestampMs The largest timestamp in the message set. - * @param shallowOffsetOfMaxTimestamp The last offset of earliest batch with max timestamp in the messages to append. - * @param records The log entries to append. + * @param records The log entries to append. 
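Note on the LogSegment.append() change documented by the javadoc above: callers no longer pass largestTimestampMs and shallowOffsetOfMaxTimestamp; the segment now derives both while iterating the appended batches. A minimal sketch of that per-batch bookkeeping, using only public record APIs (the class and method names other than MemoryRecords/RecordBatch are illustrative, and the real method additionally maintains the offset and time indexes):

    import org.apache.kafka.common.record.MemoryRecords;
    import org.apache.kafka.common.record.RecordBatch;

    final class MaxTimestampSketch {
        // Returns {maxTimestamp, lastOffsetOfThatBatch}; the second element is the
        // batch-level ("shallow") offset that a time-index entry would point at.
        static long[] maxTimestampAndBatchLastOffset(MemoryRecords records) {
            long maxTimestamp = RecordBatch.NO_TIMESTAMP;
            long shallowOffset = -1L;
            for (RecordBatch batch : records.batches()) {
                if (batch.maxTimestamp() > maxTimestamp) {
                    maxTimestamp = batch.maxTimestamp();
                    shallowOffset = batch.lastOffset();
                }
            }
            return new long[] {maxTimestamp, shallowOffset};
        }
    }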
* @throws LogSegmentOffsetOverflowException if the largest offset causes index offset overflow */ public void append(long largestOffset, - long largestTimestampMs, - long shallowOffsetOfMaxTimestamp, MemoryRecords records) throws IOException { if (records.sizeInBytes() > 0) { - LOGGER.trace("Inserting {} bytes at end offset {} at position {} with largest timestamp {} at offset {}", - records.sizeInBytes(), largestOffset, log.sizeInBytes(), largestTimestampMs, shallowOffsetOfMaxTimestamp); + LOGGER.trace("Inserting {} bytes at end offset {} at position {}", + records.sizeInBytes(), largestOffset, log.sizeInBytes()); int physicalPosition = log.sizeInBytes(); - if (physicalPosition == 0) - rollingBasedTimestamp = OptionalLong.of(largestTimestampMs); ensureOffsetInRange(largestOffset); // append the messages long appendedBytes = log.append(records); LOGGER.trace("Appended {} to {} at end offset {}", appendedBytes, log.file(), largestOffset); - // Update the in memory max timestamp and corresponding offset. - if (largestTimestampMs > maxTimestampSoFar()) { - maxTimestampAndOffsetSoFar = new TimestampOffset(largestTimestampMs, shallowOffsetOfMaxTimestamp); - } - // append an entry to the index (if needed) - if (bytesSinceLastIndexEntry > indexIntervalBytes) { - offsetIndex().append(largestOffset, physicalPosition); - timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar()); - bytesSinceLastIndexEntry = 0; + + for (RecordBatch batch : records.batches()) { + long batchMaxTimestamp = batch.maxTimestamp(); + long batchLastOffset = batch.lastOffset(); + if (batchMaxTimestamp > maxTimestampSoFar()) { + maxTimestampAndOffsetSoFar = new TimestampOffset(batchMaxTimestamp, batchLastOffset); + } + + if (bytesSinceLastIndexEntry > indexIntervalBytes) { + offsetIndex().append(batchLastOffset, physicalPosition); + timeIndex().maybeAppend(maxTimestampSoFar(), shallowOffsetOfMaxTimestampSoFar()); + bytesSinceLastIndexEntry = 0; + } + var sizeInBytes = batch.sizeInBytes(); + physicalPosition += sizeInBytes; + bytesSinceLastIndexEntry += sizeInBytes; } - bytesSinceLastIndexEntry += records.sizeInBytes(); } } @@ -274,8 +274,6 @@ private void ensureOffsetInRange(long offset) throws IOException { private int appendChunkFromFile(FileRecords records, int position, BufferSupplier bufferSupplier) throws IOException { int bytesToAppend = 0; - long maxTimestamp = Long.MIN_VALUE; - long shallowOffsetOfMaxTimestamp = Long.MIN_VALUE; long maxOffset = Long.MIN_VALUE; ByteBuffer readBuffer = bufferSupplier.get(1024 * 1024); @@ -284,10 +282,6 @@ private int appendChunkFromFile(FileRecords records, int position, BufferSupplie Iterator nextBatches = records.batchesFrom(position).iterator(); FileChannelRecordBatch batch; while ((batch = nextAppendableBatch(nextBatches, readBuffer, bytesToAppend)) != null) { - if (batch.maxTimestamp() > maxTimestamp) { - maxTimestamp = batch.maxTimestamp(); - shallowOffsetOfMaxTimestamp = batch.lastOffset(); - } maxOffset = batch.lastOffset(); bytesToAppend += batch.sizeInBytes(); } @@ -300,7 +294,7 @@ private int appendChunkFromFile(FileRecords records, int position, BufferSupplie readBuffer.limit(bytesToAppend); records.readInto(readBuffer, position); - append(maxOffset, maxTimestamp, shallowOffsetOfMaxTimestamp, MemoryRecords.readableRecords(readBuffer)); + append(maxOffset, MemoryRecords.readableRecords(readBuffer)); } bufferSupplier.release(readBuffer); @@ -442,7 +436,7 @@ public FetchDataInfo read(long startOffset, int maxSize, Optional maxPosit // return empty records 
in the fetch-data-info when: // 1. adjustedMaxSize is 0 (or) // 2. maxPosition to read is unavailable - if (adjustedMaxSize == 0 || !maxPositionOpt.isPresent()) + if (adjustedMaxSize == 0 || maxPositionOpt.isEmpty()) return new FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY); // calculate the length of the message set to read based on whether or not they gave us a maxOffset @@ -465,11 +459,11 @@ public OptionalLong fetchUpperBoundOffset(OffsetPosition startOffsetPosition, in * * @param producerStateManager Producer state corresponding to the segment's base offset. This is needed to recover * the transaction index. - * @param leaderEpochCache Optionally a cache for updating the leader epoch during recovery. + * @param leaderEpochCache a cache for updating the leader epoch during recovery. * @return The number of bytes truncated from the log * @throws LogSegmentOffsetOverflowException if the log segment contains an offset that causes the index offset to overflow */ - public int recover(ProducerStateManager producerStateManager, Optional leaderEpochCache) throws IOException { + public int recover(ProducerStateManager producerStateManager, LeaderEpochFileCache leaderEpochCache) throws IOException { offsetIndex().reset(); timeIndex().reset(); txnIndex.reset(); @@ -495,11 +489,9 @@ public int recover(ProducerStateManager producerStateManager, Optional= RecordBatch.MAGIC_VALUE_V2) { - leaderEpochCache.ifPresent(cache -> { - if (batch.partitionLeaderEpoch() >= 0 && - (!cache.latestEpoch().isPresent() || batch.partitionLeaderEpoch() > cache.latestEpoch().getAsInt())) - cache.assign(batch.partitionLeaderEpoch(), batch.baseOffset()); - }); + if (batch.partitionLeaderEpoch() >= 0 && + (leaderEpochCache.latestEpoch().isEmpty() || batch.partitionLeaderEpoch() > leaderEpochCache.latestEpoch().getAsInt())) + leaderEpochCache.assign(batch.partitionLeaderEpoch(), batch.baseOffset()); updateProducerState(producerStateManager, batch); } } @@ -686,7 +678,7 @@ public void onBecomeInactiveSegment() throws IOException { * load the timestamp of the first message into memory. 
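The recover() signature above now takes a required LeaderEpochFileCache rather than an Optional. The per-batch epoch bookkeeping it performs is roughly the following sketch, mirroring the hunk above (the import path for LeaderEpochFileCache is an assumption based on the storage-internals layout):

    import org.apache.kafka.common.record.RecordBatch;
    import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache;

    final class EpochRecoverySketch {
        // Record the batch's leader epoch only if it is valid and newer than what the cache holds.
        static void maybeAssign(LeaderEpochFileCache cache, RecordBatch batch) {
            if (batch.magic() >= RecordBatch.MAGIC_VALUE_V2
                    && batch.partitionLeaderEpoch() >= 0
                    && (cache.latestEpoch().isEmpty()
                        || batch.partitionLeaderEpoch() > cache.latestEpoch().getAsInt())) {
                cache.assign(batch.partitionLeaderEpoch(), batch.baseOffset());
            }
        }
    }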
*/ private void loadFirstBatchTimestamp() { - if (!rollingBasedTimestamp.isPresent()) { + if (rollingBasedTimestamp.isEmpty()) { Iterator iter = log.batches().iterator(); if (iter.hasNext()) rollingBasedTimestamp = OptionalLong.of(iter.next().maxTimestamp()); diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogValidator.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogValidator.java index ea4723098d8ff..9f8c6fa3b53dc 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/LogValidator.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/LogValidator.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.errors.InvalidTimestampException; -import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.AbstractRecords; @@ -40,7 +39,6 @@ import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.PrimitiveRef.LongRef; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.common.MetadataVersion; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -49,8 +47,6 @@ import java.util.Optional; import java.util.stream.Collectors; -import static org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV0; - public class LogValidator { public interface MetricsRecorder { @@ -69,20 +65,15 @@ public static class ValidationResult { public final long logAppendTimeMs; public final MemoryRecords validatedRecords; public final long maxTimestampMs; - // we only maintain batch level offset for max timestamp since we want to align the behavior of updating time - // indexing entries. The paths of follower append and replica recovery do not iterate all records, so they have no - // idea about record level offset for max timestamp. 
- public final long shallowOffsetOfMaxTimestamp; public final boolean messageSizeMaybeChanged; public final RecordValidationStats recordValidationStats; public ValidationResult(long logAppendTimeMs, MemoryRecords validatedRecords, long maxTimestampMs, - long shallowOffsetOfMaxTimestamp, boolean messageSizeMaybeChanged, + boolean messageSizeMaybeChanged, RecordValidationStats recordValidationStats) { this.logAppendTimeMs = logAppendTimeMs; this.validatedRecords = validatedRecords; this.maxTimestampMs = maxTimestampMs; - this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; this.messageSizeMaybeChanged = messageSizeMaybeChanged; this.recordValidationStats = recordValidationStats; } @@ -110,7 +101,6 @@ private ApiRecordError(Errors apiError, RecordError recordError) { private final long timestampAfterMaxMs; private final int partitionLeaderEpoch; private final AppendOrigin origin; - private final MetadataVersion interBrokerProtocolVersion; public LogValidator(MemoryRecords records, TopicPartition topicPartition, @@ -123,8 +113,7 @@ public LogValidator(MemoryRecords records, long timestampBeforeMaxMs, long timestampAfterMaxMs, int partitionLeaderEpoch, - AppendOrigin origin, - MetadataVersion interBrokerProtocolVersion) { + AppendOrigin origin) { this.records = records; this.topicPartition = topicPartition; this.time = time; @@ -137,7 +126,6 @@ public LogValidator(MemoryRecords records, this.timestampAfterMaxMs = timestampAfterMaxMs; this.partitionLeaderEpoch = partitionLeaderEpoch; this.origin = origin; - this.interBrokerProtocolVersion = interBrokerProtocolVersion; } /** @@ -236,7 +224,6 @@ private ValidationResult convertAndAssignOffsetsNonCompressed(LongRef offsetCoun now, convertedRecords, info.maxTimestamp, - info.shallowOffsetOfMaxTimestamp, true, recordValidationStats); } @@ -246,8 +233,6 @@ public ValidationResult assignOffsetsNonCompressed(LongRef offsetCounter, MetricsRecorder metricsRecorder) { long now = time.milliseconds(); long maxTimestamp = RecordBatch.NO_TIMESTAMP; - long shallowOffsetOfMaxTimestamp = -1L; - long initialOffset = offsetCounter.value; RecordBatch firstBatch = getFirstBatchAndMaybeValidateNoMoreBatches(records, CompressionType.NONE); @@ -276,7 +261,6 @@ public ValidationResult assignOffsetsNonCompressed(LongRef offsetCounter, if (batch.magic() > RecordBatch.MAGIC_VALUE_V0 && maxBatchTimestamp > maxTimestamp) { maxTimestamp = maxBatchTimestamp; - shallowOffsetOfMaxTimestamp = offsetCounter.value - 1; } batch.setLastOffset(offsetCounter.value - 1); @@ -293,23 +277,10 @@ public ValidationResult assignOffsetsNonCompressed(LongRef offsetCounter, } if (timestampType == TimestampType.LOG_APPEND_TIME) { - maxTimestamp = now; - // those checks should be equal to MemoryRecordsBuilder#info - switch (toMagic) { - case RecordBatch.MAGIC_VALUE_V0: - maxTimestamp = RecordBatch.NO_TIMESTAMP; - // value will be the default value: -1 - shallowOffsetOfMaxTimestamp = -1; - break; - case RecordBatch.MAGIC_VALUE_V1: - // Those single-record batches have same max timestamp, so the initial offset is equal with - // the last offset of earliest batch - shallowOffsetOfMaxTimestamp = initialOffset; - break; - default: - // there is only one batch so use the last offset - shallowOffsetOfMaxTimestamp = offsetCounter.value - 1; - break; + if (toMagic == RecordBatch.MAGIC_VALUE_V0) { + maxTimestamp = RecordBatch.NO_TIMESTAMP; + } else { + maxTimestamp = now; } } @@ -317,7 +288,6 @@ public ValidationResult assignOffsetsNonCompressed(LongRef offsetCounter, now, records, maxTimestamp, - 
shallowOffsetOfMaxTimestamp, false, RecordValidationStats.EMPTY); } @@ -332,10 +302,6 @@ public ValidationResult assignOffsetsNonCompressed(LongRef offsetCounter, public ValidationResult validateMessagesAndAssignOffsetsCompressed(LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { - if (targetCompression.type() == CompressionType.ZSTD && interBrokerProtocolVersion.isLessThan(IBP_2_1_IV0)) - throw new UnsupportedCompressionTypeException("Produce requests to inter.broker.protocol.version < 2.1 broker " + - "are not allowed to use ZStandard compression"); - // No in place assignment situation 1 boolean inPlaceAssignment = sourceCompressionType == targetCompression.type(); long now = time.milliseconds(); @@ -383,12 +349,12 @@ public ValidationResult validateMessagesAndAssignOffsetsCompressed(LongRef offse Optional recordError = validateRecordCompression(sourceCompressionType, recordIndex, record); - if (!recordError.isPresent()) { + if (recordError.isEmpty()) { recordError = validateRecord(batch, topicPartition, record, recordIndex, now, timestampType, timestampBeforeMaxMs, timestampAfterMaxMs, compactedTopic, metricsRecorder); } - if (!recordError.isPresent() + if (recordError.isEmpty() && batch.magic() > RecordBatch.MAGIC_VALUE_V0 && toMagic > RecordBatch.MAGIC_VALUE_V0) { @@ -445,7 +411,6 @@ public ValidationResult validateMessagesAndAssignOffsetsCompressed(LongRef offse now, records, maxTimestamp, - lastOffset, false, recordValidationStats); } @@ -487,7 +452,6 @@ private ValidationResult buildRecordsAndAssignOffsets(LongRef offsetCounter, logAppendTime, records, info.maxTimestamp, - info.shallowOffsetOfMaxTimestamp, true, recordValidationStats); } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetResultHolder.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetResultHolder.java new file mode 100644 index 0000000000000..f682b975ef3be --- /dev/null +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetResultHolder.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
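The new OffsetResultHolder class introduced in the file that begins here wraps the result of an offset-for-timestamp lookup: either an already-resolved TimestampAndOffset or a future holder for the asynchronous remote-storage path, plus optional error and last-fetchable-offset fields. A hedged usage sketch for the synchronous case (generic type parameters are not visible in this rendering of the diff, so the types below are assumptions; the TimestampAndOffset constructor arguments are timestamp, offset, and leader epoch):

    import org.apache.kafka.common.record.FileRecords.TimestampAndOffset;
    import org.apache.kafka.storage.internals.log.OffsetResultHolder;

    import java.util.Optional;

    final class OffsetResultHolderSketch {
        static boolean demo() {
            // Wrap a lookup that resolved synchronously to offset 42 with leader epoch 3.
            TimestampAndOffset found = new TimestampAndOffset(1_700_000_000_000L, 42L, Optional.of(3));
            OffsetResultHolder holder = new OffsetResultHolder(found);
            // The async path would instead populate futureHolderOpt(); FileRecordsOrError
            // enforces that exactly one of {exception, timestampAndOffset} is present.
            return holder.timestampAndOffsetOpt().isPresent();
        }
    }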
+ */ +package org.apache.kafka.storage.internals.log; + +import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.record.FileRecords.TimestampAndOffset; + +import java.util.Objects; +import java.util.Optional; + +public class OffsetResultHolder { + + private Optional timestampAndOffsetOpt; + private Optional> futureHolderOpt; + private Optional maybeOffsetsError = Optional.empty(); + private Optional lastFetchableOffset = Optional.empty(); + + public OffsetResultHolder() { + this(Optional.empty(), Optional.empty()); + } + + public OffsetResultHolder( + Optional timestampAndOffsetOpt, + Optional> futureHolderOpt + ) { + this.timestampAndOffsetOpt = timestampAndOffsetOpt; + this.futureHolderOpt = futureHolderOpt; + } + + public OffsetResultHolder(Optional timestampAndOffsetOpt) { + this(timestampAndOffsetOpt, Optional.empty()); + } + + public OffsetResultHolder(TimestampAndOffset timestampAndOffsetOpt) { + this(Optional.of(timestampAndOffsetOpt), Optional.empty()); + } + + public Optional timestampAndOffsetOpt() { + return timestampAndOffsetOpt; + } + + public Optional> futureHolderOpt() { + return futureHolderOpt; + } + + public Optional maybeOffsetsError() { + return maybeOffsetsError; + } + + public Optional lastFetchableOffset() { + return lastFetchableOffset; + } + + public void timestampAndOffsetOpt(Optional timestampAndOffsetOpt) { + this.timestampAndOffsetOpt = timestampAndOffsetOpt; + } + + public void maybeOffsetsError(Optional maybeOffsetsError) { + this.maybeOffsetsError = maybeOffsetsError; + } + + public void lastFetchableOffset(Optional lastFetchableOffset) { + this.lastFetchableOffset = lastFetchableOffset; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + OffsetResultHolder that = (OffsetResultHolder) o; + return Objects.equals(timestampAndOffsetOpt, that.timestampAndOffsetOpt) && Objects.equals(futureHolderOpt, that.futureHolderOpt) && Objects.equals(maybeOffsetsError, that.maybeOffsetsError) && Objects.equals(lastFetchableOffset, that.lastFetchableOffset); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(timestampAndOffsetOpt); + result = 31 * result + Objects.hashCode(futureHolderOpt); + result = 31 * result + Objects.hashCode(maybeOffsetsError); + result = 31 * result + Objects.hashCode(lastFetchableOffset); + return result; + } + + public static class FileRecordsOrError { + private Optional exception; + private Optional timestampAndOffset; + + public FileRecordsOrError( + Optional exception, + Optional timestampAndOffset + ) { + if (exception.isPresent() && timestampAndOffset.isPresent()) { + throw new IllegalArgumentException("Either exception or timestampAndOffset should be present, but not both"); + } + this.exception = exception; + this.timestampAndOffset = timestampAndOffset; + } + + public Optional exception() { + return exception; + } + + public Optional timestampAndOffset() { + return timestampAndOffset; + } + + public boolean hasException() { + return exception.isPresent(); + } + + public boolean hasTimestampAndOffset() { + return timestampAndOffset.isPresent(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FileRecordsOrError that = (FileRecordsOrError) o; + return Objects.equals(exception, that.exception) && Objects.equals(timestampAndOffset, that.timestampAndOffset); + } + + @Override + public int hashCode() 
{ + int result = Objects.hashCode(exception); + result = 31 * result + Objects.hashCode(timestampAndOffset); + return result; + } + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectClusterAssertions.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetsOutOfOrderException.java similarity index 74% rename from connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectClusterAssertions.java rename to storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetsOutOfOrderException.java index e6f93f5000947..39f8d494979f5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectClusterAssertions.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/OffsetsOutOfOrderException.java @@ -14,16 +14,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.connect.util.clusters; +package org.apache.kafka.storage.internals.log; /** - * @deprecated Use {@link ConnectAssertions} instead. + * Indicates the follower received records with non-monotonically increasing offsets */ -@Deprecated -public class EmbeddedConnectClusterAssertions extends ConnectAssertions { +public class OffsetsOutOfOrderException extends RuntimeException { - EmbeddedConnectClusterAssertions(EmbeddedConnect connect) { - super(connect); + public OffsetsOutOfOrderException(String message) { + super(message); } - } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerAppendInfo.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerAppendInfo.java index 9037d25672eb4..621f4da51f251 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerAppendInfo.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerAppendInfo.java @@ -181,7 +181,7 @@ public void appendDataBatch(short epoch, // Received a non-transactional message while a transaction is active throw new InvalidTxnStateException("Expected transactional write from producer " + producerId + " at " + "offset " + firstOffsetMetadata + " in partition " + topicPartition); - } else if (!currentTxnFirstOffset.isPresent() && isTransactional) { + } else if (currentTxnFirstOffset.isEmpty() && isTransactional) { // Began a new transaction updatedEntry.setCurrentTxnFirstOffset(firstOffset); transactions.add(new TxnMetadata(producerId, firstOffsetMetadata)); diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerStateManager.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerStateManager.java index 994f34744e43e..4b0175f3b2483 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerStateManager.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/ProducerStateManager.java @@ -248,9 +248,9 @@ public Optional firstUnstableOffset() { Optional unreplicatedFirstOffset = Optional.ofNullable(unreplicatedTxns.firstEntry()).map(e -> e.getValue().firstOffset); Optional undecidedFirstOffset = Optional.ofNullable(ongoingTxns.firstEntry()).map(e -> e.getValue().firstOffset); - if (!unreplicatedFirstOffset.isPresent()) + if (unreplicatedFirstOffset.isEmpty()) return undecidedFirstOffset; - else if (!undecidedFirstOffset.isPresent()) + else if (undecidedFirstOffset.isEmpty()) return unreplicatedFirstOffset; else if (undecidedFirstOffset.get().messageOffset < 
unreplicatedFirstOffset.get().messageOffset) return undecidedFirstOffset; @@ -328,7 +328,7 @@ public void loadProducerEntry(ProducerStateEntry entry) { } private boolean isProducerExpired(long currentTimeMs, ProducerStateEntry producerState) { - return !producerState.currentTxnFirstOffset().isPresent() && currentTimeMs - producerState.lastTimestamp() >= producerStateManagerConfig.producerIdExpirationMs(); + return producerState.currentTxnFirstOffset().isEmpty() && currentTimeMs - producerState.lastTimestamp() >= producerStateManagerConfig.producerIdExpirationMs(); } /** diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteStorageThreadPool.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteStorageThreadPool.java index 849ac55615512..9c6b9f644e486 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteStorageThreadPool.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteStorageThreadPool.java @@ -18,16 +18,15 @@ import org.apache.kafka.common.internals.FatalExitError; import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.common.utils.ThreadUtils; import org.apache.kafka.server.metrics.KafkaMetricsGroup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_READER_AVG_IDLE_PERCENT_METRIC; import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_READER_TASK_QUEUE_SIZE_METRIC; @@ -37,11 +36,17 @@ public final class RemoteStorageThreadPool extends ThreadPoolExecutor { private static final Logger LOGGER = LoggerFactory.getLogger(RemoteStorageThreadPool.class); private final KafkaMetricsGroup metricsGroup = new KafkaMetricsGroup(this.getClass()); - public RemoteStorageThreadPool(String threadNamePrefix, + public RemoteStorageThreadPool(String threadNamePattern, int numThreads, int maxPendingTasks) { - super(numThreads, numThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(maxPendingTasks), - new RemoteStorageThreadFactory(threadNamePrefix)); + super(numThreads, + numThreads, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(maxPendingTasks), + ThreadUtils.createThreadFactory(threadNamePattern, false, + (t, e) -> LOGGER.error("Uncaught exception in thread '{}':", t.getName(), e)) + ); metricsGroup.newGauge(REMOTE_LOG_READER_TASK_QUEUE_SIZE_METRIC.getName(), () -> getQueue().size()); metricsGroup.newGauge(REMOTE_LOG_READER_AVG_IDLE_PERCENT_METRIC.getName(), @@ -61,21 +66,6 @@ protected void afterExecute(Runnable runnable, Throwable th) { } } - private static class RemoteStorageThreadFactory implements ThreadFactory { - private final String namePrefix; - private final AtomicInteger threadNumber = new AtomicInteger(0); - - RemoteStorageThreadFactory(String namePrefix) { - this.namePrefix = namePrefix; - } - - @Override - public Thread newThread(Runnable r) { - return new Thread(r, namePrefix + threadNumber.getAndIncrement()); - } - - } - public void removeMetrics() { REMOTE_STORAGE_THREAD_POOL_METRICS.forEach(metricsGroup::removeMetric); } diff --git a/core/src/main/scala/kafka/common/ThreadShutdownException.scala b/storage/src/main/java/org/apache/kafka/storage/internals/log/ThreadShutdownException.java similarity index 
75% rename from core/src/main/scala/kafka/common/ThreadShutdownException.scala rename to storage/src/main/java/org/apache/kafka/storage/internals/log/ThreadShutdownException.java index 8cd6601ce5aa9..02c7167487a3c 100644 --- a/core/src/main/scala/kafka/common/ThreadShutdownException.scala +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/ThreadShutdownException.java @@ -1,10 +1,10 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with + * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,11 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -package kafka.common +package org.apache.kafka.storage.internals.log; /** * An exception that indicates a thread is being shut down normally. */ -class ThreadShutdownException extends RuntimeException { +public class ThreadShutdownException extends RuntimeException { } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/UnifiedLog.java b/storage/src/main/java/org/apache/kafka/storage/internals/log/UnifiedLog.java index 6ad75a91d40d3..8c416e5b53602 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/UnifiedLog.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/UnifiedLog.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.record.Records; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; @@ -58,7 +57,6 @@ public static void rebuildProducerState(ProducerStateManager producerStateManage LogSegments segments, long logStartOffset, long lastOffset, - RecordVersion recordVersion, Time time, boolean reloadFromCleanShutdown, String logPrefix) throws IOException { @@ -72,22 +70,20 @@ public static void rebuildProducerState(ProducerStateManager producerStateManage } offsetsToSnapshot.add(Optional.of(lastOffset)); - LOG.info("{}Loading producer state till offset {} with message format version {}", logPrefix, lastOffset, recordVersion.value); + LOG.info("{}Loading producer state till offset {}", logPrefix, lastOffset); // We want to avoid unnecessary scanning of the log to build the producer state when the broker is being // upgraded. The basic idea is to use the absence of producer snapshot files to detect the upgrade case, - // but we have to be careful not to assume too much in the presence of broker failures. The two most common - // upgrade cases in which we expect to find no snapshots are the following: + // but we have to be careful not to assume too much in the presence of broker failures. The most common + // upgrade case in which we expect to find no snapshots is the following: // - // 1. The broker has been upgraded, but the topic is still on the old message format. - // 2. The broker has been upgraded, the topic is on the new message format, and we had a clean shutdown. 
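The comment block being rewritten here narrows the "skip the scan" condition for rebuilding producer state: with the old message formats gone, the only remaining shortcut is a clean shutdown with no producer snapshot on disk. A small sketch of that decision (the OptionalLong parameter stands in for ProducerStateManager.latestSnapshotOffset(), whose exact return type is assumed):

    import java.util.OptionalLong;

    final class RebuildDecisionSketch {
        // True when producer state can be seeded from fresh empty snapshots instead of
        // scanning the log segments from the beginning.
        static boolean canSkipFullScan(OptionalLong latestSnapshotOffset, boolean hadCleanShutdown) {
            return latestSnapshotOffset.isEmpty() && hadCleanShutdown;
        }
    }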
+ // * The broker has been upgraded, and we had a clean shutdown. // - // If we hit either of these cases, we skip producer state loading and write a new snapshot at the log end + // If we hit this case, we skip producer state loading and write a new snapshot at the log end // offset (see below). The next time the log is reloaded, we will load producer state using this snapshot // (or later snapshots). Otherwise, if there is no snapshot file, then we have to rebuild producer state // from the first segment. - if (recordVersion.value < RecordBatch.MAGIC_VALUE_V2 || - (!producerStateManager.latestSnapshotOffset().isPresent() && reloadFromCleanShutdown)) { + if (producerStateManager.latestSnapshotOffset().isEmpty() && reloadFromCleanShutdown) { // To avoid an expensive scan through all the segments, we take empty snapshots from the start of the // last two segments and the last offset. This should avoid the full scan in the case that the log needs // truncation. diff --git a/storage/src/main/java/org/apache/kafka/storage/log/metrics/BrokerTopicMetrics.java b/storage/src/main/java/org/apache/kafka/storage/log/metrics/BrokerTopicMetrics.java index a26c57b680d9b..46bb266073faf 100644 --- a/storage/src/main/java/org/apache/kafka/storage/log/metrics/BrokerTopicMetrics.java +++ b/storage/src/main/java/org/apache/kafka/storage/log/metrics/BrokerTopicMetrics.java @@ -84,7 +84,7 @@ private BrokerTopicMetrics(Optional name, boolean remoteStorageEnabled) metricTypeMap.put(INVALID_MESSAGE_CRC_RECORDS_PER_SEC, new MeterWrapper(INVALID_MESSAGE_CRC_RECORDS_PER_SEC, "requests")); metricTypeMap.put(INVALID_OFFSET_OR_SEQUENCE_RECORDS_PER_SEC, new MeterWrapper(INVALID_OFFSET_OR_SEQUENCE_RECORDS_PER_SEC, "requests")); - if (!name.isPresent()) { + if (name.isEmpty()) { metricTypeMap.put(REPLICATION_BYTES_IN_PER_SEC, new MeterWrapper(REPLICATION_BYTES_IN_PER_SEC, "bytes")); metricTypeMap.put(REPLICATION_BYTES_OUT_PER_SEC, new MeterWrapper(REPLICATION_BYTES_OUT_PER_SEC, "bytes")); metricTypeMap.put(REASSIGNMENT_BYTES_IN_PER_SEC, new MeterWrapper(REASSIGNMENT_BYTES_IN_PER_SEC, "bytes")); diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/ConsumerTaskTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/ConsumerTaskTest.java index 66176c68477a1..4bde23dc5c98c 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/ConsumerTaskTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/ConsumerTaskTest.java @@ -18,7 +18,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -84,7 +84,7 @@ public class ConsumerTaskTest { public void beforeEach() { final Map offsets = remoteLogPartitions.stream() .collect(Collectors.toMap(Function.identity(), e -> 0L)); - consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); consumer.updateBeginningOffsets(offsets); consumerTask = new ConsumerTask(handler, partitioner, consumer, 10L, 300_000L, Time.SYSTEM); } diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataCacheTest.java 
b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataCacheTest.java index 6b93f61dc7c97..6e619a1dbb6c7 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataCacheTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataCacheTest.java @@ -69,7 +69,7 @@ public void testCacheAddMetadataOnInvalidArgs() { } } - @ParameterizedTest(name = "isInitialized={0}") + @ParameterizedTest @ValueSource(booleans = {true, false}) public void testCacheUpdateMetadataOnInvalidArgs(boolean isInitialized) { if (isInitialized) { diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java index edc914d909051..0026f64581702 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerRestartTest.java @@ -62,7 +62,7 @@ public void testRLMMAPIsAfterRestart() throws Exception { // Create topics. String leaderTopic = "new-leader"; String followerTopic = "new-follower"; - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { // Set broker id 0 as the first entry which is taken as the leader. NewTopic newLeaderTopic = new NewTopic(leaderTopic, Collections.singletonMap(0, Arrays.asList(0, 1, 2))); // Set broker id 1 as the first entry which is taken as the leader. diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerTest.java index 9937a9f37aeed..20d3d78e37f7e 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerTest.java @@ -80,7 +80,7 @@ public void teardown() throws IOException { @ClusterTest public void testDoesTopicExist() throws ExecutionException, InterruptedException { - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { String topic = "test-topic-exist"; admin.createTopics(Collections.singletonList(new NewTopic(topic, 1, (short) 1))).all().get(); clusterInstance.waitForTopic(topic, 1); @@ -91,7 +91,7 @@ public void testDoesTopicExist() throws ExecutionException, InterruptedException @ClusterTest public void testTopicDoesNotExist() { - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { String topic = "dummy-test-topic"; boolean doesTopicExist = topicBasedRlmm().doesTopicExist(admin, topic); assertFalse(doesTopicExist); @@ -110,7 +110,7 @@ public void testNewPartitionUpdates() throws Exception { // Create topics. String leaderTopic = "new-leader"; String followerTopic = "new-follower"; - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { // Set broker id 0 as the first entry which is taken as the leader. 
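The test changes in this area replace clusterInstance.createAdminClient() with clusterInstance.admin(); the client is still closed via try-with-resources. A sketch of the pattern (the ClusterInstance import path and the topic name are assumptions; admin() and waitForTopic() are taken from the hunks here):

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.NewTopic;
    import org.apache.kafka.common.test.ClusterInstance;   // package assumed

    import java.util.Collections;

    final class AdminFromClusterSketch {
        static void createDemoTopic(ClusterInstance clusterInstance) throws Exception {
            try (Admin admin = clusterInstance.admin()) {
                admin.createTopics(Collections.singletonList(new NewTopic("demo-topic", 1, (short) 1)))
                     .all().get();
            }
            // Wait until metadata for the topic has propagated to the brokers.
            clusterInstance.waitForTopic("demo-topic", 1);
        }
    }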
admin.createTopics(Collections.singletonList(new NewTopic(leaderTopic, Collections.singletonMap(0, Arrays.asList(0, 1, 2))))).all().get(); clusterInstance.waitForTopic(leaderTopic, 1); @@ -152,6 +152,9 @@ public void testNewPartitionUpdates() throws Exception { assertThrows(RemoteResourceNotFoundException.class, () -> topicBasedRlmm().listRemoteLogSegments(newLeaderTopicIdPartition)); assertThrows(RemoteResourceNotFoundException.class, () -> topicBasedRlmm().listRemoteLogSegments(newFollowerTopicIdPartition)); + assertFalse(topicBasedRlmm().isReady(newLeaderTopicIdPartition)); + assertFalse(topicBasedRlmm().isReady(newFollowerTopicIdPartition)); + topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(newLeaderTopicIdPartition), Collections.singleton(newFollowerTopicIdPartition)); @@ -166,6 +169,9 @@ public void testNewPartitionUpdates() throws Exception { verify(spyRemotePartitionMetadataEventHandler).handleRemoteLogSegmentMetadata(followerSegmentMetadata); assertTrue(topicBasedRlmm().listRemoteLogSegments(newLeaderTopicIdPartition).hasNext()); assertTrue(topicBasedRlmm().listRemoteLogSegments(newFollowerTopicIdPartition).hasNext()); + + assertTrue(topicBasedRlmm().isReady(newLeaderTopicIdPartition)); + assertTrue(topicBasedRlmm().isReady(newFollowerTopicIdPartition)); } @ClusterTest diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java index 83884e6ce3da6..5359d9e5e407e 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorageEvent.java @@ -73,7 +73,7 @@ public boolean matches(final LocalTieredStorageCondition condition) { if (!exception.map(e -> condition.failed).orElseGet(() -> !condition.failed)) { return false; } - if (condition.baseOffset != null && !metadata.isPresent()) { + if (condition.baseOffset != null && metadata.isEmpty()) { return false; } return condition.baseOffset == null || metadata.get().startOffset() == condition.baseOffset; diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java index 7ce0c46a5156b..d7de1ab22ae6b 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfigTest.java @@ -61,8 +61,8 @@ public void testThreadPoolDefaults() { Map emptyProps = new HashMap<>(); RemoteLogManagerConfig remoteLogManagerConfigEmptyConfig = new RLMTestConfig(emptyProps).remoteLogManagerConfig(); assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE, remoteLogManagerConfigEmptyConfig.remoteLogManagerThreadPoolSize()); - assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE, remoteLogManagerConfigEmptyConfig.remoteLogManagerCopierThreadPoolSize()); - assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE, remoteLogManagerConfigEmptyConfig.remoteLogManagerExpirationThreadPoolSize()); + assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, remoteLogManagerConfigEmptyConfig.remoteLogManagerCopierThreadPoolSize()); + 
assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, remoteLogManagerConfigEmptyConfig.remoteLogManagerExpirationThreadPoolSize()); } @Test @@ -103,7 +103,7 @@ private Map getRLMProps(String rsmPrefix, String rlmmPrefix) { props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_PROP, 0.3); props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, - 10); + 2); props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_MAX_PENDING_TASKS_PROP, 100); props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_CUSTOM_METADATA_MAX_BYTES_PROP, diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/LocalLogTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/LocalLogTest.java new file mode 100644 index 0000000000000..a638f03abde45 --- /dev/null +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/LocalLogTest.java @@ -0,0 +1,754 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.storage.internals.log; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.errors.KafkaStorageException; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.Record; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.util.MockTime; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +class LocalLogTest { + + private static final MockTime MOCK_TIME = new MockTime(); + + private final File tmpDir = TestUtils.tempDirectory(); + private final File logDir = TestUtils.randomPartitionLogDir(tmpDir); + private 
final TopicPartition topicPartition = new TopicPartition("test_topic", 1); + private final LogDirFailureChannel logDirFailureChannel = new LogDirFailureChannel(10); + + private LocalLog log; + + @BeforeEach + public void setUp() throws IOException { + log = createLocalLogWithActiveSegment(logDir, new LogConfig(new Properties())); + } + + @AfterEach + public void tearDown() throws IOException { + try { + log.close(); + } catch (KafkaStorageException kse) { + // ignore + } + } + + record KeyValue(String key, String value) { + + SimpleRecord toRecord(long timestamp) { + return new SimpleRecord(timestamp, key.getBytes(), value.getBytes()); + } + + SimpleRecord toRecord() { + return new SimpleRecord(MOCK_TIME.milliseconds(), key.getBytes(), value.getBytes()); + } + + static KeyValue fromRecord(Record record) { + String key = record.hasKey() + ? StandardCharsets.UTF_8.decode(record.key()).toString() + : ""; + String value = record.hasValue() + ? StandardCharsets.UTF_8.decode(record.value()).toString() + : ""; + return new KeyValue(key, value); + } + } + + private List kvsToRecords(List keyValues) { + return keyValues.stream().map(KeyValue::toRecord).collect(Collectors.toList()); + } + + private List recordsToKvs(Iterable records) { + List keyValues = new ArrayList<>(); + for (Record record : records) { + keyValues.add(KeyValue.fromRecord(record)); + } + return keyValues; + } + + private void appendRecords(List records, long initialOffset) throws IOException { + log.append(initialOffset + records.size() - 1, + MemoryRecords.withRecords(initialOffset, Compression.NONE, 0, records.toArray(new SimpleRecord[0]))); + } + + private FetchDataInfo readRecords(long startOffset) throws IOException { + return readRecords( + startOffset, + log.segments().activeSegment().size(), + log.logEndOffsetMetadata() + ); + } + + private FetchDataInfo readRecords(int maxLength) throws IOException { + return readRecords( + 0L, + maxLength, + log.logEndOffsetMetadata() + ); + } + + private FetchDataInfo readRecords(long startOffset, LogOffsetMetadata maxOffsetMetadata) throws IOException { + return readRecords( + startOffset, + log.segments().activeSegment().size(), + maxOffsetMetadata + ); + } + + private FetchDataInfo readRecords( + long startOffset, + int maxLength, + LogOffsetMetadata maxOffsetMetadata) throws IOException { + return log.read(startOffset, + maxLength, + false, + maxOffsetMetadata, + false); + } + + @Test + public void testLogDeleteSegmentsSuccess() throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + log.roll(0L); + assertEquals(2, log.segments().numberOfSegments()); + assertNotEquals(0, logDir.listFiles().length); + List segmentsBeforeDelete = new ArrayList<>(log.segments().values()); + List deletedSegments = log.deleteAllSegments(); + assertTrue(log.segments().isEmpty()); + assertEquals(segmentsBeforeDelete, deletedSegments); + assertThrows(KafkaStorageException.class, () -> log.checkIfMemoryMappedBufferClosed()); + assertTrue(logDir.exists()); + } + + @Test + public void testRollEmptyActiveSegment() { + LogSegment oldActiveSegment = log.segments().activeSegment(); + log.roll(0L); + assertEquals(1, log.segments().numberOfSegments()); + assertNotEquals(oldActiveSegment, log.segments().activeSegment()); + assertNotEquals(0, logDir.listFiles().length); + assertTrue(oldActiveSegment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)); + } + + @Test + public void testLogDeleteDirSuccessWhenEmptyAndFailureWhenNonEmpty() 
throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + log.roll(0L); + assertEquals(2, log.segments().numberOfSegments()); + assertNotEquals(0, logDir.listFiles().length); + + assertThrows(IllegalStateException.class, () -> log.deleteEmptyDir()); + assertTrue(logDir.exists()); + + log.deleteAllSegments(); + log.deleteEmptyDir(); + assertFalse(logDir.exists()); + } + + @Test + public void testUpdateConfig() { + LogConfig oldConfig = log.config(); + assertEquals(oldConfig, log.config()); + + Properties props = new Properties(); + props.put(TopicConfig.SEGMENT_BYTES_CONFIG, oldConfig.segmentSize + 1); + LogConfig newConfig = new LogConfig(props); + log.updateConfig(newConfig); + assertEquals(newConfig, log.config()); + } + + @Test + public void testLogDirRenameToNewDir() throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + log.roll(0L); + assertEquals(2, log.segments().numberOfSegments()); + File newLogDir = TestUtils.randomPartitionLogDir(tmpDir); + assertTrue(log.renameDir(newLogDir.getName())); + assertFalse(logDir.exists()); + assertTrue(newLogDir.exists()); + assertEquals(newLogDir, log.dir()); + assertEquals(newLogDir.getParent(), log.parentDir()); + assertEquals(newLogDir.getParent(), log.dir().getParent()); + log.segments().values().forEach(segment -> assertEquals(newLogDir.getPath(), segment.log().file().getParentFile().getPath())); + assertEquals(2, log.segments().numberOfSegments()); + } + + @Test + public void testLogDirRenameToExistingDir() { + assertFalse(log.renameDir(log.dir().getName())); + } + + @Test + public void testLogFlush() throws IOException { + assertEquals(0L, log.recoveryPoint()); + assertEquals(MOCK_TIME.milliseconds(), log.lastFlushTime()); + + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + MOCK_TIME.sleep(1); + LogSegment newSegment = log.roll(0L); + log.flush(newSegment.baseOffset()); + log.markFlushed(newSegment.baseOffset()); + assertEquals(1L, log.recoveryPoint()); + assertEquals(MOCK_TIME.milliseconds(), log.lastFlushTime()); + } + + @Test + public void testLogAppend() throws IOException { + FetchDataInfo fetchDataInfoBeforeAppend = readRecords(1); + assertFalse(fetchDataInfoBeforeAppend.records.records().iterator().hasNext()); + + MOCK_TIME.sleep(1); + List keyValues = List.of(new KeyValue("abc", "ABC"), new KeyValue("de", "DE")); + appendRecords(kvsToRecords(keyValues), 0L); + assertEquals(2L, log.logEndOffset()); + assertEquals(0L, log.recoveryPoint()); + FetchDataInfo fetchDataInfo = readRecords(0L); + assertEquals(2L, Utils.toList(fetchDataInfo.records.records()).size()); + assertEquals(keyValues, recordsToKvs(fetchDataInfo.records.records())); + } + + @Test + public void testLogCloseSuccess() throws IOException { + List keyValues = List.of(new KeyValue("abc", "ABC"), new KeyValue("de", "DE")); + appendRecords(kvsToRecords(keyValues), 0L); + log.close(); + assertThrows(ClosedChannelException.class, () -> appendRecords(kvsToRecords(keyValues), 2L)); + } + + @Test + public void testLogCloseIdempotent() { + log.close(); + // Check that LocalLog.close() is idempotent + log.close(); + } + + @Test + public void testLogCloseFailureWhenInMemoryBufferClosed() throws IOException { + List keyValues = List.of(new KeyValue("abc", "ABC"), new KeyValue("de", "DE")); + appendRecords(kvsToRecords(keyValues), 0L); + 
log.closeHandlers(); + assertThrows(KafkaStorageException.class, () -> log.close()); + } + + @Test + public void testLogCloseHandlers() throws IOException { + List keyValues = List.of(new KeyValue("abc", "ABC"), new KeyValue("de", "DE")); + appendRecords(kvsToRecords(keyValues), 0L); + log.closeHandlers(); + assertThrows(ClosedChannelException.class, () -> appendRecords(kvsToRecords(keyValues), 2L)); + } + + @Test + public void testLogCloseHandlersIdempotent() { + log.closeHandlers(); + // Check that LocalLog.closeHandlers() is idempotent + log.closeHandlers(); + } + + static class TestDeletionReason implements SegmentDeletionReason { + private Collection deletedSegments = new ArrayList<>(); + + @Override + public void logReason(List toDelete) { + deletedSegments = new ArrayList<>(toDelete); + } + + Collection deletedSegments() { + return deletedSegments; + } + } + + private void testRemoveAndDeleteSegments(boolean asyncDelete) throws IOException { + for (int offset = 0; offset < 9; offset++) { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), offset); + log.roll(0L); + } + + assertEquals(10L, log.segments().numberOfSegments()); + + + TestDeletionReason reason = new TestDeletionReason(); + List toDelete = new ArrayList<>(log.segments().values()); + log.removeAndDeleteSegments(toDelete, asyncDelete, reason); + if (asyncDelete) { + MOCK_TIME.sleep(log.config().fileDeleteDelayMs + 1); + } + assertTrue(log.segments().isEmpty()); + assertEquals(toDelete, reason.deletedSegments()); + toDelete.forEach(segment -> assertTrue(segment.deleted())); + } + + @Test + public void testRemoveAndDeleteSegmentsSync() throws IOException { + testRemoveAndDeleteSegments(false); + } + + @Test + public void testRemoveAndDeleteSegmentsAsync() throws IOException { + testRemoveAndDeleteSegments(true); + } + + private void testDeleteSegmentFiles(boolean asyncDelete) throws IOException { + for (int offset = 0; offset < 9; offset++) { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), offset); + log.roll(0L); + } + + assertEquals(10L, log.segments().numberOfSegments()); + + Collection toDelete = log.segments().values(); + LocalLog.deleteSegmentFiles(toDelete, asyncDelete, log.dir(), log.topicPartition(), log.config(), log.scheduler(), log.logDirFailureChannel(), ""); + if (asyncDelete) { + toDelete.forEach(segment -> { + assertFalse(segment.deleted()); + assertTrue(segment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)); + }); + MOCK_TIME.sleep(log.config().fileDeleteDelayMs + 1); + } + toDelete.forEach(segment -> assertTrue(segment.deleted())); + } + + @Test + public void testDeleteSegmentFilesSync() throws IOException { + testDeleteSegmentFiles(false); + } + + @Test + public void testDeleteSegmentFilesAsync() throws IOException { + testDeleteSegmentFiles(true); + } + + @Test + public void testCreateAndDeleteSegment() throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + long newOffset = log.segments().activeSegment().baseOffset() + 1; + LogSegment oldActiveSegment = log.segments().activeSegment(); + LogSegment newActiveSegment = log.createAndDeleteSegment(newOffset, log.segments().activeSegment(), true, new LogTruncation(log.logger())); + assertEquals(1, log.segments().numberOfSegments()); + assertEquals(newActiveSegment, log.segments().activeSegment()); + assertNotEquals(oldActiveSegment, 
log.segments().activeSegment()); + assertTrue(oldActiveSegment.hasSuffix(LogFileUtils.DELETED_FILE_SUFFIX)); + assertEquals(newOffset, log.segments().activeSegment().baseOffset()); + assertEquals(0L, log.recoveryPoint()); + assertEquals(newOffset, log.logEndOffset()); + FetchDataInfo fetchDataInfo = readRecords(newOffset); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + } + + @Test + public void testTruncateFullyAndStartAt() throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + for (int offset = 0; offset < 8; offset++) { + appendRecords(List.of(record), offset); + if (offset % 2 != 0) + log.roll(0L); + } + for (int offset = 8; offset < 13; offset++) { + SimpleRecord r = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(r), offset); + } + assertEquals(5, log.segments().numberOfSegments()); + assertNotEquals(10L, log.segments().activeSegment().baseOffset()); + List expected = new ArrayList<>(log.segments().values()); + List deleted = log.truncateFullyAndStartAt(10L); + assertEquals(expected, deleted); + assertEquals(1, log.segments().numberOfSegments()); + assertEquals(10L, log.segments().activeSegment().baseOffset()); + assertEquals(0L, log.recoveryPoint()); + assertEquals(10L, log.logEndOffset()); + FetchDataInfo fetchDataInfo = readRecords(10L); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + } + + @Test + public void testWhenFetchOffsetHigherThanMaxOffset() throws IOException { + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + for (int offset = 0; offset < 5; offset++) { + appendRecords(List.of(record), offset); + if (offset % 2 != 0) + log.roll(0L); + } + assertEquals(3, log.segments().numberOfSegments()); + + // case-0: valid case, `startOffset` < `maxOffsetMetadata.offset` + var fetchDataInfo = readRecords(3L, new LogOffsetMetadata(4L, 4L, 0)); + assertEquals(1, Utils.toList(fetchDataInfo.records.records()).size()); + assertEquals(new LogOffsetMetadata(3, 2L, 69), fetchDataInfo.fetchOffsetMetadata); + + // case-1: `startOffset` == `maxOffsetMetadata.offset` + fetchDataInfo = readRecords(4L, new LogOffsetMetadata(4L, 4L, 0)); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + assertEquals(new LogOffsetMetadata(4L, 4L, 0), fetchDataInfo.fetchOffsetMetadata); + + // case-2: `startOffset` > `maxOffsetMetadata.offset` + fetchDataInfo = readRecords(5L, new LogOffsetMetadata(4L, 4L, 0)); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + assertEquals(new LogOffsetMetadata(5L, 4L, 69), fetchDataInfo.fetchOffsetMetadata); + + // case-3: `startOffset` < `maxMessageOffset.offset` but `maxMessageOffset.messageOnlyOffset` is true + fetchDataInfo = readRecords(3L, new LogOffsetMetadata(4L, -1L, -1)); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + assertEquals(new LogOffsetMetadata(3L, 2L, 69), fetchDataInfo.fetchOffsetMetadata); + + // case-4: `startOffset` < `maxMessageOffset.offset`, `maxMessageOffset.messageOnlyOffset` is false, but + // `maxOffsetMetadata.segmentBaseOffset` < `startOffset.segmentBaseOffset` + fetchDataInfo = readRecords(3L, new LogOffsetMetadata(4L, 0L, 40)); + assertFalse(fetchDataInfo.records.records().iterator().hasNext()); + assertEquals(new LogOffsetMetadata(3L, 2L, 69), fetchDataInfo.fetchOffsetMetadata); + } + + @Test + public void testTruncateTo() throws IOException { + for (int offset = 0; offset < 12; offset++) { + SimpleRecord record = 
new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), offset); + if (offset % 3 == 2) + log.roll(0L); + } + assertEquals(5, log.segments().numberOfSegments()); + assertEquals(12L, log.logEndOffset()); + + List expected = new ArrayList<>(log.segments().values(9L, log.logEndOffset() + 1)); + // Truncate to an offset before the base offset of the active segment + Collection deleted = log.truncateTo(7L); + assertEquals(expected, deleted); + assertEquals(3, log.segments().numberOfSegments()); + assertEquals(6L, log.segments().activeSegment().baseOffset()); + assertEquals(0L, log.recoveryPoint()); + assertEquals(7L, log.logEndOffset()); + FetchDataInfo fetchDataInfo = readRecords(6L); + assertEquals(1, Utils.toList(fetchDataInfo.records.records()).size()); + assertEquals(List.of(new KeyValue("", "a")), recordsToKvs(fetchDataInfo.records.records())); + + // Verify that we can still append to the active segment + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 7L); + assertEquals(8L, log.logEndOffset()); + } + + @Test + public void testNonActiveSegmentsFrom() throws IOException { + for (int i = 0; i < 5; i++) { + List keyValues = List.of(new KeyValue(String.valueOf(i), String.valueOf(i))); + appendRecords(kvsToRecords(keyValues), i); + log.roll(0L); + } + + assertEquals(5L, log.segments().activeSegment().baseOffset()); + assertEquals(List.of(0L, 1L, 2L, 3L, 4L), nonActiveBaseOffsetsFrom(0L)); + assertEquals(List.of(), nonActiveBaseOffsetsFrom(5L)); + assertEquals(List.of(2L, 3L, 4L), nonActiveBaseOffsetsFrom(2L)); + assertEquals(List.of(), nonActiveBaseOffsetsFrom(6L)); + } + + private List nonActiveBaseOffsetsFrom(long startOffset) { + return log.segments().nonActiveLogSegmentsFrom(startOffset).stream() + .map(LogSegment::baseOffset) + .collect(Collectors.toList()); + } + + private String topicPartitionName(String topic, String partition) { + return topic + "-" + partition; + } + + @Test + public void testParseTopicPartitionName() throws IOException { + String topic = "test_topic"; + String partition = "143"; + File dir = new File(logDir, topicPartitionName(topic, partition)); + TopicPartition topicPartition = LocalLog.parseTopicPartitionName(dir); + assertEquals(topic, topicPartition.topic()); + assertEquals(Integer.parseInt(partition), topicPartition.partition()); + } + + /** + * Tests that log directories with a period in their name that have been marked for deletion + * are parsed correctly by `Log.parseTopicPartitionName` (see KAFKA-5232 for details). 
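+     * For example, a deleted-topic directory named "foo.bar-testtopic-42.&lt;32-char id&gt;-delete"
+     * should yield topic "foo.bar-testtopic" and partition 42, i.e. the partition index is taken
+     * from the token after the last dash rather than the first dash inside the topic name.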
+ */ + @Test + public void testParseTopicPartitionNameWithPeriodForDeletedTopic() throws IOException { + String topic = "foo.bar-testtopic"; + String partition = "42"; + File dir = new File(logDir, LocalLog.logDeleteDirName(new TopicPartition(topic, Integer.parseInt(partition)))); + TopicPartition topicPartition = LocalLog.parseTopicPartitionName(dir); + assertEquals(topic, topicPartition.topic(), "Unexpected topic name parsed"); + assertEquals(Integer.parseInt(partition), topicPartition.partition(), "Unexpected partition number parsed"); + } + + @Test + public void testParseTopicPartitionNameForEmptyName() throws IOException { + File dir = new File(""); + String msg = "KafkaException should have been thrown for dir: " + dir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), msg); + } + + @Test + public void testParseTopicPartitionNameForNull() { + File dir = null; + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), + () -> "KafkaException should have been thrown for dir: " + dir); + } + + @Test + public void testParseTopicPartitionNameForMissingSeparator() throws IOException { + String topic = "test_topic"; + String partition = "1999"; + File dir = new File(logDir, topic + partition); + String msg = "KafkaException should have been thrown for dir: " + dir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), msg); + // also test the "-delete" marker case + File deleteMarkerDir = new File(logDir, topic + partition + "." + LogFileUtils.DELETE_DIR_SUFFIX); + msg = "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(deleteMarkerDir), msg); + } + + @Test + public void testParseTopicPartitionNameForMissingTopic() throws IOException { + String topic = ""; + String partition = "1999"; + File dir = new File(logDir, topicPartitionName(topic, partition)); + String msg = "KafkaException should have been thrown for dir: " + dir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), msg); + + // also test the "-delete" marker case + File deleteMarkerDir = new File(logDir, LocalLog.logDeleteDirName(new TopicPartition(topic, Integer.parseInt(partition)))); + + msg = "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(deleteMarkerDir), msg); + } + + @Test + public void testParseTopicPartitionNameForMissingPartition() throws IOException { + String topic = "test_topic"; + String partition = ""; + File dir = new File(logDir.getPath() + topicPartitionName(topic, partition)); + String msg = "KafkaException should have been thrown for dir: " + dir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), msg); + + // also test the "-delete" marker case + File deleteMarkerDir = new File(logDir, topicPartitionName(topic, partition) + "." 
+ LogFileUtils.DELETE_DIR_SUFFIX); + msg = "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(deleteMarkerDir), msg); + } + + @Test + public void testParseTopicPartitionNameForInvalidPartition() throws IOException { + String topic = "test_topic"; + String partition = "1999a"; + File dir = new File(logDir, topicPartitionName(topic, partition)); + String msg = "KafkaException should have been thrown for dir: " + dir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir), msg); + + // also test the "-delete" marker case + File deleteMarkerDir = new File(logDir, topic + partition + "." + LogFileUtils.DELETE_DIR_SUFFIX); + msg = "KafkaException should have been thrown for dir: " + deleteMarkerDir.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(deleteMarkerDir), msg); + } + + @Test + public void testParseTopicPartitionNameForExistingInvalidDir() throws IOException { + File dir1 = new File(logDir.getPath() + "/non_kafka_dir"); + String msg = "KafkaException should have been thrown for dir: " + dir1.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir1), msg); + File dir2 = new File(logDir.getPath() + "/non_kafka_dir-delete"); + msg = "KafkaException should have been thrown for dir: " + dir2.getCanonicalPath(); + assertThrows(KafkaException.class, () -> LocalLog.parseTopicPartitionName(dir2), msg); + } + + @Test + public void testLogDeleteDirName() { + String name1 = LocalLog.logDeleteDirName(new TopicPartition("foo", 3)); + assertTrue(name1.length() <= 255); + assertTrue(Pattern.compile("foo-3\\.[0-9a-z]{32}-delete").matcher(name1).matches()); + assertTrue(LocalLog.DELETE_DIR_PATTERN.matcher(name1).matches()); + assertFalse(LocalLog.FUTURE_DIR_PATTERN.matcher(name1).matches()); + String name2 = LocalLog.logDeleteDirName( + new TopicPartition("n" + String.join("", Collections.nCopies(248, "o")), 5)); + assertEquals(255, name2.length()); + assertTrue(Pattern.compile("n[o]{212}-5\\.[0-9a-z]{32}-delete").matcher(name2).matches()); + assertTrue(LocalLog.DELETE_DIR_PATTERN.matcher(name2).matches()); + assertFalse(LocalLog.FUTURE_DIR_PATTERN.matcher(name2).matches()); + } + + @Test + public void testOffsetFromFile() { + long offset = 23423423L; + + File logFile = LogFileUtils.logFile(tmpDir, offset); + assertEquals(offset, LogFileUtils.offsetFromFile(logFile)); + + File offsetIndexFile = LogFileUtils.offsetIndexFile(tmpDir, offset); + assertEquals(offset, LogFileUtils.offsetFromFile(offsetIndexFile)); + + File timeIndexFile = LogFileUtils.timeIndexFile(tmpDir, offset); + assertEquals(offset, LogFileUtils.offsetFromFile(timeIndexFile)); + } + + @Test + public void testRollSegmentThatAlreadyExists() throws IOException { + assertEquals(1, log.segments().numberOfSegments(), "Log begins with a single empty segment."); + + // roll active segment with the same base offset of size zero should recreate the segment + log.roll(0L); + assertEquals(1, log.segments().numberOfSegments(), "Expect 1 segment after roll() empty segment with base offset."); + + // should be able to append records to active segment + List keyValues1 = List.of(new KeyValue("k1", "v1")); + appendRecords(kvsToRecords(keyValues1), 0); + assertEquals(0L, log.segments().activeSegment().baseOffset()); + // make sure we can append more records + List keyValues2 = List.of(new KeyValue("k2", "v2")); 
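+        // The second batch is appended with an explicit timestamp of now + 10, so its records
+        // carry a later create time than the first batch when read back below.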
+ appendRecords(keyValues2.stream() + .map(kv -> kv.toRecord(MOCK_TIME.milliseconds() + 10)) + .collect(Collectors.toList()), + 1L); + assertEquals(2, log.logEndOffset(), "Expect two records in the log"); + FetchDataInfo readResult = readRecords(0L); + assertEquals(2L, Utils.toList(readResult.records.records()).size()); + assertEquals(Stream.concat(keyValues1.stream(), keyValues2.stream()).collect(Collectors.toList()), recordsToKvs(readResult.records.records())); + + // roll so that active segment is empty + log.roll(0L); + assertEquals(2L, log.segments().activeSegment().baseOffset(), "Expect base offset of active segment to be LEO"); + assertEquals(2, log.segments().numberOfSegments(), "Expect two segments."); + assertEquals(2L, log.logEndOffset()); + } + + @Test + public void testNewSegmentsAfterRoll() throws IOException { + assertEquals(1, log.segments().numberOfSegments(), "Log begins with a single empty segment."); + + // roll active segment with the same base offset of size zero should recreate the segment + { + LogSegment newSegment = log.roll(0L); + assertEquals(0L, newSegment.baseOffset()); + assertEquals(1, log.segments().numberOfSegments()); + assertEquals(0L, log.logEndOffset()); + } + + appendRecords(List.of(new KeyValue("k1", "v1").toRecord()), 0L); + + { + LogSegment newSegment = log.roll(0L); + assertEquals(1L, newSegment.baseOffset()); + assertEquals(2, log.segments().numberOfSegments()); + assertEquals(1L, log.logEndOffset()); + } + + appendRecords(List.of(new KeyValue("k2", "v2").toRecord()), 1L); + + { + LogSegment newSegment = log.roll(1L); + assertEquals(2L, newSegment.baseOffset()); + assertEquals(3, log.segments().numberOfSegments()); + assertEquals(2L, log.logEndOffset()); + } + } + + @Test + public void testRollSegmentErrorWhenNextOffsetIsIllegal() throws IOException { + assertEquals(1, log.segments().numberOfSegments(), "Log begins with a single empty segment."); + + List keyValues = List.of(new KeyValue("k1", "v1"), new KeyValue("k2", "v2"), new KeyValue("k3", "v3")); + appendRecords(kvsToRecords(keyValues), 0L); + assertEquals(0L, log.segments().activeSegment().baseOffset()); + assertEquals(3, log.logEndOffset(), "Expect two records in the log"); + + // roll to create an empty active segment + log.roll(0L); + assertEquals(3L, log.segments().activeSegment().baseOffset()); + + // intentionally setup the logEndOffset to introduce an error later + log.updateLogEndOffset(1L); + + // expect an error because of attempt to roll to a new offset (1L) that's lower than the + // base offset (3L) of the active segment + assertThrows(KafkaException.class, () -> log.roll(0L)); + } + + @Test + public void testFlushingNonExistentDir() throws IOException { + LocalLog spyLog = spy(log); + + SimpleRecord record = new SimpleRecord(MOCK_TIME.milliseconds(), "a".getBytes()); + appendRecords(List.of(record), 0L); + MOCK_TIME.sleep(1); + LogSegment newSegment = log.roll(0L); + + // simulate the directory is renamed concurrently + doReturn(new File("__NON_EXISTENT__")).when(spyLog).dir(); + assertDoesNotThrow(() -> spyLog.flush(newSegment.baseOffset())); + } + + private LocalLog createLocalLogWithActiveSegment(File dir, LogConfig config) throws IOException { + LogSegments segments = new LogSegments(topicPartition); + segments.add(LogSegment.open(dir, + 0L, + config, + MOCK_TIME, + config.initFileSize(), + config.preallocate)); + return new LocalLog(dir, + config, + segments, + 0L, + new LogOffsetMetadata(0L, 0L, 0), + MOCK_TIME.scheduler, + MOCK_TIME, + topicPartition, + 
logDirFailureChannel); + } +} diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/LogSegmentTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/LogSegmentTest.java index 616671a65491b..b62818ee3894c 100644 --- a/storage/src/test/java/org/apache/kafka/storage/internals/log/LogSegmentTest.java +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/LogSegmentTest.java @@ -24,6 +24,7 @@ import org.apache.kafka.common.record.FileLogInputStream; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecordsBuilder; import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.Records; @@ -32,7 +33,6 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.coordinator.transaction.TransactionLogConfig; import org.apache.kafka.server.util.MockScheduler; import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; @@ -48,6 +48,7 @@ import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; +import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -145,7 +146,7 @@ public void testAppendForLogSegmentOffsetOverflowException(long baseOffset, long try (LogSegment seg = createSegment(baseOffset, 10, Time.SYSTEM)) { long currentTime = Time.SYSTEM.milliseconds(); MemoryRecords memoryRecords = v1Records(0, "hello"); - assertThrows(LogSegmentOffsetOverflowException.class, () -> seg.append(largestOffset, currentTime, largestOffset, memoryRecords)); + assertThrows(LogSegmentOffsetOverflowException.class, () -> seg.append(largestOffset, memoryRecords)); } } @@ -168,7 +169,7 @@ public void testReadOnEmptySegment() throws IOException { public void testReadBeforeFirstOffset() throws IOException { try (LogSegment seg = createSegment(40)) { MemoryRecords ms = v1Records(50, "hello", "there", "little", "bee"); - seg.append(53, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(53, ms); Records read = seg.read(41, 300).records; checkEquals(ms.records().iterator(), read.records().iterator()); } @@ -183,7 +184,7 @@ public void testReadFromMiddleOfBatch() throws IOException { long batchBaseOffset = 50; try (LogSegment seg = createSegment(40)) { MemoryRecords ms = v2Records(batchBaseOffset, "hello", "there", "little", "bee"); - seg.append(53, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(53, ms); FetchDataInfo readInfo = seg.read(52, 300); assertEquals(batchBaseOffset, readInfo.fetchOffsetMetadata.messageOffset); } @@ -196,7 +197,7 @@ public void testReadFromMiddleOfBatch() throws IOException { public void testReadAfterLast() throws IOException { try (LogSegment seg = createSegment(40)) { MemoryRecords ms = v1Records(50, "hello", "there"); - seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(51, ms); FetchDataInfo read = seg.read(52, 200); assertNull(read, "Read beyond the last offset in the segment should give null"); } @@ -210,9 +211,9 @@ public void testReadAfterLast() throws IOException { public void testReadFromGap() throws IOException { try (LogSegment seg = createSegment(40)) { MemoryRecords ms = v1Records(50, "hello", "there"); - seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(51, ms); MemoryRecords ms2 
= v1Records(60, "alpha", "beta"); - seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2); + seg.append(61, ms2); FetchDataInfo read = seg.read(55, 200); checkEquals(ms2.records().iterator(), read.records.records().iterator()); } @@ -225,7 +226,7 @@ public void testReadWhenNoMaxPosition(boolean minOneMessage) throws IOException int maxSize = 1; try (LogSegment seg = createSegment(40)) { MemoryRecords ms = v1Records(50, "hello", "there"); - seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(51, ms); // read at first offset FetchDataInfo read = seg.read(50, maxSize, maxPosition, minOneMessage); @@ -257,9 +258,9 @@ public void testTruncate() throws IOException { long offset = 40; for (int i = 0; i < 30; i++) { MemoryRecords ms1 = v1Records(offset, "hello"); - seg.append(offset, RecordBatch.NO_TIMESTAMP, -1L, ms1); + seg.append(offset, ms1); MemoryRecords ms2 = v1Records(offset + 1, "hello"); - seg.append(offset + 1, RecordBatch.NO_TIMESTAMP, -1L, ms2); + seg.append(offset + 1, ms2); // check that we can read back both messages FetchDataInfo read = seg.read(offset, 10000); @@ -320,7 +321,7 @@ public void testReloadLargestTimestampAndNextOffsetAfterTruncation() throws IOEx try (LogSegment seg = createSegment(40, 2 * v1Records(0, "hello").sizeInBytes() - 1)) { int offset = 40; for (int i = 0; i < numMessages; i++) { - seg.append(offset, offset, offset, v1Records(offset, "hello")); + seg.append(offset, v1Records(offset, "hello")); offset++; } assertEquals(offset, seg.readNextOffset()); @@ -343,7 +344,12 @@ public void testTruncateFull() throws IOException { MockTime time = new MockTime(); try (LogSegment seg = createSegment(40, time)) { - seg.append(41, RecordBatch.NO_TIMESTAMP, -1L, v1Records(40, "hello", "there")); + seg.append(41, + MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 40, Compression.NONE, TimestampType.CREATE_TIME, + List.of( + new SimpleRecord("hello".getBytes()), + new SimpleRecord("there".getBytes()) + ).toArray(new SimpleRecord[0]))); // If the segment is empty after truncation, the create time should be reset time.sleep(500); @@ -355,7 +361,7 @@ public void testTruncateFull() throws IOException { assertFalse(seg.offsetIndex().isFull()); assertNull(seg.read(0, 1024), "Segment should be empty."); - seg.append(41, RecordBatch.NO_TIMESTAMP, -1L, v1Records(40, "hello", "there")); + seg.append(41, v1Records(40, "hello", "there")); } } @@ -368,7 +374,7 @@ public void testFindOffsetByTimestamp() throws IOException { try (LogSegment seg = createSegment(40, messageSize * 2 - 1)) { // Produce some messages for (int i = 40; i < 50; i++) { - seg.append(i, i * 10, i, v1Records(i, "msg" + i)); + seg.append(i, v1Records(i, "msg" + i)); } assertEquals(490, seg.largestTimestamp()); @@ -394,7 +400,7 @@ public void testFindOffsetByTimestamp() throws IOException { public void testNextOffsetCalculation() throws IOException { try (LogSegment seg = createSegment(40)) { assertEquals(40, seg.readNextOffset()); - seg.append(52, RecordBatch.NO_TIMESTAMP, -1L, v1Records(50, "hello", "there", "you")); + seg.append(52, v1Records(50, "hello", "there", "you")); assertEquals(53, seg.readNextOffset()); } } @@ -437,11 +443,11 @@ public void testChangeFileSuffixes() throws IOException { public void testRecoveryFixesCorruptIndex() throws Exception { try (LogSegment seg = createSegment(0)) { for (int i = 0; i < 100; i++) { - seg.append(i, RecordBatch.NO_TIMESTAMP, -1L, v1Records(i, Integer.toString(i))); + seg.append(i, v1Records(i, Integer.toString(i))); } File indexFile = seg.offsetIndexFile(); 
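            // Corrupt part of the on-disk offset index, then verify that recover() rebuilds it so
            // every offset can still be located afterwards.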
writeNonsenseToFile(indexFile, 5, (int) indexFile.length()); - seg.recover(newProducerStateManager(), Optional.empty()); + seg.recover(newProducerStateManager(), mock(LeaderEpochFileCache.class)); for (int i = 0; i < 100; i++) { Iterable records = seg.read(i, 1, Optional.of((long) seg.size()), true).records.records(); assertEquals(i, records.iterator().next().offset()); @@ -460,30 +466,30 @@ public void testRecoverTransactionIndex() throws Exception { long pid2 = 10L; // append transactional records from pid1 - segment.append(101L, RecordBatch.NO_TIMESTAMP, - 100L, MemoryRecords.withTransactionalRecords(100L, Compression.NONE, + segment.append(101L, + MemoryRecords.withTransactionalRecords(100L, Compression.NONE, pid1, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); // append transactional records from pid2 - segment.append(103L, RecordBatch.NO_TIMESTAMP, - 102L, MemoryRecords.withTransactionalRecords(102L, Compression.NONE, + segment.append(103L, + MemoryRecords.withTransactionalRecords(102L, Compression.NONE, pid2, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); // append non-transactional records - segment.append(105L, RecordBatch.NO_TIMESTAMP, - 104L, MemoryRecords.withRecords(104L, Compression.NONE, + segment.append(105L, + MemoryRecords.withRecords(104L, Compression.NONE, partitionLeaderEpoch, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); // abort the transaction from pid2 - segment.append(106L, RecordBatch.NO_TIMESTAMP, - 106L, endTxnRecords(ControlRecordType.ABORT, pid2, producerEpoch, 106L)); + segment.append(106L, + endTxnRecords(ControlRecordType.ABORT, pid2, producerEpoch, 106L)); // commit the transaction from pid1 - segment.append(107L, RecordBatch.NO_TIMESTAMP, - 107L, endTxnRecords(ControlRecordType.COMMIT, pid1, producerEpoch, 107L)); + segment.append(107L, + endTxnRecords(ControlRecordType.COMMIT, pid1, producerEpoch, 107L)); ProducerStateManager stateManager = newProducerStateManager(); - segment.recover(stateManager, Optional.empty()); + segment.recover(stateManager, mock(LeaderEpochFileCache.class)); assertEquals(108L, stateManager.mapEndOffset()); List abortedTxns = segment.txnIndex().allAbortedTxns(); @@ -499,7 +505,7 @@ public void testRecoverTransactionIndex() throws Exception { stateManager.loadProducerEntry(new ProducerStateEntry(pid2, producerEpoch, 0, RecordBatch.NO_TIMESTAMP, OptionalLong.of(75L), Optional.of(new BatchMetadata(10, 10L, 5, RecordBatch.NO_TIMESTAMP)))); - segment.recover(stateManager, Optional.empty()); + segment.recover(stateManager, mock(LeaderEpochFileCache.class)); assertEquals(108L, stateManager.mapEndOffset()); abortedTxns = segment.txnIndex().allAbortedTxns(); @@ -522,19 +528,19 @@ public void testRecoveryRebuildsEpochCache() throws Exception { LeaderEpochCheckpointFile checkpoint = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)); LeaderEpochFileCache cache = new LeaderEpochFileCache(topicPartition, checkpoint, new MockScheduler(new MockTime())); - seg.append(105L, RecordBatch.NO_TIMESTAMP, 104L, MemoryRecords.withRecords(104L, Compression.NONE, 0, + seg.append(105L, MemoryRecords.withRecords(104L, Compression.NONE, 0, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); - seg.append(107L, RecordBatch.NO_TIMESTAMP, 106L, MemoryRecords.withRecords(106L, Compression.NONE, 1, + seg.append(107L, MemoryRecords.withRecords(106L, 
Compression.NONE, 1, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); - seg.append(109L, RecordBatch.NO_TIMESTAMP, 108L, MemoryRecords.withRecords(108L, Compression.NONE, 1, + seg.append(109L, MemoryRecords.withRecords(108L, Compression.NONE, 1, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); - seg.append(111L, RecordBatch.NO_TIMESTAMP, 110L, MemoryRecords.withRecords(110L, Compression.NONE, 2, + seg.append(111L, MemoryRecords.withRecords(110L, Compression.NONE, 2, new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()))); - seg.recover(newProducerStateManager(), Optional.of(cache)); + seg.recover(newProducerStateManager(), cache); assertEquals(Arrays.asList( new EpochEntry(0, 104L), new EpochEntry(1, 106L), @@ -567,11 +573,11 @@ private MemoryRecords endTxnRecords( public void testRecoveryFixesCorruptTimeIndex() throws IOException { try (LogSegment seg = createSegment(0)) { for (int i = 0; i < 100; i++) { - seg.append(i, i * 10, i, v1Records(i, String.valueOf(i))); + seg.append(i, v1Records(i, String.valueOf(i))); } File timeIndexFile = seg.timeIndexFile(); writeNonsenseToFile(timeIndexFile, 5, (int) timeIndexFile.length()); - seg.recover(newProducerStateManager(), Optional.empty()); + seg.recover(newProducerStateManager(), mock(LeaderEpochFileCache.class)); for (int i = 0; i < 100; i++) { assertEquals(i, seg.findOffsetByTimestamp(i * 10, 0L).get().offset); if (i < 99) { @@ -590,7 +596,7 @@ public void testRecoveryWithCorruptMessage() throws IOException { for (int ignore = 0; ignore < 10; ignore++) { try (LogSegment seg = createSegment(0)) { for (int i = 0; i < messagesAppended; i++) { - seg.append(i, RecordBatch.NO_TIMESTAMP, -1L, v1Records(i, String.valueOf(i))); + seg.append(i, v1Records(i, String.valueOf(i))); } int offsetToBeginCorruption = TestUtils.RANDOM.nextInt(messagesAppended); // start corrupting somewhere in the middle of the chosen record all the way to the end @@ -598,7 +604,7 @@ public void testRecoveryWithCorruptMessage() throws IOException { FileRecords.LogOffsetPosition recordPosition = seg.log().searchForOffsetWithSize(offsetToBeginCorruption, 0); int position = recordPosition.position + TestUtils.RANDOM.nextInt(15); writeNonsenseToFile(seg.log().file(), position, (int) (seg.log().file().length() - position)); - seg.recover(newProducerStateManager(), Optional.empty()); + seg.recover(newProducerStateManager(), mock(LeaderEpochFileCache.class)); List expectList = new ArrayList<>(); for (long j = 0; j < offsetToBeginCorruption; j++) { @@ -627,9 +633,9 @@ public void testCreateWithInitFileSizeAppendMessage() throws IOException { 512 * 1024 * 1024, true, "")) { segments.add(seg); MemoryRecords ms = v1Records(50, "hello", "there"); - seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(51, ms); MemoryRecords ms2 = v1Records(60, "alpha", "beta"); - seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2); + seg.append(61, ms2); FetchDataInfo read = seg.read(55, 200); checkEquals(ms2.records().iterator(), read.records.records().iterator()); } @@ -650,9 +656,9 @@ public void testCreateWithInitFileSizeClearShutdown() throws IOException { try (LogSegment seg = LogSegment.open(tempDir, 40, logConfig, Time.SYSTEM, 512 * 1024 * 1024, true)) { MemoryRecords ms = v1Records(50, "hello", "there"); - seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms); + seg.append(51, ms); MemoryRecords ms2 = v1Records(60, "alpha", "beta"); - seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2); + seg.append(61, ms2); FetchDataInfo read = 
seg.read(55, 200); checkEquals(ms2.records().iterator(), read.records.records().iterator()); long oldSize = seg.log().sizeInBytes(); @@ -690,9 +696,9 @@ public void shouldTruncateEvenIfOffsetPointsToAGapInTheLog() throws IOException // Given two messages with a gap between them (e.g. mid offset compacted away) MemoryRecords ms1 = recordsForTruncateEven(offset, "first message"); - seg.append(offset, RecordBatch.NO_TIMESTAMP, -1L, ms1); + seg.append(offset, ms1); MemoryRecords ms2 = recordsForTruncateEven(offset + 3, "message after gap"); - seg.append(offset + 3, RecordBatch.NO_TIMESTAMP, -1L, ms2); + seg.append(offset + 3, ms2); // When we truncate to an offset without a corresponding log entry seg.truncateTo(offset + 1); @@ -743,7 +749,8 @@ public void testGetFirstBatchTimestamp() throws IOException { try (LogSegment segment = createSegment(1)) { assertEquals(Long.MAX_VALUE, segment.getFirstBatchTimestamp()); - segment.append(1, 1000L, 1, MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord("one".getBytes()))); + segment.append(1, + MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord(1000L, "one".getBytes()))); assertEquals(1000L, segment.getFirstBatchTimestamp()); } } @@ -780,12 +787,83 @@ public void testDeleteIfExistsWithGetParentIsNull() throws IOException { } } + @Test + public void testIndexForMultipleBatchesInMemoryRecords() throws IOException { + LogSegment segment = createSegment(0, 1, Time.SYSTEM); + + ByteBuffer buffer1 = ByteBuffer.allocate(1024); + // append first batch to buffer1 + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 0); + builder.append(0L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append second batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 1); + builder.append(1L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer1.flip(); + MemoryRecords record = MemoryRecords.readableRecords(buffer1); + segment.append(1L, record); + + ByteBuffer buffer2 = ByteBuffer.allocate(1024); + // append first batch to buffer2 + builder = MemoryRecords.builder(buffer2, Compression.NONE, TimestampType.CREATE_TIME, 2); + builder.append(2L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer2.flip(); + record = MemoryRecords.readableRecords(buffer2); + segment.append(2L, record); + + assertEquals(2, segment.offsetIndex().entries()); + assertEquals(1, segment.offsetIndex().entry(0).offset); + assertEquals(2, segment.offsetIndex().entry(1).offset); + + assertEquals(2, segment.timeIndex().entries()); + assertEquals(new TimestampOffset(1, 1), segment.timeIndex().entry(0)); + assertEquals(new TimestampOffset(2, 2), segment.timeIndex().entry(1)); + } + + @Test + public void testNonMonotonicTimestampForMultipleBatchesInMemoryRecords() throws IOException { + LogSegment segment = createSegment(0, 1, Time.SYSTEM); + + ByteBuffer buffer1 = ByteBuffer.allocate(1024); + // append first batch to buffer1 + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 0); + builder.append(1L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append second batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 1); + builder.append(0L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append third batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, 
TimestampType.CREATE_TIME, 2); + builder.append(2L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer1.flip(); + MemoryRecords record = MemoryRecords.readableRecords(buffer1); + segment.append(2L, record); + + assertEquals(2, segment.offsetIndex().entries()); + assertEquals(1, segment.offsetIndex().entry(0).offset); + assertEquals(2, segment.offsetIndex().entry(1).offset); + + assertEquals(2, segment.timeIndex().entries()); + assertEquals(new TimestampOffset(1, 0), segment.timeIndex().entry(0)); + assertEquals(new TimestampOffset(2, 2), segment.timeIndex().entry(1)); + } + private ProducerStateManager newProducerStateManager() throws IOException { return new ProducerStateManager( topicPartition, logDir, (int) (Duration.ofMinutes(5).toMillis()), - new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + new ProducerStateManagerConfig(86400000, false), new MockTime() ); } diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/LogValidatorTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/LogValidatorTest.java index 6f6d9ed37d00b..1efed158827d2 100644 --- a/storage/src/test/java/org/apache/kafka/storage/internals/log/LogValidatorTest.java +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/LogValidatorTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.errors.InvalidTimestampException; -import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.ControlRecordType; @@ -38,7 +37,6 @@ import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.common.RequestLocal; import org.apache.kafka.server.util.MockTime; import org.apache.kafka.storage.internals.log.LogValidator.ValidationResult; @@ -132,12 +130,12 @@ public void testValidationOfBatchesWithNonSequentialInnerOffsets() { @ParameterizedTest @CsvSource({ - "0,gzip,none", "1,gzip,none", "2,gzip,none", - "0,gzip,gzip", "1,gzip,gzip", "2,gzip,gzip", - "0,snappy,gzip", "1,snappy,gzip", "2,snappy,gzip", - "0,lz4,gzip", "1,lz4,gzip", "2,lz4,gzip", - "2,none,none", "2,none,gzip", - "2,zstd,gzip", + "0,gzip,none", "1,gzip,none", "2,gzip,none", + "0,gzip,gzip", "1,gzip,gzip", "2,gzip,gzip", + "0,snappy,gzip", "1,snappy,gzip", "2,snappy,gzip", + "0,lz4,gzip", "1,lz4,gzip", "2,lz4,gzip", + "2,none,none", "2,none,gzip", + "2,zstd,gzip", }) public void checkOnlyOneBatch(Byte magic, String sourceCompression, String targetCompression) { @@ -186,8 +184,7 @@ public void testBatchWithoutRecordsNotAllowed(String sourceCompressionName, Stri 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() )); @@ -221,8 +218,7 @@ public void testCreateTimeUpConversionV1ToV2() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); LogValidator.ValidationResult validatedResults = 
validator.validateMessagesAndAssignOffsets( @@ -244,7 +240,6 @@ public void testCreateTimeUpConversionV1ToV2() { } assertEquals(timestamp, validatedResults.maxTimestampMs); - assertEquals(2, validatedResults.shallowOffsetOfMaxTimestamp, "Offset of max timestamp should be the last offset 2."); assertTrue(validatedResults.messageSizeMaybeChanged, "Message size should have been changed"); verifyRecordValidationStats( @@ -271,8 +266,7 @@ public void checkCreateTimeUpConversionFromV0(byte toMagic) { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); LogValidator.ValidationResult validatedResults = logValidator.validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), @@ -292,7 +286,6 @@ public void checkCreateTimeUpConversionFromV0(byte toMagic) { } assertEquals(RecordBatch.NO_TIMESTAMP, validatedResults.maxTimestampMs, "Max timestamp should be " + RecordBatch.NO_TIMESTAMP); - assertEquals(-1, validatedResults.shallowOffsetOfMaxTimestamp); assertTrue(validatedResults.messageSizeMaybeChanged, "Message size should have been changed"); verifyRecordValidationStats(validatedResults.recordValidationStats, 3, records, true); @@ -355,8 +348,7 @@ public void checkRecompression(byte magic) { 1000L, 1000L, partitionLeaderEpoch, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, @@ -389,7 +381,6 @@ public void checkRecompression(byte magic) { // Both V2 and V1 have single batch in the validated records when compression is enabled, and hence their shallow // OffsetOfMaxTimestamp is the last offset of the single batch assertEquals(1, iteratorSize(validatedRecords.batches().iterator())); - assertEquals(2, validatingResults.shallowOffsetOfMaxTimestamp); assertTrue(validatingResults.messageSizeMaybeChanged, "Message size should have been changed"); @@ -461,8 +452,7 @@ private ValidationResult validateMessages(MemoryRecords records, 1000L, 1000L, RecordBatch.NO_PRODUCER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.IBP_2_3_IV1 + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()); } @@ -547,8 +537,7 @@ public void checkCompressed(byte magic) { 1000L, 1000L, partitionLeaderEpoch, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); LogValidator.ValidationResult validatedResults = validator.validateMessagesAndAssignOffsets( @@ -579,8 +568,6 @@ public void checkCompressed(byte magic) { assertEquals(now + 1, validatedResults.maxTimestampMs, "Max timestamp should be " + (now + 1)); - int expectedShallowOffsetOfMaxTimestamp = 2; - assertEquals(expectedShallowOffsetOfMaxTimestamp, validatedResults.shallowOffsetOfMaxTimestamp, "Shallow offset of max timestamp should be 2"); assertFalse(validatedResults.messageSizeMaybeChanged, "Message size should not have been changed"); verifyRecordValidationStats(validatedResults.recordValidationStats, 0, records, true); @@ -636,8 +623,7 @@ void testInvalidCreateTimeNonCompressedV1() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -668,8 +654,7 @@ public void testInvalidCreateTimeCompressedV1() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - 
MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -700,8 +685,7 @@ public void testInvalidCreateTimeNonCompressedV2() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -712,9 +696,9 @@ public void testInvalidCreateTimeNonCompressedV2() { @ParameterizedTest @CsvSource({ - "0,gzip,gzip", "1,gzip,gzip", - "0,lz4,lz4", "1,lz4,lz4", - "0,snappy,snappy", "1,snappy,snappy", + "0,gzip,gzip", "1,gzip,gzip", + "0,lz4,lz4", "1,lz4,lz4", + "0,snappy,snappy", "1,snappy,snappy", }) public void checkInvalidChecksum(byte magic, String compressionName, String typeName) { Compression compression = Compression.of(compressionName).build(); @@ -743,8 +727,7 @@ public void checkInvalidChecksum(byte magic, String compressionName, String type 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); @@ -797,8 +780,7 @@ public void checkInvalidSequence(byte magic, String compressionName, String type 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); @@ -813,10 +795,10 @@ public void checkInvalidSequence(byte magic, String compressionName, String type @ParameterizedTest @CsvSource({ - "0,gzip,gzip", "1,gzip,gzip", "2,gzip,gzip", - "0,lz4,lz4", "1,lz4,lz4", "2,lz4,lz4", - "0,snappy,snappy", "1,snappy,snappy", "2,snappy,snappy", - "2,zstd,zstd" + "0,gzip,gzip", "1,gzip,gzip", "2,gzip,gzip", + "0,lz4,lz4", "1,lz4,lz4", "2,lz4,lz4", + "0,snappy,snappy", "1,snappy,snappy", "2,snappy,snappy", + "2,zstd,zstd" }) public void checkNoKeyCompactedTopic(byte magic, String compressionName, String typeName) { Compression codec = Compression.of(compressionName).build(); @@ -835,8 +817,7 @@ public void checkNoKeyCompactedTopic(byte magic, String compressionName, String 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -869,8 +850,7 @@ public void testInvalidCreateTimeCompressedV2() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -899,8 +879,7 @@ public void testAbsoluteOffsetAssignmentNonCompressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -931,8 +910,7 @@ public void testAbsoluteOffsetAssignmentCompressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -962,8 +940,7 @@ public void testRelativeOffsetAssignmentNonCompressedV1() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -993,8 +970,7 @@ public void testRelativeOffsetAssignmentNonCompressedV2() { 5000L, 5000L, 
RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1025,8 +1001,7 @@ public void testRelativeOffsetAssignmentCompressedV1() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1057,8 +1032,7 @@ public void testRelativeOffsetAssignmentCompressedV2() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1087,8 +1061,7 @@ public void testOffsetAssignmentAfterUpConversionV0ToV1NonCompressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1123,8 +1096,7 @@ public void testOffsetAssignmentAfterUpConversionV0ToV2NonCompressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1160,8 +1132,7 @@ public void testOffsetAssignmentAfterUpConversionV0ToV1Compressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1197,8 +1168,7 @@ public void testOffsetAssignmentAfterUpConversionV0ToV2Compressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1231,8 +1201,7 @@ public void testControlRecordsNotAllowedFromClients() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1257,8 +1226,7 @@ public void testControlRecordsNotCompressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.COORDINATOR, - MetadataVersion.latestTesting() + AppendOrigin.COORDINATOR ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1287,8 +1255,7 @@ public void testOffsetAssignmentAfterDownConversionV1ToV0NonCompressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, @@ -1315,8 +1282,7 @@ public void testOffsetAssignmentAfterDownConversionV1ToV0Compressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1339,8 +1305,7 @@ public void testOffsetAssignmentAfterUpConversionV1ToV2NonCompressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT 
).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1364,8 +1329,7 @@ public void testOffsetAssignmentAfterUpConversionV1ToV2Compressed() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1389,8 +1353,7 @@ public void testOffsetAssignmentAfterDownConversionV2ToV1NonCompressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1415,8 +1378,7 @@ public void testOffsetAssignmentAfterDownConversionV2ToV1Compressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1442,8 +1404,7 @@ public void testDownConversionOfTransactionalRecordsNotPermitted() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() )); @@ -1470,8 +1431,7 @@ public void testDownConversionOfIdempotentRecordsNotPermitted() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() )); @@ -1495,8 +1455,7 @@ public void testOffsetAssignmentAfterDownConversionV2ToV0NonCompressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1522,8 +1481,7 @@ public void testOffsetAssignmentAfterDownConversionV2ToV0Compressed() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); @@ -1551,8 +1509,7 @@ public void testNonIncreasingOffsetRecordBatchHasMetricsLogged() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() )); @@ -1560,31 +1517,6 @@ public void testNonIncreasingOffsetRecordBatchHasMetricsLogged() { assertEquals(metricsRecorder.recordInvalidOffsetCount, 1); } - @Test - public void testZStdCompressedWithUnavailableIBPVersion() { - // The timestamps should be overwritten - MemoryRecords records = 
createRecords(RecordBatch.MAGIC_VALUE_V2, 1234L, Compression.NONE); - assertThrows(UnsupportedCompressionTypeException.class, () -> - new LogValidator( - records, - topicPartition, - time, - CompressionType.NONE, - Compression.zstd().build(), - false, - RecordBatch.MAGIC_VALUE_V2, - TimestampType.LOG_APPEND_TIME, - 1000L, - 1000L, - RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.IBP_2_0_IV1 - ).validateMessagesAndAssignOffsets( - PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() - ) - ); - } - @Test public void testInvalidTimestampExceptionHasBatchIndex() { long now = System.currentTimeMillis(); @@ -1604,8 +1536,7 @@ public void testInvalidTimestampExceptionHasBatchIndex() { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ) @@ -1691,8 +1622,7 @@ public void testRecordWithPastTimestampIsRejected() { timestampBeforeMaxConfig, timestampAfterMaxConfig, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ) @@ -1724,8 +1654,7 @@ public void testRecordWithFutureTimestampIsRejected() { timestampBeforeMaxConfig, timestampAfterMaxConfig, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ) @@ -1766,8 +1695,7 @@ public void testDifferentLevelDoesNotCauseRecompression() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); LogValidator.ValidationResult result = validator.validateMessagesAndAssignOffsets( @@ -1805,8 +1733,7 @@ public void testDifferentCodecCausesRecompression() { 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ); LogValidator.ValidationResult result = validator.validateMessagesAndAssignOffsets( @@ -1865,8 +1792,7 @@ public void checkNonCompressed(byte magic) { 1000L, 1000L, partitionLeaderEpoch, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( offsetCounter, metricsRecorder, @@ -1900,10 +1826,8 @@ public void checkNonCompressed(byte magic) { if (magic >= RecordBatch.MAGIC_VALUE_V2) { assertEquals(1, iteratorSize(records.batches().iterator())); - assertEquals(2, validatingResults.shallowOffsetOfMaxTimestamp); } else { assertEquals(3, iteratorSize(records.batches().iterator())); - assertEquals(1, validatingResults.shallowOffsetOfMaxTimestamp); } assertFalse(validatingResults.messageSizeMaybeChanged, @@ -1934,8 +1858,7 @@ private void validateRecordBatchWithCountOverrides(int lastOffsetDelta, int coun 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0L), metricsRecorder, @@ -1961,8 +1884,7 @@ public void checkLogAppendTimeWithoutRecompression(byte magic) { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - 
AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -1979,8 +1901,6 @@ public void checkLogAppendTimeWithoutRecompression(byte magic) { "MessageSet should still valid"); assertEquals(now, validatedResults.maxTimestampMs, "Max timestamp should be " + now); - assertEquals(2, validatedResults.shallowOffsetOfMaxTimestamp, - "The shallow offset of max timestamp should be the last offset 2 if logAppendTime is used"); assertFalse(validatedResults.messageSizeMaybeChanged, "Message size should not have been changed"); @@ -2006,8 +1926,7 @@ public void checkLogAppendTimeWithRecompression(byte targetMagic) { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(0), metricsRecorder, @@ -2022,8 +1941,6 @@ public void checkLogAppendTimeWithRecompression(byte targetMagic) { assertTrue(validatedRecords.batches().iterator().next().isValid(), "MessageSet should still valid"); assertEquals(now, validatedResults.maxTimestampMs, String.format("Max timestamp should be %d", now)); - assertEquals(2, validatedResults.shallowOffsetOfMaxTimestamp, - "The shallow offset of max timestamp should be 2 if logAppendTime is used"); assertTrue(validatedResults.messageSizeMaybeChanged, "Message size may have been changed"); @@ -2049,8 +1966,7 @@ public void checkLogAppendTimeNonCompressed(byte magic) { 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, - AppendOrigin.CLIENT, - MetadataVersion.latestTesting() + AppendOrigin.CLIENT ).validateMessagesAndAssignOffsets( offsetCounter, metricsRecorder, @@ -2075,19 +1991,6 @@ public void checkLogAppendTimeNonCompressed(byte magic) { assertFalse(validatedResults.messageSizeMaybeChanged, "Message size should not have been changed"); - int expectedMaxTimestampOffset; - switch (magic) { - case RecordBatch.MAGIC_VALUE_V0: - expectedMaxTimestampOffset = -1; - break; - case RecordBatch.MAGIC_VALUE_V1: - expectedMaxTimestampOffset = 0; - break; - default: - expectedMaxTimestampOffset = 2; - break; - } - assertEquals(expectedMaxTimestampOffset, validatedResults.shallowOffsetOfMaxTimestamp); verifyRecordValidationStats(validatedResults.recordValidationStats, 0, records, false); } @@ -2119,8 +2022,7 @@ private void checkOffsets(MemoryRecords records, long baseOffset) { } private void maybeCheckBaseTimestamp(long expected, RecordBatch batch) { - if (batch instanceof DefaultRecordBatch) { - DefaultRecordBatch b = (DefaultRecordBatch) batch; + if (batch instanceof DefaultRecordBatch b) { assertEquals(expected, b.baseTimestamp(), "Unexpected base timestamp of batch " + batch); } } @@ -2151,4 +2053,4 @@ else if (numConvertedRecords > 0 || compressed) else assertEquals(0, tempBytes); } -} \ No newline at end of file +} diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java index acff9bc6f2969..f3c4ab5e8fa50 100644 --- a/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java @@ -59,7 +59,6 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static 
org.apache.kafka.coordinator.transaction.TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT; import static org.apache.kafka.storage.internals.log.ProducerStateManager.LATE_TRANSACTION_BUFFER_MS; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -89,7 +88,7 @@ public class ProducerStateManagerTest { public ProducerStateManagerTest() throws IOException { logDir = TestUtils.tempDirectory(); partition = new TopicPartition("test", 0); - producerStateManagerConfig = new ProducerStateManagerConfig(PRODUCER_ID_EXPIRATION_MS_DEFAULT, true); + producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true); time = new MockTime(); stateManager = new ProducerStateManager(partition, logDir, maxTransactionTimeoutMs, producerStateManagerConfig, time); diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/VerificationGuardTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/VerificationGuardTest.java new file mode 100644 index 0000000000000..d38a541f688e0 --- /dev/null +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/VerificationGuardTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.storage.internals.log; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class VerificationGuardTest { + + @Test + public void testEqualsAndHashCode() { + VerificationGuard verificationGuard1 = new VerificationGuard(); + VerificationGuard verificationGuard2 = new VerificationGuard(); + + assertNotEquals(verificationGuard1, verificationGuard2); + assertNotEquals(VerificationGuard.SENTINEL, verificationGuard1); + assertEquals(VerificationGuard.SENTINEL, VerificationGuard.SENTINEL); + + assertNotEquals(verificationGuard1.hashCode(), verificationGuard2.hashCode()); + assertNotEquals(VerificationGuard.SENTINEL.hashCode(), verificationGuard1.hashCode()); + assertEquals(VerificationGuard.SENTINEL.hashCode(), VerificationGuard.SENTINEL.hashCode()); + } + + @Test + public void testVerify() { + VerificationGuard verificationGuard1 = new VerificationGuard(); + VerificationGuard verificationGuard2 = new VerificationGuard(); + + assertFalse(verificationGuard1.verify(verificationGuard2)); + assertFalse(verificationGuard1.verify(VerificationGuard.SENTINEL)); + assertFalse(VerificationGuard.SENTINEL.verify(verificationGuard1)); + assertFalse(VerificationGuard.SENTINEL.verify(VerificationGuard.SENTINEL)); + assertTrue(verificationGuard1.verify(verificationGuard1)); + } +} diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java index ff61e29d93c28..2fdb9483fe628 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java @@ -302,11 +302,7 @@ public LocalTieredStorage remoteStorageManager(int brokerId) { // unused now, but it can be reused later as this is an utility method. 
public Optional leaderEpochFileCache(int brokerId, TopicPartition partition) { - Optional unifiedLogOpt = log(brokerId, partition); - if (unifiedLogOpt.isPresent() && unifiedLogOpt.get().leaderEpochCache().isDefined()) { - return Optional.of(unifiedLogOpt.get().leaderEpochCache().get()); - } - return Optional.empty(); + return log(brokerId, partition).map(log -> log.leaderEpochCache()); } public List remoteStorageManagers() { diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestHarness.java b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestHarness.java index 126f3e6c11bbb..ebf4a3f1269c9 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestHarness.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestHarness.java @@ -138,9 +138,7 @@ public static List remoteStorageManagers(Seq br if (broker.remoteLogManagerOpt().isDefined()) { RemoteLogManager remoteLogManager = broker.remoteLogManagerOpt().get(); RemoteStorageManager storageManager = remoteLogManager.storageManager(); - if (storageManager instanceof ClassLoaderAwareRemoteStorageManager) { - ClassLoaderAwareRemoteStorageManager loaderAwareRSM = - (ClassLoaderAwareRemoteStorageManager) storageManager; + if (storageManager instanceof ClassLoaderAwareRemoteStorageManager loaderAwareRSM) { if (loaderAwareRSM.delegate() instanceof LocalTieredStorage) { storages.add((LocalTieredStorage) loaderAwareRSM.delegate()); } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/AlterLogDirAction.java b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/AlterLogDirAction.java index 34f4c7d0892aa..9e514cd231422 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/AlterLogDirAction.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/AlterLogDirAction.java @@ -47,16 +47,16 @@ public AlterLogDirAction(TopicPartition topicPartition, @Override public void doExecute(TieredStorageTestContext context) throws InterruptedException, ExecutionException, TimeoutException { Optional localStorage = context.localStorages().stream().filter(storage -> storage.getBrokerId() == brokerId).findFirst(); - if (!localStorage.isPresent()) { + if (localStorage.isEmpty()) { throw new IllegalArgumentException("cannot find local storage for this topic partition:" + topicPartition + " in this broker id:" + brokerId); } Optional sourceDir = localStorage.get().getBrokerStorageDirectories().stream().filter(dir -> localStorage.get().dirContainsTopicPartition(topicPartition, dir)).findFirst(); - if (!sourceDir.isPresent()) { + if (sourceDir.isEmpty()) { throw new IllegalArgumentException("No log dir with topic partition:" + topicPartition + " in this broker id:" + brokerId); } Optional targetDir = localStorage.get().getBrokerStorageDirectories().stream().filter(dir -> !localStorage.get().dirContainsTopicPartition(topicPartition, dir)).findFirst(); - if (!targetDir.isPresent()) { + if (targetDir.isEmpty()) { throw new IllegalArgumentException("No log dir without topic partition:" + topicPartition + " in this broker id:" + brokerId); } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ConsumeAction.java b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ConsumeAction.java index 9549a6b691670..df8f255830a12 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ConsumeAction.java +++ 
b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ConsumeAction.java @@ -100,7 +100,7 @@ public void doExecute(TieredStorageTestContext context) throws InterruptedExcept .filter(record -> record.offset() >= fetchOffset) .findFirst(); - if (!firstExpectedRecordOpt.isPresent()) { + if (firstExpectedRecordOpt.isEmpty()) { // If no records could be found in the second-tier storage, no record would be consumed from that storage. if (expectedFromSecondTierCount > 0) { fail("Could not find any record with offset >= " + fetchOffset + " from tier storage."); diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ExpectLeaderEpochCheckpointAction.java b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ExpectLeaderEpochCheckpointAction.java index 10231ad06ff04..da6839799834a 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ExpectLeaderEpochCheckpointAction.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/ExpectLeaderEpochCheckpointAction.java @@ -30,8 +30,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; -import scala.Option; - public final class ExpectLeaderEpochCheckpointAction implements TieredStorageTestAction { private final Integer brokerId; @@ -56,10 +54,8 @@ public void doExecute(TieredStorageTestContext context) throws InterruptedExcept EpochEntry earliestEntry = null; Optional log = context.log(brokerId, partition); if (log.isPresent()) { - Option leaderEpochCache = log.get().leaderEpochCache(); - if (leaderEpochCache.isDefined()) { - earliestEntry = leaderEpochCache.get().earliestEntry().orElse(null); - } + LeaderEpochFileCache leaderEpochCache = log.get().leaderEpochCache(); + earliestEntry = leaderEpochCache.earliestEntry().orElse(null); } earliestEntryOpt.set(earliestEntry); return earliestEntry != null && beginEpoch == earliestEntry.epoch diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseDeleteSegmentsTest.java b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseDeleteSegmentsTest.java index 11c3ca7ad090d..d94bb571cbe60 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseDeleteSegmentsTest.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseDeleteSegmentsTest.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; import static org.apache.kafka.server.log.remote.storage.LocalTieredStorageEvent.EventType.DELETE_SEGMENT; @@ -56,7 +55,7 @@ protected void writeTestSpecifications(TieredStorageTestBuilder builder) { .expectSegmentToBeOffloaded(broker0, topicA, p0, 2, new KeyValueSpec("k2", "v2")) .expectEarliestLocalOffsetInLogDirectory(topicA, p0, 3L) .produceWithTimestamp(topicA, p0, new KeyValueSpec("k0", "v0"), new KeyValueSpec("k1", "v1"), - new KeyValueSpec("k2", "v2"), new KeyValueSpec("k3", "v3", System.currentTimeMillis() + TimeUnit.DAYS.toMillis(1))) + new KeyValueSpec("k2", "v2"), new KeyValueSpec("k3", "v3", System.currentTimeMillis())) // update the topic config such that it triggers the deletion of segments .updateTopicConfig(topicA, configsToBeAdded(), Collections.emptyList()) // expect that the three offloaded remote log segments are deleted diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/OffloadAndTxnConsumeFromLeaderTest.java 
b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/OffloadAndTxnConsumeFromLeaderTest.java index 38b7ae3df2d9e..9c298346cc324 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/OffloadAndTxnConsumeFromLeaderTest.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/OffloadAndTxnConsumeFromLeaderTest.java @@ -29,7 +29,6 @@ import java.util.Properties; import static org.apache.kafka.tiered.storage.specs.RemoteFetchCount.FetchCountAndOp; -import static org.apache.kafka.tiered.storage.specs.RemoteFetchCount.OperationType.EQUALS_TO; import static org.apache.kafka.tiered.storage.specs.RemoteFetchCount.OperationType.LESS_THAN_OR_EQUALS_TO; /** @@ -95,7 +94,10 @@ protected void writeTestSpecifications(TieredStorageTestBuilder builder) { } private static RemoteFetchCount getRemoteFetchCount() { - FetchCountAndOp segmentFetchCountAndOp = new FetchCountAndOp(6, EQUALS_TO); + // Ideally, each remote-log segment should be fetched only once, so 6 segments would yield 6 fetches. + // However, the client can retry the FETCH request, so to keep the test deterministic the fetch count is allowed + // to be at most 12 (twice the ideal count). + FetchCountAndOp segmentFetchCountAndOp = new FetchCountAndOp(12, LESS_THAN_OR_EQUALS_TO); // RemoteIndexCache might evict the entries much before reaching the maximum size. // To make the test deterministic, we are using the operation type as LESS_THAN_OR_EQUALS_TO which equals to the // number of times the RemoteIndexCache gets accessed. The RemoteIndexCache gets accessed twice for each read. diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/BrokerLocalStorage.java b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/BrokerLocalStorage.java index 639a464f3c910..b6cd73f7131e3 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/BrokerLocalStorage.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/BrokerLocalStorage.java @@ -181,7 +181,7 @@ private OffsetHolder getEarliestLocalOffset(TopicPartition topicPartition) { .filter(filename -> filename.endsWith(LogFileUtils.LOG_FILE_SUFFIX)) .sorted() .findFirst(); - if (!firstLogFile.isPresent()) { + if (firstLogFile.isEmpty()) { throw new IllegalArgumentException(String.format( "[BrokerId=%d] No log file found for the topic-partition %s", brokerId, topicPartition)); } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/LocalTieredStorageOutput.java b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/LocalTieredStorageOutput.java index 4d5b577fbae9c..5d3029b71cb5b 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/LocalTieredStorageOutput.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/LocalTieredStorageOutput.java @@ -39,7 +39,7 @@ public LocalTieredStorageOutput(Deserializer keyDe, Deserializer valueDe) this.keyDe = keyDe; this.valueDe = valueDe; // Columns length + 5 column separators.
- output += repeatString("-", 51 + 8 + 13 + 10 + (3 * 2)) + System.lineSeparator(); + output += "-".repeat(51 + 8 + 13 + 10 + (3 * 2)) + System.lineSeparator(); } private String row(String file, Object offset, String record, String ident) { @@ -54,14 +54,6 @@ private String row() { return row("", "", ""); } - private String repeatString(String str, int times) { - StringBuilder builder = new StringBuilder(); - for (int i = 0; i < times; i++) { - builder.append(str); - } - return builder.toString(); - } - @Override public void visitTopicIdPartition(TopicIdPartition topicIdPartition) { currentTopic = topicIdPartition.topicPartition().topic(); @@ -110,4 +102,4 @@ private static class Tuple2 { this.t2 = t2; } } -} \ No newline at end of file +} diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java index 4034cd63a2a87..3796426c1d1ed 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/utils/RecordsKeyValueMatcher.java @@ -136,8 +136,7 @@ private boolean compare(ByteBuffer lhs, @SuppressWarnings("unchecked") private SimpleRecord convert(Object recordCandidate) { - if (recordCandidate instanceof ProducerRecord) { - ProducerRecord record = (ProducerRecord) recordCandidate; + if (recordCandidate instanceof ProducerRecord record) { long timestamp = record.timestamp() != null ? record.timestamp() : RecordBatch.NO_TIMESTAMP; ByteBuffer keyBytes = Utils.wrapNullable(keySerde.serializer().serialize(topicPartition.topic(), (K) record.key())); @@ -145,16 +144,14 @@ private SimpleRecord convert(Object recordCandidate) { Utils.wrapNullable(valueSerde.serializer().serialize(topicPartition.topic(), (V) record.value())); Header[] headers = record.headers() != null ? record.headers().toArray() : Record.EMPTY_HEADERS; return new SimpleRecord(timestamp, keyBytes, valueBytes, headers); - } else if (recordCandidate instanceof ConsumerRecord) { - ConsumerRecord record = (ConsumerRecord) recordCandidate; + } else if (recordCandidate instanceof ConsumerRecord record) { ByteBuffer keyBytes = Utils.wrapNullable(keySerde.serializer().serialize(topicPartition.topic(), (K) record.key())); ByteBuffer valueBytes = Utils.wrapNullable(valueSerde.serializer().serialize(topicPartition.topic(), (V) record.value())); Header[] headers = record.headers() != null ? record.headers().toArray() : Record.EMPTY_HEADERS; return new SimpleRecord(record.timestamp(), keyBytes, valueBytes, headers); - } else if (recordCandidate instanceof Record) { - Record record = (Record) recordCandidate; + } else if (recordCandidate instanceof Record record) { return new SimpleRecord(record.timestamp(), record.key(), record.value(), record.headers()); } else { return null; @@ -180,4 +177,4 @@ public static RecordsKeyValueMatcher correspondTo(C Serde valueSerde) { return new RecordsKeyValueMatcher<>(expectedRecords, topicPartition, keySerde, valueSerde); } -} \ No newline at end of file +} diff --git a/storage/src/test/resources/log4j.properties b/storage/src/test/resources/log4j.properties deleted file mode 100644 index 7ee388a407f71..0000000000000 --- a/storage/src/test/resources/log4j.properties +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=OFF, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.appender.fileAppender=org.apache.log4j.RollingFileAppender -log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.fileAppender.layout.ConversionPattern=%d [%t] %-5p %c %x - %m%n -log4j.appender.fileAppender.File=storage.log - -log4j.logger.org.apache.kafka.server.log.remote.storage=INFO -log4j.logger.org.apache.kafka.server.log.remote.metadata.storage=INFO -log4j.logger.kafka.log.remote=INFO diff --git a/storage/src/test/resources/log4j2.yaml b/storage/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..e2050ad723d29 --- /dev/null +++ b/storage/src/test/resources/log4j2.yaml @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + - name: "fileLogPattern" + value: "%d [%t] %-5p %c %x - %m%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + RollingFile: + - name: FileAppender + fileName: storage.log + filePattern: "storage-%d{yyyy-MM-dd}.log" + PatternLayout: + pattern: "${fileLogPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + + Loggers: + Root: + level: OFF + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka.server.log.remote.storage + level: INFO + AppenderRef: + - ref: FileAppender + + - name: org.apache.kafka.server.log.remote.metadata.storage + level: INFO + AppenderRef: + - ref: FileAppender + + - name: kafka.log.remote + level: INFO + AppenderRef: + - ref: FileAppender diff --git a/streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java b/streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java index efa550222f859..6de15ef91a440 100644 --- a/streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java +++ b/streams/examples/src/main/java/org/apache/kafka/streams/examples/pageview/PageViewTypedDemo.java @@ -134,12 +134,12 @@ public Deserializer deserializer() { @SuppressWarnings("DefaultAnnotationParam") // being explicit for the example @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "_t") @JsonSubTypes({ - @JsonSubTypes.Type(value = PageView.class, name = "pv"), - @JsonSubTypes.Type(value = UserProfile.class, name = "up"), - @JsonSubTypes.Type(value = PageViewByRegion.class, name = "pvbr"), - @JsonSubTypes.Type(value = WindowedPageViewByRegion.class, name = "wpvbr"), - @JsonSubTypes.Type(value = RegionCount.class, name = "rc") - }) + @JsonSubTypes.Type(value = PageView.class, name = "pv"), + @JsonSubTypes.Type(value = UserProfile.class, name = "up"), + @JsonSubTypes.Type(value = PageViewByRegion.class, name = "pvbr"), + @JsonSubTypes.Type(value = WindowedPageViewByRegion.class, name = "wpvbr"), + @JsonSubTypes.Type(value = RegionCount.class, name = "rc") + }) public interface JSONSerdeCompatible { } diff --git a/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountProcessorTest.java b/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountProcessorTest.java index 50df2f1e4fc69..bc35ca17b85de 100644 --- a/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountProcessorTest.java +++ b/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountProcessorTest.java @@ -38,7 +38,7 @@ public class WordCountProcessorTest { @Test public void test() { - final MockProcessorContext context = new MockProcessorContext(); + final MockProcessorContext context = new MockProcessorContext<>(); // Create, initialize, and register the state store. 
final KeyValueStore store = diff --git a/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerTest.java b/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerTest.java index 7ef15804c3fc5..9ca80fa91d98b 100644 --- a/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerTest.java +++ b/streams/examples/src/test/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerTest.java @@ -54,7 +54,7 @@ public void test() { context.getStateStoreContext().register(store, null); } final Processor processor = supplier.get(); - processor.init(new org.apache.kafka.streams.processor.api.MockProcessorContext() { + processor.init(new org.apache.kafka.streams.processor.api.MockProcessorContext<>() { @Override public S getStateStore(final String name) { return context.getStateStore(name); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AbstractJoinIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AbstractJoinIntegrationTest.java index 3b84f286a7d63..d9dfc41c5118e 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AbstractJoinIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AbstractJoinIntegrationTest.java @@ -125,10 +125,16 @@ public abstract class AbstractJoinIntegrationTest { final ValueJoiner valueJoiner = (value1, value2) -> value1 + "-" + value2; Properties setupConfigsAndUtils(final boolean cacheEnabled) { + return setupConfigsAndUtils(cacheEnabled, true); + } + + Properties setupConfigsAndUtils(final boolean cacheEnabled, final boolean setSerdes) { final Properties streamsConfig = new Properties(); streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - streamsConfig.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Long().getClass()); - streamsConfig.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + if (setSerdes) { + streamsConfig.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.LongSerde.class); + streamsConfig.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class); + } streamsConfig.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL); if (!cacheEnabled) { streamsConfig.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0); @@ -260,16 +266,13 @@ void runSelfJoinTestWithDriver( private void checkQueryableStore(final String queryableName, final TestRecord expectedFinalResult, final TopologyTestDriver driver) { final ReadOnlyKeyValueStore> store = driver.getTimestampedKeyValueStore(queryableName); - final KeyValueIterator> all = store.all(); - final KeyValue> onlyEntry = all.next(); + try (final KeyValueIterator> all = store.all()) { + final KeyValue> onlyEntry = all.next(); - try { assertThat(onlyEntry.key, is(expectedFinalResult.key())); assertThat(onlyEntry.value.value(), is(expectedFinalResult.value())); assertThat(onlyEntry.value.timestamp(), is(expectedFinalResult.timestamp())); assertThat(all.hasNext(), is(false)); - } finally { - all.close(); } } diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java index d01a3acbb1ac1..6857bf9e380eb 100644 --- 
a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java @@ -379,10 +379,10 @@ public void testConcurrentlyAccessThreads() throws InterruptedException { executor.execute(() -> { try { for (int i = 0; i < loop + 1; i++) { - if (!kafkaStreams.addStreamThread().isPresent()) + if (kafkaStreams.addStreamThread().isEmpty()) throw new RuntimeException("failed to create stream thread"); kafkaStreams.metadataForLocalThreads(); - if (!kafkaStreams.removeStreamThread().isPresent()) + if (kafkaStreams.removeStreamThread().isEmpty()) throw new RuntimeException("failed to delete a stream thread"); } } catch (final Exception e) { diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/FineGrainedAutoResetIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/FineGrainedAutoResetIntegrationTest.java index 413686c2ea5d0..7f2459cab3150 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/FineGrainedAutoResetIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/FineGrainedAutoResetIntegrationTest.java @@ -25,11 +25,11 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.server.util.MockTime; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler; import org.apache.kafka.streams.errors.TopologyException; import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; @@ -48,6 +48,7 @@ import org.junit.jupiter.api.Timeout; import java.io.IOException; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -57,6 +58,8 @@ import java.util.Properties; import java.util.regex.Pattern; +import static org.apache.kafka.common.utils.Utils.mkProperties; +import static org.apache.kafka.test.TestUtils.waitForCondition; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -70,8 +73,14 @@ public class FineGrainedAutoResetIntegrationTest { private static final String OUTPUT_TOPIC_0 = "outputTopic_0"; private static final String OUTPUT_TOPIC_1 = "outputTopic_1"; private static final String OUTPUT_TOPIC_2 = "outputTopic_2"; + private static final String OUTPUT_TOPIC_3 = "outputTopic_3"; + private static final String OUTPUT_TOPIC_4 = "outputTopic_4"; + private static final String OUTPUT_TOPIC_5 = "outputTopic_5"; - public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(NUM_BROKERS); + public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster( + NUM_BROKERS, + mkProperties( + Collections.singletonMap("log.message.timestamp.after.max.ms", String.valueOf(Long.MAX_VALUE)))); @BeforeAll public static void startCluster() throws IOException, InterruptedException { @@ -95,6 +104,9 @@ public static void startCluster() throws IOException, InterruptedException { TOPIC_C_2, TOPIC_Y_2, TOPIC_Z_2, + TOPIC_DURATION_1, + 
TOPIC_DURATION_2, + TOPIC_DURATION_3, NOOP, DEFAULT_OUTPUT_TOPIC, OUTPUT_TOPIC_0, @@ -127,10 +139,13 @@ public static void closeCluster() { private static final String TOPIC_C_2 = "topic-C_2"; private static final String TOPIC_Y_2 = "topic-Y_2"; private static final String TOPIC_Z_2 = "topic-Z_2"; + private static final String TOPIC_DURATION_1 = "durationTopic-1"; + private static final String TOPIC_DURATION_2 = "durationTopic-2"; + private static final String TOPIC_DURATION_3 = "durationTopic-3"; private static final String NOOP = "noop"; private final Serde stringSerde = Serdes.String(); - private static final String STRING_SERDE_CLASSNAME = Serdes.String().getClass().getName(); + private static final String STRING_SERDE_CLASSNAME = Serdes.StringSerde.class.getName(); private Properties streamsConfiguration; private final String topic1TestMessage = "topic-1 test"; @@ -141,7 +156,7 @@ public static void closeCluster() { private final String topicZTestMessage = "topic-Z test"; @BeforeEach - public void setUp() throws IOException { + public void setUp() throws Exception { final Properties props = new Properties(); props.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0); @@ -196,8 +211,8 @@ private void shouldOnlyReadForEarliest( final StreamsBuilder builder = new StreamsBuilder(); - final KStream pattern1Stream = builder.stream(Pattern.compile("topic-\\d" + topicSuffix), Consumed.with(Topology.AutoOffsetReset.EARLIEST)); - final KStream pattern2Stream = builder.stream(Pattern.compile("topic-[A-D]" + topicSuffix), Consumed.with(Topology.AutoOffsetReset.LATEST)); + final KStream pattern1Stream = builder.stream(Pattern.compile("topic-\\d" + topicSuffix), Consumed.with(AutoOffsetReset.earliest())); + final KStream pattern2Stream = builder.stream(Pattern.compile("topic-[A-D]" + topicSuffix), Consumed.with(AutoOffsetReset.latest())); final KStream namedTopicsStream = builder.stream(Arrays.asList(topicY, topicZ)); pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde)); @@ -215,17 +230,18 @@ private void shouldOnlyReadForEarliest( final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class); - final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration); - streams.start(); - - final List> receivedKeyValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedReceivedValues.size()); final List actualValues = new ArrayList<>(expectedReceivedValues.size()); - for (final KeyValue receivedKeyValue : receivedKeyValues) { - actualValues.add(receivedKeyValue.value); + try (final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration)) { + streams.start(); + + final List> receivedKeyValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedReceivedValues.size()); + + for (final KeyValue receivedKeyValue : receivedKeyValues) { + actualValues.add(receivedKeyValue.value); + } } - streams.close(); Collections.sort(actualValues); Collections.sort(expectedReceivedValues); assertThat(actualValues, equalTo(expectedReceivedValues)); @@ -251,14 +267,108 @@ private void commitInvalidOffsets() { consumer.close(); } + @Test + public void shouldFailForResetNone() throws Exception { + final Properties props = new Properties(); + props.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "1000"); + + final Properties localConfig = StreamsTestUtils.getStreamsConfig( + "testConfigAutoOffsetWithNone", + 
CLUSTER.bootstrapServers(), + STRING_SERDE_CLASSNAME, + STRING_SERDE_CLASSNAME, + props); + + final StreamsBuilder builder = new StreamsBuilder(); + final KStream exceptionStream = builder.stream(NOOP, Consumed.with(AutoOffsetReset.none())); + + exceptionStream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde)); + + try (final KafkaStreams streams = new KafkaStreams(builder.build(), localConfig)) { + final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler(); + streams.setUncaughtExceptionHandler(uncaughtExceptionHandler); + + streams.start(); + + waitForCondition( + () -> uncaughtExceptionHandler.correctExceptionThrown, + "The expected NoOffsetForPartitionException was never thrown" + ); + } + } + + @Test + public void shouldResetByDuration() throws Exception { + final StreamsBuilder builder = new StreamsBuilder(); + + builder.stream(TOPIC_DURATION_1, Consumed.with(AutoOffsetReset.byDuration(Duration.ofHours(6L).plus(Duration.ofMinutes(40L))))) + .to(OUTPUT_TOPIC_3, Produced.with(stringSerde, stringSerde)); + builder.stream(TOPIC_DURATION_2, Consumed.with(AutoOffsetReset.byDuration(Duration.ofMillis(mockTime.milliseconds()).minus(Duration.ofDays(1L))))) + .to(OUTPUT_TOPIC_4, Produced.with(stringSerde, stringSerde)); + builder.stream(TOPIC_DURATION_3, Consumed.with(AutoOffsetReset.byDuration(Duration.ZERO))) + .to(OUTPUT_TOPIC_5, Produced.with(stringSerde, stringSerde)); + + final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class); + + for (int i = 0; i < 10; ++i) { + mockTime.sleep(Duration.ofHours(1L).toMillis()); + IntegrationTestUtils.produceValuesSynchronously(TOPIC_DURATION_1, Collections.singletonList("" + i), producerConfig, mockTime); + IntegrationTestUtils.produceValuesSynchronously(TOPIC_DURATION_2, Collections.singletonList("" + i), producerConfig, mockTime); + IntegrationTestUtils.produceValuesSynchronously(TOPIC_DURATION_3, Collections.singletonList("" + i), producerConfig, mockTime); + } + + final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class); + + final List expectedValues = Arrays.asList("3", "4", "5", "6", "7", "8", "9"); + final List allExpectedValues = Arrays.asList("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"); + final List singleFinalExpectedValues = List.of("10"); + final List actualValuesOne = new ArrayList<>(expectedValues.size()); + final List actualValuesTwo = new ArrayList<>(allExpectedValues.size()); + final List actualValuesThree = new ArrayList<>(singleFinalExpectedValues.size()); + + final MockTime streamsMockTime = new MockTime(mockTime.milliseconds() + Duration.ofMinutes(20).toMillis(), 0); + try (final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration, streamsMockTime)) { + streams.start(); + + final List> receivedKeyValuesOne = + IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_TOPIC_3, expectedValues.size()); + for (final KeyValue receivedKeyValue : receivedKeyValuesOne) { + actualValuesOne.add(receivedKeyValue.value); + } + + final List> receivedKeyValuesTwo = + IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_TOPIC_4, allExpectedValues.size()); + for (final KeyValue receivedKeyValue : receivedKeyValuesTwo) { + actualValuesTwo.add(receivedKeyValue.value); + } + + IntegrationTestUtils.produceValuesSynchronously(TOPIC_DURATION_3, 
Collections.singletonList("10"), producerConfig, mockTime); + final List> receivedKeyValuesThree = + IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_TOPIC_5, singleFinalExpectedValues.size()); + for (final KeyValue receivedKeyValue : receivedKeyValuesThree) { + actualValuesThree.add(receivedKeyValue.value); + } + } + + Collections.sort(actualValuesOne); + Collections.sort(expectedValues); + assertThat(actualValuesOne, equalTo(expectedValues)); + + Collections.sort(actualValuesTwo); + Collections.sort(allExpectedValues); + assertThat(actualValuesTwo, equalTo(allExpectedValues)); + + assertThat(actualValuesThree, equalTo(singleFinalExpectedValues)); + } + @Test public void shouldThrowExceptionOverlappingPattern() { final StreamsBuilder builder = new StreamsBuilder(); //NOTE this would realistically get caught when building topology, the test is for completeness - builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(Topology.AutoOffsetReset.EARLIEST)); + builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(AutoOffsetReset.earliest())); try { - builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(Topology.AutoOffsetReset.LATEST)); + builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(AutoOffsetReset.latest())); builder.build(); fail("Should have thrown TopologyException"); } catch (final TopologyException expected) { @@ -270,9 +380,9 @@ public void shouldThrowExceptionOverlappingPattern() { public void shouldThrowExceptionOverlappingTopic() { final StreamsBuilder builder = new StreamsBuilder(); //NOTE this would realistically get caught when building topology, the test is for completeness - builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(Topology.AutoOffsetReset.EARLIEST)); + builder.stream(Pattern.compile("topic-[A-D]_1"), Consumed.with(AutoOffsetReset.earliest())); try { - builder.stream(Arrays.asList(TOPIC_A_1, TOPIC_Z_1), Consumed.with(Topology.AutoOffsetReset.LATEST)); + builder.stream(Arrays.asList(TOPIC_A_1, TOPIC_Z_1), Consumed.with(AutoOffsetReset.latest())); builder.build(); fail("Should have thrown TopologyException"); } catch (final TopologyException expected) { @@ -300,15 +410,17 @@ public void shouldThrowStreamsExceptionNoResetSpecified() throws InterruptedExce exceptionStream.to(DEFAULT_OUTPUT_TOPIC, Produced.with(stringSerde, stringSerde)); - final KafkaStreams streams = new KafkaStreams(builder.build(), localConfig); + try (final KafkaStreams streams = new KafkaStreams(builder.build(), localConfig)) { + final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler(); + streams.setUncaughtExceptionHandler(uncaughtExceptionHandler); - final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler(); + streams.start(); - streams.setUncaughtExceptionHandler(uncaughtExceptionHandler); - streams.start(); - TestUtils.waitForCondition(() -> uncaughtExceptionHandler.correctExceptionThrown, - "The expected NoOffsetForPartitionException was never thrown"); - streams.close(); + waitForCondition( + () -> uncaughtExceptionHandler.correctExceptionThrown, + "The expected NoOffsetForPartitionException was never thrown" + ); + } } diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/HighAvailabilityTaskAssignorIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/HighAvailabilityTaskAssignorIntegrationTest.java index df4d981fa7175..c57d7920faa5e 
100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/HighAvailabilityTaskAssignorIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/HighAvailabilityTaskAssignorIntegrationTest.java @@ -97,9 +97,10 @@ public static void closeCluster() { @ParameterizedTest @ValueSource(strings = { - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY}) + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY + }) public void shouldScaleOutWithWarmupTasksAndInMemoryStores(final String rackAwareStrategy, final TestInfo testInfo) throws InterruptedException { // NB: this test takes at least a minute to run, because it needs a probing rebalance, and the minimum // value is one minute @@ -108,9 +109,10 @@ public void shouldScaleOutWithWarmupTasksAndInMemoryStores(final String rackAwar @ParameterizedTest @ValueSource(strings = { - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, - StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY}) + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, + StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY + }) public void shouldScaleOutWithWarmupTasksAndPersistentStores(final String rackAwareStrategy, final TestInfo testInfo) throws InterruptedException { // NB: this test takes at least a minute to run, because it needs a probing rebalance, and the minimum // value is one minute @@ -285,11 +287,7 @@ private static Properties getConsumerProperties() { } private static String getKiloByteValue() { - final StringBuilder kiloBuilder = new StringBuilder(1000); - for (int i = 0; i < 1000; i++) { - kiloBuilder.append('0'); - } - return kiloBuilder.toString(); + return "0".repeat(1000); } private static void assertFalseNoRetry(final boolean assertion, final String message) { diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java index 2d76d21ae1023..dcd711a35c52a 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/IQv2IntegrationTest.java @@ -357,11 +357,11 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - context.register(root, (key, value) -> put(Bytes.wrap(key), value)); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + stateStoreContext.register(root, (key, value) -> put(Bytes.wrap(key), value)); this.open = true; this.position = Position.emptyPosition(); - this.context = context; + this.context = stateStoreContext; } @Override diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java index 97840cd4a8cda..bf0d54bc5c0aa 100644 --- 
a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamAggregationIntegrationTest.java @@ -90,6 +90,7 @@ import static java.time.Duration.ofMillis; import static java.time.Duration.ofMinutes; import static java.time.Instant.ofEpochMilli; +import static org.apache.kafka.common.utils.Utils.mkProperties; import static org.apache.kafka.streams.utils.TestUtils.safeUniqueTestName; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; @@ -102,7 +103,10 @@ public class KStreamAggregationIntegrationTest { private static final int NUM_BROKERS = 1; - public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(NUM_BROKERS); + public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster( + NUM_BROKERS, + mkProperties( + Collections.singletonMap("log.message.timestamp.after.max.ms", String.valueOf(Long.MAX_VALUE)))); @BeforeAll public static void startCluster() throws Exception { diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamRepartitionIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamRepartitionIntegrationTest.java index d60a13915b543..d9c7c91bb5c72 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamRepartitionIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KStreamRepartitionIntegrationTest.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.KafkaStreams.State; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; @@ -76,6 +75,7 @@ import static org.apache.kafka.streams.KafkaStreams.State.ERROR; import static org.apache.kafka.streams.KafkaStreams.State.REBALANCING; import static org.apache.kafka.streams.KafkaStreams.State.RUNNING; +import static org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT; import static org.apache.kafka.streams.utils.TestUtils.safeUniqueTestName; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -168,17 +168,21 @@ public void shouldThrowAnExceptionWhenNumberOfPartitionsOfRepartitionOperationDo .to(outputTopic); final Properties streamsConfiguration = createStreamsConfig(topologyOptimization); - builder.build(streamsConfiguration); - - startStreams(builder, REBALANCING, ERROR, streamsConfiguration, (t, e) -> expectedThrowable.set(e)); - - final String expectedMsg = String.format("Number of partitions [%s] of repartition topic [%s] " + - "doesn't match number of partitions [%s] of the source topic.", - inputTopicRepartitionedNumOfPartitions, - toRepartitionTopicName(inputTopicRepartitionName), - topicBNumberOfPartitions); - assertNotNull(expectedThrowable.get()); - assertTrue(expectedThrowable.get().getMessage().contains(expectedMsg)); + try (final KafkaStreams ks = new KafkaStreams(builder.build(streamsConfiguration), streamsConfiguration)) { + ks.setUncaughtExceptionHandler(exception -> { + expectedThrowable.set(exception); + return SHUTDOWN_CLIENT; + }); + ks.start(); + 
TestUtils.waitForCondition(() -> ks.state() == ERROR, 30_000, "Kafka Streams never went into error state"); + final String expectedMsg = String.format("Number of partitions [%s] of repartition topic [%s] " + + "doesn't match number of partitions [%s] of the source topic.", + inputTopicRepartitionedNumOfPartitions, + toRepartitionTopicName(inputTopicRepartitionName), + topicBNumberOfPartitions); + assertNotNull(expectedThrowable.get()); + assertTrue(expectedThrowable.get().getMessage().contains(expectedMsg)); + } } @ParameterizedTest @@ -723,7 +727,7 @@ public void shouldGoThroughRebalancingCorrectly(final String topologyOptimizatio ) ); - kafkaStreamsToClose.close(); + kafkaStreamsToClose.close(Duration.ofSeconds(5)); sendEvents( timestamp, @@ -814,36 +818,12 @@ private void sendEvents(final String topic, } private KafkaStreams startStreams(final StreamsBuilder builder, final Properties streamsConfiguration) throws InterruptedException { - return startStreams(builder, REBALANCING, RUNNING, streamsConfiguration, null); - } - - private KafkaStreams startStreams(final StreamsBuilder builder, - final State expectedOldState, - final State expectedNewState, - final Properties streamsConfiguration, - final Thread.UncaughtExceptionHandler uncaughtExceptionHandler) throws InterruptedException { final CountDownLatch latch; final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(streamsConfiguration), streamsConfiguration); - - if (uncaughtExceptionHandler == null) { - latch = new CountDownLatch(1); - } else { - latch = new CountDownLatch(2); - kafkaStreams.setUncaughtExceptionHandler(e -> { - uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), e); - latch.countDown(); - if (e instanceof RuntimeException) { - throw (RuntimeException) e; - } else if (e instanceof Error) { - throw (Error) e; - } else { - throw new RuntimeException("Unexpected checked exception caught in the uncaught exception handler", e); - } - }); - } + latch = new CountDownLatch(1); kafkaStreams.setStateListener((newState, oldState) -> { - if (expectedOldState == oldState && expectedNewState == newState) { + if (REBALANCING == oldState && RUNNING == newState) { latch.countDown(); } }); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyJoinIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyJoinIntegrationTest.java index b4ac10fdbc661..4bd0f74da1d60 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyJoinIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyJoinIntegrationTest.java @@ -31,6 +31,7 @@ import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.ValueJoiner; +import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.ValueAndTimestamp; @@ -174,7 +175,7 @@ public void doJoinFromLeftThenDeleteLeftEntity(final boolean leftJoin, final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); final TestOutputTopic rejoinOutputTopic = rejoin ? 
driver.createOutputTopic(REJOIN_OUTPUT, new StringDeserializer(), new StringDeserializer()) : null; - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); // Pre-populate the RHS records. This test is all about what happens when we add/remove LHS records right.pipeInput("rhs1", "rhsValue1", baseTimestamp); @@ -257,7 +258,7 @@ public void doJoinFromLeftThenDeleteLeftEntity(final boolean leftJoin, } // Now delete one LHS entity such that one delete is propagated down to the output. - left.pipeInput("lhs1", (String) null, baseTimestamp + 6); + left.pipeInput("lhs1", null, baseTimestamp + 6); assertThat( outputTopic.readKeyValuesToMap(), is(mkMap( @@ -298,7 +299,7 @@ public void doJoinFromRightThenDeleteRightEntity(final boolean leftJoin, final TestInputTopic right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer()); final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); // Pre-populate the LHS records. This test is all about what happens when we add/remove RHS records left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp); @@ -381,7 +382,7 @@ public void doJoinFromRightThenDeleteRightEntity(final boolean leftJoin, } // Now delete the RHS entity such that all matching keys have deletes propagated. - right.pipeInput("rhs1", (String) null, baseTimestamp + 6); + right.pipeInput("rhs1", null, baseTimestamp + 6); assertThat( outputTopic.readKeyValuesToMap(), @@ -417,7 +418,7 @@ public void shouldEmitTombstoneWhenDeletingNonJoiningRecords(final boolean leftJ try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) { final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp); @@ -439,7 +440,7 @@ public void shouldEmitTombstoneWhenDeletingNonJoiningRecords(final boolean leftJ // Deleting a non-joining record produces an unnecessary tombstone for inner joins, because // it's not possible to know whether a result was previously emitted. // For the left join, the tombstone is necessary. 
- left.pipeInput("lhs1", (String) null, baseTimestamp + 1); + left.pipeInput("lhs1", null, baseTimestamp + 1); { assertThat( outputTopic.readKeyValuesToMap(), @@ -454,7 +455,7 @@ public void shouldEmitTombstoneWhenDeletingNonJoiningRecords(final boolean leftJ } // Deleting a non-existing record is idempotent - left.pipeInput("lhs1", (String) null, baseTimestamp + 2); + left.pipeInput("lhs1", null, baseTimestamp + 2); { assertThat( outputTopic.readKeyValuesToMap(), @@ -483,10 +484,10 @@ public void shouldNotEmitTombstonesWhenDeletingNonExistingRecords(final boolean try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) { final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); // Deleting a record that never existed doesn't need to emit tombstones. - left.pipeInput("lhs1", (String) null, baseTimestamp); + left.pipeInput("lhs1", null, baseTimestamp); { assertThat( outputTopic.readKeyValuesToMap(), @@ -516,7 +517,7 @@ public void joinShouldProduceNullsWhenValueHasNonMatchingForeignKey(final boolea final TestInputTopic right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer()); final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp); // no output for a new inner join on a non-existent FK @@ -623,7 +624,7 @@ public void shouldUnsubscribeOldForeignKeyIfLeftSideIsUpdated(final boolean left final TestInputTopic right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer()); final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); // Pre-populate the RHS records. 
This test is all about what happens when we change LHS records foreign key reference // then populate update on RHS @@ -707,7 +708,7 @@ public void shouldEmitRecordOnNullForeignKeyForLeftJoins(final String optimizati try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) { final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp); { @@ -744,11 +745,11 @@ public void shouldEmitRecordWhenOldAndNewFkDiffer(final String optimization, try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) { final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); final String subscriptionStoreName = driver.getAllStateStores().entrySet().stream() .filter(e -> e.getKey().contains("SUBSCRIPTION-STATE-STORE")) .findAny().orElseThrow(() -> new RuntimeException("couldn't find store")).getKey(); - final KeyValueStore> subscriptionStore = driver.getKeyValueStore(subscriptionStoreName); + final KeyValueStore> subscriptionStore = driver.getTimestampedKeyValueStore(subscriptionStoreName); final Bytes key = subscriptionStoreKey("lhs1", "rhs1"); left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp); { @@ -786,9 +787,11 @@ private static Bytes subscriptionStoreKey(final String lhs, final String rhs) { return key; } - protected static Map asMap(final KeyValueStore store) { + protected static Map asMap(final KeyValueStore> store) { final HashMap result = new HashMap<>(); - store.all().forEachRemaining(kv -> result.put(kv.key, kv.value)); + try (final KeyValueIterator> it = store.all()) { + it.forEachRemaining(kv -> result.put(kv.key, kv.value.value())); + } return result; } @@ -921,7 +924,7 @@ public void shouldIgnoreOutOfOrderRecordsIffVersioned(final boolean leftJoin, final TestInputTopic right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer()); final TestInputTopic left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer()); final TestOutputTopic outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer()); - final KeyValueStore store = driver.getKeyValueStore("store"); + final KeyValueStore> store = driver.getTimestampedKeyValueStore("store"); // RHS record right.pipeInput("rhs1", "rhsValue1", baseTimestamp + 4); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsTelemetryIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsTelemetryIntegrationTest.java index 50b11cdc6b1cc..18dbd2fa6d8a6 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsTelemetryIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KafkaStreamsTelemetryIntegrationTest.java @@ -55,7 +55,6 @@ import 
org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; @@ -148,8 +147,8 @@ public void tearDown() throws Exception { @ParameterizedTest @ValueSource(strings = {"INFO", "DEBUG", "TRACE"}) - @DisplayName("End-to-end test validating metrics pushed to broker") public void shouldPushMetricsToBroker(final String recordingLevel) throws Exception { + // End-to-end test validating metrics pushed to broker streamsApplicationProperties = props(true); streamsApplicationProperties.put(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, recordingLevel); final Topology topology = simpleTopology(); @@ -161,7 +160,7 @@ public void shouldPushMetricsToBroker(final String recordingLevel) throws Except final Uuid mainConsumerInstanceId = clientInstanceIds.consumerInstanceIds().entrySet().stream() .filter(entry -> !entry.getKey().endsWith("-restore-consumer") - && !entry.getKey().endsWith("GlobalStreamThread")) + && !entry.getKey().endsWith("GlobalStreamThread-global-consumer")) .map(Map.Entry::getValue) .findFirst().orElseThrow(); assertNotNull(adminInstanceId); @@ -181,7 +180,8 @@ public void shouldPushMetricsToBroker(final String recordingLevel) throws Except final String name = mn.name().replace('-', '.'); final String group = mn.group().replace("-metrics", "").replace('-', '.'); return "org.apache.kafka." + group + "." + name; - }).sorted().collect(Collectors.toList()); + }).filter(name -> !name.equals("org.apache.kafka.stream.thread.state"))// telemetry reporter filters out string metrics + .sorted().collect(Collectors.toList()); final List actualMetrics = new ArrayList<>(TelemetryPlugin.SUBSCRIBED_METRICS.get(mainConsumerInstanceId)); assertEquals(expectedMetrics, actualMetrics); @@ -189,7 +189,12 @@ public void shouldPushMetricsToBroker(final String recordingLevel) throws Except 30_000, "Never received subscribed metrics"); final List actualInstanceMetrics = TelemetryPlugin.SUBSCRIBED_METRICS.get(adminInstanceId); - final List expectedInstanceMetrics = Arrays.asList("org.apache.kafka.stream.alive.stream.threads", "org.apache.kafka.stream.failed.stream.threads"); + final List expectedInstanceMetrics = Arrays.asList( + "org.apache.kafka.stream.alive.stream.threads", + "org.apache.kafka.stream.client.state", + "org.apache.kafka.stream.failed.stream.threads", + "org.apache.kafka.stream.recording.level"); + assertEquals(expectedInstanceMetrics, actualInstanceMetrics); TestUtils.waitForCondition(() -> TelemetryPlugin.processId != null, @@ -202,8 +207,8 @@ public void shouldPushMetricsToBroker(final String recordingLevel) throws Except @ParameterizedTest @MethodSource("singleAndMultiTaskParameters") - @DisplayName("Streams metrics should get passed to Admin and Consumer") public void shouldPassMetrics(final String topologyType, final boolean stateUpdaterEnabled) throws Exception { + // Streams metrics should get passed to Admin and Consumer streamsApplicationProperties = props(stateUpdaterEnabled); final Topology topology = topologyType.equals("simple") ? 
simpleTopology() : complexTopology(); @@ -232,8 +237,8 @@ public void shouldPassMetrics(final String topologyType, final boolean stateUpda @ParameterizedTest @MethodSource("multiTaskParameters") - @DisplayName("Correct streams metrics should get passed with dynamic membership") public void shouldPassCorrectMetricsDynamicInstances(final boolean stateUpdaterEnabled) throws Exception { + // Correct streams metrics should get passed with dynamic membership streamsApplicationProperties = props(stateUpdaterEnabled); streamsApplicationProperties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(appId).getPath() + "-ks1"); streamsApplicationProperties.put(StreamsConfig.CLIENT_ID_CONFIG, appId + "-ks1"); @@ -324,8 +329,8 @@ public void shouldPassCorrectMetricsDynamicInstances(final boolean stateUpdaterE } @Test - @DisplayName("Streams metrics should not be visible in client metrics") public void passedMetricsShouldNotLeakIntoClientMetrics() throws Exception { + // Streams metrics should not be visible in client metrics streamsApplicationProperties = props(true); final Topology topology = complexTopology(); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/MetricsIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/MetricsIntegrationTest.java index d083a205e8035..c6dc962d6d611 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/MetricsIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/MetricsIntegrationTest.java @@ -105,6 +105,7 @@ public static void closeCluster() { private static final String APPLICATION_ID = "application-id"; private static final String TOPOLOGY_DESCRIPTION = "topology-description"; private static final String STATE = "state"; + private static final String CLIENT_STATE = "client-state"; private static final String ALIVE_STREAM_THREADS = "alive-stream-threads"; private static final String FAILED_STREAM_THREADS = "failed-stream-threads"; private static final String PUT_LATENCY_AVG = "put-latency-avg"; @@ -125,6 +126,7 @@ public static void closeCluster() { private static final String RANGE_LATENCY_MAX = "range-latency-max"; private static final String FLUSH_LATENCY_AVG = "flush-latency-avg"; private static final String FLUSH_LATENCY_MAX = "flush-latency-max"; + private static final String RECORDING_LEVEL = "recording-level"; private static final String RESTORE_LATENCY_AVG = "restore-latency-avg"; private static final String RESTORE_LATENCY_MAX = "restore-latency-max"; private static final String PUT_RATE = "put-rate"; @@ -167,6 +169,8 @@ public static void closeCluster() { private static final String PUNCTUATE_RATE = "punctuate-rate"; private static final String PUNCTUATE_TOTAL = "punctuate-total"; private static final String PUNCTUATE_RATIO = "punctuate-ratio"; + private static final String THREAD_STATE = "thread-state"; + private static final String THREAD_STATE_JMX = "state"; private static final String CREATE_RATE = "create-rate"; private static final String CREATE_TOTAL = "create-total"; private static final String DESTROY_RATE = "destroy-rate"; @@ -474,6 +478,8 @@ private void checkClientLevelMetrics() { checkMetricByName(listMetricThread, STATE, 1); checkMetricByName(listMetricThread, ALIVE_STREAM_THREADS, 1); checkMetricByName(listMetricThread, FAILED_STREAM_THREADS, 1); + checkMetricByName(listMetricThread, CLIENT_STATE, 1); + checkMetricByName(listMetricThread, RECORDING_LEVEL, 1); } 
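Editor's note: shouldPushMetricsToBroker above derives the expected telemetry names from each Kafka MetricName by stripping the "-metrics" suffix from the group, converting dashes to dots, and prefixing "org.apache.kafka.". A small sketch of that same conversion on plain strings (the class and method names here are illustrative):

public final class TelemetryMetricNameSketch {
    // Same mapping as in the test: group "stream-metrics" + name "alive-stream-threads"
    // becomes "org.apache.kafka.stream.alive.stream.threads".
    static String toTelemetryName(final String group, final String name) {
        final String dottedGroup = group.replace("-metrics", "").replace('-', '.');
        final String dottedName = name.replace('-', '.');
        return "org.apache.kafka." + dottedGroup + "." + dottedName;
    }

    public static void main(final String[] args) {
        // Prints one of the expected client-instance metrics asserted above.
        System.out.println(toTelemetryName("stream-metrics", "alive-stream-threads"));
    }
}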
private void checkThreadLevelMetrics() { @@ -510,6 +516,8 @@ private void checkThreadLevelMetrics() { checkMetricByName(listMetricThread, TASK_CLOSED_TOTAL, NUM_THREADS); checkMetricByName(listMetricThread, BLOCKED_TIME_TOTAL, NUM_THREADS); checkMetricByName(listMetricThread, THREAD_START_TIME, NUM_THREADS); + checkMetricByName(listMetricThread, THREAD_STATE, NUM_THREADS); + checkMetricByName(listMetricThread, THREAD_STATE_JMX, NUM_THREADS); } private void checkTaskLevelMetrics() { diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/ProcessingExceptionHandlerIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/ProcessingExceptionHandlerIntegrationTest.java index 9b848ab900543..13e291e887cc3 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/ProcessingExceptionHandlerIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/ProcessingExceptionHandlerIntegrationTest.java @@ -26,6 +26,8 @@ import org.apache.kafka.streams.TestInputTopic; import org.apache.kafka.streams.TopologyTestDriver; import org.apache.kafka.streams.errors.ErrorHandlerContext; +import org.apache.kafka.streams.errors.LogAndContinueProcessingExceptionHandler; +import org.apache.kafka.streams.errors.LogAndFailProcessingExceptionHandler; import org.apache.kafka.streams.errors.ProcessingExceptionHandler; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.Consumed; @@ -108,6 +110,52 @@ public void shouldFailWhenProcessingExceptionOccursIfExceptionHandlerReturnsFail } } + @Test + public void shouldFailWhenProcessingExceptionOccursFromFlushingCacheIfExceptionHandlerReturnsFail() { + final List> events = Arrays.asList( + new KeyValue<>("ID123-1", "ID123-A1"), + new KeyValue<>("ID123-1", "ID123-A2"), + new KeyValue<>("ID123-1", "ID123-A3"), + new KeyValue<>("ID123-1", "ID123-A4") + ); + + final List> expectedProcessedRecords = Arrays.asList( + new KeyValueTimestamp<>("ID123-1", "1", TIMESTAMP.toEpochMilli()), + new KeyValueTimestamp<>("ID123-1", "2", TIMESTAMP.toEpochMilli()) + ); + + final MockProcessorSupplier processor = new MockProcessorSupplier<>(); + final StreamsBuilder builder = new StreamsBuilder(); + builder + .stream("TOPIC_NAME", Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey() + .count() + .toStream() + .mapValues(value -> value.toString()) + .process(runtimeErrorProcessorSupplierMock()) + .process(processor); + + final Properties properties = new Properties(); + properties.put(StreamsConfig.PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndFailProcessingExceptionHandler.class); + + try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), properties, Instant.ofEpochMilli(0L))) { + final TestInputTopic inputTopic = driver.createInputTopic("TOPIC_NAME", new StringSerializer(), new StringSerializer()); + + final StreamsException exception = assertThrows(StreamsException.class, + () -> inputTopic.pipeKeyValueList(events, TIMESTAMP, Duration.ZERO)); + + assertTrue(exception.getMessage().contains("Failed to flush cache of store KSTREAM-AGGREGATE-STATE-STORE-0000000001")); + assertEquals(expectedProcessedRecords.size(), processor.theCapturedProcessor().processed().size()); + assertIterableEquals(expectedProcessedRecords, processor.theCapturedProcessor().processed()); + + final MetricName dropTotal = droppedRecordsTotalMetric(); + final MetricName dropRate = 
droppedRecordsRateMetric(); + + assertEquals(0.0, driver.metrics().get(dropTotal).metricValue()); + assertEquals(0.0, driver.metrics().get(dropRate).metricValue()); + } + } + @Test public void shouldContinueWhenProcessingExceptionOccursIfExceptionHandlerReturnsContinue() { final List> events = Arrays.asList( @@ -153,6 +201,50 @@ public void shouldContinueWhenProcessingExceptionOccursIfExceptionHandlerReturns } } + @Test + public void shouldContinueWhenProcessingExceptionOccursFromFlushingCacheIfExceptionHandlerReturnsContinue() { + final List> events = Arrays.asList( + new KeyValue<>("ID123-1", "ID123-A1"), + new KeyValue<>("ID123-1", "ID123-A2"), + new KeyValue<>("ID123-1", "ID123-A3"), + new KeyValue<>("ID123-1", "ID123-A4") + ); + + final List> expectedProcessedRecords = Arrays.asList( + new KeyValueTimestamp<>("ID123-1", "1", TIMESTAMP.toEpochMilli()), + new KeyValueTimestamp<>("ID123-1", "2", TIMESTAMP.toEpochMilli()), + new KeyValueTimestamp<>("ID123-1", "4", TIMESTAMP.toEpochMilli()) + ); + + final MockProcessorSupplier processor = new MockProcessorSupplier<>(); + final StreamsBuilder builder = new StreamsBuilder(); + builder + .stream("TOPIC_NAME", Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey() + .count() + .toStream() + .mapValues(value -> value.toString()) + .process(runtimeErrorProcessorSupplierMock()) + .process(processor); + + final Properties properties = new Properties(); + properties.put(StreamsConfig.PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueProcessingExceptionHandler.class); + + try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), properties, Instant.ofEpochMilli(0L))) { + final TestInputTopic inputTopic = driver.createInputTopic("TOPIC_NAME", new StringSerializer(), new StringSerializer()); + inputTopic.pipeKeyValueList(events, TIMESTAMP, Duration.ZERO); + + assertEquals(expectedProcessedRecords.size(), processor.theCapturedProcessor().processed().size()); + assertIterableEquals(expectedProcessedRecords, processor.theCapturedProcessor().processed()); + + final MetricName dropTotal = droppedRecordsTotalMetric(); + final MetricName dropRate = droppedRecordsRateMetric(); + + assertEquals(1.0, driver.metrics().get(dropTotal).metricValue()); + assertTrue((Double) driver.metrics().get(dropRate).metricValue() > 0.0); + } + } + @Test public void shouldStopOnFailedProcessorWhenProcessingExceptionOccursInFailProcessingExceptionHandler() { final KeyValue event = new KeyValue<>("ID123-1", "ID123-A1"); @@ -377,7 +469,7 @@ private ProcessorSupplier runtimeErrorProcessorS return () -> new ContextualProcessor() { @Override public void process(final Record record) { - if (record.key().contains("ERR")) { + if (record.key().contains("ERR") || record.value().equals("3")) { throw new RuntimeException("Exception should be handled by processing exception handler"); } diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java index 6b67af943fcb3..ca3936633fc19 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java @@ -982,10 +982,11 @@ private void verifyCanQueryState(final int cacheSizeBytes) throws Exception { streamOne, batch1, TestUtils.producerConfig( - 
CLUSTER.bootstrapServers(), - StringSerializer.class, - StringSerializer.class, - new Properties()), + CLUSTER.bootstrapServers(), + StringSerializer.class, + StringSerializer.class, + new Properties() + ), mockTime); final KStream s1 = builder.stream(streamOne); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/RangeQueryIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/RangeQueryIntegrationTest.java index 6842c7718e101..5f45f3d7212e5 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/RangeQueryIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/RangeQueryIntegrationTest.java @@ -42,6 +42,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -59,6 +60,7 @@ import java.util.function.Supplier; import java.util.stream.Stream; +import static org.apache.kafka.streams.utils.TestUtils.safeUniqueTestName; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -68,7 +70,6 @@ public class RangeQueryIntegrationTest { private static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(1); private static final Properties STREAMS_CONFIG = new Properties(); - private static final String APP_ID = "range-query-integration-test"; private static final Long COMMIT_INTERVAL = 100L; private static String inputStream; private static final String TABLE_NAME = "mytable"; @@ -136,7 +137,6 @@ public static void startCluster() throws IOException { STREAMS_CONFIG.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); STREAMS_CONFIG.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); STREAMS_CONFIG.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL); - STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID); STREAMS_CONFIG.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); } @@ -158,16 +158,16 @@ public void cleanup() throws InterruptedException { @ParameterizedTest @MethodSource("data") - public void testStoreConfig(final StoreType storeType, final boolean enableLogging, final boolean enableCaching, final boolean forward) throws Exception { + public void testStoreConfig(final StoreType storeType, final boolean enableLogging, final boolean enableCaching, final boolean forward, final TestInfo testInfo) throws Exception { + final String appID = safeUniqueTestName(testInfo); final StreamsBuilder builder = new StreamsBuilder(); final Materialized> stateStoreConfig = getStoreConfig(storeType, enableLogging, enableCaching); builder.table(inputStream, stateStoreConfig); - + STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, appID); try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), STREAMS_CONFIG)) { IntegrationTestUtils.startApplicationAndWaitUntilRunning(kafkaStreams); writeInputData(); - final ReadOnlyKeyValueStore stateStore = IntegrationTestUtils.getStore(1000_000L, TABLE_NAME, kafkaStreams, QueryableStoreTypes.keyValueStore()); // wait for the store to populate diff --git 
a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/SmokeTestDriverIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/SmokeTestDriverIntegrationTest.java index eb35666675e9f..b95e11df4c68a 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/SmokeTestDriverIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/SmokeTestDriverIntegrationTest.java @@ -35,10 +35,12 @@ import java.io.IOException; import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.Map; import java.util.Properties; import java.util.Set; +import static org.apache.kafka.common.utils.Utils.mkProperties; import static org.apache.kafka.streams.tests.SmokeTestDriver.generate; import static org.apache.kafka.streams.tests.SmokeTestDriver.verify; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -46,7 +48,10 @@ @Timeout(600) @Tag("integration") public class SmokeTestDriverIntegrationTest { - public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(3); + public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster( + 3, + mkProperties( + Collections.singletonMap("log.message.timestamp.after.max.ms", String.valueOf(Long.MAX_VALUE)))); @BeforeAll public static void startCluster() throws IOException { diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StoreQuerySuite.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StoreQuerySuite.java index 08a163de59ef5..e6b212e7d1144 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StoreQuerySuite.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StoreQuerySuite.java @@ -36,13 +36,13 @@ */ @Suite @SelectClasses({ - CompositeReadOnlyKeyValueStoreTest.class, - CompositeReadOnlyWindowStoreTest.class, - CompositeReadOnlySessionStoreTest.class, - GlobalStateStoreProviderTest.class, - StreamThreadStateStoreProviderTest.class, - WrappingStoreProviderTest.class, - QueryableStateIntegrationTest.class, - }) + CompositeReadOnlyKeyValueStoreTest.class, + CompositeReadOnlyWindowStoreTest.class, + CompositeReadOnlySessionStoreTest.class, + GlobalStateStoreProviderTest.class, + StreamThreadStateStoreProviderTest.class, + WrappingStoreProviderTest.class, + QueryableStateIntegrationTest.class, +}) public class StoreQuerySuite { } diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StreamTableJoinWithGraceIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StreamTableJoinWithGraceIntegrationTest.java index bcfd2445de23c..6195cbeb281d7 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StreamTableJoinWithGraceIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/StreamTableJoinWithGraceIntegrationTest.java @@ -19,10 +19,12 @@ import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Joined; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; +import 
org.apache.kafka.streams.kstream.Produced; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.test.TestRecord; @@ -52,13 +54,13 @@ public class StreamTableJoinWithGraceIntegrationTest extends AbstractJoinIntegra @ValueSource(booleans = {true, false}) public void testInnerWithVersionedStore(final boolean cacheEnabled) { final StreamsBuilder builder = new StreamsBuilder(); - final KStream leftStream = builder.stream(INPUT_TOPIC_LEFT); - final KTable rightTable = builder.table(INPUT_TOPIC_RIGHT, Materialized.as( + final KStream leftStream = builder.stream(INPUT_TOPIC_LEFT, Consumed.with(Serdes.Long(), Serdes.String())); + final KTable rightTable = builder.table(INPUT_TOPIC_RIGHT, Consumed.with(Serdes.Long(), Serdes.String()), Materialized.as( Stores.persistentVersionedKeyValueStore(STORE_NAME, Duration.ofMinutes(5)))); - final Properties streamsConfig = setupConfigsAndUtils(cacheEnabled); + final Properties streamsConfig = setupConfigsAndUtils(cacheEnabled, false); streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID + "-inner"); - leftStream.join(rightTable, valueJoiner, JOINED).to(OUTPUT_TOPIC); + leftStream.join(rightTable, valueJoiner, JOINED).to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String())); final List>> expectedResult = Arrays.asList( null, @@ -96,7 +98,7 @@ public void testLeftWithVersionedStore(final boolean cacheEnabled) { final KStream leftStream = builder.stream(INPUT_TOPIC_LEFT); final KTable rightTable = builder.table(INPUT_TOPIC_RIGHT, Materialized.as( Stores.persistentVersionedKeyValueStore(STORE_NAME, Duration.ofMinutes(5)))); - final Properties streamsConfig = setupConfigsAndUtils(cacheEnabled); + final Properties streamsConfig = setupConfigsAndUtils(cacheEnabled, true); streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID + "-left"); leftStream.leftJoin(rightTable, valueJoiner, JOINED).to(OUTPUT_TOPIC); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/TestingMetricsInterceptingAdminClient.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/TestingMetricsInterceptingAdminClient.java index 0afdd1fc85cca..a08b604cd5f58 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/TestingMetricsInterceptingAdminClient.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/TestingMetricsInterceptingAdminClient.java @@ -37,7 +37,6 @@ import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult; import org.apache.kafka.clients.admin.AlterUserScramCredentialsOptions; import org.apache.kafka.clients.admin.AlterUserScramCredentialsResult; -import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.CreateAclsOptions; import org.apache.kafka.clients.admin.CreateAclsResult; import org.apache.kafka.clients.admin.CreateDelegationTokenOptions; @@ -108,8 +107,9 @@ import org.apache.kafka.clients.admin.ListOffsetsResult; import org.apache.kafka.clients.admin.ListPartitionReassignmentsOptions; import org.apache.kafka.clients.admin.ListPartitionReassignmentsResult; -import org.apache.kafka.clients.admin.ListShareGroupsOptions; -import org.apache.kafka.clients.admin.ListShareGroupsResult; +import org.apache.kafka.clients.admin.ListShareGroupOffsetsOptions; +import org.apache.kafka.clients.admin.ListShareGroupOffsetsResult; +import org.apache.kafka.clients.admin.ListShareGroupOffsetsSpec; import org.apache.kafka.clients.admin.ListTopicsOptions; import 
org.apache.kafka.clients.admin.ListTopicsResult; import org.apache.kafka.clients.admin.ListTransactionsOptions; @@ -216,12 +216,6 @@ public DescribeConfigsResult describeConfigs(final Collection re return adminDelegate.describeConfigs(resources, options); } - @Override - @SuppressWarnings("deprecation") - public AlterConfigsResult alterConfigs(final Map configs, final AlterConfigsOptions options) { - return adminDelegate.alterConfigs(configs, options); - } - @Override public AlterConfigsResult incrementalAlterConfigs(final Map> configs, final AlterConfigsOptions options) { return adminDelegate.incrementalAlterConfigs(configs, options); @@ -423,8 +417,8 @@ public DescribeShareGroupsResult describeShareGroups(final Collection gr } @Override - public ListShareGroupsResult listShareGroups(final ListShareGroupsOptions options) { - return adminDelegate.listShareGroups(options); + public ListShareGroupOffsetsResult listShareGroupOffsets(final Map groupSpecs, final ListShareGroupOffsetsOptions options) { + return adminDelegate.listShareGroupOffsets(groupSpecs, options); } @Override diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/VersionedKeyValueStoreIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/VersionedKeyValueStoreIntegrationTest.java index 86dd025e7fd45..d3ec4d9838e22 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/VersionedKeyValueStoreIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/VersionedKeyValueStoreIntegrationTest.java @@ -813,8 +813,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - context.register( + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + stateStoreContext.register( root, (key, value) -> { } ); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/CompositeStateListener.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/CompositeStateListener.java index edb4d08fdd776..30f0c24677f56 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/CompositeStateListener.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/CompositeStateListener.java @@ -19,10 +19,8 @@ import org.apache.kafka.streams.KafkaStreams.State; import org.apache.kafka.streams.KafkaStreams.StateListener; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; /** @@ -39,7 +37,7 @@ public CompositeStateListener(final StateListener... 
listeners) { } public CompositeStateListener(final Collection stateListeners) { - this.listeners = Collections.unmodifiableList(new ArrayList<>(stateListeners)); + this.listeners = List.copyOf(stateListeners); } @Override diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/EmbeddedKafkaCluster.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/EmbeddedKafkaCluster.java index dfdbc567d0ba6..aad73d2d89a36 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/EmbeddedKafkaCluster.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/EmbeddedKafkaCluster.java @@ -25,6 +25,7 @@ import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.GroupProtocol; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; @@ -71,6 +72,7 @@ import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.common.utils.Utils.mkProperties; @@ -170,7 +172,10 @@ public void start() { public void verifyClusterReadiness() { final UUID uuid = UUID.randomUUID(); final String consumerGroupId = "group-warmup-" + uuid; - final Map consumerConfig = Collections.singletonMap(GROUP_ID_CONFIG, consumerGroupId); + final Map consumerConfig = Map.of( + GROUP_ID_CONFIG, consumerGroupId, + GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name() + ); final String topic = "topic-warmup-" + uuid; createTopic(topic); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/IntegrationTestUtils.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/IntegrationTestUtils.java index 2d66e0fd86bd3..a7b3f838f2a95 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/IntegrationTestUtils.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/utils/IntegrationTestUtils.java @@ -22,6 +22,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.GroupProtocol; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; @@ -29,6 +30,7 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.Metric; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.utils.Time; @@ -1002,7 +1004,9 @@ public static 
boolean isEmptyConsumerGroup(final Admin adminClient, .get(applicationId) .get(); return groupDescription.members().isEmpty(); - } catch (final ExecutionException | InterruptedException e) { + } catch (final ExecutionException e) { + return e.getCause() instanceof GroupIdNotFoundException; + } catch (final InterruptedException e) { return false; } } @@ -1186,7 +1190,8 @@ private static boolean continueConsuming(final int messagesConsumed, final int m /** * Sets up a {@link KafkaConsumer} from a copy of the given configuration that has * {@link ConsumerConfig#AUTO_OFFSET_RESET_CONFIG} set to "earliest" and {@link ConsumerConfig#ENABLE_AUTO_COMMIT_CONFIG} - * set to "true" to prevent missing events as well as repeat consumption. + * set to "true" to prevent missing events as well as repeat consumption. This also sets + * {@link ConsumerConfig#GROUP_PROTOCOL_CONFIG} to "classic". * @param consumerConfig Consumer configuration * @return Consumer */ @@ -1195,6 +1200,7 @@ private static KafkaConsumer createConsumer(final Properties consum filtered.putAll(consumerConfig); filtered.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); filtered.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + filtered.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name); return new KafkaConsumer<>(filtered); } diff --git a/streams/integration-tests/src/test/resources/log4j.properties b/streams/integration-tests/src/test/resources/log4j.properties deleted file mode 100644 index b7e1fb2d60ea4..0000000000000 --- a/streams/integration-tests/src/test/resources/log4j.properties +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.kafka=ERROR -log4j.logger.state.change.logger=ERROR -log4j.logger.org.apache.kafka=ERROR -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.apache.kafka.clients=ERROR - -# These are the only logs we will likely ever find anything useful in to debug Streams test failures -log4j.logger.org.apache.kafka.clients.consumer=INFO -log4j.logger.org.apache.kafka.clients.producer=INFO -log4j.logger.org.apache.kafka.streams=INFO - -# printing out the configs takes up a huge amount of the allotted characters, -# and provides little value as we can always figure out the test configs without the logs -log4j.logger.org.apache.kafka.clients.producer.ProducerConfig=ERROR -log4j.logger.org.apache.kafka.clients.consumer.ConsumerConfig=ERROR -log4j.logger.org.apache.kafka.clients.admin.AdminClientConfig=ERROR -log4j.logger.org.apache.kafka.streams.StreamsConfig=ERROR diff --git a/streams/integration-tests/src/test/resources/log4j2.yaml b/streams/integration-tests/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..0942036a33c80 --- /dev/null +++ b/streams/integration-tests/src/test/resources/log4j2.yaml @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: ERROR + + - name: state.change.logger + level: ERROR + + - name: org.apache.kafka + level: ERROR + + - name: org.apache.kafka.clients + level: ERROR + + - name: org.apache.kafka.clients.consumer + level: INFO + + - name: org.apache.kafka.clients.producer + level: INFO + + - name: org.apache.kafka.streams + level: INFO + + - name: org.apache.kafka.clients.producer.ProducerConfig + level: ERROR + + - name: org.apache.kafka.clients.consumer.ConsumerConfig + level: ERROR + + - name: org.apache.kafka.clients.admin.AdminClientConfig + level: ERROR + + - name: org.apache.kafka.streams.StreamsConfig + level: ERROR diff --git a/streams/quickstart/java/pom.xml b/streams/quickstart/java/pom.xml index e58187bb6e358..9e6d406898c1a 100644 --- a/streams/quickstart/java/pom.xml +++ b/streams/quickstart/java/pom.xml @@ -26,7 +26,7 @@ org.apache.kafka streams-quickstart - 4.0.0-SNAPSHOT + 4.1.0-SNAPSHOT .. 
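Editor's note: the new log4j2.yaml keeps org.apache.kafka.streams at INFO while raising the broader org.apache.kafka logger to ERROR. A quick sanity check of the effective levels through SLF4J is sketched below; it assumes the log4j2.yaml above is on the test classpath and a Log4j 2 SLF4J binding is present, and the class name is illustrative.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class LogLevelSanityCheck {
    public static void main(final String[] args) {
        final Logger streamsLogger = LoggerFactory.getLogger("org.apache.kafka.streams");
        final Logger kafkaLogger = LoggerFactory.getLogger("org.apache.kafka");
        System.out.println("streams INFO enabled: " + streamsLogger.isInfoEnabled()); // expected: true
        System.out.println("kafka INFO enabled:   " + kafkaLogger.isInfoEnabled());   // expected: false (ERROR only)
    }
}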
diff --git a/streams/quickstart/java/src/main/resources/archetype-resources/pom.xml b/streams/quickstart/java/src/main/resources/archetype-resources/pom.xml index 3dad3ae7f8eec..133d0ee951e5d 100644 --- a/streams/quickstart/java/src/main/resources/archetype-resources/pom.xml +++ b/streams/quickstart/java/src/main/resources/archetype-resources/pom.xml @@ -29,8 +29,8 @@ UTF-8 - 4.0.0-SNAPSHOT - 1.7.36 + 4.1.0-SNAPSHOT + 2.0.16 @@ -47,41 +47,20 @@ - - org.apache.maven.plugins maven-compiler-plugin - 3.1 + 3.13.0 - 1.8 - 1.8 + 11 - - maven-compiler-plugin - - 1.8 - 1.8 - jdt - - - - org.eclipse.tycho - tycho-compiler-jdt - 0.21.0 - - - org.eclipse.m2e lifecycle-mapping diff --git a/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j.properties b/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j.properties deleted file mode 100644 index b620f1bb390e4..0000000000000 --- a/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, console - -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n \ No newline at end of file diff --git a/group-coordinator/src/test/resources/log4j.properties b/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.yaml similarity index 60% rename from group-coordinator/src/test/resources/log4j.properties rename to streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.yaml index db3879386f10f..0c112dd06d6ec 100644 --- a/group-coordinator/src/test/resources/log4j.properties +++ b/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.yaml @@ -1,9 +1,9 @@ # Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with +# contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # @@ -12,11 +12,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-log4j.rootLogger=DEBUG, stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +Configuration: + Properties: + Property: + - name: "logPattern" + value: "%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n" -log4j.logger.org.apache.kafka=DEBUG -log4j.logger.org.apache.zookeeper=WARN + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT diff --git a/streams/quickstart/pom.xml b/streams/quickstart/pom.xml index aa7f1884d190b..021e9d0c0d6f9 100644 --- a/streams/quickstart/pom.xml +++ b/streams/quickstart/pom.xml @@ -22,7 +22,7 @@ org.apache.kafka streams-quickstart pom - 4.0.0-SNAPSHOT + 4.1.0-SNAPSHOT Kafka Streams :: Quickstart diff --git a/streams/src/main/java/org/apache/kafka/streams/AutoOffsetReset.java b/streams/src/main/java/org/apache/kafka/streams/AutoOffsetReset.java new file mode 100644 index 0000000000000..f3f3a941d20f7 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/AutoOffsetReset.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams; + +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy.StrategyType; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; + +import java.time.Duration; +import java.util.Optional; + +/** + * Sets the {@code auto.offset.reset} configuration when + * {@link Topology#addSource(AutoOffsetReset, String, String...) adding a source processor} + * or when creating {@link KStream} or {@link KTable} via {@link StreamsBuilder}. + */ +public class AutoOffsetReset { + protected final StrategyType offsetResetStrategy; + protected final Optional duration; + + private AutoOffsetReset(final StrategyType offsetResetStrategy, final Optional duration) { + this.offsetResetStrategy = offsetResetStrategy; + this.duration = duration; + } + + protected AutoOffsetReset(final AutoOffsetReset autoOffsetReset) { + this(autoOffsetReset.offsetResetStrategy, autoOffsetReset.duration); + } + + /** + * Creates an {@code AutoOffsetReset} instance representing "none". + * + * @return An {@link AutoOffsetReset} instance for no reset. + */ + public static AutoOffsetReset none() { + return new AutoOffsetReset(StrategyType.NONE, Optional.empty()); + } + + /** + * Creates an {@code AutoOffsetReset} instance representing "earliest". + * + * @return An {@link AutoOffsetReset} instance for the "earliest" offset. 
+ */ + public static AutoOffsetReset earliest() { + return new AutoOffsetReset(StrategyType.EARLIEST, Optional.empty()); + } + + /** + * Creates an {@code AutoOffsetReset} instance representing "latest". + * + * @return An {@code AutoOffsetReset} instance for the "latest" offset. + */ + public static AutoOffsetReset latest() { + return new AutoOffsetReset(StrategyType.LATEST, Optional.empty()); + } + + /** + * Creates an {@code AutoOffsetReset} instance for the specified reset duration. + * + * @param duration The duration to use for the offset reset; must be non-negative. + * @return An {@code AutoOffsetReset} instance with the specified duration. + * @throws IllegalArgumentException If the duration is negative. + */ + public static AutoOffsetReset byDuration(final Duration duration) { + if (duration.isNegative()) { + throw new IllegalArgumentException("Duration cannot be negative"); + } + return new AutoOffsetReset(StrategyType.BY_DURATION, Optional.of(duration)); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AutoOffsetReset that = (AutoOffsetReset) o; + return offsetResetStrategy == that.offsetResetStrategy && duration.equals(that.duration); + } + + @Override + public int hashCode() { + int result = offsetResetStrategy.hashCode(); + result = 31 * result + duration.hashCode(); + return result; + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/KafkaStreams.java b/streams/src/main/java/org/apache/kafka/streams/KafkaStreams.java index 584f7be307c6e..79e6af29be7ee 100644 --- a/streams/src/main/java/org/apache/kafka/streams/KafkaStreams.java +++ b/streams/src/main/java/org/apache/kafka/streams/KafkaStreams.java @@ -110,6 +110,7 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static org.apache.kafka.streams.StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG; import static org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT; import static org.apache.kafka.streams.internals.ApiUtils.prepareMillisCheckFailMsgPrefix; import static org.apache.kafka.streams.internals.ApiUtils.validateMillisecondDuration; @@ -987,6 +988,8 @@ private KafkaStreams(final TopologyMetadata topologyMetadata, ClientMetrics.addApplicationIdMetric(streamsMetrics, applicationConfigs.getString(StreamsConfig.APPLICATION_ID_CONFIG)); ClientMetrics.addTopologyDescriptionMetric(streamsMetrics, (metricsConfig, now) -> this.topologyMetadata.topologyDescriptionString()); ClientMetrics.addStateMetric(streamsMetrics, (metricsConfig, now) -> state); + ClientMetrics.addClientStateTelemetryMetric(streamsMetrics, (metricsConfig, now) -> state.ordinal()); + ClientMetrics.addClientRecordingLevelMetric(streamsMetrics, calculateMetricsRecordingLevel()); threads = Collections.synchronizedList(new LinkedList<>()); ClientMetrics.addNumAliveStreamThreadMetric(streamsMetrics, (metricsConfig, now) -> numLiveStreamThreads()); @@ -1250,6 +1253,25 @@ private Optional removeStreamThread(final long timeoutMs) throws Timeout return Optional.empty(); } + private int calculateMetricsRecordingLevel() { + final int recordingLevel; + final String recordingLevelString = applicationConfigs.getString(METRICS_RECORDING_LEVEL_CONFIG); + switch (recordingLevelString) { + case "INFO": + recordingLevel = 0; + break; + case "DEBUG": + recordingLevel = 1; + break; + case "TRACE": + recordingLevel = 2; + break; + default: + throw new 
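Editor's note: a short usage sketch of the new public AutoOffsetReset class added earlier in this patch. It assumes the Topology#addSource(AutoOffsetReset, String, String...) overload referenced in the class's own javadoc; the processor name "input-source" and topic "input-topic" are illustrative.

import org.apache.kafka.streams.AutoOffsetReset;
import org.apache.kafka.streams.Topology;

import java.time.Duration;

public final class AutoOffsetResetSketch {
    public static void main(final String[] args) {
        // Factory methods mirror the consumer reset strategies; byDuration rejects negative durations.
        final AutoOffsetReset earliest = AutoOffsetReset.earliest();
        final AutoOffsetReset byDuration = AutoOffsetReset.byDuration(Duration.ofHours(6));

        // Attach the reset policy when adding a source processor.
        final Topology topology = new Topology();
        topology.addSource(earliest, "input-source", "input-topic");

        System.out.println(earliest.equals(AutoOffsetReset.earliest())); // true: value semantics via equals/hashCode
        System.out.println(byDuration.equals(earliest));                 // false: different strategy
    }
}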
IllegalArgumentException("Unexpected recording level: " + recordingLevelString); + } + return recordingLevel; + } + /* * Takes a snapshot and counts the number of stream threads which are not in PENDING_SHUTDOWN or DEAD * @@ -1334,7 +1356,7 @@ private ScheduledExecutorService setupStateDirCleaner() { private static ScheduledExecutorService maybeCreateRocksDBMetricsRecordingService(final String clientId, final StreamsConfig config) { - if (RecordingLevel.forName(config.getString(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG)) == RecordingLevel.DEBUG) { + if (RecordingLevel.forName(config.getString(METRICS_RECORDING_LEVEL_CONFIG)) == RecordingLevel.DEBUG) { return Executors.newSingleThreadScheduledExecutor(r -> { final Thread thread = new Thread(r, clientId + "-RocksDBMetricsRecordingTrigger"); thread.setDaemon(true); @@ -1917,7 +1939,7 @@ public synchronized ClientInstanceIds clientInstanceIds(final Duration timeout) // could be `null` if telemetry is disabled on the client itself if (instanceId != null) { clientInstanceIds.addConsumerInstanceId( - globalStreamThread.getName(), + globalStreamThread.getName() + "-global-consumer", instanceId ); } else { diff --git a/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java b/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java index 2879436e5000d..7037e8d7fd3a5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/StreamsBuilder.java @@ -25,6 +25,7 @@ import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; +import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder; import org.apache.kafka.streams.kstream.internals.MaterializedInternal; @@ -144,7 +145,7 @@ public synchronized KStream stream(final String topic, * @return a {@link KStream} for the specified topics */ public synchronized KStream stream(final Collection topics) { - return stream(topics, Consumed.with(null, null, null, null)); + return stream(topics, Consumed.with(null, null)); } /** @@ -288,7 +289,7 @@ public synchronized KTable table(final String topic, * @return a {@link KTable} for the specified topic */ public synchronized KTable table(final String topic) { - return table(topic, new ConsumedInternal<>()); + return table(topic, Consumed.with(null, null)); } /** @@ -384,7 +385,9 @@ public synchronized GlobalKTable globalTable(final String topic, final MaterializedInternal> materializedInternal = new MaterializedInternal<>( Materialized.with(consumedInternal.keySerde(), consumedInternal.valueSerde()), - internalStreamsBuilder, topic + "-"); + internalStreamsBuilder, + topic + "-", + true /* force materializing global tables */); return internalStreamsBuilder.globalTable(topic, consumedInternal, materializedInternal); } @@ -508,8 +511,8 @@ public synchronized GlobalKTable globalTable(final String topic, * Adds a state store to the underlying {@link Topology}. *

              * It is required to connect state stores to {@link org.apache.kafka.streams.processor.api.Processor Processors}, - * {@link org.apache.kafka.streams.kstream.Transformer Transformers}, - * or {@link org.apache.kafka.streams.kstream.ValueTransformer ValueTransformers} before they can be used. + * or {@link org.apache.kafka.streams.kstream.KTable#transformValues(ValueTransformerWithKeySupplier, String...) ValueTransformers} + * before they can be used. * * @param builder the builder used to obtain this state store {@link StateStore} instance * @return itself @@ -517,7 +520,7 @@ public synchronized GlobalKTable globalTable(final String topic, */ public synchronized StreamsBuilder addStateStore(final StoreBuilder builder) { Objects.requireNonNull(builder, "builder can't be null"); - internalStreamsBuilder.addStateStore(new StoreBuilderWrapper(builder)); + internalStreamsBuilder.addStateStore(StoreBuilderWrapper.wrapStoreBuilder(builder)); return this; } @@ -538,8 +541,7 @@ public synchronized StreamsBuilder addStateStore(final StoreBuilder builder) * The default {@link TimestampExtractor} as specified in the {@link StreamsConfig config} is used. *
<p>
              * It is not required to connect a global store to the {@link Processor Processors}, - * {@link org.apache.kafka.streams.kstream.Transformer Transformers}, - * or {@link org.apache.kafka.streams.kstream.ValueTransformer ValueTransformer}; + * or {@link org.apache.kafka.streams.kstream.KTable#transformValues(ValueTransformerWithKeySupplier, String...) ValueTransformer}; * those have read-only access to all global stores by default. * * @param storeBuilder user defined {@link StoreBuilder}; can't be {@code null} @@ -556,7 +558,7 @@ public synchronized StreamsBuilder addGlobalStore(final StoreBuilder< Objects.requireNonNull(storeBuilder, "storeBuilder can't be null"); Objects.requireNonNull(consumed, "consumed can't be null"); internalStreamsBuilder.addGlobalStore( - new StoreBuilderWrapper(storeBuilder), + StoreBuilderWrapper.wrapStoreBuilder(storeBuilder), topic, new ConsumedInternal<>(consumed), stateUpdateSupplier, diff --git a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java index dafef7d7bc4d4..1801eeab0a023 100644 --- a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java +++ b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java @@ -17,6 +17,7 @@ package org.apache.kafka.streams; import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.consumer.ConsumerConfig; @@ -48,6 +49,7 @@ import org.apache.kafka.streams.processor.TimestampExtractor; import org.apache.kafka.streams.processor.assignment.TaskAssignor; import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier; +import org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper; import org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor; import org.apache.kafka.streams.processor.internals.assignment.RackAwareTaskAssignor; import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; @@ -163,6 +165,7 @@ public class StreamsConfig extends AbstractConfig { @Deprecated @SuppressWarnings("unused") public static final int DUMMY_THREAD_INDEX = 1; + public static final long MAX_TASK_IDLE_MS_DISABLED = -1; // We impose these limitations because client tags are encoded into the subscription info, @@ -422,6 +425,12 @@ public class StreamsConfig extends AbstractConfig { @SuppressWarnings("WeakerAccess") public static final String UPGRADE_FROM_38 = UpgradeFromValues.UPGRADE_FROM_38.toString(); + /** + * Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.9.x}. + */ + @SuppressWarnings("WeakerAccess") + public static final String UPGRADE_FROM_39 = UpgradeFromValues.UPGRADE_FROM_39.toString(); + /** * Config value for parameter {@link #PROCESSING_GUARANTEE_CONFIG "processing.guarantee"} for at-least-once processing guarantees. @@ -478,7 +487,7 @@ public class StreamsConfig extends AbstractConfig { private static final String BUILT_IN_METRICS_VERSION_DOC = "Version of the built-in metrics to use."; /** {@code cache.max.bytes.buffering} - * @deprecated since 3.4.0 Use {@link #STATESTORE_CACHE_MAX_BYTES_CONFIG "statestore.cache.max.bytes"} instead. */ + * @deprecated Since 3.4. Use {@link #STATESTORE_CACHE_MAX_BYTES_CONFIG "statestore.cache.max.bytes"} instead. 
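Not part of the patch: a minimal sketch of how the two new public constants above (MAX_TASK_IDLE_MS_DISABLED and UPGRADE_FROM_39) might be used when configuring an application; the application id and bootstrap servers are placeholders.

    import org.apache.kafka.streams.StreamsConfig;

    import java.util.Properties;

    public class UpgradeConfigExample {
        public static Properties streamsProperties() {
            final Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");             // placeholder
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
            // Rolling upgrade of an application that previously ran 3.9.x:
            props.put(StreamsConfig.UPGRADE_FROM_CONFIG, StreamsConfig.UPGRADE_FROM_39);
            // Disable task idling via the named constant instead of a magic -1:
            props.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, StreamsConfig.MAX_TASK_IDLE_MS_DISABLED);
            return props;
        }
    }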
*/ @SuppressWarnings("WeakerAccess") @Deprecated public static final String CACHE_MAX_BYTES_BUFFERING_CONFIG = "cache.max.bytes.buffering"; @@ -512,7 +521,7 @@ public class StreamsConfig extends AbstractConfig { /** * {@code default.deserialization.exception.handler} - * @deprecated since 4.0; use {@link #DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG} instead + * @deprecated Since 4.0. Use {@link #DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG} instead. */ @SuppressWarnings("WeakerAccess") @Deprecated @@ -527,7 +536,7 @@ public class StreamsConfig extends AbstractConfig { /** * {@code default.production.exception.handler} - * @deprecated since 4.0; Use {@link #PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG} instead + * @deprecated Since 4.0. Use {@link #PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG} instead. */ @SuppressWarnings("WeakerAccess") @Deprecated @@ -538,7 +547,10 @@ public class StreamsConfig extends AbstractConfig { public static final String PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG = "production.exception.handler"; private static final String PRODUCTION_EXCEPTION_HANDLER_CLASS_DOC = "Exception handling class that implements the org.apache.kafka.streams.errors.ProductionExceptionHandler interface."; - /** {@code default.dsl.store} */ + /** + * {@code default.dsl.store} + * @deprecated Since 3.7. Use {@link #DSL_STORE_SUPPLIERS_CLASS_CONFIG} instead. + */ @Deprecated @SuppressWarnings("WeakerAccess") public static final String DEFAULT_DSL_STORE_CONFIG = "default.dsl.store"; @@ -668,6 +680,11 @@ public class StreamsConfig extends AbstractConfig { "recommended setting for production; for development you can change this, by adjusting broker setting " + "transaction.state.log.replication.factor and transaction.state.log.min.isr."; + /** {@code processor.wrapper.class} */ + public static final String PROCESSOR_WRAPPER_CLASS_CONFIG = "processor.wrapper.class"; + public static final String PROCESSOR_WRAPPER_CLASS_DOC = "A processor wrapper class or class name that implements the org.apache.kafka.streams.state.ProcessorWrapper interface. 
" + + "Must be passed in to the StreamsBuilder or Topology constructor in order to take effect"; + /** {@code repartition.purge.interval.ms} */ @SuppressWarnings("WeakerAccess") public static final String REPARTITION_PURGE_INTERVAL_MS_CONFIG = "repartition.purge.interval.ms"; @@ -788,7 +805,7 @@ public class StreamsConfig extends AbstractConfig { UPGRADE_FROM_28 + "\", \"" + UPGRADE_FROM_30 + "\", \"" + UPGRADE_FROM_31 + "\", \"" + UPGRADE_FROM_32 + "\", \"" + UPGRADE_FROM_33 + "\", \"" + UPGRADE_FROM_34 + "\", \"" + UPGRADE_FROM_35 + "\", \"" + UPGRADE_FROM_36 + "\", \"" + UPGRADE_FROM_37 + "\", \"" + - UPGRADE_FROM_38 + "(for upgrading from the corresponding old version)."; + UPGRADE_FROM_38 + "\", \"" + UPGRADE_FROM_39 + "\", \"" + "(for upgrading from the corresponding old version)."; /** {@code topology.optimization} */ public static final String TOPOLOGY_OPTIMIZATION_CONFIG = "topology.optimization"; @@ -961,28 +978,6 @@ public class StreamsConfig extends AbstractConfig { DefaultProductionExceptionHandler.class.getName(), Importance.MEDIUM, PRODUCTION_EXCEPTION_HANDLER_CLASS_DOC) - .define(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, - Type.INT, - null, - Importance.MEDIUM, - RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC) - .define(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, - Type.STRING, - RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, - in(RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY), - Importance.MEDIUM, - RACK_AWARE_ASSIGNMENT_STRATEGY_DOC) - .define(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, - Type.LIST, - Collections.emptyList(), - atMostOfSize(MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE), - Importance.MEDIUM, - RACK_AWARE_ASSIGNMENT_TAGS_DOC) - .define(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, - Type.INT, - null, - Importance.MEDIUM, - RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC) .define(TASK_ASSIGNOR_CLASS_CONFIG, Type.STRING, null, @@ -1045,6 +1040,28 @@ public class StreamsConfig extends AbstractConfig { true, Importance.LOW, ENABLE_METRICS_PUSH_DOC) + .define(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, + Type.INT, + null, + Importance.LOW, + RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_DOC) + .define(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, + Type.STRING, + RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, + in(RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY), + Importance.LOW, + RACK_AWARE_ASSIGNMENT_STRATEGY_DOC) + .define(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, + Type.LIST, + Collections.emptyList(), + atMostOfSize(MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE), + Importance.LOW, + RACK_AWARE_ASSIGNMENT_TAGS_DOC) + .define(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, + Type.INT, + null, + Importance.LOW, + RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_DOC) .define(REPARTITION_PURGE_INTERVAL_MS_CONFIG, Type.LONG, DEFAULT_COMMIT_INTERVAL_MS, @@ -1117,6 +1134,11 @@ public class StreamsConfig extends AbstractConfig { atLeast(60 * 1000L), Importance.LOW, PROBING_REBALANCE_INTERVAL_MS_DOC) + .define(PROCESSOR_WRAPPER_CLASS_CONFIG, + Type.CLASS, + NoOpProcessorWrapper.class, + Importance.LOW, + PROCESSOR_WRAPPER_CLASS_DOC) .define(RECEIVE_BUFFER_CONFIG, Type.INT, 32 * 1024, @@ -1147,6 +1169,19 @@ public class StreamsConfig extends AbstractConfig { atLeast(0), Importance.LOW, CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC) + .define(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, + Type.STRING, + CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY, + 
ConfigDef.CaseInsensitiveValidString + .in(Utils.enumOptions(MetadataRecoveryStrategy.class)), + Importance.LOW, + CommonClientConfigs.METADATA_RECOVERY_STRATEGY_DOC) + .define(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, + Type.LONG, + CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, + atLeast(0), + Importance.LOW, + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) .define(ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, Type.CLASS, null, @@ -1191,34 +1226,27 @@ public class StreamsConfig extends AbstractConfig { // this is the list of configs for underlying clients // that streams prefer different default values - private static final Map PRODUCER_DEFAULT_OVERRIDES; - static { - final Map tempProducerDefaultOverrides = new HashMap<>(); - tempProducerDefaultOverrides.put(ProducerConfig.LINGER_MS_CONFIG, "100"); - PRODUCER_DEFAULT_OVERRIDES = Collections.unmodifiableMap(tempProducerDefaultOverrides); - } + private static final Map PRODUCER_DEFAULT_OVERRIDES = Map.of(ProducerConfig.LINGER_MS_CONFIG, "100"); private static final Map PRODUCER_EOS_OVERRIDES; static { final Map tempProducerDefaultOverrides = new HashMap<>(PRODUCER_DEFAULT_OVERRIDES); - tempProducerDefaultOverrides.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE); - tempProducerDefaultOverrides.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); - // Reduce the transaction timeout for quicker pending offset expiration on broker side. - tempProducerDefaultOverrides.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_TRANSACTION_TIMEOUT); - + tempProducerDefaultOverrides.putAll(Map.of( + ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE, + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true, + // Reduce the transaction timeout for quicker pending offset expiration on broker side. 
+ ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_TRANSACTION_TIMEOUT + )); PRODUCER_EOS_OVERRIDES = Collections.unmodifiableMap(tempProducerDefaultOverrides); } - private static final Map CONSUMER_DEFAULT_OVERRIDES; - static { - final Map tempConsumerDefaultOverrides = new HashMap<>(); - tempConsumerDefaultOverrides.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000"); - tempConsumerDefaultOverrides.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - tempConsumerDefaultOverrides.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); - tempConsumerDefaultOverrides.put("internal.leave.group.on.close", false); - tempConsumerDefaultOverrides.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "classic"); - CONSUMER_DEFAULT_OVERRIDES = Collections.unmodifiableMap(tempConsumerDefaultOverrides); - } + private static final Map CONSUMER_DEFAULT_OVERRIDES = Map.of( + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000", + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest", + ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false", + "internal.leave.group.on.close", false, + ConsumerConfig.GROUP_PROTOCOL_CONFIG, "classic" + ); private static final Map CONSUMER_EOS_OVERRIDES; static { @@ -1227,12 +1255,8 @@ public class StreamsConfig extends AbstractConfig { CONSUMER_EOS_OVERRIDES = Collections.unmodifiableMap(tempConsumerDefaultOverrides); } - private static final Map ADMIN_CLIENT_OVERRIDES; - static { - final Map tempAdminClientDefaultOverrides = new HashMap<>(); - tempAdminClientDefaultOverrides.put(AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG, true); - ADMIN_CLIENT_OVERRIDES = Collections.unmodifiableMap(tempAdminClientDefaultOverrides); - } + private static final Map ADMIN_CLIENT_OVERRIDES = + Map.of(AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG, true); public static class InternalConfig { // This is settable in the main Streams config, but it's a private API for now @@ -2001,7 +2025,7 @@ public DeserializationExceptionHandler deserializationExceptionHandler() { } /** - * @deprecated since kafka 4.0; use {@link #deserializationExceptionHandler()} instead + * @deprecated Since 4.0. Use {@link #deserializationExceptionHandler()} instead. */ @Deprecated @SuppressWarnings("WeakerAccess") @@ -2023,7 +2047,7 @@ public ProductionExceptionHandler productionExceptionHandler() { } /** - * @deprecated since kafka 4.0; use {@link #productionExceptionHandler()} instead + * @deprecated Since 4.0. Use {@link #productionExceptionHandler()} instead. 
*/ @Deprecated @SuppressWarnings("WeakerAccess") diff --git a/streams/src/main/java/org/apache/kafka/streams/Topology.java b/streams/src/main/java/org/apache/kafka/streams/Topology.java index 6b45d70a35ccc..fffca97400e33 100644 --- a/streams/src/main/java/org/apache/kafka/streams/Topology.java +++ b/streams/src/main/java/org/apache/kafka/streams/Topology.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.errors.TopologyException; +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.processor.ConnectedStoreProvider; @@ -29,12 +30,11 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.ProcessorAdapter; import org.apache.kafka.streams.processor.internals.ProcessorNode; import org.apache.kafka.streams.processor.internals.ProcessorTopology; import org.apache.kafka.streams.processor.internals.SinkNode; import org.apache.kafka.streams.processor.internals.SourceNode; -import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; +import org.apache.kafka.streams.processor.internals.StoreDelegatingProcessorSupplier; import org.apache.kafka.streams.state.StoreBuilder; import java.util.Set; @@ -73,11 +73,27 @@ protected Topology(final InternalTopologyBuilder internalTopologyBuilder) { * Sets the {@code auto.offset.reset} configuration when * {@link #addSource(AutoOffsetReset, String, String...) adding a source processor} or when creating {@link KStream} * or {@link KTable} via {@link StreamsBuilder}. + * + * @deprecated Since 4.0. Use {@link org.apache.kafka.streams.AutoOffsetReset} instead. */ + @Deprecated public enum AutoOffsetReset { EARLIEST, LATEST } + @Deprecated + private static AutoOffsetResetInternal convertOldToNew(final Topology.AutoOffsetReset resetPolicy) { + if (resetPolicy == null) { + return null; + } + + return new AutoOffsetResetInternal( + resetPolicy == org.apache.kafka.streams.Topology.AutoOffsetReset.EARLIEST + ? org.apache.kafka.streams.AutoOffsetReset.earliest() + : org.apache.kafka.streams.AutoOffsetReset.latest() + ); + } + /** * Add a new source that consumes the named topics and forward the records to child processor and/or sink nodes. * The source will use the {@link StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG default key deserializer} and @@ -130,11 +146,30 @@ public synchronized Topology addSource(final String name, * @param topics the name of one or more Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by another source + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, String...)} instead. */ + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final String... 
topics) { - internalTopologyBuilder.addSource(offsetReset, name, null, null, null, topics); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, null, null, null, topics); + return this; + } + + /** + * Adds a new source that consumes the specified topics and forwards the records to child processor and/or sink nodes. + * The source will use the specified {@link org.apache.kafka.streams.AutoOffsetReset offset reset policy} if no committed offsets are found. + * + * @param offsetReset the auto offset reset policy to use for this source if no committed offsets are found + * @param name the unique name of the source used to reference this node when {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children} + * @param topics the name of one or more Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if a processor is already added or if topics have already been registered by another source + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final String... topics) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, null, null, null, topics); return this; } @@ -152,11 +187,35 @@ public synchronized Topology addSource(final AutoOffsetReset offsetReset, * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by another source + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, Pattern)} instead. */ + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern) { - internalTopologyBuilder.addSource(offsetReset, name, null, null, null, topicPattern); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, null, null, null, topicPattern); + return this; + } + + /** + * Add a new source that consumes from topics matching the given pattern + * and forward the records to child processor and/or sink nodes. + * The source will use the {@link StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG default key deserializer} and + * {@link StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG default value deserializer} specified in the + * {@link StreamsConfig stream configuration}. + * The default {@link TimestampExtractor} as specified in the {@link StreamsConfig config} is used. + * + * @param offsetReset the auto offset reset policy value for this source if no committed offsets found + * @param name the unique name of the source used to reference this node when + * {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children}. 
+ * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if processor is already added or if topics have already been registered by another source + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final Pattern topicPattern) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, null, null, null, topicPattern); return this; } @@ -218,12 +277,34 @@ public synchronized Topology addSource(final TimestampExtractor timestampExtract * @param topics the name of one or more Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by another source + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, TimestampExtractor, String, String...)} instead. */ + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics) { - internalTopologyBuilder.addSource(offsetReset, name, timestampExtractor, null, null, topics); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, timestampExtractor, null, null, topics); + return this; + } + + /** + * Adds a new source that consumes the specified topics with a specified {@link TimestampExtractor} + * and forwards the records to child processor and/or sink nodes. + * The source will use the provided timestamp extractor to determine the timestamp of each record. + * + * @param offsetReset the auto offset reset policy to use if no committed offsets are found + * @param timestampExtractor the timestamp extractor to use for this source + * @param name the unique name of the source used to reference this node when {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children} + * @param topics the name of one or more Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if a processor is already added or if topics have already been registered by another source + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final TimestampExtractor timestampExtractor, + final String name, + final String... topics) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, timestampExtractor, null, null, topics); return this; } @@ -243,12 +324,34 @@ public synchronized Topology addSource(final AutoOffsetReset offsetReset, * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by another source + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, TimestampExtractor, String, Pattern)} instead. 
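All of the overload deprecations above follow the same migration path: the nested Topology.AutoOffsetReset enum gives way to the standalone org.apache.kafka.streams.AutoOffsetReset class and its earliest()/latest() factories used by convertOldToNew. A before/after sketch, not part of the patch, with node and topic names invented for illustration:

    import org.apache.kafka.streams.AutoOffsetReset;
    import org.apache.kafka.streams.Topology;

    public class AutoOffsetResetMigration {
        @SuppressWarnings("deprecation")
        public static Topology build() {
            final Topology topology = new Topology();
            // Deprecated in 4.0: the nested enum overload
            topology.addSource(Topology.AutoOffsetReset.EARLIEST, "old-source", "input-a");
            // Replacement: the new org.apache.kafka.streams.AutoOffsetReset overload
            topology.addSource(AutoOffsetReset.latest(), "new-source", "input-b");
            return topology;
        }
    }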
*/ + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern) { - internalTopologyBuilder.addSource(offsetReset, name, timestampExtractor, null, null, topicPattern); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, timestampExtractor, null, null, topicPattern); + return this; + } + + /** + * Adds a new source that consumes from topics matching the given pattern with a specified {@link TimestampExtractor} + * and forwards the records to child processor and/or sink nodes. + * The source will use the provided timestamp extractor to determine the timestamp of each record. + * + * @param offsetReset the auto offset reset policy to use if no committed offsets are found + * @param timestampExtractor the timestamp extractor to use for this source + * @param name the unique name of the source used to reference this node when {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children} + * @param topicPattern the regular expression pattern to match Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if a processor is already added or if topics have already been registered by another source + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final TimestampExtractor timestampExtractor, + final String name, + final Pattern topicPattern) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, timestampExtractor, null, null, topicPattern); return this; } @@ -319,14 +422,42 @@ public synchronized Topology addSource(final String name, * @param topics the name of one or more Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by name + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, Deserializer, Deserializer, String...)} instead. */ - @SuppressWarnings("overloads") + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String... topics) { - internalTopologyBuilder.addSource(offsetReset, name, null, keyDeserializer, valueDeserializer, topics); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, null, keyDeserializer, valueDeserializer, topics); + return this; + } + + /** + * Add a new source that consumes from topics matching the given pattern and forwards the records to child processor + * and/or sink nodes. + * The source will use the specified key and value deserializers. + * The provided de-/serializers will be used for all the specified topics, so care should be taken when specifying + * topics that share the same key-value data format. + * + * @param offsetReset the auto offset reset policy to use for this stream if no committed offsets found + * @param name the unique name of the source used to reference this node when + * {@link #addProcessor(String, ProcessorSupplier, String...) 
adding processor children} + * @param keyDeserializer key deserializer used to read this source, if not specified the default + * key deserializer defined in the configs will be used + * @param valueDeserializer value deserializer used to read this source, + * if not specified the default value deserializer defined in the configs will be used + * @param topics the name of one or more Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if processor is already added or if topics have already been registered by name + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final Deserializer keyDeserializer, + final Deserializer valueDeserializer, + final String... topics) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, null, keyDeserializer, valueDeserializer, topics); return this; } @@ -348,13 +479,42 @@ public synchronized Topology addSource(final AutoOffsetReset offsetReset, * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by name + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, Deserializer, Deserializer, Pattern)} instead. */ + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final Pattern topicPattern) { - internalTopologyBuilder.addSource(offsetReset, name, null, keyDeserializer, valueDeserializer, topicPattern); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, null, keyDeserializer, valueDeserializer, topicPattern); + return this; + } + + /** + * Add a new source that consumes from topics matching the given pattern and forwards the records to child processor + * and/or sink nodes. + * The source will use the specified key and value deserializers. + * The provided de-/serializers will be used for all matched topics, so care should be taken to specify patterns for + * topics that share the same key-value data format. + * + * @param offsetReset the auto offset reset policy to use for this stream if no committed offsets found + * @param name the unique name of the source used to reference this node when + * {@link #addProcessor(String, ProcessorSupplier, String...) 
adding processor children} + * @param keyDeserializer key deserializer used to read this source, if not specified the default + * key deserializer defined in the configs will be used + * @param valueDeserializer value deserializer used to read this source, + * if not specified the default value deserializer defined in the configs will be used + * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if processor is already added or if topics have already been registered by name + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final Deserializer keyDeserializer, + final Deserializer valueDeserializer, + final Pattern topicPattern) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, null, keyDeserializer, valueDeserializer, topicPattern); return this; } @@ -375,15 +535,43 @@ public synchronized Topology addSource(final AutoOffsetReset offsetReset, * @param topics the name of one or more Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by another source + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, String...)} instead. */ - @SuppressWarnings("overloads") + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String... topics) { - internalTopologyBuilder.addSource(offsetReset, name, timestampExtractor, keyDeserializer, valueDeserializer, topics); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, timestampExtractor, keyDeserializer, valueDeserializer, topics); + return this; + } + + /** + * Add a new source that consumes the named topics and forwards the records to child processor and/or sink nodes. + * The source will use the specified key and value deserializers. + * + * @param offsetReset the auto offset reset policy to use for this stream if no committed offsets found + * @param name the unique name of the source used to reference this node when + * {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children}. + * @param timestampExtractor the stateless timestamp extractor used for this source, + * if not specified the default extractor defined in the configs will be used + * @param keyDeserializer key deserializer used to read this source, if not specified the default + * key deserializer defined in the configs will be used + * @param valueDeserializer value deserializer used to read this source, + * if not specified the default value deserializer defined in the configs will be used + * @param topics the name of one or more Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if processor is already added or if topics have already been registered by another source + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final TimestampExtractor timestampExtractor, + final Deserializer keyDeserializer, + final Deserializer valueDeserializer, + final String... 
topics) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, timestampExtractor, keyDeserializer, valueDeserializer, topics); return this; } @@ -407,15 +595,46 @@ public synchronized Topology addSource(final AutoOffsetReset offsetReset, * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume * @return itself * @throws TopologyException if processor is already added or if topics have already been registered by name + * @deprecated Since 4.0. Use {@link #addSource(org.apache.kafka.streams.AutoOffsetReset, String, TimestampExtractor, Deserializer, Deserializer, Pattern)} instead. */ - @SuppressWarnings("overloads") + @Deprecated public synchronized Topology addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final Pattern topicPattern) { - internalTopologyBuilder.addSource(offsetReset, name, timestampExtractor, keyDeserializer, valueDeserializer, topicPattern); + internalTopologyBuilder.addSource(convertOldToNew(offsetReset), name, timestampExtractor, keyDeserializer, valueDeserializer, topicPattern); + return this; + } + + /** + * Add a new source that consumes from topics matching the given pattern and forwards the records to child processor + * and/or sink nodes. + * The source will use the specified key and value deserializers. + * The provided de-/serializers will be used for all matched topics, so care should be taken to specify patterns for + * topics that share the same key-value data format. + * + * @param offsetReset the auto offset reset policy to use for this stream if no committed offsets found + * @param name the unique name of the source used to reference this node when + * {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children}. + * @param timestampExtractor the stateless timestamp extractor used for this source, + * if not specified the default extractor defined in the configs will be used + * @param keyDeserializer key deserializer used to read this source, if not specified the default + * key deserializer defined in the configs will be used + * @param valueDeserializer value deserializer used to read this source, + * if not specified the default value deserializer defined in the configs will be used + * @param topicPattern regular expression pattern to match Kafka topics that this source is to consume + * @return itself + * @throws TopologyException if processor is already added or if topics have already been registered by name + */ + public synchronized Topology addSource(final org.apache.kafka.streams.AutoOffsetReset offsetReset, + final String name, + final TimestampExtractor timestampExtractor, + final Deserializer keyDeserializer, + final Deserializer valueDeserializer, + final Pattern topicPattern) { + internalTopologyBuilder.addSource(new AutoOffsetResetInternal(offsetReset), name, timestampExtractor, keyDeserializer, valueDeserializer, topicPattern); return this; } @@ -655,48 +874,6 @@ public synchronized Topology addSink(final String name, return this; } - /** - * Add a new processor node that receives and processes records output by one or more parent source or processor - * node. - * Any new record output by this processor will be forwarded to its child processor or sink nodes. - * The supplier should always generate a new instance each time - * {@link org.apache.kafka.streams.processor.ProcessorSupplier#get()} gets called. 
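The removed javadoc above restates a supplier contract that still applies to the remaining addProcessor(String, ProcessorSupplier, String...) overload: get() must return a fresh Processor on every call, and stores() lets the topology connect state stores automatically. A sketch of a conforming supplier, not part of the patch; the class, store name, and types are illustrative:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorSupplier;
    import org.apache.kafka.streams.processor.api.Record;
    import org.apache.kafka.streams.state.StoreBuilder;
    import org.apache.kafka.streams.state.Stores;

    import java.util.Set;

    public class CountingProcessorSupplier implements ProcessorSupplier<String, String, String, Long> {

        @Override
        public Processor<String, String, String, Long> get() {
            // A new Processor instance per call, as the supplier contract requires.
            return new Processor<String, String, String, Long>() {
                @Override
                public void process(final Record<String, String> record) {
                    // Real logic would update the "counts" store and forward a result.
                }
            };
        }

        @Override
        public Set<StoreBuilder<?>> stores() {
            // Declared here so addProcessor() can add and connect the store automatically.
            final StoreBuilder<?> counts = Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("counts"), Serdes.String(), Serdes.Long());
            return Set.of(counts);
        }
    }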
Creating a single - * {@link org.apache.kafka.streams.processor.Processor} object and returning the same object reference in - * {@link org.apache.kafka.streams.processor.ProcessorSupplier#get()} would be a violation of the supplier pattern - * and leads to runtime exceptions. - * If {@code supplier} provides stores via {@link ConnectedStoreProvider#stores()}, the provided {@link StoreBuilder}s - * will be added to the topology and connected to this processor automatically. - * - * @param name the unique name of the processor node - * @param supplier the supplier used to obtain this node's {@link org.apache.kafka.streams.processor.Processor} instance - * @param parentNames the name of one or more source or processor nodes whose output records this processor should receive - * and process - * @return itself - * @throws TopologyException if parent processor is not added yet, or if this processor's name is equal to the parent's name - * @deprecated Since 2.7.0 Use {@link #addProcessor(String, ProcessorSupplier, String...)} instead. - */ - @SuppressWarnings("rawtypes") - @Deprecated - public synchronized Topology addProcessor(final String name, - final org.apache.kafka.streams.processor.ProcessorSupplier supplier, - final String... parentNames) { - return addProcessor( - name, - new ProcessorSupplier() { - @Override - public Set> stores() { - return supplier.stores(); - } - - @Override - public org.apache.kafka.streams.processor.api.Processor get() { - return ProcessorAdapter.adaptRaw(supplier.get()); - } - }, - parentNames - ); - } - /** * Add a new processor node that receives and processes records output by one or more parent source or processor * node. @@ -714,8 +891,10 @@ public org.apache.kafka.streams.processor.api.Processor Topology addProcessor(final String name, final ProcessorSupplier supplier, final String... 
parentNames) { - internalTopologyBuilder.addProcessor(name, supplier, parentNames); - final Set> stores = supplier.stores(); + final ProcessorSupplier wrapped = internalTopologyBuilder.wrapProcessorSupplier(name, supplier); + internalTopologyBuilder.addProcessor(name, wrapped, parentNames); + final Set> stores = wrapped.stores(); + if (stores != null) { for (final StoreBuilder storeBuilder : stores) { internalTopologyBuilder.addStateStore(storeBuilder, name); @@ -771,7 +950,14 @@ public synchronized Topology addReadOnlyStateStore(final StoreBuilder final ProcessorSupplier stateUpdateSupplier) { storeBuilder.withLoggingDisabled(); - internalTopologyBuilder.addSource(AutoOffsetReset.EARLIEST, sourceName, timestampExtractor, keyDeserializer, valueDeserializer, topic); + internalTopologyBuilder.addSource( + new AutoOffsetResetInternal(org.apache.kafka.streams.AutoOffsetReset.earliest()), + sourceName, + timestampExtractor, + keyDeserializer, + valueDeserializer, + topic + ); internalTopologyBuilder.addProcessor(processorName, stateUpdateSupplier, sourceName); internalTopologyBuilder.addStateStore(storeBuilder, processorName); internalTopologyBuilder.connectSourceStoreAndTopic(storeBuilder.name(), topic); @@ -851,14 +1037,13 @@ public synchronized Topology addGlobalStore(final StoreBuilder sto final String processorName, final ProcessorSupplier stateUpdateSupplier) { internalTopologyBuilder.addGlobalStore( - new StoreBuilderWrapper(storeBuilder), sourceName, null, keyDeserializer, valueDeserializer, topic, processorName, - stateUpdateSupplier, + new StoreDelegatingProcessorSupplier<>(stateUpdateSupplier, Set.of(storeBuilder)), true ); return this; @@ -897,14 +1082,13 @@ public synchronized Topology addGlobalStore(final StoreBuilder sto final String processorName, final ProcessorSupplier stateUpdateSupplier) { internalTopologyBuilder.addGlobalStore( - new StoreBuilderWrapper(storeBuilder), sourceName, timestampExtractor, keyDeserializer, valueDeserializer, topic, processorName, - stateUpdateSupplier, + new StoreDelegatingProcessorSupplier<>(stateUpdateSupplier, Set.of(storeBuilder)), true ); return this; diff --git a/streams/src/main/java/org/apache/kafka/streams/TopologyConfig.java b/streams/src/main/java/org/apache/kafka/streams/TopologyConfig.java index 2e62cdccceeb2..e96d5281d090d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/TopologyConfig.java +++ b/streams/src/main/java/org/apache/kafka/streams/TopologyConfig.java @@ -27,6 +27,7 @@ import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.internals.MaterializedInternal; import org.apache.kafka.streams.processor.TimestampExtractor; +import org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper; import org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper; import org.apache.kafka.streams.state.DslStoreSuppliers; @@ -38,6 +39,7 @@ import java.util.function.Supplier; import static org.apache.kafka.common.config.ConfigDef.ValidString.in; +import static org.apache.kafka.common.utils.Utils.mkObjectProperties; import static org.apache.kafka.streams.StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG; import static org.apache.kafka.streams.StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_DOC; import static org.apache.kafka.streams.StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG; @@ -57,6 +59,8 @@ import static org.apache.kafka.streams.StreamsConfig.MAX_TASK_IDLE_MS_CONFIG; import static org.apache.kafka.streams.StreamsConfig.MAX_TASK_IDLE_MS_DOC; import 
static org.apache.kafka.streams.StreamsConfig.PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_DOC; import static org.apache.kafka.streams.StreamsConfig.ROCKS_DB; import static org.apache.kafka.streams.StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG; import static org.apache.kafka.streams.StreamsConfig.STATESTORE_CACHE_MAX_BYTES_DOC; @@ -68,13 +72,26 @@ * Streams configs that apply at the topology level. The values in the {@link StreamsConfig} parameter of the * {@link org.apache.kafka.streams.KafkaStreams} constructor or the {@link KafkaStreamsNamedTopologyWrapper} constructor (deprecated) * will determine the defaults, which can then be overridden for specific topologies by passing them in when creating the - * topology builders via the {@link org.apache.kafka.streams.StreamsBuilder#StreamsBuilder(TopologyConfig) StreamsBuilder(TopologyConfig)} method. + * topology builders via the {@link StreamsBuilder#StreamsBuilder(TopologyConfig)} constructor for DSL applications, + * or the {@link Topology#Topology(TopologyConfig)} for PAPI applications. + *
<p>
              + * Note that some configs, such as the {@code processor.wrapper.class} config, can only take effect while the + * topology is being built, which means they have to be passed in as a TopologyConfig to the + * {@link Topology#Topology(TopologyConfig)} constructor (PAPI) or the + * {@link StreamsBuilder#StreamsBuilder(TopologyConfig)} constructor (DSL). + * If they are only set in the configs passed in to the KafkaStreams constructor, it will be too late for them + * to be applied and the config will be ignored. */ @SuppressWarnings("deprecation") public final class TopologyConfig extends AbstractConfig { private static final ConfigDef CONFIG; static { CONFIG = new ConfigDef() + .define(PROCESSOR_WRAPPER_CLASS_CONFIG, + Type.CLASS, + NoOpProcessorWrapper.class.getName(), + Importance.LOW, + PROCESSOR_WRAPPER_CLASS_DOC) .define(BUFFERED_RECORDS_PER_PARTITION_CONFIG, Type.INT, null, @@ -147,8 +164,8 @@ public final class TopologyConfig extends AbstractConfig { public final Supplier deserializationExceptionHandlerSupplier; public final Supplier processingExceptionHandlerSupplier; - public TopologyConfig(final StreamsConfig globalAppConfigs) { - this(null, globalAppConfigs, new Properties()); + public TopologyConfig(final StreamsConfig configs) { + this(null, configs, mkObjectProperties(configs.originals())); } public TopologyConfig(final String topologyName, final StreamsConfig globalAppConfigs, final Properties topologyOverrides) { diff --git a/streams/src/main/java/org/apache/kafka/streams/TopologyDescription.java b/streams/src/main/java/org/apache/kafka/streams/TopologyDescription.java index 77dc5049c34df..0ab0e0d92ac2e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/TopologyDescription.java +++ b/streams/src/main/java/org/apache/kafka/streams/TopologyDescription.java @@ -32,7 +32,7 @@ * In contrast, two sub-topologies are not connected but can be linked to each other via topics, i.e., if one * sub-topology {@link Topology#addSink(String, String, String...) writes} into a topic and another sub-topology * {@link Topology#addSource(String, String...) reads} from the same topic. - * Message {@link ProcessorContext#forward(Record, String) forwards} using custom Processors and Transformers are not considered in the topology graph. + * Message {@link ProcessorContext#forward(Record, String) forwards} using custom Processors are not considered in the topology graph. *
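The point made in the TopologyConfig javadoc above, that processor.wrapper.class only takes effect if it reaches the topology builder, is easiest to see in code. A hedged sketch, not part of the patch; the wrapper class name, application id, and bootstrap servers are placeholders:

    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.streams.TopologyConfig;

    import java.util.Properties;

    public class ProcessorWrapperExample {
        public static StreamsBuilder builderWithWrapper() {
            final Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wrapper-demo");        // placeholder
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder
            // Hypothetical class: it must exist on the classpath and implement the ProcessorWrapper interface.
            props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, "com.example.MyProcessorWrapper");

            // The config has to be visible while the topology is built, so hand it to the builder
            // via TopologyConfig; setting it only on the KafkaStreams constructor is too late.
            final TopologyConfig topologyConfig = new TopologyConfig(new StreamsConfig(props));
            return new StreamsBuilder(topologyConfig);
            // PAPI equivalent: new Topology(topologyConfig)
        }
    }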
<p>
              * When {@link KafkaStreams#start()} is called, different sub-topologies will be constructed and executed as independent * {@link StreamTask tasks}. diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/BrokerNotFoundException.java b/streams/src/main/java/org/apache/kafka/streams/errors/BrokerNotFoundException.java index d2df27f8d3495..bab44a653571a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/BrokerNotFoundException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/BrokerNotFoundException.java @@ -16,13 +16,13 @@ */ package org.apache.kafka.streams.errors; - /** * Indicates that none of the specified {@link org.apache.kafka.streams.StreamsConfig#BOOTSTRAP_SERVERS_CONFIG brokers} * could be found. * * @see org.apache.kafka.streams.StreamsConfig */ +@SuppressWarnings("unused") public class BrokerNotFoundException extends StreamsException { private static final long serialVersionUID = 1L; diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.java index d6cc8e915e76f..5994326770c89 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/DefaultProductionExceptionHandler.java @@ -26,6 +26,10 @@ * happens while attempting to produce result records. */ public class DefaultProductionExceptionHandler implements ProductionExceptionHandler { + /** + * @deprecated Since 3.9. Use {@link #handle(ErrorHandlerContext, ProducerRecord, Exception)} instead. + */ + @SuppressWarnings("deprecation") @Deprecated @Override public ProductionExceptionHandlerResponse handle(final ProducerRecord record, diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/DeserializationExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/DeserializationExceptionHandler.java index 198a97cce448a..0b44e04d79114 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/DeserializationExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/DeserializationExceptionHandler.java @@ -29,14 +29,20 @@ public interface DeserializationExceptionHandler extends Configurable { /** * Inspect a record and the exception received. - *

<p> - * Note, that the passed in {@link ProcessorContext} only allows to access metadata like the task ID. + * + * <p>

              Note, that the passed in {@link ProcessorContext} only allows to access metadata like the task ID. * However, it cannot be used to emit records via {@link ProcessorContext#forward(Object, Object)}; * calling {@code forward()} (and some other methods) would result in a runtime exception. * - * @param context processor context - * @param record record that failed deserialization - * @param exception the actual exception + * @param context + * Processor context. + * @param record + * Record that failed deserialization. + * @param exception + * The actual exception. + * + * @return Whether to continue or stop processing. + * * @deprecated Since 3.9. Use {@link #handle(ErrorHandlerContext, ConsumerRecord, Exception)} instead. */ @Deprecated @@ -49,9 +55,14 @@ default DeserializationHandlerResponse handle(final ProcessorContext context, /** * Inspect a record and the exception received. * - * @param context error handler context - * @param record record that failed deserialization - * @param exception the actual exception + * @param context + * Error handler context. + * @param record + * Record that failed deserialization. + * @param exception + * The actual exception. + * + * @return Whether to continue or stop processing. */ default DeserializationHandlerResponse handle(final ErrorHandlerContext context, final ConsumerRecord record, @@ -63,15 +74,19 @@ default DeserializationHandlerResponse handle(final ErrorHandlerContext context, * Enumeration that describes the response from the exception handler. */ enum DeserializationHandlerResponse { - /* continue with processing */ + /** Continue processing. */ CONTINUE(0, "CONTINUE"), - /* fail the processing and stop */ + /** Fail processing. */ FAIL(1, "FAIL"); - /** an english description of the api--this is for debugging and can change */ + /** + * An english description for the used option. This is for debugging only and may change. + */ public final String name; - /** the permanent and immutable id of an API--this can't change ever */ + /** + * The permanent and immutable id for the used option. This can't change ever. + */ public final int id; DeserializationHandlerResponse(final int id, final String name) { diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/ErrorHandlerContext.java b/streams/src/main/java/org/apache/kafka/streams/errors/ErrorHandlerContext.java index 82d325812552d..d471673a48ed4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/ErrorHandlerContext.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/ErrorHandlerContext.java @@ -26,7 +26,6 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; - /** * This interface allows user code to inspect the context of a record that has failed during processing. * @@ -48,7 +47,7 @@ public interface ErrorHandlerContext { * Additionally, when writing into a changelog topic, there is no associated input record, * and thus no topic name is available. * - * @return the topic name + * @return The topic name. */ String topic(); @@ -66,7 +65,7 @@ public interface ErrorHandlerContext { * Additionally, when writing into a changelog topic, there is no associated input record, * and thus no partition is available. * - * @return the partition ID + * @return The partition ID. */ int partition(); @@ -84,7 +83,7 @@ public interface ErrorHandlerContext { * Additionally, when writing into a changelog topic, there is no associated input record, * and thus no offset is available. 
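Since the hunks above deprecate the ProcessorContext-based handle() and document its ErrorHandlerContext-based replacement, a small illustrative handler may help; the class name and the skip-and-log policy are invented for this sketch and are not part of the patch:

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
    import org.apache.kafka.streams.errors.ErrorHandlerContext;

    import java.util.Map;

    public class SkipCorruptRecordsHandler implements DeserializationExceptionHandler {

        @Override
        public DeserializationHandlerResponse handle(final ErrorHandlerContext context,
                                                     final ConsumerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
            // Record metadata now comes from the ErrorHandlerContext rather than the ProcessorContext.
            System.err.printf("Skipping bad record at %s-%d, offset %d (task %s)%n",
                context.topic(), context.partition(), context.offset(), context.taskId());
            return DeserializationHandlerResponse.CONTINUE;
        }

        @Override
        public void configure(final Map<String, ?> configs) { }
    }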
* - * @return the offset + * @return The offset. */ long offset(); @@ -102,21 +101,21 @@ public interface ErrorHandlerContext { * Additionally, when writing into a changelog topic, there is no associated input record, * and thus no headers are available. * - * @return the headers + * @return The headers. */ Headers headers(); /** * Return the current processor node ID. * - * @return the processor node ID + * @return The processor node ID. */ String processorNodeId(); /** * Return the task ID. * - * @return the task ID + * @return The task ID. */ TaskId taskId(); @@ -138,14 +137,14 @@ public interface ErrorHandlerContext { * if this method is invoked from the punctuate call): *

<ul> * <li>In case of {@link PunctuationType#STREAM_TIME} timestamp is defined as the current task's stream time, - * which is defined as the largest timestamp of any record processed by the task - * <li>In case of {@link PunctuationType#WALL_CLOCK_TIME} timestamp is defined the current system time + * which is defined as the largest timestamp of any record processed by the task</li> + * <li>In case of {@link PunctuationType#WALL_CLOCK_TIME} timestamp is defined the current system time</li> * </ul> * * <p>
              If it is triggered from a deserialization failure, timestamp is defined as the timestamp of the * current rawRecord {@link org.apache.kafka.clients.consumer.ConsumerRecord ConsumerRecord}. * - * @return the timestamp + * @return The timestamp. */ long timestamp(); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStoreException.java b/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStoreException.java index 6ad06a54096dd..f5fbef7254744 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStoreException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStoreException.java @@ -16,10 +16,9 @@ */ package org.apache.kafka.streams.errors; - /** * Indicates that there was a problem when trying to access a {@link org.apache.kafka.streams.processor.StateStore StateStore}. - * {@code InvalidStateStoreException} is not thrown directly but only its following sub-classes. + * {@code InvalidStateStoreException} is not thrown directly but only its following subclasses. */ public class InvalidStateStoreException extends StreamsException { diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.java b/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.java index e85a0375c4245..f27926bfc653b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/InvalidStateStorePartitionException.java @@ -21,7 +21,8 @@ /** * Indicates that the specific state store being queried via * {@link org.apache.kafka.streams.StoreQueryParameters} used a partitioning that is not assigned to this instance. - * You can use {@link KafkaStreams#metadataForAllStreamsClients()} to discover the correct instance that hosts the requested partition. + * You can use {@link KafkaStreams#metadataForAllStreamsClients()} to discover the correct instance + * that hosts the requested partition. */ public class InvalidStateStorePartitionException extends InvalidStateStoreException { @@ -31,6 +32,7 @@ public InvalidStateStorePartitionException(final String message) { super(message); } + @SuppressWarnings("unused") public InvalidStateStorePartitionException(final String message, final Throwable throwable) { super(message, throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/LockException.java b/streams/src/main/java/org/apache/kafka/streams/errors/LockException.java index 80bb592b4d4ab..a16171f5ee4aa 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/LockException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/LockException.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.streams.errors; - /** * Indicates that the state store directory lock could not be acquired because another thread holds the lock. 
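As the InvalidStateStorePartitionException javadoc above suggests, KafkaStreams#metadataForAllStreamsClients() can be used to find the instance that actually hosts a given store. A brief sketch, not part of the patch, assuming a running KafkaStreams instance and a store name supplied by the caller:

    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsMetadata;

    public class StoreHostLookup {
        public static void printHosts(final KafkaStreams streams, final String storeName) {
            for (final StreamsMetadata metadata : streams.metadataForAllStreamsClients()) {
                if (metadata.stateStoreNames().contains(storeName)) {
                    System.out.println(storeName + " is hosted on " + metadata.hostInfo());
                }
            }
        }
    }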
* diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.java index a93b7c99517c3..6de997be98653 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueExceptionHandler.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.streams.errors; - import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.streams.processor.ProcessorContext; @@ -32,16 +31,24 @@ public class LogAndContinueExceptionHandler implements DeserializationExceptionHandler { private static final Logger log = LoggerFactory.getLogger(LogAndContinueExceptionHandler.class); + /** + * @deprecated Since 3.9. Use {@link #handle(ErrorHandlerContext, ConsumerRecord, Exception)} instead. + */ + @SuppressWarnings("deprecation") @Deprecated @Override public DeserializationHandlerResponse handle(final ProcessorContext context, final ConsumerRecord record, final Exception exception) { - log.warn("Exception caught during Deserialization, " + - "taskId: {}, topic: {}, partition: {}, offset: {}", - context.taskId(), record.topic(), record.partition(), record.offset(), - exception); + log.warn( + "Exception caught during Deserialization, taskId: {}, topic: {}, partition: {}, offset: {}", + context.taskId(), + record.topic(), + record.partition(), + record.offset(), + exception + ); return DeserializationHandlerResponse.CONTINUE; } @@ -51,10 +58,14 @@ public DeserializationHandlerResponse handle(final ErrorHandlerContext context, final ConsumerRecord record, final Exception exception) { - log.warn("Exception caught during Deserialization, " + - "taskId: {}, topic: {}, partition: {}, offset: {}", - context.taskId(), record.topic(), record.partition(), record.offset(), - exception); + log.warn( + "Exception caught during Deserialization, taskId: {}, topic: {}, partition: {}, offset: {}", + context.taskId(), + record.topic(), + record.partition(), + record.offset(), + exception + ); return DeserializationHandlerResponse.CONTINUE; } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.java index 113510d5889ae..c832ab142007c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndContinueProcessingExceptionHandler.java @@ -32,10 +32,15 @@ public class LogAndContinueProcessingExceptionHandler implements ProcessingExcep @Override public ProcessingHandlerResponse handle(final ErrorHandlerContext context, final Record record, final Exception exception) { - log.warn("Exception caught during message processing, " + - "processor node: {}, taskId: {}, source topic: {}, source partition: {}, source offset: {}", - context.processorNodeId(), context.taskId(), context.topic(), context.partition(), context.offset(), - exception); + log.warn( + "Exception caught during message processing, processor node: {}, taskId: {}, source topic: {}, source partition: {}, source offset: {}", + context.processorNodeId(), + context.taskId(), + context.topic(), + context.partition(), + context.offset(), + exception + ); return ProcessingHandlerResponse.CONTINUE; } diff --git 
a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.java index 5fdda623bdd2c..20e6b9414de27 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailExceptionHandler.java @@ -24,7 +24,6 @@ import java.util.Map; - /** * Deserialization handler that logs a deserialization exception and then * signals the processing pipeline to stop processing more records and fail. @@ -32,16 +31,24 @@ public class LogAndFailExceptionHandler implements DeserializationExceptionHandler { private static final Logger log = LoggerFactory.getLogger(LogAndFailExceptionHandler.class); - @Override + /** + * @deprecated Since 3.9. Use {@link #handle(ErrorHandlerContext, ConsumerRecord, Exception)} instead. + */ + @SuppressWarnings("deprecation") @Deprecated + @Override public DeserializationHandlerResponse handle(final ProcessorContext context, final ConsumerRecord record, final Exception exception) { - log.error("Exception caught during Deserialization, " + - "taskId: {}, topic: {}, partition: {}, offset: {}", - context.taskId(), record.topic(), record.partition(), record.offset(), - exception); + log.error( + "Exception caught during Deserialization, taskId: {}, topic: {}, partition: {}, offset: {}", + context.taskId(), + record.topic(), + record.partition(), + record.offset(), + exception + ); return DeserializationHandlerResponse.FAIL; } @@ -51,10 +58,14 @@ public DeserializationHandlerResponse handle(final ErrorHandlerContext context, final ConsumerRecord record, final Exception exception) { - log.error("Exception caught during Deserialization, " + - "taskId: {}, topic: {}, partition: {}, offset: {}", - context.taskId(), record.topic(), record.partition(), record.offset(), - exception); + log.error( + "Exception caught during Deserialization, taskId: {}, topic: {}, partition: {}, offset: {}", + context.taskId(), + record.topic(), + record.partition(), + record.offset(), + exception + ); return DeserializationHandlerResponse.FAIL; } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.java index 9c2cf91c605c6..f592663a6c07a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/LogAndFailProcessingExceptionHandler.java @@ -32,10 +32,15 @@ public class LogAndFailProcessingExceptionHandler implements ProcessingException @Override public ProcessingHandlerResponse handle(final ErrorHandlerContext context, final Record record, final Exception exception) { - log.error("Exception caught during message processing, " + - "processor node: {}, taskId: {}, source topic: {}, source partition: {}, source offset: {}", - context.processorNodeId(), context.taskId(), context.topic(), context.partition(), context.offset(), - exception); + log.error( + "Exception caught during message processing, processor node: {}, taskId: {}, source topic: {}, source partition: {}, source offset: {}", + context.processorNodeId(), + context.taskId(), + context.topic(), + context.partition(), + context.offset(), + exception + ); return ProcessingHandlerResponse.FAIL; } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/ProcessingExceptionHandler.java 
b/streams/src/main/java/org/apache/kafka/streams/errors/ProcessingExceptionHandler.java index 33b2596be1227..7dc1b90bc2e9f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/ProcessingExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/ProcessingExceptionHandler.java @@ -26,25 +26,30 @@ public interface ProcessingExceptionHandler extends Configurable { /** * Inspect a record and the exception received * - * @param context processing context metadata - * @param record record where the exception occurred - * @param exception the actual exception + * @param context + * Processing context metadata. + * @param record + * Record where the exception occurred. + * @param exception + * The actual exception. + * + * @return Whether to continue or stop processing. */ ProcessingHandlerResponse handle(final ErrorHandlerContext context, final Record record, final Exception exception); enum ProcessingHandlerResponse { - /* continue with processing */ + /** Continue processing. */ CONTINUE(1, "CONTINUE"), - /* fail the processing and stop */ + /** Fail processing. */ FAIL(2, "FAIL"); /** - * the permanent and immutable name of processing exception response + * An english description for the used option. This is for debugging only and may change. */ public final String name; /** - * the permanent and immutable id of processing exception response + * The permanent and immutable id for the used option. This can't change ever. */ public final int id; diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/ProcessorStateException.java b/streams/src/main/java/org/apache/kafka/streams/errors/ProcessorStateException.java index 4463604378613..1e40b79fd8656 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/ProcessorStateException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/ProcessorStateException.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.streams.errors; - /** * Indicates a processor state operation (e.g. put, get) has failed. * diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/ProductionExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/ProductionExceptionHandler.java index 95127887b36d9..ed6b38a5692f7 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/ProductionExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/ProductionExceptionHandler.java @@ -26,10 +26,15 @@ public interface ProductionExceptionHandler extends Configurable { /** * Inspect a record that we attempted to produce, and the exception that resulted - * from attempting to produce it and determine whether or not to continue processing. + * from attempting to produce it and determine to continue or stop processing. + * + * @param record + * The record that failed to produce. + * @param exception + * The exception that occurred during production. + * + * @return Whether to continue or stop processing, or retry the failed operation. * - * @param record The record that failed to produce - * @param exception The exception that occurred during production * @deprecated Since 3.9. Use {@link #handle(ErrorHandlerContext, ProducerRecord, Exception)} instead. */ @Deprecated @@ -40,11 +45,16 @@ default ProductionExceptionHandlerResponse handle(final ProducerRecord record, @@ -56,10 +66,16 @@ default ProductionExceptionHandlerResponse handle(final ErrorHandlerContext cont * Handles serialization exception and determine if the process should continue. 
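A custom implementation of the ProcessingExceptionHandler interface documented above only has to map the context, record, and exception to CONTINUE or FAIL; a hedged sketch (the class name and the tolerated exception type are illustrative, and the handler would still need to be registered via the processing exception handler config):

import java.util.Map;
import org.apache.kafka.streams.errors.ErrorHandlerContext;
import org.apache.kafka.streams.errors.ProcessingExceptionHandler;
import org.apache.kafka.streams.processor.api.Record;

public class SkipBadRecordsHandler implements ProcessingExceptionHandler {

    @Override
    public ProcessingHandlerResponse handle(final ErrorHandlerContext context,
                                            final Record<?, ?> record,
                                            final Exception exception) {
        // Drop records that fail with an IllegalArgumentException, fail on anything else.
        return exception instanceof IllegalArgumentException
            ? ProcessingHandlerResponse.CONTINUE
            : ProcessingHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // ProcessingExceptionHandler extends Configurable; nothing to configure here.
    }
}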
The default implementation is to * fail the process. * - * @param record the record that failed to serialize - * @param exception the exception that occurred during serialization + * @param record + * The record that failed to serialize. + * @param exception + * The exception that occurred during serialization. + * + * @return Whether to continue or stop processing, or retry the failed operation. + * * @deprecated Since 3.9. Use {@link #handleSerializationException(ErrorHandlerContext, ProducerRecord, Exception, SerializationExceptionOrigin)} instead. */ + @SuppressWarnings({"rawtypes", "unused"}) @Deprecated default ProductionExceptionHandlerResponse handleSerializationException(final ProducerRecord record, final Exception exception) { @@ -70,11 +86,18 @@ default ProductionExceptionHandlerResponse handleSerializationException(final Pr * Handles serialization exception and determine if the process should continue. The default implementation is to * fail the process. * - * @param context the error handler context metadata - * @param record the record that failed to serialize - * @param exception the exception that occurred during serialization - * @param origin the origin of the serialization exception + * @param context + * The error handler context metadata. + * @param record + * The record that failed to serialize. + * @param exception + * The exception that occurred during serialization. + * @param origin + * The origin of the serialization exception. + * + * @return Whether to continue or stop processing, or retry the failed operation. */ + @SuppressWarnings("rawtypes") default ProductionExceptionHandlerResponse handleSerializationException(final ErrorHandlerContext context, final ProducerRecord record, final Exception exception, diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreMigratedException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreMigratedException.java index 45329c8101b21..ed74d972a6d3c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreMigratedException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreMigratedException.java @@ -23,6 +23,7 @@ * This could happen because the store moved to some other instance during a rebalance so * rediscovery of the state store is required before retrying. */ +@SuppressWarnings("unused") public class StateStoreMigratedException extends InvalidStateStoreException { private static final long serialVersionUID = 1L; diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreNotAvailableException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreNotAvailableException.java index 7cec17c40d6fa..26660b2ea7eed 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreNotAvailableException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StateStoreNotAvailableException.java @@ -22,6 +22,7 @@ * {@link org.apache.kafka.streams.KafkaStreams.State#NOT_RUNNING NOT_RUNNING} or * {@link org.apache.kafka.streams.KafkaStreams.State#ERROR ERROR} state. 
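Similarly, the ProductionExceptionHandler methods above can be overridden to tolerate specific send failures; a rough sketch of the ErrorHandlerContext-based overload, assuming the serialized record is typed ProducerRecord<byte[], byte[]> as in the deprecated overload (class name and the tolerated exception are illustrative; registration would typically go through StreamsConfig's default production exception handler setting):

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ErrorHandlerContext;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class DropTooLargeRecordsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ErrorHandlerContext context,
                                                     final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // Skip records that exceed the configured size limit, fail otherwise.
        return exception instanceof RecordTooLargeException
            ? ProductionExceptionHandlerResponse.CONTINUE
            : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) { }
}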
*/ +@SuppressWarnings("unused") public class StateStoreNotAvailableException extends InvalidStateStoreException { private static final long serialVersionUID = 1L; diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsException.java index c3c03f53001ff..07cdeb4bc3b4a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsException.java @@ -61,9 +61,9 @@ public StreamsException(final Throwable throwable, final TaskId taskId) { } /** - * @return the {@link TaskId} that this exception originated from, or {@link Optional#empty()} if the exception - * cannot be traced back to a particular task. Note that the {@code TaskId} being empty does not - * guarantee that the exception wasn't directly related to a specific task. + * @return The {@link TaskId} that this exception originated from, or {@link Optional#empty()} if the exception + * cannot be traced back to a particular task. Note that the {@code TaskId} being empty does not + * guarantee that the exception wasn't directly related to a specific task. */ public Optional taskId() { return Optional.ofNullable(taskId); diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsNotStartedException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsNotStartedException.java index 562be0ebde8b4..1179c07f422a0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsNotStartedException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsNotStartedException.java @@ -32,6 +32,7 @@ public StreamsNotStartedException(final String message) { super(message); } + @SuppressWarnings("unused") public StreamsNotStartedException(final String message, final Throwable throwable) { super(message, throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsRebalancingException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsRebalancingException.java index 4b8e14c9b6590..7c6c027d2669a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsRebalancingException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsRebalancingException.java @@ -21,6 +21,7 @@ * cannot be queried by default. You can retry to query after the rebalance finished. As an alternative, you can also query * (potentially stale) state stores during a rebalance via {@link org.apache.kafka.streams.StoreQueryParameters#enableStaleStores()}. 
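Because these InvalidStateStoreException subclasses are transient (rebalancing, not started, store migrated), interactive queries are usually wrapped in a retry, optionally allowing stale standby reads as the javadoc suggests; a small sketch (store name and types are illustrative):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

class StaleStoreQuery {
    // Query a key-value store, tolerating rebalances by permitting (potentially stale) reads.
    static Long countFor(final KafkaStreams streams, final String key) {
        final StoreQueryParameters<ReadOnlyKeyValueStore<String, Long>> params =
            StoreQueryParameters
                .fromNameAndType("counts-store", QueryableStoreTypes.<String, Long>keyValueStore())
                .enableStaleStores();
        try {
            return streams.store(params).get(key);
        } catch (final InvalidStateStoreException e) {
            return null; // store migrated or rebalancing: the caller may retry after a short backoff
        }
    }
}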
*/ +@SuppressWarnings("unused") public class StreamsRebalancingException extends InvalidStateStoreException { private static final long serialVersionUID = 1L; diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsStoppedException.java b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsStoppedException.java index c05708bc921a4..252edbe31439e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsStoppedException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsStoppedException.java @@ -32,6 +32,7 @@ public StreamsStoppedException(final String message) { super(message); } + @SuppressWarnings("unused") public StreamsStoppedException(final String message, final Throwable throwable) { super(message, throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.java b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.java index 5502b35bc21d1..2e76d5a0788bc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/StreamsUncaughtExceptionHandler.java @@ -19,7 +19,11 @@ public interface StreamsUncaughtExceptionHandler { /** * Inspect the exception received in a stream thread and respond with an action. - * @param exception the actual exception + * + * @param exception + * The actual exception. + * + * @return Whether to replace the failed thread, or to shut down the client or the whole application. */ StreamThreadExceptionResponse handle(final Throwable exception); @@ -27,14 +31,21 @@ public interface StreamsUncaughtExceptionHandler { * Enumeration that describes the response from the exception handler. */ enum StreamThreadExceptionResponse { + /** Replace the failed thread with a new one. */ REPLACE_THREAD(0, "REPLACE_THREAD"), + /** Shut down the client. */ SHUTDOWN_CLIENT(1, "SHUTDOWN_KAFKA_STREAMS_CLIENT"), + /** Try to shut down the whole application. */ SHUTDOWN_APPLICATION(2, "SHUTDOWN_KAFKA_STREAMS_APPLICATION"); - /** an english description of the api--this is for debugging and can change */ + /** + * An english description for the used option. This is for debugging only and may change. + */ public final String name; - /** the permanent and immutable id of an API--this can't change ever */ + /** + * The permanent and immutable id for the used option. This can't change ever. + */ public final int id; StreamThreadExceptionResponse(final int id, final String name) { diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/TaskCorruptedException.java b/streams/src/main/java/org/apache/kafka/streams/errors/TaskCorruptedException.java index 0f6c50579d3a9..8fda0fb19b1bb 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/TaskCorruptedException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/TaskCorruptedException.java @@ -24,10 +24,10 @@ /** * Indicates a specific task is corrupted and need to be re-initialized. It can be thrown when: * - *

 * <ul>
 *     <li>Under EOS, if the checkpoint file does not contain offsets for corresponding store's changelogs, meaning previously it was not closed cleanly.</li>
 *     <li>Out-of-range exception thrown during restoration, meaning that the changelog has been modified and we re-bootstrap the store.</li>
 * </ul>
              + * */ public class TaskCorruptedException extends StreamsException { diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/TaskIdFormatException.java b/streams/src/main/java/org/apache/kafka/streams/errors/TaskIdFormatException.java index 63493436816bb..04e60ffb6c403 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/TaskIdFormatException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/TaskIdFormatException.java @@ -35,6 +35,7 @@ public TaskIdFormatException(final String message, final Throwable throwable) { super("Task id cannot be parsed correctly" + (message == null ? "" : " from " + message), throwable); } + @SuppressWarnings("unused") public TaskIdFormatException(final Throwable throwable) { super(throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/TopologyException.java b/streams/src/main/java/org/apache/kafka/streams/errors/TopologyException.java index 1eaef0691b8c5..30ed93f2aa0aa 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/TopologyException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/TopologyException.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.streams.errors; - /** * Indicates a pre run time error occurred while parsing the {@link org.apache.kafka.streams.Topology logical topology} * to construct the {@link org.apache.kafka.streams.processor.internals.ProcessorTopology physical processor topology}. diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/UnknownStateStoreException.java b/streams/src/main/java/org/apache/kafka/streams/errors/UnknownStateStoreException.java index 0ee0658bec4ad..8fffe89be2e3a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/UnknownStateStoreException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/UnknownStateStoreException.java @@ -28,6 +28,7 @@ public UnknownStateStoreException(final String message) { super(message); } + @SuppressWarnings("unused") public UnknownStateStoreException(final String message, final Throwable throwable) { super(message, throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/errors/UnknownTopologyException.java b/streams/src/main/java/org/apache/kafka/streams/errors/UnknownTopologyException.java index d7644841a23b0..accac453a3d2a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/errors/UnknownTopologyException.java +++ b/streams/src/main/java/org/apache/kafka/streams/errors/UnknownTopologyException.java @@ -27,6 +27,7 @@ public UnknownTopologyException(final String message, final String namedTopology super(message + " due to being unable to locate a Topology named " + namedTopology); } + @SuppressWarnings("unused") public UnknownTopologyException(final String message, final Throwable throwable, final String namedTopology) { super(message + " due to being unable to locate a Topology named " + namedTopology, throwable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java b/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java index c2502e31a488e..cce48cd0925ed 100644 --- a/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java @@ -87,16 +87,4 @@ public static void checkSupplier(final Supplier supplier) { " %s#get() must return a new object each time it is called.", supplierClass, supplierClass)); } } - - /** - * @throws IllegalArgumentException if the same instance is 
obtained each time - */ - @SuppressWarnings("deprecation") - public static void checkSupplier(final org.apache.kafka.streams.kstream.ValueTransformerSupplier supplier) { - if (supplier.get() == supplier.get()) { - final String supplierClass = supplier.getClass().getName(); - throw new IllegalArgumentException(String.format("%s generates single reference." + - " %s#get() must return a new object each time it is called.", supplierClass, supplierClass)); - } - } } diff --git a/streams/src/main/java/org/apache/kafka/streams/internals/AutoOffsetResetInternal.java b/streams/src/main/java/org/apache/kafka/streams/internals/AutoOffsetResetInternal.java new file mode 100644 index 0000000000000..0fbd267a9dab2 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/internals/AutoOffsetResetInternal.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.internals; + +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy.StrategyType; +import org.apache.kafka.streams.AutoOffsetReset; + +import java.time.Duration; + +public class AutoOffsetResetInternal extends AutoOffsetReset { + + public AutoOffsetResetInternal(final AutoOffsetReset autoOffsetReset) { + super(autoOffsetReset); + } + + public StrategyType offsetResetStrategy() { + return offsetResetStrategy; + } + + public Duration duration() { + if (duration.isEmpty()) { + throw new IllegalStateException(String.format( + "Duration is only available for reset strategy '%s', but reset strategy is '%s'. 
" + + "Please check the reset strategy before calling duration() via offsetResetStrategy().", + StrategyType.BY_DURATION, + offsetResetStrategy + )); + } + return duration.get(); + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/internals/UpgradeFromValues.java b/streams/src/main/java/org/apache/kafka/streams/internals/UpgradeFromValues.java index 66e079eecacbd..617726cdf6462 100644 --- a/streams/src/main/java/org/apache/kafka/streams/internals/UpgradeFromValues.java +++ b/streams/src/main/java/org/apache/kafka/streams/internals/UpgradeFromValues.java @@ -40,7 +40,8 @@ public enum UpgradeFromValues { UPGRADE_FROM_35("3.5"), UPGRADE_FROM_36("3.6"), UPGRADE_FROM_37("3.7"), - UPGRADE_FROM_38("3.8"); + UPGRADE_FROM_38("3.8"), + UPGRADE_FROM_39("3.9"); private final String value; diff --git a/streams/src/main/java/org/apache/kafka/streams/internals/metrics/ClientMetrics.java b/streams/src/main/java/org/apache/kafka/streams/internals/metrics/ClientMetrics.java index 698e0da7b4d39..22e09042e16c1 100644 --- a/streams/src/main/java/org/apache/kafka/streams/internals/metrics/ClientMetrics.java +++ b/streams/src/main/java/org/apache/kafka/streams/internals/metrics/ClientMetrics.java @@ -40,11 +40,14 @@ private ClientMetrics() {} private static final String APPLICATION_ID = "application-id"; private static final String TOPOLOGY_DESCRIPTION = "topology-description"; private static final String STATE = "state"; + private static final String CLIENT_STATE = "client-state"; private static final String ALIVE_STREAM_THREADS = "alive-stream-threads"; private static final String VERSION_FROM_FILE; private static final String COMMIT_ID_FROM_FILE; private static final String DEFAULT_VALUE = "unknown"; private static final String FAILED_STREAM_THREADS = "failed-stream-threads"; + private static final String RECORDING_LEVEL = "recording-level"; + static { final Properties props = new Properties(); @@ -67,6 +70,7 @@ private ClientMetrics() {} private static final String STATE_DESCRIPTION = "The state of the Kafka Streams client"; private static final String ALIVE_STREAM_THREADS_DESCRIPTION = "The current number of alive stream threads that are running or participating in rebalance"; private static final String FAILED_STREAM_THREADS_DESCRIPTION = "The number of failed stream threads since the start of the Kafka Streams client"; + private static final String RECORDING_LEVEL_DESCRIPTION = "The metrics recording level of the Kafka Streams client"; public static String version() { return VERSION_FROM_FILE; @@ -123,6 +127,26 @@ public static void addStateMetric(final StreamsMetricsImpl streamsMetrics, ); } + public static void addClientStateTelemetryMetric(final StreamsMetricsImpl streamsMetrics, + final Gauge stateProvider) { + streamsMetrics.addClientLevelMutableMetric( + CLIENT_STATE, + STATE_DESCRIPTION, + RecordingLevel.INFO, + stateProvider + ); + } + + public static void addClientRecordingLevelMetric(final StreamsMetricsImpl streamsMetrics, + final int recordingLevel) { + streamsMetrics.addClientLevelImmutableMetric( + RECORDING_LEVEL, + RECORDING_LEVEL_DESCRIPTION, + RecordingLevel.INFO, + recordingLevel + ); + } + public static void addNumAliveStreamThreadMetric(final StreamsMetricsImpl streamsMetrics, final Gauge stateProvider) { streamsMetrics.addClientLevelMutableMetric( diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/Consumed.java b/streams/src/main/java/org/apache/kafka/streams/kstream/Consumed.java index d1713ab20a18e..ade104d6118ef 100644 --- 
a/streams/src/main/java/org/apache/kafka/streams/kstream/Consumed.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/Consumed.java @@ -17,6 +17,7 @@ package org.apache.kafka.streams.kstream; import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.processor.TimestampExtractor; @@ -55,30 +56,48 @@ public class Consumed implements NamedOperation> { protected Serde keySerde; protected Serde valueSerde; protected TimestampExtractor timestampExtractor; - protected Topology.AutoOffsetReset resetPolicy; + @Deprecated + protected Topology.AutoOffsetReset legacyResetPolicy; + protected AutoOffsetReset resetPolicy; protected String processorName; + @SuppressWarnings("deprecation") private Consumed(final Serde keySerde, final Serde valueSerde, final TimestampExtractor timestampExtractor, - final Topology.AutoOffsetReset resetPolicy, + final Topology.AutoOffsetReset legacyResetPolicy, + final AutoOffsetReset resetPolicy, final String processorName) { this.keySerde = keySerde; this.valueSerde = valueSerde; this.timestampExtractor = timestampExtractor; + this.legacyResetPolicy = legacyResetPolicy; this.resetPolicy = resetPolicy; this.processorName = processorName; } protected Consumed(final Consumed consumed) { - this(consumed.keySerde, - consumed.valueSerde, - consumed.timestampExtractor, - consumed.resetPolicy, - consumed.processorName + this( + consumed.keySerde, + consumed.valueSerde, + consumed.timestampExtractor, + consumed.legacyResetPolicy, + consumed.resetPolicy, + consumed.processorName ); } + @Deprecated + private static AutoOffsetReset convertOldToNew(final Topology.AutoOffsetReset resetPolicy) { + if (resetPolicy == null) { + return null; + } + + return resetPolicy == org.apache.kafka.streams.Topology.AutoOffsetReset.EARLIEST + ? AutoOffsetReset.earliest() + : AutoOffsetReset.latest(); + } + /** * Create an instance of {@link Consumed} with the supplied arguments. {@code null} values are acceptable. * @@ -95,12 +114,39 @@ protected Consumed(final Consumed consumed) { * @param value type * * @return a new instance of {@link Consumed} + * + * @deprecated Since 4.0. Use {@link #with(Serde, Serde, TimestampExtractor, AutoOffsetReset)} instead. */ + @Deprecated public static Consumed with(final Serde keySerde, final Serde valueSerde, final TimestampExtractor timestampExtractor, final Topology.AutoOffsetReset resetPolicy) { - return new Consumed<>(keySerde, valueSerde, timestampExtractor, resetPolicy, null); + return new Consumed<>(keySerde, valueSerde, timestampExtractor, resetPolicy, convertOldToNew(resetPolicy), null); + } + + /** + * Create an instance of {@link Consumed} with the supplied arguments. {@code null} values are acceptable. + * + * @param keySerde + * the key serde. If {@code null} the default key serde from config will be used + * @param valueSerde + * the value serde. If {@code null} the default value serde from config will be used + * @param timestampExtractor + * the timestamp extractor to used. If {@code null} the default timestamp extractor from config will be used + * @param resetPolicy + * the offset reset policy to be used. 
If {@code null} the default reset policy from config will be used + * + * @param key type + * @param value type + * + * @return a new instance of {@link Consumed} + */ + public static Consumed with(final Serde keySerde, + final Serde valueSerde, + final TimestampExtractor timestampExtractor, + final AutoOffsetReset resetPolicy) { + return new Consumed<>(keySerde, valueSerde, timestampExtractor, null, resetPolicy, null); } /** @@ -118,7 +164,7 @@ public static Consumed with(final Serde keySerde, */ public static Consumed with(final Serde keySerde, final Serde valueSerde) { - return new Consumed<>(keySerde, valueSerde, null, null, null); + return new Consumed<>(keySerde, valueSerde, null, null, null, null); } /** @@ -133,7 +179,7 @@ public static Consumed with(final Serde keySerde, * @return a new instance of {@link Consumed} */ public static Consumed with(final TimestampExtractor timestampExtractor) { - return new Consumed<>(null, null, timestampExtractor, null, null); + return new Consumed<>(null, null, timestampExtractor, null, null, null); } /** @@ -146,9 +192,27 @@ public static Consumed with(final TimestampExtractor timestampExtra * @param value type * * @return a new instance of {@link Consumed} + * + * @deprecated Since 4.0. Use {@link #with(AutoOffsetReset)} instead. */ + @Deprecated public static Consumed with(final Topology.AutoOffsetReset resetPolicy) { - return new Consumed<>(null, null, null, resetPolicy, null); + return new Consumed<>(null, null, null, resetPolicy, convertOldToNew(resetPolicy), null); + } + + /** + * Create an instance of {@link Consumed} with a {@link org.apache.kafka.streams.Topology.AutoOffsetReset Topology.AutoOffsetReset}. + * + * @param resetPolicy + * the offset reset policy to be used. If {@code null} the default reset policy from config will be used + * + * @param key type + * @param value type + * + * @return a new instance of {@link Consumed} + */ + public static Consumed with(final AutoOffsetReset resetPolicy) { + return new Consumed<>(null, null, null, null, resetPolicy, null); } /** @@ -163,7 +227,7 @@ public static Consumed with(final Topology.AutoOffsetReset resetPol * @return a new instance of {@link Consumed} */ public static Consumed as(final String processorName) { - return new Consumed<>(null, null, null, null, processorName); + return new Consumed<>(null, null, null, null, null, processorName); } /** @@ -175,7 +239,7 @@ public static Consumed as(final String processorName) { * @return a new instance of {@link Consumed} */ public Consumed withKeySerde(final Serde keySerde) { - return new Consumed(keySerde, valueSerde, timestampExtractor, resetPolicy, processorName); + return new Consumed<>(keySerde, valueSerde, timestampExtractor, legacyResetPolicy, resetPolicy, processorName); } /** @@ -187,7 +251,7 @@ public Consumed withKeySerde(final Serde keySerde) { * @return a new instance of {@link Consumed} */ public Consumed withValueSerde(final Serde valueSerde) { - return new Consumed(keySerde, valueSerde, timestampExtractor, resetPolicy, processorName); + return new Consumed<>(keySerde, valueSerde, timestampExtractor, legacyResetPolicy, resetPolicy, processorName); } /** @@ -199,7 +263,7 @@ public Consumed withValueSerde(final Serde valueSerde) { * @return a new instance of {@link Consumed} */ public Consumed withTimestampExtractor(final TimestampExtractor timestampExtractor) { - return new Consumed(keySerde, valueSerde, timestampExtractor, resetPolicy, processorName); + return new Consumed<>(keySerde, valueSerde, timestampExtractor, 
legacyResetPolicy, resetPolicy, processorName); } /** @@ -209,9 +273,31 @@ public Consumed withTimestampExtractor(final TimestampExtractor timestampE * the offset reset policy to be used. If {@code null} the default reset policy from config will be used * * @return a new instance of {@link Consumed} + * + * @deprecated Since 4.0. Use {@link #withOffsetResetPolicy(AutoOffsetReset)} instead. */ + @Deprecated public Consumed withOffsetResetPolicy(final Topology.AutoOffsetReset resetPolicy) { - return new Consumed(keySerde, valueSerde, timestampExtractor, resetPolicy, processorName); + return new Consumed<>( + keySerde, + valueSerde, + timestampExtractor, + resetPolicy, + convertOldToNew(resetPolicy), + processorName + ); + } + + /** + * Configure the instance of {@link Consumed} with a {@link org.apache.kafka.streams.Topology.AutoOffsetReset Topology.AutoOffsetReset}. + * + * @param resetPolicy + * the offset reset policy to be used. If {@code null} the default reset policy from config will be used + * + * @return a new instance of {@link Consumed} + */ + public Consumed withOffsetResetPolicy(final AutoOffsetReset resetPolicy) { + return new Consumed<>(keySerde, valueSerde, timestampExtractor, null, resetPolicy, processorName); } /** @@ -224,7 +310,7 @@ public Consumed withOffsetResetPolicy(final Topology.AutoOffsetReset reset */ @Override public Consumed withName(final String processorName) { - return new Consumed(keySerde, valueSerde, timestampExtractor, resetPolicy, processorName); + return new Consumed<>(keySerde, valueSerde, timestampExtractor, legacyResetPolicy, resetPolicy, processorName); } @Override @@ -239,11 +325,12 @@ public boolean equals(final Object o) { return Objects.equals(keySerde, consumed.keySerde) && Objects.equals(valueSerde, consumed.valueSerde) && Objects.equals(timestampExtractor, consumed.timestampExtractor) && + legacyResetPolicy == consumed.legacyResetPolicy && resetPolicy == consumed.resetPolicy; } @Override public int hashCode() { - return Objects.hash(keySerde, valueSerde, timestampExtractor, resetPolicy); + return Objects.hash(keySerde, valueSerde, timestampExtractor, legacyResetPolicy, resetPolicy); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ForeachProcessor.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ForeachProcessor.java index 1555fbd7d05e3..da25edae6517b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ForeachProcessor.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ForeachProcessor.java @@ -20,7 +20,7 @@ import org.apache.kafka.streams.processor.api.Record; /** - * @deprecated since 4.0 and should not be used any longer. + * @deprecated Since 4.0 and should not be used any longer. 
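The new Consumed overloads above take the org.apache.kafka.streams.AutoOffsetReset class instead of the deprecated Topology.AutoOffsetReset enum; a hedged usage sketch (topic name and serdes are illustrative):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.AutoOffsetReset;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

class ConsumedResetExample {
    static KStream<String, String> build(final StreamsBuilder builder) {
        // Replaces Consumed.with(Topology.AutoOffsetReset.EARLIEST):
        return builder.stream(
            "input-topic",
            Consumed.with(Serdes.String(), Serdes.String())
                    .withOffsetResetPolicy(AutoOffsetReset.earliest()));
    }
}

Per the AutoOffsetResetInternal class above, the new type also carries an optional duration (StrategyType.BY_DURATION), presumably exposed through a by-duration factory on AutoOffsetReset.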
*/ @Deprecated public class ForeachProcessor implements Processor { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/JoinWindows.java b/streams/src/main/java/org/apache/kafka/streams/kstream/JoinWindows.java index f26aee5a4a41d..4690ffdfe7c47 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/JoinWindows.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/JoinWindows.java @@ -154,7 +154,7 @@ public static JoinWindows ofTimeDifferenceWithNoGrace(final Duration timeDiffere * @param timeDifference join window interval * @return a new JoinWindows object with the window definition with and grace period (default to 24 hours minus {@code timeDifference}) * @throws IllegalArgumentException if {@code timeDifference} is negative or can't be represented as {@code long milliseconds} - * @deprecated since 3.0. Use {@link #ofTimeDifferenceWithNoGrace(Duration)}} instead + * @deprecated Since 3.0. Use {@link #ofTimeDifferenceWithNoGrace(Duration)}} instead. */ @Deprecated public static JoinWindows of(final Duration timeDifference) throws IllegalArgumentException { @@ -221,7 +221,7 @@ public long size() { * @return this updated builder * @throws IllegalArgumentException if the {@code afterWindowEnd} is negative or can't be represented as {@code long milliseconds} * @throws IllegalStateException if {@link #grace(Duration)} is called after {@link #ofTimeDifferenceAndGrace(Duration, Duration)} or {@link #ofTimeDifferenceWithNoGrace(Duration)} - * @deprecated since 3.0. Use {@link #ofTimeDifferenceAndGrace(Duration, Duration)} instead + * @deprecated Since 3.0. Use {@link #ofTimeDifferenceAndGrace(Duration, Duration)} instead. */ @Deprecated public JoinWindows grace(final Duration afterWindowEnd) throws IllegalArgumentException { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java b/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java index 109879579a6fb..f1df05fe2e525 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/Joined.java @@ -273,7 +273,7 @@ public Joined withGracePeriod(final Duration gracePeriod) { /** - * @deprecated since 4.0 and should not be used any longer. + * @deprecated Since 4.0 and should not be used any longer. */ @Deprecated public Duration gracePeriod() { @@ -281,7 +281,7 @@ public Duration gracePeriod() { } /** - * @deprecated since 4.0 and should not be used any longer. + * @deprecated Since 4.0 and should not be used any longer. */ @Deprecated public Serde keySerde() { @@ -289,7 +289,7 @@ public Serde keySerde() { } /** - * @deprecated since 4.0 and should not be used any longer. + * @deprecated Since 4.0 and should not be used any longer. */ @Deprecated public Serde valueSerde() { @@ -297,7 +297,7 @@ public Serde valueSerde() { } /** - * @deprecated since 4.0 and should not be used any longer. + * @deprecated Since 4.0 and should not be used any longer. 
*/ @Deprecated public Serde otherValueSerde() { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java index 516ffb80227e1..13d17aa679641 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java @@ -48,8 +48,8 @@ * A {@code KStream} can be transformed record by record, joined with another {@code KStream}, {@link KTable}, * {@link GlobalKTable}, or can be aggregated into a {@link KTable}. * Kafka Streams DSL can be mixed-and-matched with Processor API (PAPI) (c.f. {@link Topology}) via - * {@link #process(ProcessorSupplier, String...) process(...)} and {@link #transformValues(ValueTransformerSupplier, - * String...) transformValues(...)}. + * {@link #process(ProcessorSupplier, String...) process(...)} and {@link #processValues(FixedKeyProcessorSupplier, + * String...) processValues(...)}. * * @param Type of keys * @param Type of values @@ -206,8 +206,7 @@ KStream selectKey(final KeyValueMapper KStream map(final KeyValueMapper> mapper); @@ -245,8 +244,7 @@ KStream selectKey(final KeyValueMapper KStream map(final KeyValueMapper> mapper, final Named named); @@ -256,7 +254,7 @@ KStream map(final KeyValueMapper} can be transformed into an output record {@code }. * This is a stateless record-by-record operation (cf. - * {@link #transformValues(ValueTransformerSupplier, String...)} for stateful value transformation). + * {@link #processValues(FixedKeyProcessorSupplier, String...)} for stateful value processing). *

* The example below counts the number of tokens of the value string. *

              {@code
              @@ -280,8 +278,7 @@  KStream map(final KeyValueMapper KStream mapValues(final ValueMapper mapper);
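The token-count example this javadoc refers to is a one-line stateless mapValues call; a minimal sketch (names are illustrative):

import org.apache.kafka.streams.kstream.KStream;

class MapValuesExample {
    // Stateless value mapping: count whitespace-separated tokens in each value.
    static KStream<String, Integer> tokenCounts(final KStream<String, String> input) {
        return input.mapValues(value -> value.split(" ").length);
    }
}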
               
              @@ -290,7 +287,7 @@  KStream map(final KeyValueMapper} can be transformed into an output record {@code }.
                    * This is a stateless record-by-record operation (cf.
              -     * {@link #transformValues(ValueTransformerSupplier, String...)} for stateful value transformation).
              +     * {@link #processValues(FixedKeyProcessorSupplier, String...)} for stateful value processing).
                    * 

* The example below counts the number of tokens of the value string. *

              {@code
              @@ -315,8 +312,7 @@  KStream map(final KeyValueMapper KStream mapValues(final ValueMapper mapper,
                                                 final Named named);
              @@ -326,7 +322,7 @@  KStream mapValues(final ValueMapper mapper,
                    * The provided {@link ValueMapperWithKey} is applied to each input record value and computes a new value for it.
                    * Thus, an input record {@code } can be transformed into an output record {@code }.
                    * This is a stateless record-by-record operation (cf.
              -     * {@link #transformValues(ValueTransformerWithKeySupplier, String...)} for stateful value transformation).
              +     * {@link #processValues(FixedKeyProcessorSupplier, String...)} for stateful value processing).
                    * 

              * The example below counts the number of tokens of key and value strings. *

              {@code
              @@ -351,8 +347,7 @@  KStream mapValues(final ValueMapper mapper,
                    * @see #flatMapValues(ValueMapper)
                    * @see #flatMapValues(ValueMapperWithKey)
                    * @see #process(ProcessorSupplier, String...)
              -     * @see #transformValues(ValueTransformerSupplier, String...)
              -     * @see #transformValues(ValueTransformerWithKeySupplier, String...)
              +     * @see #processValues(FixedKeyProcessorSupplier, String...)
                    */
                    KStream mapValues(final ValueMapperWithKey mapper);
               
              @@ -361,7 +356,7 @@  KStream mapValues(final ValueMapper mapper,
                    * The provided {@link ValueMapperWithKey} is applied to each input record value and computes a new value for it.
                    * Thus, an input record {@code } can be transformed into an output record {@code }.
                    * This is a stateless record-by-record operation (cf.
              -     * {@link #transformValues(ValueTransformerWithKeySupplier, String...)} for stateful value transformation).
              +     * {@link #processValues(FixedKeyProcessorSupplier, String...)} for stateful value processing).
                    * 

              * The example below counts the number of tokens of key and value strings. *

              {@code
              @@ -387,8 +382,7 @@  KStream mapValues(final ValueMapper mapper,
                    * @see #flatMapValues(ValueMapper)
                    * @see #flatMapValues(ValueMapperWithKey)
                    * @see #process(ProcessorSupplier, String...)
              -     * @see #transformValues(ValueTransformerSupplier, String...)
              -     * @see #transformValues(ValueTransformerWithKeySupplier, String...)
              +     * @see #processValues(FixedKeyProcessorSupplier, String...)
                    */
                    KStream mapValues(final ValueMapperWithKey mapper,
                                                 final Named named);
              @@ -436,10 +430,8 @@  KStream mapValues(final ValueMapperWithKey KStream flatMap(final KeyValueMapper>> mapper);
               
              @@ -487,10 +479,8 @@  KStream mapValues(final ValueMapperWithKey KStream flatMap(final KeyValueMapper>> mapper,
                                                    final Named named);
              @@ -502,8 +492,8 @@  KStream flatMap(final KeyValueMapper} can be transformed into output records {@code , , ...}.
              -     * This is a stateless record-by-record operation (cf. {@link #transformValues(ValueTransformerSupplier, String...)}
              -     * for stateful value transformation).
              +     * This is a stateless record-by-record operation (cf. {@link #processValues(FixedKeyProcessorSupplier, String...)}
              +     * for stateful value processing).
                    * 

              * The example below splits input records {@code } containing sentences as values into their words. *

              {@code
              @@ -530,10 +520,8 @@  KStream flatMap(final KeyValueMapper KStream flatMapValues(final ValueMapper> mapper);
               
              @@ -544,8 +532,8 @@  KStream flatMap(final KeyValueMapper} can be transformed into output records {@code , , ...}.
              -     * This is a stateless record-by-record operation (cf. {@link #transformValues(ValueTransformerSupplier, String...)}
              -     * for stateful value transformation).
              +     * This is a stateless record-by-record operation (cf. {@link #processValues(FixedKeyProcessorSupplier, String...)}
              +     * for stateful value processing).
                    * 

              * The example below splits input records {@code } containing sentences as values into their words. *

              {@code
              @@ -573,10 +561,8 @@  KStream flatMap(final KeyValueMapper KStream flatMapValues(final ValueMapper> mapper,
                                                     final Named named);
              @@ -587,8 +573,8 @@  KStream flatMapValues(final ValueMapper} can be transformed into output records {@code , , ...}.
              -     * This is a stateless record-by-record operation (cf. {@link #transformValues(ValueTransformerWithKeySupplier, String...)}
              -     * for stateful value transformation).
              +     * This is a stateless record-by-record operation (cf. {@link #processValues(FixedKeyProcessorSupplier, String...)}
              +     * for stateful value processing).
                    * 

              * The example below splits input records {@code }, with key=1, containing sentences as values * into their words. @@ -621,10 +607,8 @@ KStream flatMapValues(final ValueMapper KStream flatMapValues(final ValueMapperWithKey> mapper); @@ -635,8 +619,8 @@ KStream flatMapValues(final ValueMapper} can be transformed into output records {@code , , ...}. - * This is a stateless record-by-record operation (cf. {@link #transformValues(ValueTransformerWithKeySupplier, String...)} - * for stateful value transformation). + * This is a stateless record-by-record operation (cf. {@link #processValues(FixedKeyProcessorSupplier, String...)} + * for stateful value processing). *

              * The example below splits input records {@code }, with key=1, containing sentences as values * into their words. @@ -670,10 +654,8 @@ KStream flatMapValues(final ValueMapper KStream flatMapValues(final ValueMapperWithKey> mapper, final Named named); @@ -741,7 +723,8 @@ KStream flatMapValues(final ValueMapperWithKey * Note: Stream branching is a stateless record-by-record operation. * Please check {@link BranchedKStream} for detailed description and usage example @@ -752,7 +735,8 @@ KStream flatMapValues(final ValueMapperWithKey * Note: Stream branching is a stateless record-by-record operation. * Please check {@link BranchedKStream} for detailed description and usage example @@ -2983,1141 +2967,6 @@ KStream leftJoin(final GlobalKTable globalTable, final ValueJoinerWithKey valueJoiner, final Named named); - /** - * Transform the value of each input record into a new value (with possibly a new type) of the output record. - * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input - * record value and computes a new value for it. - * Thus, an input record {@code } can be transformed into an output record {@code }. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress - * can be observed and additional periodic actions can be performed. - *

              - * In order for the transformer to use state stores, the stores must be added to the topology and connected to the - * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only - * access to global state stores is available by default). - *

              - * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)}, - * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer. - *

              {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.transformValues(new ValueTransformerSupplier() {
              -     *     public ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
              - * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()}, - * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer. - *
              {@code
              -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
              -     *     // supply transformer
              -     *     ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.transformValues(new MyValueTransformerSupplier());
              -     * }
              - *

              - * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}. - * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()}, - * a schedule must be registered. - * The {@link ValueTransformer} must return the new value in {@link ValueTransformer#transform(Object) transform()}. - * No additional {@link KeyValue} pairs can be emitted via - * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}. - * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to - * emit a {@link KeyValue} pair. - *

              {@code
              -     * class MyValueTransformer implements ValueTransformer {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     NewValueType transform(V value) {
              -     *         // can access this.state
              -     *         return new NewValueType(); // or null
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
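For reference, the store-backed transformer removed in the block above maps onto the processValues() replacement along these lines; a hedged sketch that assumes the "myValueTransformState" store is still registered via Topology#addStateStore() or ConnectedStoreProvider#stores(), and whose value update is purely illustrative:

import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;
import org.apache.kafka.streams.state.KeyValueStore;

class ProcessValuesMigration {
    static KStream<String, String> apply(final KStream<String, String> input) {
        return input.processValues(
            () -> new FixedKeyProcessor<String, String, String>() {
                private FixedKeyProcessorContext<String, String> context;
                private KeyValueStore<String, String> state;

                @Override
                public void init(final FixedKeyProcessorContext<String, String> context) {
                    this.context = context;
                    this.state = context.getStateStore("myValueTransformState");
                    // punctuation could be scheduled here, as in the removed ValueTransformer example
                }

                @Override
                public void process(final FixedKeyRecord<String, String> record) {
                    state.put(record.key(), record.value());
                    // keys are fixed; only the value may change
                    context.forward(record.withValue("new-" + record.value()));
                }
            },
            "myValueTransformState");
    }
}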
              - * Even if any upstream operation was key-changing, no auto-repartition is triggered. - * If repartitioning is required, a call to {@link #repartition()} should be performed before - * {@code transformValues()}. - *

              - * Setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object - * and returning the same object reference in {@link ValueTransformer} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains records with unmodified key and new values (possibly of different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead. - */ - @Deprecated - KStream transformValues(final ValueTransformerSupplier valueTransformerSupplier, - final String... stateStoreNames); - /** - * Transform the value of each input record into a new value (with possibly a new type) of the output record. - * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input - * record value and computes a new value for it. - * Thus, an input record {@code } can be transformed into an output record {@code }. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress - * can be observed and additional periodic actions can be performed. - *

              - * In order for the transformer to use state stores, the stores must be added to the topology and connected to the - * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only - * access to global state stores is available by default). - *

              - * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)}, - * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer. - *

              {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.transformValues(new ValueTransformerSupplier() {
              -     *     public ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
              -     *     // supply transformer
              -     *     ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.transformValues(new MyValueTransformerSupplier());
              -     * }
-     *
-     * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * The {@link ValueTransformer} must return the new value in {@link ValueTransformer#transform(Object) transform()}.
-     * No additional {@link KeyValue} pairs can be emitted via
-     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
-     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to
-     * emit a {@link KeyValue} pair.
-     *
-     * {@code
              -     * class MyValueTransformer implements ValueTransformer {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     NewValueType transform(V value) {
              -     *         // can access this.state
              -     *         return new NewValueType(); // or null
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
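For reference, a minimal sketch of the same stateful value transformation written against the replacement {@code processValues()} API; the stream, store name, processor name, and String types below are illustrative assumptions, not taken from this interface.

import java.time.Duration;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Named;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;
import org.apache.kafka.streams.state.KeyValueStore;

class MyValueProcessor implements FixedKeyProcessor<String, String, String> {
    private FixedKeyProcessorContext<String, String> context;
    private KeyValueStore<String, String> state;

    @Override
    public void init(final FixedKeyProcessorContext<String, String> context) {
        this.context = context;
        this.state = context.getStateStore("myValueTransformState");
        // punctuate each second; can access this.state from the lambda
        context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { });
    }

    @Override
    public void process(final FixedKeyRecord<String, String> record) {
        state.put(record.key(), record.value());
        // instead of returning a new value, forward the record with its value replaced
        context.forward(record.withValue(record.value().toUpperCase()));
    }
}

// usage; the supplier returns a new processor instance per call, as required by the supplier pattern
FixedKeyProcessorSupplier<String, String, String> supplier = MyValueProcessor::new;
KStream<String, String> outputStream =
        inputStream.processValues(supplier, Named.as("my-value-processor"), "myValueTransformState");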
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code transformValues()}.
-     *
-     * Setting a new value preserves data co-location with respect to the key.
-     * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join)
-     * is applied to the result {@code KStream}.
-     *
-     * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer}
-     *                                 The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object
-     *                                 and returning the same object reference in {@link ValueTransformer} is a
-     *                                 violation of the supplier pattern and leads to runtime exceptions.
-     * @param named                    a {@link Named} config used to name the processor in the topology
-     * @param stateStoreNames          the names of the state stores used by the processor; not required if the supplier
-     *                                 implements {@link ConnectedStoreProvider#stores()}
-     * @param the value type of the result stream
-     * @return a {@code KStream} that contains records with unmodified key and new values (possibly of different type)
-     * @see #mapValues(ValueMapper)
-     * @see #mapValues(ValueMapperWithKey)
-     * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead.
-     */
-    @Deprecated
-    KStream transformValues(final ValueTransformerSupplier valueTransformerSupplier,
-                            final Named named,
-                            final String... stateStoreNames);
-
-    /**
-     * Transform the value of each input record into a new value (with possibly a new type) of the output record.
-     * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to
-     * each input record value and computes a new value for it.
-     * Thus, an input record {@code } can be transformed into an output record {@code }.
-     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapperWithKey) mapValues()}).
-     * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapperWithKey) mapValues()}
-     * but allows access to the {@code ProcessorContext} and record metadata.
-     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
-     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress
-     * can be observed and additional periodic actions can be performed.
-     *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.transformValues(new ValueTransformerWithKeySupplier() {
              -     *     public ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerWithKeySupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
              -     *     // supply transformer
              -     *     ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.transformValues(new MyValueTransformerWithKeySupplier());
              -     * }
-     *
-     * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * The {@link ValueTransformerWithKey} must return the new value in
-     * {@link ValueTransformerWithKey#transform(Object, Object) transform()}.
-     * No additional {@link KeyValue} pairs can be emitted via
-     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
-     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries
-     * to emit a {@link KeyValue} pair.
-     * {@code
              -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     NewValueType transform(K readOnlyKey, V value) {
              -     *         // can access this.state and use read-only key
              -     *         return new NewValueType(readOnlyKey); // or null
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
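With {@code processValues()} the read-only key is available directly on the record, so no separate "with key" interface is needed; a sketch of the {@code process()} method only, building on the processor class sketched earlier (names and types are assumptions).

@Override
public void process(final FixedKeyRecord<String, String> record) {
    // the key is read-only; only the value can be replaced before forwarding
    context.forward(record.withValue(record.key() + ":" + record.value()));
}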
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code transformValues()}.
-     *
-     * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning.
-     * So, setting a new value preserves data co-location with respect to the key.
-     * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join)
-     * is applied to the result {@code KStream}.
-     *
-     * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey}
-     *                                 The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object
-     *                                 and returning the same object reference in {@link ValueTransformerWithKey} is a
-     *                                 violation of the supplier pattern and leads to runtime exceptions.
-     * @param stateStoreNames          the names of the state stores used by the processor; not required if the supplier
-     *                                 implements {@link ConnectedStoreProvider#stores()}
-     * @param the value type of the result stream
-     * @return a {@code KStream} that contains records with unmodified key and new values (possibly of different type)
-     * @see #mapValues(ValueMapper)
-     * @see #mapValues(ValueMapperWithKey)
-     * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead.
-     */
-    @Deprecated
-    KStream transformValues(final ValueTransformerWithKeySupplier valueTransformerSupplier,
-                            final String... stateStoreNames);
-
-    /**
-     * Transform the value of each input record into a new value (with possibly a new type) of the output record.
-     * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to
-     * each input record value and computes a new value for it.
-     * Thus, an input record {@code } can be transformed into an output record {@code }.
-     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapperWithKey) mapValues()}).
-     * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapperWithKey) mapValues()}
-     * but allows access to the {@code ProcessorContext} and record metadata.
-     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
-     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress
-     * can be observed and additional periodic actions can be performed.
-     *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.transformValues(new ValueTransformerWithKeySupplier() {
              -     *     public ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerWithKeySupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
              -     *     // supply transformer
              -     *     ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.transformValues(new MyValueTransformerWithKeySupplier());
              -     * }
-     *
-     * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * The {@link ValueTransformerWithKey} must return the new value in
-     * {@link ValueTransformerWithKey#transform(Object, Object) transform()}.
-     * No additional {@link KeyValue} pairs can be emitted via
-     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
-     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries
-     * to emit a {@link KeyValue} pair.
-     * {@code
              -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     NewValueType transform(K readOnlyKey, V value) {
              -     *         // can access this.state and use read-only key
              -     *         return new NewValueType(readOnlyKey); // or null
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
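The second (ConnectedStoreProvider) strategy carries over to the replacement API unchanged, since {@code FixedKeyProcessorSupplier} also extends {@code ConnectedStoreProvider}; a sketch, with the store name and classes assumed from the earlier sketches.

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

class MyValueProcessorSupplier implements FixedKeyProcessorSupplier<String, String, String> {
    @Override
    public FixedKeyProcessor<String, String, String> get() {
        return new MyValueProcessor(); // new instance per call
    }

    @Override
    public Set<StoreBuilder<?>> stores() {
        // added to the topology and connected automatically; accessed later via context.getStateStore()
        return Collections.singleton(
                Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
                        Serdes.String(), Serdes.String()));
    }
}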
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code transformValues()}.
-     *

              - * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. - * So, setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object - * and returning the same object reference in {@link ValueTransformerWithKey} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param named a {@link Named} config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains records with unmodified key and new values (possibly of different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead. - */ - @Deprecated - KStream transformValues(final ValueTransformerWithKeySupplier valueTransformerSupplier, - final Named named, - final String... stateStoreNames); - /** - * Transform the value of each input record into zero or more new values (with possibly a new - * type) and emit for each new value a record with the same key of the input record and the value. - * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input - * record value and computes zero or more new values. - * Thus, an input record {@code } can be transformed into output records {@code , , ...}. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) Punctuator#punctuate()} - * the processing progress can be observed and additional periodic actions can be performed. - *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerSupplier() {
              -     *     public ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
              -     *     // supply transformer
-     *     ValueTransformer get() {
-     *         return new MyValueTransformer();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
-     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerSupplier());
              -     * }
-     *
-     * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * The {@link ValueTransformer} must return an {@link java.lang.Iterable} type (e.g., any
-     * {@link java.util.Collection} type) in {@link ValueTransformer#transform(Object)
-     * transform()}.
-     * If the return value of {@link ValueTransformer#transform(Object) ValueTransformer#transform()} is an empty
-     * {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted.
-     * No additional {@link KeyValue} pairs can be emitted via
-     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
-     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to
-     * emit a {@link KeyValue} pair.
-     * {@code
              -     * class MyValueTransformer implements ValueTransformer {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     Iterable transform(V value) {
              -     *         // can access this.state
              -     *         List result = new ArrayList<>();
              -     *         for (int i = 0; i < 3; i++) {
              -     *             result.add(new NewValueType(value));
              -     *         }
              -     *         return result; // values
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
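There is no dedicated flat variant of {@code processValues()}; forwarding zero or more times per input record gives the same effect as returning an {@code Iterable}. A sketch of the {@code process()} method only, with types assumed as in the earlier sketches.

@Override
public void process(final FixedKeyRecord<String, String> record) {
    // calling forward() zero or more times replaces returning an Iterable of new values
    for (int i = 0; i < 3; i++) {
        context.forward(record.withValue(record.value() + "-" + i));
    }
}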
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code flatTransformValues()}.
-     *

              - * Setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object - * and returning the same object reference in {@link ValueTransformer} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of - * different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead. - */ - @Deprecated - KStream flatTransformValues(final ValueTransformerSupplier> valueTransformerSupplier, - final String... stateStoreNames); - - /** - * Transform the value of each input record into zero or more new values (with possibly a new - * type) and emit for each new value a record with the same key of the input record and the value. - * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input - * record value and computes zero or more new values. - * Thus, an input record {@code } can be transformed into output records {@code , , ...}. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) Punctuator#punctuate()} - * the processing progress can be observed and additional periodic actions can be performed. - *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerSupplier() {
              -     *     public ValueTransformer get() {
              -     *         return new MyValueTransformer();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
              -     *     // supply transformer
-     *     ValueTransformer get() {
-     *         return new MyValueTransformer();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
-     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerSupplier());
              -     * }
-     *
-     * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * The {@link ValueTransformer} must return an {@link java.lang.Iterable} type (e.g., any
-     * {@link java.util.Collection} type) in {@link ValueTransformer#transform(Object)
-     * transform()}.
-     * If the return value of {@link ValueTransformer#transform(Object) ValueTransformer#transform()} is an empty
-     * {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted.
-     * No additional {@link KeyValue} pairs can be emitted via
-     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
-     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to
-     * emit a {@link KeyValue} pair.
-     * {@code
              -     * class MyValueTransformer implements ValueTransformer {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     Iterable transform(V value) {
              -     *         // can access this.state
              -     *         List result = new ArrayList<>();
              -     *         for (int i = 0; i < 3; i++) {
              -     *             result.add(new NewValueType(value));
              -     *         }
              -     *         return result; // values
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
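A usage sketch of the corresponding {@code processValues()} overload that takes a {@code Named} config; the supplier class, processor name, and store name are assumed from the earlier sketches.

KStream<String, String> outputStream =
        inputStream.processValues(new MyValueProcessorSupplier(), Named.as("flat-values"), "myValueTransformState");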
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code flatTransformValues()}.
-     *

              - * Setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object - * and returning the same object reference in {@link ValueTransformer} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param named a {@link Named} config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of - * different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead. - */ - @Deprecated - KStream flatTransformValues(final ValueTransformerSupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames); - - /** - * Transform the value of each input record into zero or more new values (with possibly a new - * type) and emit for each new value a record with the same key of the input record and the value. - * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to - * each input record value and computes zero or more new values. - * Thus, an input record {@code } can be transformed into output records {@code , , ...}. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #flatMapValues(ValueMapperWithKey) flatMapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress can - * be observed and additional periodic actions can be performed. - *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerWithKeySupplier() {
              -     *     public ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
              -     *     // supply transformer
              -     *     ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
-     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerWithKeySupplier());
              -     * }
              - *

              - * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}. - * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()}, - * a schedule must be registered. - * The {@link ValueTransformerWithKey} must return an {@link java.lang.Iterable} type (e.g., any - * {@link java.util.Collection} type) in {@link ValueTransformerWithKey#transform(Object, Object) - * transform()}. - * If the return value of {@link ValueTransformerWithKey#transform(Object, Object) ValueTransformerWithKey#transform()} - * is an empty {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted. - * No additional {@link KeyValue} pairs can be emitted via - * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}. - * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries - * to emit a {@link KeyValue} pair. - *

              {@code
              -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     Iterable transform(K readOnlyKey, V value) {
              -     *         // can access this.state and use read-only key
              -     *         List result = new ArrayList<>();
              -     *         for (int i = 0; i < 3; i++) {
              -     *             result.add(new NewValueType(readOnlyKey));
              -     *         }
              -     *         return result; // values
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
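The key-aware flat case combines the two ideas above: the read-only key is taken from the record, and {@code forward()} is called once per output value. A sketch of the {@code process()} method only, with names assumed from the earlier sketches.

@Override
public void process(final FixedKeyRecord<String, String> record) {
    for (int i = 0; i < 3; i++) {
        // the read-only key drives the fan-out; only the value is replaced
        context.forward(record.withValue(record.key() + "-" + i));
    }
}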
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code flatTransformValues()}.
-     *

              - * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. - * So, setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object - * and returning the same object reference in {@link ValueTransformerWithKey} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of - * different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead. - */ - @Deprecated - KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final String... stateStoreNames); - - /** - * Transform the value of each input record into zero or more new values (with possibly a new - * type) and emit for each new value a record with the same key of the input record and the value. - * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to - * each input record value and computes zero or more new values. - * Thus, an input record {@code } can be transformed into output records {@code , , ...}. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #flatMapValues(ValueMapperWithKey) flatMapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress can - * be observed and additional periodic actions can be performed. - *

-     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
-     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerWithKeySupplier() {
              -     *     public ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     * }, "myValueTransformState");
              -     * }
-     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
-     * {@code
              -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
              -     *     // supply transformer
              -     *     ValueTransformerWithKey get() {
              -     *         return new MyValueTransformerWithKey();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated transformer
              -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
-     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerWithKeySupplier());
              -     * }
              - *

              - * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}. - * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()}, - * a schedule must be registered. - * The {@link ValueTransformerWithKey} must return an {@link java.lang.Iterable} type (e.g., any - * {@link java.util.Collection} type) in {@link ValueTransformerWithKey#transform(Object, Object) - * transform()}. - * If the return value of {@link ValueTransformerWithKey#transform(Object, Object) ValueTransformerWithKey#transform()} - * is an empty {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted. - * No additional {@link KeyValue} pairs can be emitted via - * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}. - * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries - * to emit a {@link KeyValue} pair. - *

              {@code
              -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myValueTransformState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     Iterable transform(K readOnlyKey, V value) {
              -     *         // can access this.state and use read-only key
              -     *         List result = new ArrayList<>();
              -     *         for (int i = 0; i < 3; i++) {
              -     *             result.add(new NewValueType(readOnlyKey));
              -     *         }
              -     *         return result; // values
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
              -     * }
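Periodic actions register the same way in the replacement API, now typically with a lambda Punctuator. A sketch of the {@code init()} method only, building on the processor class sketched earlier; the store name is assumed.

@Override
public void init(final FixedKeyProcessorContext<String, String> context) {
    this.context = context;
    this.state = context.getStateStore("myValueTransformState");
    // punctuate each second on wall-clock time; can access this.state from the lambda
    context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> state.flush());
}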
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before
-     * {@code flatTransformValues()}.
-     *

              - * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. - * So, setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object - * and returning the same object reference in {@link ValueTransformerWithKey} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param named a {@link Named} config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of - * different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead. - */ - @Deprecated - KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames); - - /** - * Process all records in this stream, one record at a time, by applying a - * {@link org.apache.kafka.streams.processor.Processor} (provided by the given - * {@link org.apache.kafka.streams.processor.ProcessorSupplier}). - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #foreach(ForeachAction)}). - * If you choose not to attach one, this operation is similar to the stateless {@link #foreach(ForeachAction)} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress - * can be observed and additional periodic actions can be performed. - * Note that this is a terminal operation that returns void. - *

-     * In order for the processor to use state stores, the stores must be added to the topology and connected to the
-     * processor using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     *
-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the processor.
-     * {@code
              -     * // create store
              -     * StoreBuilder> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myProcessorState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
-     * inputStream.process(new ProcessorSupplier() {
              -     *     public Processor get() {
              -     *         return new MyProcessor();
              -     *     }
              -     * }, "myProcessorState");
              -     * }
-     * The second strategy is for the given {@link org.apache.kafka.streams.processor.ProcessorSupplier}
-     * to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the processor.
-     * {@code
              -     * class MyProcessorSupplier implements ProcessorSupplier {
              -     *     // supply processor
              -     *     Processor get() {
              -     *         return new MyProcessor();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated processor
              -     *     // the store name from the builder ("myProcessorState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myProcessorState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.process(new MyProcessorSupplier());
              -     * }
-     *
-     * With either strategy, within the {@link org.apache.kafka.streams.processor.Processor},
-     * the state is obtained via the {@link org.apache.kafka.streams.processor.ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * {@code
              -     * class MyProcessor implements Processor {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myProcessorState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     void process(K key, V value) {
              -     *         // can access this.state
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
-     * }</pre>
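For comparison, a compilable sketch of an equivalent processor against the current api.Processor, registering a wall-clock punctuation the same way the deleted example does; the store name and the punctuator body are illustrative assumptions.

import java.time.Duration;

import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;

public class MyProcessor implements Processor<String, String, Void, Void> {
    private KeyValueStore<String, String> state;

    @Override
    public void init(final ProcessorContext<Void, Void> context) {
        state = context.getStateStore("myProcessorState");
        // punctuate each second; the punctuator can access this.state
        context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME,
                timestamp -> { /* periodic work against this.state */ });
    }

    @Override
    public void process(final Record<String, String> record) {
        // per-record work; can access this.state
        state.put(record.key(), record.value());
    }

    @Override
    public void close() {
        // nothing to release in this sketch
    }
}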
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before {@code process()}.
-     *
-     * @param processorSupplier an instance of {@link org.apache.kafka.streams.processor.ProcessorSupplier}
-     *                          that generates a newly constructed {@link org.apache.kafka.streams.processor.Processor}
-     *                          The supplier should always generate a new instance. Creating a single
-     *                          {@link org.apache.kafka.streams.processor.Processor} object
-     *                          and returning the same object reference in
-     *                          {@link org.apache.kafka.streams.processor.ProcessorSupplier#get()} is a
-     *                          violation of the supplier pattern and leads to runtime exceptions.
-     * @param stateStoreNames   the names of the state stores used by the processor; not required if the supplier
-     *                          implements {@link ConnectedStoreProvider#stores()}
-     * @see #foreach(ForeachAction)
-     * @deprecated Since 3.0. Use {@link KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, java.lang.String...)} instead.
-     */
-    @Deprecated
-    void process(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier,
-                 final String... stateStoreNames);
-
-    /**
-     * Process all records in this stream, one record at a time, by applying a
-     * {@link org.apache.kafka.streams.processor.Processor} (provided by the given
-     * {@link org.apache.kafka.streams.processor.ProcessorSupplier}).
-     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #foreach(ForeachAction)}).
-     * If you choose not to attach one, this operation is similar to the stateless {@link #foreach(ForeachAction)}
-     * but allows access to the {@code ProcessorContext} and record metadata.
-     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
-     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress
-     * can be observed and additional periodic actions can be performed.
-     * Note that this is a terminal operation that returns void.
-     * <p>

-     * In order for the processor to use state stores, the stores must be added to the topology and connected to the
-     * processor using at least one of two strategies (though it's not required to connect global state stores; read-only
-     * access to global state stores is available by default).
-     * <p>

-     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
-     * and specify the store names via {@code stateStoreNames} so they will be connected to the processor.
-     * <pre>{@code
              -     * // create store
-     * StoreBuilder<KeyValueStore<String, String>> keyValueStoreBuilder =
              -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myProcessorState"),
              -     *                 Serdes.String(),
              -     *                 Serdes.String());
              -     * // add store
              -     * builder.addStateStore(keyValueStoreBuilder);
              -     *
              -     * KStream outputStream = inputStream.processor(new ProcessorSupplier() {
              -     *     public Processor get() {
              -     *         return new MyProcessor();
              -     *     }
              -     * }, "myProcessorState");
-     * }</pre>
-     * The second strategy is for the given {@link org.apache.kafka.streams.processor.ProcessorSupplier}
-     * to implement {@link ConnectedStoreProvider#stores()},
-     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the processor.
-     * <pre>{@code
              -     * class MyProcessorSupplier implements ProcessorSupplier {
              -     *     // supply processor
              -     *     Processor get() {
              -     *         return new MyProcessor();
              -     *     }
              -     *
              -     *     // provide store(s) that will be added and connected to the associated processor
              -     *     // the store name from the builder ("myProcessorState") is used to access the store later via the ProcessorContext
              -     *     Set stores() {
              -     *         StoreBuilder> keyValueStoreBuilder =
              -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myProcessorState"),
              -     *                   Serdes.String(),
              -     *                   Serdes.String());
              -     *         return Collections.singleton(keyValueStoreBuilder);
              -     *     }
              -     * }
              -     *
              -     * ...
              -     *
              -     * KStream outputStream = inputStream.process(new MyProcessorSupplier());
-     * }</pre>
              - *

-     * With either strategy, within the {@link org.apache.kafka.streams.processor.Processor},
-     * the state is obtained via the {@link org.apache.kafka.streams.processor.ProcessorContext}.
-     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
-     * a schedule must be registered.
-     * <pre>{@code
              -     * class MyProcessor implements Processor {
              -     *     private StateStore state;
              -     *
              -     *     void init(ProcessorContext context) {
              -     *         this.state = context.getStateStore("myProcessorState");
              -     *         // punctuate each second, can access this.state
              -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
              -     *     }
              -     *
              -     *     void process(K key, V value) {
              -     *         // can access this.state
              -     *     }
              -     *
              -     *     void close() {
              -     *         // can access this.state
              -     *     }
              -     * }
-     * }</pre>
-     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
-     * If repartitioning is required, a call to {@link #repartition()} should be performed before {@code process()}.
-     *
-     * @param processorSupplier an instance of {@link org.apache.kafka.streams.processor.ProcessorSupplier}
-     *                          that generates a newly constructed {@link org.apache.kafka.streams.processor.Processor}
-     *                          The supplier should always generate a new instance. Creating a single
-     *                          {@link org.apache.kafka.streams.processor.Processor} object
-     *                          and returning the same object reference in
-     *                          {@link org.apache.kafka.streams.processor.ProcessorSupplier#get()} is a
-     *                          violation of the supplier pattern and leads to runtime exceptions.
-     * @param named             a {@link Named} config used to name the processor in the topology
-     * @param stateStoreNames   the names of the state store used by the processor
-     * @see #foreach(ForeachAction)
-     * @deprecated Since 3.0. Use {@link KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, org.apache.kafka.streams.kstream.Named, java.lang.String...)} instead.
-     */
-    @Deprecated
-    void process(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier,
-                 final Named named,
-                 final String... stateStoreNames);
-
     /**
      * Process all records in this stream, one record at a time, by applying a {@link Processor} (provided by the given
      * {@link ProcessorSupplier}).
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
index ed599d0e9022e..1c8fb3fea3983 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KTable.java
@@ -31,6 +31,7 @@
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
 
+import java.util.function.BiFunction;
 import java.util.function.Function;
 
 /**
@@ -2111,6 +2112,24 @@ KTable join(final KTable other,
                                  final Function foreignKeyExtractor,
                                  final ValueJoiner joiner);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            result is null, the update is ignored as invalid.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable join(final KTable other,
+                final BiFunction foreignKeyExtractor,
+                final ValueJoiner joiner);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join,
      * using the {@link TableJoined} instance for optional configurations including
@@ -2134,6 +2153,28 @@ KTable join(final KTable other,
                                  final ValueJoiner joiner,
                                  final TableJoined tableJoined);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join,
+     * using the {@link TableJoined} instance for optional configurations including
+     * {@link StreamPartitioner partitioners} when the tables being joined use non-default partitioning,
+     * and also the base name for components of the join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            result is null, the update is ignored as invalid.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param tableJoined         a {@link TableJoined} used to configure partitioners and names of internal topics and stores
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable join(final KTable other,
+                final BiFunction foreignKeyExtractor,
+                final ValueJoiner joiner,
+                final TableJoined tableJoined);
 
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join.
      * <p>

@@ -2155,6 +2196,27 @@ KTable join(final KTable other,
                                  final ValueJoiner joiner,
                                  final Materialized> materialized);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            result is null, the update is ignored as invalid.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param materialized        a {@link Materialized} that describes how the {@link StateStore} for the resulting {@code KTable}
+     *                            should be materialized. Cannot be {@code null}
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable join(final KTable other,
+                final BiFunction foreignKeyExtractor,
+                final ValueJoiner joiner,
+                final Materialized> materialized);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join,
      * using the {@link TableJoined} instance for optional configurations including
@@ -2181,6 +2243,32 @@ KTable join(final KTable other,
                                  final TableJoined tableJoined,
                                  final Materialized> materialized);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed inner join,
+     * using the {@link TableJoined} instance for optional configurations including
+     * {@link StreamPartitioner partitioners} when the tables being joined use non-default partitioning,
+     * and also the base name for components of the join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            result is null, the update is ignored as invalid.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param tableJoined         a {@link TableJoined} used to configure partitioners and names of internal topics and stores
+     * @param materialized        a {@link Materialized} that describes how the {@link StateStore} for the resulting {@code KTable}
+     *                            should be materialized. Cannot be {@code null}
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable join(final KTable other,
+                final BiFunction foreignKeyExtractor,
+                final ValueJoiner joiner,
+                final TableJoined tableJoined,
+                final Materialized> materialized);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join.
      * <p>
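The overloads added above differ from the existing Function-based foreign-key joins only in that the extractor also receives this table's key. A usage sketch under assumed topics and string-encoded values; the topic names and the "customerId|amount" encoding are illustrative, not part of this change.

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;

public class ForeignKeyJoinExample {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        // left table: orderId -> "customerId|amount"; right table: customerId -> customerName
        final KTable<String, String> orders = builder.table("orders");
        final KTable<String, String> customers = builder.table("customers");

        // the BiFunction extractor sees the left table's key and value;
        // returning null drops the update as invalid, per the Javadoc above
        final KTable<String, String> enriched = orders.join(
                customers,
                (orderId, orderValue) -> orderValue == null ? null : orderValue.split("\\|")[0],
                (orderValue, customerName) -> orderValue + " for " + customerName);

        // enriched stays keyed by orderId, like the Function-based foreign-key join
    }
}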

@@ -2199,6 +2287,24 @@ KTable leftJoin(final KTable other,
                                      final Function foreignKeyExtractor,
                                      final ValueJoiner joiner);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            extract is null, then the right hand side of the result will be null.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable leftJoin(final KTable other,
+                    final BiFunction foreignKeyExtractor,
+                    final ValueJoiner joiner);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join,
      * using the {@link TableJoined} instance for optional configurations including
@@ -2221,6 +2327,28 @@ KTable leftJoin(final KTable other,
                                      final ValueJoiner joiner,
                                      final TableJoined tableJoined);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join,
+     * using the {@link TableJoined} instance for optional configurations including
+     * {@link StreamPartitioner partitioners} when the tables being joined use non-default partitioning,
+     * and also the base name for components of the join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            extract is null, then the right hand side of the result will be null.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param tableJoined         a {@link TableJoined} used to configure partitioners and names of internal topics and stores
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable leftJoin(final KTable other,
+                    final BiFunction foreignKeyExtractor,
+                    final ValueJoiner joiner,
+                    final TableJoined tableJoined);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join.
      * <p>

@@ -2242,6 +2370,27 @@ KTable leftJoin(final KTable other,
                                      final ValueJoiner joiner,
                                      final Materialized> materialized);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            extract is null, then the right hand side of the result will be null.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param materialized        a {@link Materialized} that describes how the {@link StateStore} for the resulting {@code KTable}
+     *                            should be materialized. Cannot be {@code null}
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable leftJoin(final KTable other,
+                    final BiFunction foreignKeyExtractor,
+                    final ValueJoiner joiner,
+                    final Materialized> materialized);
+
     /**
      * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join,
      * using the {@link TableJoined} instance for optional configurations including
@@ -2268,6 +2417,32 @@ KTable leftJoin(final KTable other,
                                      final TableJoined tableJoined,
                                      final Materialized> materialized);
 
+    /**
+     * Join records of this {@code KTable} with another {@code KTable} using non-windowed left join,
+     * using the {@link TableJoined} instance for optional configurations including
+     * {@link StreamPartitioner partitioners} when the tables being joined use non-default partitioning,
+     * and also the base name for components of the join.
+     * <p>

+     * This is a foreign key join, where the joining key is determined by the {@code foreignKeyExtractor}.
+     *
+     * @param other               the other {@code KTable} to be joined with this {@code KTable}. Keyed by KO.
+     * @param foreignKeyExtractor a {@link BiFunction} that extracts the key (KO) from this table's key and value (K, V). If the
+     *                            extract is null, then the right hand side of the result will be null.
+     * @param joiner              a {@link ValueJoiner} that computes the join result for a pair of matching records
+     * @param tableJoined         a {@link TableJoined} used to configure partitioners and names of internal topics and stores
+     * @param materialized        a {@link Materialized} that describes how the {@link StateStore} for the resulting {@code KTable}
+     *                            should be materialized. Cannot be {@code null}
+     * @param the value type of the result {@code KTable}
+     * @param the key type of the other {@code KTable}
+     * @param the value type of the other {@code KTable}
+     * @return a {@code KTable} that contains the result of joining this table with {@code other}
+     */
+    KTable leftJoin(final KTable other,
+                    final BiFunction foreignKeyExtractor,
+                    final ValueJoiner joiner,
+                    final TableJoined tableJoined,
+                    final Materialized> materialized);
+
     /**
      * Get the name of the local state store used that can be used to query this {@code KTable}.
      *
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/Suppressed.java b/streams/src/main/java/org/apache/kafka/streams/kstream/Suppressed.java
index 57b18b4caf587..5bda71d487b5c 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/Suppressed.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/Suppressed.java
@@ -72,16 +72,16 @@ static EagerBufferConfig maxBytes(final long byteLimit) {
     /**
      * Create a buffer unconstrained by size (either keys or bytes).
      *
-     * As a result, the buffer will consume as much memory as it needs, dictated by the time bound.
+     *

              As a result, the buffer will consume as much memory as it needs, dictated by the time bound. * - * If there isn't enough heap available to meet the demand, the application will encounter an + *

              If there isn't enough heap available to meet the demand, the application will encounter an * {@link OutOfMemoryError} and shut down (not guaranteed to be a graceful exit). Also, note that * JVM processes under extreme memory pressure may exhibit poor GC behavior. * - * This is a convenient option if you doubt that your buffer will be that large, but also don't + *

              This is a convenient option if you doubt that your buffer will be that large, but also don't * wish to pick particular constraints, such as in testing. * - * This buffer is "strict" in the sense that it will enforce the time bound or crash. + *

              This buffer is "strict" in the sense that it will enforce the time bound or crash. * It will never emit early. */ static StrictBufferConfig unbounded() { @@ -91,16 +91,16 @@ static StrictBufferConfig unbounded() { /** * Set the buffer to be unconstrained by size (either keys or bytes). * - * As a result, the buffer will consume as much memory as it needs, dictated by the time bound. + *

              As a result, the buffer will consume as much memory as it needs, dictated by the time bound. * - * If there isn't enough heap available to meet the demand, the application will encounter an + *

              If there isn't enough heap available to meet the demand, the application will encounter an * {@link OutOfMemoryError} and shut down (not guaranteed to be a graceful exit). Also, note that * JVM processes under extreme memory pressure may exhibit poor GC behavior. * - * This is a convenient option if you doubt that your buffer will be that large, but also don't + *

              This is a convenient option if you doubt that your buffer will be that large, but also don't * wish to pick particular constraints, such as in testing. * - * This buffer is "strict" in the sense that it will enforce the time bound or crash. + *

              This buffer is "strict" in the sense that it will enforce the time bound or crash. * It will never emit early. */ StrictBufferConfig withNoBound(); @@ -108,7 +108,7 @@ static StrictBufferConfig unbounded() { /** * Set the buffer to gracefully shut down the application when any of its constraints are violated * - * This buffer is "strict" in the sense that it will enforce the time bound or shut down. + *

              This buffer is "strict" in the sense that it will enforce the time bound or shut down. * It will never emit early. */ StrictBufferConfig shutDownWhenFull(); @@ -116,7 +116,7 @@ static StrictBufferConfig unbounded() { /** * Set the buffer to just emit the oldest records when any of its constraints are violated. * - * This buffer is "not strict" in the sense that it may emit early, so it is suitable for reducing + *

              This buffer is "not strict" in the sense that it may emit early, so it is suitable for reducing * duplicate results downstream, but does not promise to eliminate them. */ EagerBufferConfig emitEarlyWhenFull(); @@ -125,6 +125,7 @@ static StrictBufferConfig unbounded() { * Disable the changelog for this suppression's internal buffer. * This will turn off fault-tolerance for the suppression, and will result in data loss in the event of a rebalance. * By default, the changelog is enabled. + * * @return this */ BC withLoggingDisabled(); @@ -144,14 +145,14 @@ static StrictBufferConfig unbounded() { /** * Configure the suppression to emit only the "final results" from the window. * - * By default, all Streams operators emit results whenever new results are available. + *

              By default, all Streams operators emit results whenever new results are available. * This includes windowed operations. * - * This configuration will instead emit just one result per key for each window, guaranteeing + *

              This configuration will instead emit just one result per key for each window, guaranteeing * to deliver only the final result. This option is suitable for use cases in which the business logic * requires a hard guarantee that only the final result is propagated. For example, sending alerts. * - * To accomplish this, the operator will buffer events from the window until the window close (that is, + *

              To accomplish this, the operator will buffer events from the window until the window close (that is, * until the end-time passes, and additionally until the grace period expires). Since windowed operators * are required to reject out-of-order events for a window whose grace period is expired, there is an additional * guarantee that the final results emitted from this suppression will match any queryable state upstream. @@ -161,7 +162,7 @@ static StrictBufferConfig unbounded() { * property to emit early and then issue an update later. * @return a "final results" mode suppression configuration */ - static Suppressed untilWindowCloses(final StrictBufferConfig bufferConfig) { + static Suppressed> untilWindowCloses(final StrictBufferConfig bufferConfig) { return new FinalResultsSuppressionBuilder<>(null, bufferConfig); } @@ -175,20 +176,20 @@ static Suppressed untilWindowCloses(final StrictBufferConfig bufferCon * @param The key type for the KTable to apply this suppression to. * @return a suppression configuration */ - static Suppressed untilTimeLimit(final Duration timeToWaitForMoreEvents, final BufferConfig bufferConfig) { + static Suppressed untilTimeLimit(final Duration timeToWaitForMoreEvents, final BufferConfig bufferConfig) { return new SuppressedInternal<>(null, timeToWaitForMoreEvents, bufferConfig, null, false); } /** * Use the specified name for the suppression node in the topology. - *
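The "final results" mode described above is normally combined with a windowed aggregation; a usage sketch with assumed topic names, window size, and serdes.

import java.time.Duration;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
import org.apache.kafka.streams.kstream.TimeWindows;

public class FinalResultsExample {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
               // tumbling windows with a grace period; records later than the grace period are rejected
               .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
               .count()
               // emit exactly one final result per key and window; unbounded() is the strict buffer
               .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()))
               .toStream()
               .foreach((windowedKey, count) -> System.out.println(windowedKey + " -> " + count));
    }
}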

              - * This can be used to insert a suppression without changing the rest of the topology names + * + *

              This can be used to insert a suppression without changing the rest of the topology names * (and therefore not requiring an application reset). - *

              - * Note however, that once a suppression has buffered some records, removing it from the topology would cause + * + *

              Note however, that once a suppression has buffered some records, removing it from the topology would cause * the loss of those records. - *

              - * A suppression can be "disabled" with the configuration {@code untilTimeLimit(Duration.ZERO, ...}. + * + *

              A suppression can be "disabled" with the configuration {@code untilTimeLimit(Duration.ZERO, ...}. * * @param name The name to be used for the suppression node and changelog topic * @return The same configuration with the addition of the given {@code name}. diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/TransformerSupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/TransformerSupplier.java index 228b1d7123120..222cdc1bbc277 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/TransformerSupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/TransformerSupplier.java @@ -35,7 +35,6 @@ * @see Transformer * @see ValueTransformer * @see ValueTransformerSupplier - * @see KStream#transformValues(ValueTransformerSupplier, String...) * @deprecated Since 4.0. Use {@link org.apache.kafka.streams.processor.api.ProcessorSupplier api.ProcessorSupplier} instead. */ @Deprecated diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapper.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapper.java index be550a1f7b63f..9bd16bc785772 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapper.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapper.java @@ -16,20 +16,20 @@ */ package org.apache.kafka.streams.kstream; +import org.apache.kafka.streams.processor.api.FixedKeyProcessor; /** * The {@code ValueMapper} interface for mapping a value to a new value of arbitrary type. * This is a stateless record-by-record operation, i.e, {@link #apply(Object)} is invoked individually for each record - * of a stream (cf. {@link ValueTransformer} for stateful value transformation). - * If {@code ValueMapper} is applied to a {@link org.apache.kafka.streams.KeyValue key-value pair} record the record's + * of a stream (cf. {@link org.apache.kafka.streams.processor.api.FixedKeyProcessor} for stateful value transformation). + * If {@code ValueMapper} is applied to a {@link org.apache.kafka.streams.processor.api.Record} the record's * key is preserved. * If a record's key and value should be modified {@link KeyValueMapper} can be used. * * @param value type * @param mapped value type * @see KeyValueMapper - * @see ValueTransformer - * @see ValueTransformerWithKey + * @see FixedKeyProcessor * @see KStream#mapValues(ValueMapper) * @see KStream#mapValues(ValueMapperWithKey) * @see KStream#flatMapValues(ValueMapper) diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapperWithKey.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapperWithKey.java index b20c61ae682f7..0c315f0e0c1b3 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapperWithKey.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueMapperWithKey.java @@ -19,8 +19,8 @@ /** * The {@code ValueMapperWithKey} interface for mapping a value to a new value of arbitrary type. * This is a stateless record-by-record operation, i.e, {@link #apply(Object, Object)} is invoked individually for each - * record of a stream (cf. {@link ValueTransformer} for stateful value transformation). - * If {@code ValueMapperWithKey} is applied to a {@link org.apache.kafka.streams.KeyValue key-value pair} record the + * record of a stream (cf. {@link org.apache.kafka.streams.processor.api.FixedKeyProcessor} for stateful value transformation). 
+ * If {@code ValueMapperWithKey} is applied to a {@link org.apache.kafka.streams.processor.api.Record} the * record's key is preserved. * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. * If a record's key and value should be modified {@link KeyValueMapper} can be used. @@ -29,8 +29,7 @@ * @param value type * @param mapped value type * @see KeyValueMapper - * @see ValueTransformer - * @see ValueTransformerWithKey + * @see org.apache.kafka.streams.processor.api.FixedKeyProcessor * @see KStream#mapValues(ValueMapper) * @see KStream#mapValues(ValueMapperWithKey) * @see KStream#flatMapValues(ValueMapper) diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformer.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformer.java index 6d4e4fe8f1cd4..ae1d21334ca43 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformer.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformer.java @@ -44,8 +44,7 @@ * @param transformed value type * @see ValueTransformerSupplier * @see ValueTransformerWithKeySupplier - * @see KStream#transformValues(ValueTransformerSupplier, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, String...) + * @see KTable#transformValues(ValueTransformerWithKeySupplier, Materialized, String...) * @see Transformer * @deprecated Since 4.0. Use {@link FixedKeyProcessor} instead. */ @@ -77,7 +76,7 @@ public interface ValueTransformer { /** * Transform the given value to a new value. - * Additionally, any {@link StateStore} that is {@link KStream#transformValues(ValueTransformerSupplier, String...) + * Additionally, any {@link StateStore} that is {@link KTable#transformValues(ValueTransformerWithKeySupplier, String...) * attached} to this operator can be accessed and modified arbitrarily (cf. * {@link ProcessorContext#getStateStore(String)}). *

              diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerSupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerSupplier.java index b0008744eac3d..6a4c25b0c1c9d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerSupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerSupplier.java @@ -31,8 +31,6 @@ * @see ValueTransformer * @see ValueTransformerWithKey * @see ValueTransformerWithKeySupplier - * @see KStream#transformValues(ValueTransformerSupplier, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, String...) * @see Transformer * @see TransformerSupplier * @deprecated Since 4.0. Use {@link FixedKeyProcessorSupplier} instead. diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKey.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKey.java index 9c3552622adc5..cc0c38d01ef04 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKey.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKey.java @@ -47,8 +47,7 @@ * @param transformed value type * @see ValueTransformer * @see ValueTransformerWithKeySupplier - * @see KStream#transformValues(ValueTransformerSupplier, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, String...) + * @see KTable#transformValues(ValueTransformerWithKeySupplier, String...) * @see Transformer */ @@ -77,7 +76,7 @@ public interface ValueTransformerWithKey { /** * Transform the given [key and] value to a new value. - * Additionally, any {@link StateStore} that is {@link KStream#transformValues(ValueTransformerWithKeySupplier, String...) + * Additionally, any {@link StateStore} that is {@link KTable#transformValues(ValueTransformerWithKeySupplier, Named, String...) * attached} to this operator can be accessed and modified arbitrarily (cf. * {@link ProcessorContext#getStateStore(String)}). *
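The cross-references above now point to FixedKeyProcessor as the replacement for the deprecated value transformers; a minimal sketch of that interface in use, where the uppercasing logic is an illustrative assumption.

import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;

public class UppercaseValueProcessor implements FixedKeyProcessor<String, String, String> {
    private FixedKeyProcessorContext<String, String> context;

    @Override
    public void init(final FixedKeyProcessorContext<String, String> context) {
        this.context = context;
    }

    @Override
    public void process(final FixedKeyRecord<String, String> record) {
        // only the value may change; forward the transformed record with its key preserved
        final String upper = record.value() == null ? null : record.value().toUpperCase();
        context.forward(record.withValue(upper));
    }
}

Such a processor would typically be attached via KStream#processValues, which preserves the key just as mapValues and the deprecated transformValues did.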

              diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.java index 1c0feb0015e0b..8b1e995f1c37a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/ValueTransformerWithKeySupplier.java @@ -32,8 +32,6 @@ * @param transformed value type * @see ValueTransformer * @see ValueTransformerWithKey - * @see KStream#transformValues(ValueTransformerSupplier, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, String...) * @see Transformer * @see TransformerSupplier */ diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractConfigurableStoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractConfigurableStoreFactory.java index dc13a5d743649..8fd766a93c673 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractConfigurableStoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractConfigurableStoreFactory.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.streams.kstream.internals; -import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.DslStoreSuppliers; @@ -35,9 +34,11 @@ public AbstractConfigurableStoreFactory(final DslStoreSuppliers initialStoreSupp @Override public void configure(final StreamsConfig config) { if (dslStoreSuppliers == null) { - dslStoreSuppliers = Utils.newInstance( - config.getClass(StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG), - DslStoreSuppliers.class); + dslStoreSuppliers = config.getConfiguredInstance( + StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG, + DslStoreSuppliers.class, + config.originals() + ); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java index 2483cbbbe1673..91a93d23a07e0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java @@ -17,17 +17,12 @@ package org.apache.kafka.streams.kstream.internals; import org.apache.kafka.common.serialization.Serde; -import org.apache.kafka.streams.internals.ApiUtils; import org.apache.kafka.streams.kstream.ValueJoiner; import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.state.StoreBuilder; import java.util.Collection; import java.util.HashSet; @@ -109,40 +104,6 @@ static ValueMapperWithKey withKey(final ValueMapper return (readOnlyKey, value) -> valueMapper.apply(value); } - @SuppressWarnings("deprecation") - static ValueTransformerWithKeySupplier toValueTransformerWithKeySupplier( - final 
org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - ApiUtils.checkSupplier(valueTransformerSupplier); - return new ValueTransformerWithKeySupplier() { - @Override - public ValueTransformerWithKey get() { - final org.apache.kafka.streams.kstream.ValueTransformer valueTransformer = valueTransformerSupplier.get(); - return new ValueTransformerWithKey() { - @Override - public void init(final ProcessorContext context) { - valueTransformer.init(context); - } - - @Override - public VR transform(final K readOnlyKey, final V value) { - return valueTransformer.transform(value); - } - - @Override - public void close() { - valueTransformer.close(); - } - }; - } - - @Override - public Set> stores() { - return valueTransformerSupplier.stores(); - } - }; - } - static ValueJoinerWithKey toValueJoinerWithKey(final ValueJoiner valueJoiner) { Objects.requireNonNull(valueJoiner, "joiner can't be null"); return (readOnlyKey, value1, value2) -> valueJoiner.apply(value1, value2); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/CogroupedStreamAggregateBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/CogroupedStreamAggregateBuilder.java index 161c4a85c5220..a450f8ead1ae6 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/CogroupedStreamAggregateBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/CogroupedStreamAggregateBuilder.java @@ -26,12 +26,11 @@ import org.apache.kafka.streams.kstream.SlidingWindows; import org.apache.kafka.streams.kstream.Window; import org.apache.kafka.streams.kstream.Windows; +import org.apache.kafka.streams.kstream.internals.graph.GracePeriodGraphNode; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; import org.apache.kafka.streams.kstream.internals.graph.OptimizableRepartitionNode.OptimizableRepartitionNodeBuilder; import org.apache.kafka.streams.kstream.internals.graph.ProcessorGraphNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorParameters; -import org.apache.kafka.streams.kstream.internals.graph.StatefulProcessorNode; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.StoreFactory; import java.util.ArrayList; @@ -58,29 +57,30 @@ KTable build(final Map, Aggregator valueSerde, final String queryableName, final boolean isOutputVersioned) { - processRepartitions(groupPatterns, storeFactory); + processRepartitions(groupPatterns, storeFactory.storeName()); final Collection processors = new ArrayList<>(); final Collection parentProcessors = new ArrayList<>(); - boolean stateCreated = false; + int counter = 0; for (final Entry, Aggregator> kGroupedStream : groupPatterns.entrySet()) { final KStreamAggProcessorSupplier parentProcessor = - new KStreamAggregate<>(storeFactory.name(), initializer, kGroupedStream.getValue()); + new KStreamAggregate<>(storeFactory, initializer, kGroupedStream.getValue()); parentProcessors.add(parentProcessor); - final StatefulProcessorNode statefulProcessorNode = getStatefulProcessorNode( - named.suffixWithOrElseGet( - "-cogroup-agg-" + counter++, - builder, - CogroupedKStreamImpl.AGGREGATE_NAME), - stateCreated, - storeFactory, - parentProcessor); - statefulProcessorNode.setOutputVersioned(isOutputVersioned); - stateCreated = true; - processors.add(statefulProcessorNode); - 
builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), statefulProcessorNode); + + final String kStreamAggProcessorName = named.suffixWithOrElseGet( + "-cogroup-agg-" + counter++, + builder, + CogroupedKStreamImpl.AGGREGATE_NAME); + final ProcessorGraphNode aggProcessorNode = + new ProcessorGraphNode<>( + kStreamAggProcessorName, + new ProcessorParameters<>(parentProcessor, kStreamAggProcessorName) + ); + aggProcessorNode.setOutputVersioned(isOutputVersioned); + processors.add(aggProcessorNode); + builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), aggProcessorNode); } - return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.name()); + return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.storeName()); } @SuppressWarnings("unchecked") @@ -92,34 +92,35 @@ KTable build(final Map final Serde valueSerde, final String queryableName, final Windows windows) { - processRepartitions(groupPatterns, storeFactory); + processRepartitions(groupPatterns, storeFactory.storeName()); final Collection processors = new ArrayList<>(); final Collection parentProcessors = new ArrayList<>(); - boolean stateCreated = false; int counter = 0; for (final Entry, Aggregator> kGroupedStream : groupPatterns.entrySet()) { final KStreamAggProcessorSupplier parentProcessor = (KStreamAggProcessorSupplier) new KStreamWindowAggregate( windows, - storeFactory.name(), + storeFactory, EmitStrategy.onWindowUpdate(), initializer, kGroupedStream.getValue()); parentProcessors.add(parentProcessor); - final StatefulProcessorNode statefulProcessorNode = getStatefulProcessorNode( - named.suffixWithOrElseGet( - "-cogroup-agg-" + counter++, - builder, - CogroupedKStreamImpl.AGGREGATE_NAME), - stateCreated, - storeFactory, - parentProcessor); - stateCreated = true; - processors.add(statefulProcessorNode); - builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), statefulProcessorNode); + + final String kStreamAggProcessorName = named.suffixWithOrElseGet( + "-cogroup-agg-" + counter++, + builder, + CogroupedKStreamImpl.AGGREGATE_NAME); + final GracePeriodGraphNode aggProcessorNode = + new GracePeriodGraphNode<>( + kStreamAggProcessorName, + new ProcessorParameters<>(parentProcessor, kStreamAggProcessorName), + windows.gracePeriodMs() + ); + processors.add(aggProcessorNode); + builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), aggProcessorNode); } - return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.name()); + return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.storeName()); } @SuppressWarnings("unchecked") @@ -132,34 +133,35 @@ KTable build(final Map, Aggregator sessionMerger) { - processRepartitions(groupPatterns, storeFactory); + processRepartitions(groupPatterns, storeFactory.storeName()); final Collection processors = new ArrayList<>(); final Collection parentProcessors = new ArrayList<>(); - boolean stateCreated = false; int counter = 0; for (final Entry, Aggregator> kGroupedStream : groupPatterns.entrySet()) { final KStreamAggProcessorSupplier parentProcessor = (KStreamAggProcessorSupplier) new KStreamSessionWindowAggregate( sessionWindows, - storeFactory.name(), + storeFactory, EmitStrategy.onWindowUpdate(), initializer, kGroupedStream.getValue(), sessionMerger); parentProcessors.add(parentProcessor); - final StatefulProcessorNode statefulProcessorNode = getStatefulProcessorNode( - 
named.suffixWithOrElseGet( - "-cogroup-agg-" + counter++, - builder, - CogroupedKStreamImpl.AGGREGATE_NAME), - stateCreated, - storeFactory, - parentProcessor); - stateCreated = true; - processors.add(statefulProcessorNode); - builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), statefulProcessorNode); + final String kStreamAggProcessorName = named.suffixWithOrElseGet( + "-cogroup-agg-" + counter++, + builder, + CogroupedKStreamImpl.AGGREGATE_NAME); + final long gracePeriod = sessionWindows.gracePeriodMs() + sessionWindows.inactivityGap(); + final GracePeriodGraphNode aggProcessorNode = + new GracePeriodGraphNode<>( + kStreamAggProcessorName, + new ProcessorParameters<>(parentProcessor, kStreamAggProcessorName), + gracePeriod + ); + processors.add(aggProcessorNode); + builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), aggProcessorNode); } - return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.name()); + return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.storeName()); } @SuppressWarnings("unchecked") @@ -171,38 +173,38 @@ KTable build(final Map, Aggregator valueSerde, final String queryableName, final SlidingWindows slidingWindows) { - processRepartitions(groupPatterns, storeFactory); + processRepartitions(groupPatterns, storeFactory.storeName()); final Collection parentProcessors = new ArrayList<>(); final Collection processors = new ArrayList<>(); - boolean stateCreated = false; int counter = 0; for (final Entry, Aggregator> kGroupedStream : groupPatterns.entrySet()) { final KStreamAggProcessorSupplier parentProcessor = (KStreamAggProcessorSupplier) new KStreamSlidingWindowAggregate( slidingWindows, - storeFactory.name(), + storeFactory, // TODO: We do not have other emit policies for co-group yet EmitStrategy.onWindowUpdate(), initializer, kGroupedStream.getValue()); parentProcessors.add(parentProcessor); - final StatefulProcessorNode statefulProcessorNode = getStatefulProcessorNode( - named.suffixWithOrElseGet( - "-cogroup-agg-" + counter++, - builder, - CogroupedKStreamImpl.AGGREGATE_NAME), - stateCreated, - storeFactory, - parentProcessor); - stateCreated = true; - processors.add(statefulProcessorNode); - builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), statefulProcessorNode); + final String kStreamAggProcessorName = named.suffixWithOrElseGet( + "-cogroup-agg-" + counter++, + builder, + CogroupedKStreamImpl.AGGREGATE_NAME); + final GracePeriodGraphNode aggProcessorNode = + new GracePeriodGraphNode<>( + kStreamAggProcessorName, + new ProcessorParameters<>(parentProcessor, kStreamAggProcessorName), + slidingWindows.gracePeriodMs() + ); + processors.add(aggProcessorNode); + builder.addGraphNode(parentNodes.get(kGroupedStream.getKey()), aggProcessorNode); } - return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.name()); + return createTable(processors, parentProcessors, named, keySerde, valueSerde, queryableName, storeFactory.storeName()); } private void processRepartitions(final Map, Aggregator> groupPatterns, - final StoreFactory storeFactory) { + final String storeName) { for (final KGroupedStreamImpl repartitionReqs : groupPatterns.keySet()) { if (repartitionReqs.repartitionRequired) { @@ -210,7 +212,7 @@ private void processRepartitions(final Map, Aggregator< final OptimizableRepartitionNodeBuilder repartitionNodeBuilder = optimizableRepartitionNodeBuilder(); final String 
repartitionNamePrefix = repartitionReqs.userProvidedRepartitionTopicName != null ? - repartitionReqs.userProvidedRepartitionTopicName : storeFactory.name(); + repartitionReqs.userProvidedRepartitionTopicName : storeName; createRepartitionSource(repartitionNamePrefix, repartitionNodeBuilder, repartitionReqs.keySerde, repartitionReqs.valueSerde); @@ -262,30 +264,6 @@ KTable createTable(final Collection processors, builder); } - private StatefulProcessorNode getStatefulProcessorNode(final String processorName, - final boolean stateCreated, - final StoreFactory storeFactory, - final ProcessorSupplier kStreamAggregate) { - final StatefulProcessorNode statefulProcessorNode; - if (!stateCreated) { - statefulProcessorNode = - new StatefulProcessorNode<>( - processorName, - new ProcessorParameters<>(kStreamAggregate, processorName), - storeFactory - ); - } else { - statefulProcessorNode = - new StatefulProcessorNode<>( - processorName, - new ProcessorParameters<>(kStreamAggregate, processorName), - new String[]{storeFactory.name()} - ); - } - - return statefulProcessorNode; - } - @SuppressWarnings("unchecked") private void createRepartitionSource(final String repartitionTopicNamePrefix, final OptimizableRepartitionNodeBuilder optimizableRepartitionNodeBuilder, diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/ConsumedInternal.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/ConsumedInternal.java index 40bd53a0b8c28..9ad1721dea8cd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/ConsumedInternal.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/ConsumedInternal.java @@ -18,7 +18,7 @@ import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serde; -import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.processor.TimestampExtractor; @@ -28,18 +28,6 @@ public ConsumedInternal(final Consumed consumed) { super(consumed); } - - public ConsumedInternal(final Serde keySerde, - final Serde valueSerde, - final TimestampExtractor timestampExtractor, - final Topology.AutoOffsetReset offsetReset) { - this(Consumed.with(keySerde, valueSerde, timestampExtractor, offsetReset)); - } - - public ConsumedInternal() { - this(Consumed.with(null, null)); - } - public Serde keySerde() { return keySerde; } @@ -60,11 +48,11 @@ public TimestampExtractor timestampExtractor() { return timestampExtractor; } - public Topology.AutoOffsetReset offsetResetPolicy() { - return resetPolicy; + public AutoOffsetResetInternal offsetResetPolicy() { + return resetPolicy == null ? 
null : new AutoOffsetResetInternal(resetPolicy); } public String name() { return processorName; } -} +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java index 8217bd025bb9c..b99034c5306b5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/GroupedStreamAggregateBuilder.java @@ -20,10 +20,10 @@ import org.apache.kafka.streams.kstream.Aggregator; import org.apache.kafka.streams.kstream.Initializer; import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.internals.graph.GracePeriodGraphNode; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; +import org.apache.kafka.streams.kstream.internals.graph.ProcessorGraphNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorParameters; -import org.apache.kafka.streams.kstream.internals.graph.StatefulProcessorNode; -import org.apache.kafka.streams.processor.internals.StoreFactory; import java.util.Collections; import java.util.Set; @@ -66,23 +66,67 @@ class GroupedStreamAggregateBuilder { this.userProvidedRepartitionTopicName = groupedInternal.name(); } - KTable build(final NamedInternal functionName, - final StoreFactory storeFactory, - final KStreamAggProcessorSupplier aggregateSupplier, - final String queryableStoreName, - final Serde keySerde, - final Serde valueSerde, - final boolean isOutputVersioned) { - assert queryableStoreName == null || queryableStoreName.equals(storeFactory.name()); + KTable buildNonWindowed(final NamedInternal functionName, + final String storeName, + final KStreamAggProcessorSupplier aggregateSupplier, + final String queryableStoreName, + final Serde keySerde, + final Serde valueSerde, + final boolean isOutputVersioned) { + final String aggFunctionName = functionName.name(); + + final ProcessorGraphNode aggProcessorNode = + new ProcessorGraphNode<>( + aggFunctionName, + new ProcessorParameters<>(aggregateSupplier, aggFunctionName) + ); + + aggProcessorNode.setOutputVersioned(isOutputVersioned); + return build(aggFunctionName, storeName, aggregateSupplier, aggProcessorNode, queryableStoreName, keySerde, valueSerde); + } + + KTable buildWindowed(final NamedInternal functionName, + final String storeName, + final long gracePeriod, + final KStreamAggProcessorSupplier aggregateSupplier, + final String queryableStoreName, + final Serde keySerde, + final Serde valueSerde, + final boolean isOutputVersioned) { final String aggFunctionName = functionName.name(); + final GracePeriodGraphNode gracePeriodAggProcessorNode = + new GracePeriodGraphNode<>( + aggFunctionName, + new ProcessorParameters<>(aggregateSupplier, aggFunctionName), + gracePeriod + ); + + gracePeriodAggProcessorNode.setOutputVersioned(isOutputVersioned); + + return build(aggFunctionName, storeName, aggregateSupplier, gracePeriodAggProcessorNode, queryableStoreName, keySerde, valueSerde); + } + + private KTable build(final String aggFunctionName, + final String storeName, + final KStreamAggProcessorSupplier aggregateSupplier, + final ProcessorGraphNode aggProcessorNode, + final String queryableStoreName, + final Serde keySerde, + final Serde valueSerde) { + if (!(queryableStoreName == null || queryableStoreName.equals(storeName))) { + throw new 
IllegalStateException(String.format("queryableStoreName should be null or equal to storeName" + + " but got storeName='%s' and queryableStoreName='%s'", + storeName, queryableStoreName)); + } + String sourceName = this.name; GraphNode parentNode = graphNode; if (repartitionRequired) { final OptimizableRepartitionNodeBuilder repartitionNodeBuilder = optimizableRepartitionNodeBuilder(); - final String repartitionTopicPrefix = userProvidedRepartitionTopicName != null ? userProvidedRepartitionTopicName : storeFactory.name(); + final String repartitionTopicPrefix = userProvidedRepartitionTopicName != null ? userProvidedRepartitionTopicName : storeName; sourceName = createRepartitionSource(repartitionTopicPrefix, repartitionNodeBuilder); // First time through we need to create a repartition node. @@ -97,15 +141,7 @@ KTable build(final NamedInternal functionName, parentNode = repartitionNode; } - final StatefulProcessorNode statefulProcessorNode = - new StatefulProcessorNode<>( - aggFunctionName, - new ProcessorParameters<>(aggregateSupplier, aggFunctionName), - storeFactory - ); - statefulProcessorNode.setOutputVersioned(isOutputVersioned); - - builder.addGraphNode(parentNode, statefulProcessorNode); + builder.addGraphNode(parentNode, aggProcessorNode); return new KTableImpl<>(aggFunctionName, keySerde, @@ -113,9 +149,8 @@ KTable build(final NamedInternal functionName, sourceName.equals(this.name) ? subTopologySourceNodes : Collections.singleton(sourceName), queryableStoreName, aggregateSupplier, - statefulProcessorNode, + aggProcessorNode, builder); - } /** diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java index 92dde06e9c05c..954b88bfbea2b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java @@ -140,7 +140,7 @@ public KTable table(final String topic, final String tableSourceName = named .orElseGenerateWithPrefix(this, KTableImpl.SOURCE_NAME); - final KTableSource tableSource = new KTableSource<>(materialized.storeName(), materialized.queryableStoreName()); + final KTableSource tableSource = new KTableSource<>(materialized); final ProcessorParameters processorParameters = new ProcessorParameters<>(tableSource, tableSourceName); final TableSourceNode tableSourceNode = TableSourceNode.tableSourceNodeBuilder() @@ -148,7 +148,6 @@ public KTable table(final String topic, .withSourceName(sourceName) .withNodeName(tableSourceName) .withConsumedInternal(consumed) - .withMaterializedInternal(materialized) .withProcessorParameters(processorParameters) .build(); tableSourceNode.setOutputVersioned(materialized.storeSupplier() instanceof VersionedBytesStoreSupplier); @@ -186,9 +185,7 @@ public GlobalKTable globalTable(final String topic, final String processorName = named .orElseGenerateWithPrefix(this, KTableImpl.SOURCE_NAME); - // enforce store name as queryable name to always materialize global table stores - final String storeName = materialized.storeName(); - final KTableSource tableSource = new KTableSource<>(storeName, storeName); + final KTableSource tableSource = new KTableSource<>(materialized); final ProcessorParameters processorParameters = new ProcessorParameters<>(tableSource, processorName); @@ -197,12 +194,12 @@ public GlobalKTable globalTable(final String topic, .isGlobalKTable(true) 
.withSourceName(sourceName) .withConsumedInternal(consumed) - .withMaterializedInternal(materialized) .withProcessorParameters(processorParameters) .build(); addGraphNode(root, tableSourceNode); + final String storeName = materialized.storeName(); return new GlobalKTableImpl<>(new KTableSourceValueGetterSupplier<>(storeName), materialized.queryableStoreName()); } @@ -445,9 +442,9 @@ private void rewriteSingleStoreSelfJoin( GraphNode left = null, right = null; for (final GraphNode child: parent.children()) { if (child instanceof WindowedStreamProcessorNode && child.buildPriority() < joinNode.buildPriority()) { - if (child.nodeName().equals(joinNode.thisWindowedStreamProcessorParameters().processorName())) { + if (child.nodeName().equals(joinNode.thisWindowedStreamProcessorName())) { left = child; - } else if (child.nodeName().equals(joinNode.otherWindowedStreamProcessorParameters().processorName())) { + } else if (child.nodeName().equals(joinNode.otherWindowedStreamProcessorName())) { right = child; } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java index 2ed1854d9cc45..73c6174b27bdc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImpl.java @@ -97,10 +97,12 @@ public KTable reduce(final Reducer reducer, } final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, REDUCE_NAME); + final KeyValueStoreMaterializer storeFactory = new KeyValueStoreMaterializer<>(materializedInternal); + return doAggregate( - new KStreamReduce<>(materializedInternal.storeName(), reducer), + new KStreamReduce<>(storeFactory, reducer), name, - materializedInternal + storeFactory ); } @@ -129,10 +131,12 @@ public KTable aggregate(final Initializer initializer, } final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final KeyValueStoreMaterializer storeFactory = new KeyValueStoreMaterializer<>(materializedInternal); + return doAggregate( - new KStreamAggregate<>(materializedInternal.storeName(), initializer, aggregator), + new KStreamAggregate<>(storeFactory, initializer, aggregator), name, - materializedInternal + storeFactory ); } @@ -183,10 +187,12 @@ private KTable doCount(final Named named, final Materialized storeFactory = new KeyValueStoreMaterializer<>(materializedInternal); + return doAggregate( - new KStreamAggregate<>(materializedInternal.storeName(), aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), + new KStreamAggregate<>(storeFactory, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), name, - materializedInternal); + storeFactory); } @Override @@ -236,15 +242,16 @@ public SessionWindowedKStream windowedBy(final SessionWindows windows) { private KTable doAggregate(final KStreamAggProcessorSupplier aggregateSupplier, final String functionName, - final MaterializedInternal> materializedInternal) { - return aggregateBuilder.build( + final KeyValueStoreMaterializer storeFactory) { + + return aggregateBuilder.buildNonWindowed( new NamedInternal(functionName), - new KeyValueStoreMaterializer<>(materializedInternal), + storeFactory.storeName(), aggregateSupplier, - materializedInternal.queryableStoreName(), - materializedInternal.keySerde(), - materializedInternal.valueSerde(), - materializedInternal.storeSupplier() instanceof 
VersionedBytesStoreSupplier); + storeFactory.queryableStoreName(), + storeFactory.keySerde(), + storeFactory.valueSerde(), + storeFactory.storeSupplier() instanceof VersionedBytesStoreSupplier); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java index d03cb65c021a5..fbce445e7ee0c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KGroupedTableImpl.java @@ -27,8 +27,8 @@ import org.apache.kafka.streams.kstream.Reducer; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; import org.apache.kafka.streams.kstream.internals.graph.GroupedTableOperationRepartitionNode; +import org.apache.kafka.streams.kstream.internals.graph.ProcessorGraphNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorParameters; -import org.apache.kafka.streams.kstream.internals.graph.StatefulProcessorNode; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.VersionedBytesStoreSupplier; @@ -88,10 +88,9 @@ private KTable doAggregate(final ProcessorSupplier, // the passed in StreamsGraphNode must be the parent of the repartition node builder.addGraphNode(this.graphNode, repartitionGraphNode); - final StatefulProcessorNode statefulProcessorNode = new StatefulProcessorNode<>( + final ProcessorGraphNode statefulProcessorNode = new ProcessorGraphNode<>( funcName, - new ProcessorParameters<>(aggregateSupplier, funcName), - new KeyValueStoreMaterializer<>(materialized) + new ProcessorParameters<>(aggregateSupplier, funcName) ); statefulProcessorNode.setOutputVersioned(materialized.storeSupplier() instanceof VersionedBytesStoreSupplier); @@ -148,7 +147,7 @@ public KTable reduce(final Reducer adder, materializedInternal.withValueSerde(valueSerde); } final ProcessorSupplier, K, Change> aggregateSupplier = new KTableReduce<>( - materializedInternal.storeName(), + materializedInternal, adder, subtractor); return doAggregate(aggregateSupplier, new NamedInternal(named), REDUCE_NAME, materializedInternal); @@ -179,7 +178,7 @@ public KTable count(final Named named, final Materialized, K, Change> aggregateSupplier = new KTableAggregate<>( - materializedInternal.storeName(), + materializedInternal, countInitializer, countAdder, countSubtractor); @@ -224,7 +223,7 @@ public KTable aggregate(final Initializer initializer, materializedInternal.withKeySerde(keySerde); } final ProcessorSupplier, K, Change> aggregateSupplier = new KTableAggregate<>( - materializedInternal.storeName(), + materializedInternal, initializer, adder, subtractor); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java index 3e906813d8410..bfbd16ffae8dd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamAggregate.java @@ -24,13 +24,19 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import 
org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; @@ -41,19 +47,26 @@ public class KStreamAggregate implements KStreamAggProcessorSupp private static final Logger LOG = LoggerFactory.getLogger(KStreamAggregate.class); private final String storeName; + private final StoreFactory storeFactory; private final Initializer initializer; private final Aggregator aggregator; private boolean sendOldValues = false; - KStreamAggregate(final String storeName, + KStreamAggregate(final StoreFactory storeFactory, final Initializer initializer, final Aggregator aggregator) { - this.storeName = storeName; + this.storeFactory = storeFactory; + this.storeName = storeFactory.storeName(); this.initializer = initializer; this.aggregator = aggregator; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor> get() { return new KStreamAggregateProcessor(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java deleted file mode 100644 index 5ce059990630a..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
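With the StatefulProcessorNode wiring gone, KStreamAggregate (and the other suppliers changed below) contribute their state store by overriding ProcessorSupplier#stores() and returning the store factory wrapped as a StoreBuilder. The sketch below shows the same contract using only public API; the store name, processor logic, and class name are illustrative, and the internal FactoryWrappingStoreBuilder is not reproduced here.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.Collections;
import java.util.Set;

public class CountingProcessorSupplier implements ProcessorSupplier<String, String, String, Long> {

    private static final String STORE_NAME = "example-count-store"; // illustrative name

    // The topology builder calls stores() and adds/connects every returned builder to
    // the processors created by this supplier, so no explicit addStateStore is needed.
    @Override
    public Set<StoreBuilder<?>> stores() {
        return Collections.singleton(
            Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore(STORE_NAME),
                Serdes.String(),
                Serdes.Long()));
    }

    @Override
    public Processor<String, String, String, Long> get() {
        return new Processor<String, String, String, Long>() {
            private ProcessorContext<String, Long> context;
            private KeyValueStore<String, Long> store;

            @Override
            public void init(final ProcessorContext<String, Long> context) {
                this.context = context;
                this.store = context.getStateStore(STORE_NAME);
            }

            @Override
            public void process(final Record<String, String> record) {
                final Long previous = store.get(record.key());
                final long updated = (previous == null ? 0L : previous) + 1L;
                store.put(record.key(), updated);
                context.forward(new Record<>(record.key(), updated, record.timestamp()));
            }
        };
    }
}

With a supplier like this, stream.process(new CountingProcessorSupplier()) attaches and connects the store without a separate StreamsBuilder#addStateStore call.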
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.api.ContextualProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Set; - -public class KStreamFlatTransform implements ProcessorSupplier { - - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier; - - @SuppressWarnings("deprecation") - public KStreamFlatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier) { - this.transformerSupplier = transformerSupplier; - } - - @Override - public Processor get() { - return new KStreamFlatTransformProcessor<>(transformerSupplier.get()); - } - - @Override - public Set> stores() { - return transformerSupplier.stores(); - } - - public static class KStreamFlatTransformProcessor extends ContextualProcessor { - - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.Transformer>> transformer; - - @SuppressWarnings("deprecation") - public KStreamFlatTransformProcessor(final org.apache.kafka.streams.kstream.Transformer>> transformer) { - this.transformer = transformer; - } - - @Override - public void init(final ProcessorContext context) { - super.init(context); - transformer.init((InternalProcessorContext) context); - } - - @Override - public void process(final Record record) { - final Iterable> pairs = transformer.transform(record.key(), record.value()); - if (pairs != null) { - for (final KeyValue pair : pairs) { - context().forward(record.withKey(pair.key).withValue(pair.value)); - } - } - } - - @Override - public void close() { - transformer.close(); - } - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java deleted file mode 100644 index 5469c668dfee2..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.processor.api.ContextualProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Set; - -public class KStreamFlatTransformValues implements ProcessorSupplier { - - private final ValueTransformerWithKeySupplier> valueTransformerSupplier; - - public KStreamFlatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerWithKeySupplier) { - this.valueTransformerSupplier = valueTransformerWithKeySupplier; - } - - @Override - public Processor get() { - return new KStreamFlatTransformValuesProcessor<>(valueTransformerSupplier.get()); - } - - @Override - public Set> stores() { - return valueTransformerSupplier.stores(); - } - - public static class KStreamFlatTransformValuesProcessor extends ContextualProcessor { - - private final ValueTransformerWithKey> valueTransformer; - - KStreamFlatTransformValuesProcessor(final ValueTransformerWithKey> valueTransformer) { - this.valueTransformer = valueTransformer; - } - - @Override - public void init(final ProcessorContext context) { - super.init(context); - valueTransformer.init(new ForwardingDisabledProcessorContext((InternalProcessorContext) context)); - } - - @Override - public void process(final Record record) { - final Iterable transformedValues = valueTransformer.transform(record.key(), record.value()); - if (transformedValues != null) { - for (final VOut transformedValue : transformedValues) { - context().forward(record.withValue(transformedValue)); - } - } - } - - @Override - public void close() { - super.close(); - valueTransformer.close(); - } - } - -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java index a23c5ad4b0bea..94e0e9a0e36b7 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java @@ -41,7 +41,6 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.graph.BaseRepartitionNode; import org.apache.kafka.streams.kstream.internals.graph.BaseRepartitionNode.BaseRepartitionNodeBuilder; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; @@ -49,7 +48,7 @@ import org.apache.kafka.streams.kstream.internals.graph.OptimizableRepartitionNode.OptimizableRepartitionNodeBuilder; import org.apache.kafka.streams.kstream.internals.graph.ProcessorGraphNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorParameters; -import org.apache.kafka.streams.kstream.internals.graph.StatefulProcessorNode; +import 
org.apache.kafka.streams.kstream.internals.graph.ProcessorToStateConnectorNode; import org.apache.kafka.streams.kstream.internals.graph.StreamSinkNode; import org.apache.kafka.streams.kstream.internals.graph.StreamTableJoinNode; import org.apache.kafka.streams.kstream.internals.graph.StreamToTableNode; @@ -61,8 +60,8 @@ import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopicProperties; import org.apache.kafka.streams.processor.internals.StaticTopicNameExtractor; -import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.VersionedBytesStoreSupplier; import org.apache.kafka.streams.state.internals.RocksDBTimeOrderedKeyValueBuffer; @@ -121,10 +120,6 @@ public class KStreamImpl extends AbstractStream implements KStream toTable(final Named named, subTopologySourceNodes = this.subTopologySourceNodes; } - final KTableSource tableSource = new KTableSource<>( - materializedInternal.storeName(), - materializedInternal.queryableStoreName() - ); + final KTableSource tableSource = new KTableSource<>(materializedInternal); final ProcessorParameters processorParameters = new ProcessorParameters<>(tableSource, name); final GraphNode tableNode = new StreamToTableNode<>( name, - processorParameters, - materializedInternal + processorParameters ); tableNode.setOutputVersioned(materializedInternal.storeSupplier() instanceof VersionedBytesStoreSupplier); @@ -1132,7 +1123,7 @@ private KStream globalTableJoin(final GlobalKTable g leftJoin); final ProcessorParameters processorParameters = new ProcessorParameters<>(processorSupplier, name); final StreamTableJoinNode streamTableJoinNode = - new StreamTableJoinNode<>(name, processorParameters, new String[] {}, null, null, Optional.empty()); + new StreamTableJoinNode<>(name, processorParameters, new String[] {}, null, null); if (leftJoin) { streamTableJoinNode.labels().add(GraphNode.Label.NULL_KEY_RELAXED_JOIN); @@ -1164,16 +1155,20 @@ private KStream doStreamTableJoin(final KTable table, final String name = renamed.orElseGenerateWithPrefix(builder, leftJoin ? LEFTJOIN_NAME : JOIN_NAME); - Optional bufferStoreName = Optional.empty(); + Optional> bufferStoreBuilder = Optional.empty(); if (joinedInternal.gracePeriod() != null) { if (!((KTableImpl) table).graphNode.isOutputVersioned().orElse(true)) { throw new IllegalArgumentException("KTable must be versioned to use a grace period in a stream table join."); } - bufferStoreName = Optional.of(name + "-Buffer"); - final RocksDBTimeOrderedKeyValueBuffer.Builder storeBuilder = - new RocksDBTimeOrderedKeyValueBuffer.Builder<>(bufferStoreName.get(), joinedInternal.gracePeriod(), name); - builder.addStateStore(new StoreBuilderWrapper(storeBuilder)); + final String bufferName = name + "-Buffer"; + bufferStoreBuilder = Optional.of(new RocksDBTimeOrderedKeyValueBuffer.Builder<>( + bufferName, + joinedInternal.keySerde() != null ? joinedInternal.keySerde() : keySerde, + joinedInternal.leftValueSerde() != null ? 
joinedInternal.leftValueSerde() : valueSerde, + joinedInternal.gracePeriod(), + name) + ); } final ProcessorSupplier processorSupplier = new KStreamKTableJoin<>( @@ -1181,7 +1176,8 @@ private KStream doStreamTableJoin(final KTable table, joiner, leftJoin, Optional.ofNullable(joinedInternal.gracePeriod()), - bufferStoreName); + bufferStoreBuilder + ); final ProcessorParameters processorParameters = new ProcessorParameters<>(processorSupplier, name); final StreamTableJoinNode streamTableJoinNode = new StreamTableJoinNode<>( @@ -1189,8 +1185,7 @@ private KStream doStreamTableJoin(final KTable table, processorParameters, ((KTableImpl) table).valueGetterSupplier().storeNames(), this.name, - joinedInternal.gracePeriod(), - bufferStoreName + joinedInternal.gracePeriod() ); builder.addGraphNode(graphNode, streamTableJoinNode); @@ -1209,175 +1204,6 @@ private KStream doStreamTableJoin(final KTable table, builder); } - @Override - @Deprecated - public KStream transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - NamedInternal.empty(), - stateStoreNames); - } - - @Override - @Deprecated - public KStream transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - Objects.requireNonNull(named, "named can't be null"); - return doTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - new NamedInternal(named), - stateStoreNames); - } - - @Override - @Deprecated - public KStream transformValues(final ValueTransformerWithKeySupplier valueTransformerSupplier, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doTransformValues(valueTransformerSupplier, NamedInternal.empty(), stateStoreNames); - } - - @Override - @Deprecated - public KStream transformValues(final ValueTransformerWithKeySupplier valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - Objects.requireNonNull(named, "named can't be null"); - return doTransformValues(valueTransformerSupplier, new NamedInternal(named), stateStoreNames); - } - - private KStream doTransformValues(final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier, - final NamedInternal named, - final String... 
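In doStreamTableJoin the grace-period buffer is now built up front as a StoreBuilder (with the resolved key and left-value serdes) and handed to KStreamKTableJoin, which surfaces it through stores() only when a grace period is configured. A simplified sketch of that shape, assuming a generic supplier rather than the internal RocksDBTimeOrderedKeyValueBuffer:

import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.StoreBuilder;

import java.util.Optional;
import java.util.Set;

import static java.util.Collections.singleton;

public class BufferedJoinSupplier<K, V> implements ProcessorSupplier<K, V, K, V> {

    private final Optional<StoreBuilder<?>> bufferStoreBuilder;

    public BufferedJoinSupplier(final Optional<StoreBuilder<?>> bufferStoreBuilder) {
        this.bufferStoreBuilder = bufferStoreBuilder;
    }

    // Returning null means "nothing to connect" (the ConnectedStoreProvider default);
    // otherwise the single buffer store is added and connected automatically.
    @Override
    public Set<StoreBuilder<?>> stores() {
        if (bufferStoreBuilder.isEmpty()) {
            return null;
        }
        return singleton(bufferStoreBuilder.get());
    }

    @Override
    public Processor<K, V, K, V> get() {
        return new Processor<K, V, K, V>() {
            private ProcessorContext<K, V> context;

            @Override
            public void init(final ProcessorContext<K, V> context) {
                this.context = context;
            }

            @Override
            public void process(final Record<K, V> record) {
                // Real join logic omitted: with a grace period the record would first be
                // parked in the buffer store and emitted once the grace period has elapsed.
                context.forward(record);
            }
        };
    }
}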
stateStoreNames) { - Objects.requireNonNull(stateStoreNames, "stateStoreNames can't be a null array"); - for (final String stateStoreName : stateStoreNames) { - Objects.requireNonNull(stateStoreName, "stateStoreNames can't contain `null` as store name"); - } - ApiUtils.checkSupplier(valueTransformerWithKeySupplier); - - final String name = named.orElseGenerateWithPrefix(builder, TRANSFORMVALUES_NAME); - final StatefulProcessorNode transformNode = new StatefulProcessorNode<>( - name, - new ProcessorParameters<>(new KStreamTransformValues<>(valueTransformerWithKeySupplier), name), - stateStoreNames); - transformNode.setValueChangingOperation(true); - - builder.addGraphNode(graphNode, transformNode); - - // cannot inherit value serde - return new KStreamImpl<>( - name, - keySerde, - null, - subTopologySourceNodes, - repartitionRequired, - transformNode, - builder); - } - - @Override - @Deprecated - public KStream flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier> valueTransformerSupplier, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - NamedInternal.empty(), - stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - named, - stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues(valueTransformerSupplier, NamedInternal.empty(), stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues(valueTransformerSupplier, named, stateStoreNames); - } - - private KStream doFlatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerWithKeySupplier, - final Named named, - final String... 
stateStoreNames) { - Objects.requireNonNull(stateStoreNames, "stateStoreNames can't be a null array"); - for (final String stateStoreName : stateStoreNames) { - Objects.requireNonNull(stateStoreName, "stateStoreNames can't contain `null` as store name"); - } - ApiUtils.checkSupplier(valueTransformerWithKeySupplier); - - final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, TRANSFORMVALUES_NAME); - final StatefulProcessorNode transformNode = new StatefulProcessorNode<>( - name, - new ProcessorParameters<>(new KStreamFlatTransformValues<>(valueTransformerWithKeySupplier), name), - stateStoreNames); - transformNode.setValueChangingOperation(true); - - builder.addGraphNode(graphNode, transformNode); - - // cannot inherit value serde - return new KStreamImpl<>( - name, - keySerde, - null, - subTopologySourceNodes, - repartitionRequired, - transformNode, - builder); - } - - @Override - @Deprecated - public void process(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier, - final String... stateStoreNames) { - process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames); - } - - @Override - @Deprecated - public void process(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(processorSupplier, "processorSupplier can't be null"); - Objects.requireNonNull(named, "named can't be null"); - Objects.requireNonNull(stateStoreNames, "stateStoreNames can't be a null array"); - ApiUtils.checkSupplier(processorSupplier); - for (final String stateStoreName : stateStoreNames) { - Objects.requireNonNull(stateStoreName, "stateStoreNames can't be null"); - } - - final String name = new NamedInternal(named).name(); - final StatefulProcessorNode processNode = new StatefulProcessorNode<>( - name, - new ProcessorParameters<>(processorSupplier, name), - stateStoreNames); - - builder.addGraphNode(graphNode, processNode); - } - @Override public KStream process( final ProcessorSupplier processorSupplier, @@ -1405,7 +1231,7 @@ public KStream process( } final String name = new NamedInternal(named).name(); - final StatefulProcessorNode processNode = new StatefulProcessorNode<>( + final ProcessorToStateConnectorNode processNode = new ProcessorToStateConnectorNode<>( name, new ProcessorParameters<>(processorSupplier, name), stateStoreNames); @@ -1450,7 +1276,7 @@ public KStream processValues( } final String name = new NamedInternal(named).name(); - final StatefulProcessorNode processNode = new StatefulProcessorNode<>( + final ProcessorToStateConnectorNode processNode = new ProcessorToStateConnectorNode<>( name, new ProcessorParameters<>(processorSupplier, name), stateStoreNames); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImplJoin.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImplJoin.java index 394c130058801..aeece23cf3430 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImplJoin.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImplJoin.java @@ -146,16 +146,16 @@ public KStream join(final KStream lhs, otherWindowStore = joinWindowStoreBuilderFromSupplier(otherStoreSupplier, streamJoinedInternal.keySerde(), streamJoinedInternal.otherValueSerde()); } - final KStreamJoinWindow thisWindowedStream = new KStreamJoinWindow<>(thisWindowStore.name()); + final KStreamJoinWindow thisWindowedStream = 
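The deprecated transformValues/flatTransformValues and old-API process overloads are removed above; their replacements are KStream#process and KStream#processValues with the new processor API, which this patch now wires through ProcessorToStateConnectorNode. A hedged migration sketch using public API only (topic names, the store name, and the dedup logic are illustrative):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.Objects;

public class ProcessValuesMigrationExample {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        // Explicitly registered store, connected by name, just like the removed
        // transformValues(..., "dedup-store") pattern.
        final StoreBuilder<KeyValueStore<String, String>> dedupStore =
            Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("dedup-store"),
                Serdes.String(),
                Serdes.String());
        builder.addStateStore(dedupStore);

        final KStream<String, String> input =
            builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()));

        // processValues(...) keeps the key fixed, so downstream nodes retain the
        // "no repartition required" optimization that transformValues(...) had.
        final FixedKeyProcessorSupplier<String, String, String> dedup =
            () -> new FixedKeyProcessor<String, String, String>() {
                private FixedKeyProcessorContext<String, String> context;
                private KeyValueStore<String, String> store;

                @Override
                public void init(final FixedKeyProcessorContext<String, String> context) {
                    this.context = context;
                    this.store = context.getStateStore("dedup-store");
                }

                @Override
                public void process(final FixedKeyRecord<String, String> record) {
                    if (!Objects.equals(record.value(), store.get(record.key()))) {
                        store.put(record.key(), record.value());
                        context.forward(record); // forward only when the value changed
                    }
                }
            };

        input.processValues(dedup, "dedup-store")
             .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
    }
}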
new KStreamJoinWindow<>(thisWindowStore); final ProcessorParameters thisWindowStreamProcessorParams = new ProcessorParameters<>(thisWindowedStream, thisWindowStreamProcessorName); - final ProcessorGraphNode thisWindowedStreamsNode = new WindowedStreamProcessorNode<>(thisWindowStore.name(), thisWindowStreamProcessorParams); + final ProcessorGraphNode thisWindowedStreamsNode = new WindowedStreamProcessorNode<>(thisWindowStore.storeName(), thisWindowStreamProcessorParams); builder.addGraphNode(thisGraphNode, thisWindowedStreamsNode); - final KStreamJoinWindow otherWindowedStream = new KStreamJoinWindow<>(otherWindowStore.name()); + final KStreamJoinWindow otherWindowedStream = new KStreamJoinWindow<>(otherWindowStore); final ProcessorParameters otherWindowStreamProcessorParams = new ProcessorParameters<>(otherWindowedStream, otherWindowStreamProcessorName); - final ProcessorGraphNode otherWindowedStreamsNode = new WindowedStreamProcessorNode<>(otherWindowStore.name(), otherWindowStreamProcessorParams); + final ProcessorGraphNode otherWindowedStreamsNode = new WindowedStreamProcessorNode<>(otherWindowStore.storeName(), otherWindowStreamProcessorParams); builder.addGraphNode(otherGraphNode, otherWindowedStreamsNode); Optional outerJoinWindowStore = Optional.empty(); @@ -173,25 +173,25 @@ public KStream join(final KStream lhs, final JoinWindowsInternal internalWindows = new JoinWindowsInternal(windows); final KStreamKStreamJoinLeftSide joinThis = new KStreamKStreamJoinLeftSide<>( - otherWindowStore.name(), internalWindows, joiner, leftOuter, - outerJoinWindowStore.map(StoreFactory::name), - sharedTimeTrackerSupplier + sharedTimeTrackerSupplier, + otherWindowStore, + outerJoinWindowStore ); final KStreamKStreamJoinRightSide joinOther = new KStreamKStreamJoinRightSide<>( - thisWindowStore.name(), internalWindows, AbstractStream.reverseJoinerWithKey(joiner), rightOuter, - outerJoinWindowStore.map(StoreFactory::name), - sharedTimeTrackerSupplier + sharedTimeTrackerSupplier, + thisWindowStore, + outerJoinWindowStore ); final KStreamKStreamSelfJoin selfJoin = new KStreamKStreamSelfJoin<>( - thisWindowStore.name(), + thisWindowStore, internalWindows, joiner, windows.size() + windows.gracePeriodMs() @@ -209,18 +209,11 @@ public KStream join(final KStream lhs, joinBuilder.withJoinMergeProcessorParameters(joinMergeProcessorParams) .withJoinThisProcessorParameters(joinThisProcessorParams) .withJoinOtherProcessorParameters(joinOtherProcessorParams) - .withThisWindowStoreBuilder(thisWindowStore) - .withOtherWindowStoreBuilder(otherWindowStore) - .withThisWindowedStreamProcessorParameters(thisWindowStreamProcessorParams) - .withOtherWindowedStreamProcessorParameters(otherWindowStreamProcessorParams) - .withOuterJoinWindowStoreBuilder(outerJoinWindowStore) + .withSelfJoinProcessorParameters(selfJoinProcessorParams) + .withThisWindowedStreamProcessorName(thisWindowStreamProcessorParams.processorName()) + .withOtherWindowedStreamProcessorName(otherWindowStreamProcessorParams.processorName()) .withValueJoiner(joiner) - .withNodeName(joinMergeName) - .withSelfJoinProcessorParameters(selfJoinProcessorParams); - - if (internalWindows.spuriousResultFixEnabled()) { - joinBuilder.withSpuriousResultFixEnabled(); - } + .withNodeName(joinMergeName); final GraphNode joinGraphNode = joinBuilder.build(); @@ -262,7 +255,7 @@ private void assertUniqueStoreNames(final WindowBytesStoreSupplier supplier, private static StoreFactory joinWindowStoreBuilderFromSupplier(final WindowBytesStoreSupplier storeSupplier, final Serde 
keySerde, final Serde valueSerde) { - return new StoreBuilderWrapper(Stores.windowStoreBuilder( + return StoreBuilderWrapper.wrapStoreBuilder(Stores.windowStoreBuilder( storeSupplier, keySerde, valueSerde diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamJoinWindow.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamJoinWindow.java index 13cfa0db29d19..bffdc7cfd0916 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamJoinWindow.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamJoinWindow.java @@ -21,14 +21,25 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.WindowStore; +import java.util.Collections; +import java.util.Set; + class KStreamJoinWindow implements ProcessorSupplier { - private final String windowName; + private final StoreFactory thisWindowStoreFactory; + + KStreamJoinWindow(final StoreFactory thisWindowStoreFactory) { + this.thisWindowStoreFactory = thisWindowStoreFactory; + } - KStreamJoinWindow(final String windowName) { - this.windowName = windowName; + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(thisWindowStoreFactory)); } @Override @@ -44,7 +55,7 @@ private class KStreamJoinWindowProcessor extends ContextualProcessor public void init(final ProcessorContext context) { super.init(context); - window = context.getStateStore(windowName); + window = context.getStateStore(thisWindowStoreFactory.storeName()); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoin.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoin.java index d3c0c4c0a222a..797f9d451411c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoin.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoin.java @@ -27,9 +27,12 @@ import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.WindowStoreIterator; import org.apache.kafka.streams.state.internals.LeftOrRightValue; @@ -38,7 +41,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.LinkedHashSet; import java.util.Optional; +import java.util.Set; import static org.apache.kafka.streams.StreamsConfig.InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX; import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; @@ -46,7 +51,7 @@ 
abstract class KStreamKStreamJoin implements ProcessorSupplier { private static final Logger LOG = LoggerFactory.getLogger(KStreamKStreamJoin.class); - private final String otherWindowName; + private final StoreFactory otherWindowStoreFactory; private final long joinBeforeMs; private final long joinAfterMs; private final long joinGraceMs; @@ -55,20 +60,20 @@ abstract class KStreamKStreamJoin impleme private final long windowsAfterMs; private final boolean outer; - private final Optional outerJoinWindowName; + private final Optional outerJoinWindowStoreFactory; private final ValueJoinerWithKey joiner; private final TimeTrackerSupplier sharedTimeTrackerSupplier; - KStreamKStreamJoin(final String otherWindowName, - final JoinWindowsInternal windows, + KStreamKStreamJoin(final JoinWindowsInternal windows, final ValueJoinerWithKey joiner, final boolean outer, - final Optional outerJoinWindowName, final long joinBeforeMs, final long joinAfterMs, - final TimeTrackerSupplier sharedTimeTrackerSupplier) { - this.otherWindowName = otherWindowName; + final TimeTrackerSupplier sharedTimeTrackerSupplier, + final StoreFactory otherWindowStoreFactory, + final Optional outerJoinWindowStoreFactory) { + this.otherWindowStoreFactory = otherWindowStoreFactory; this.joinBeforeMs = joinBeforeMs; this.joinAfterMs = joinAfterMs; this.windowsAfterMs = windows.afterMs; @@ -77,10 +82,22 @@ abstract class KStreamKStreamJoin impleme this.enableSpuriousResultFix = windows.spuriousResultFixEnabled(); this.joiner = joiner; this.outer = outer; - this.outerJoinWindowName = outerJoinWindowName; + this.outerJoinWindowStoreFactory = outerJoinWindowStoreFactory; this.sharedTimeTrackerSupplier = sharedTimeTrackerSupplier; } + @Override + public Set> stores() { + // use ordered set for deterministic topology string in tests + final Set> stores = new LinkedHashSet<>(); + stores.add(new FactoryWrappingStoreBuilder<>(otherWindowStoreFactory)); + + if (outerJoinWindowStoreFactory.isPresent() && enableSpuriousResultFix) { + stores.add(new FactoryWrappingStoreBuilder<>(outerJoinWindowStoreFactory.get())); + } + return stores; + } + protected abstract class KStreamKStreamJoinProcessor extends ContextualProcessor { private WindowStore otherWindowStore; private Sensor droppedRecordsSensor; @@ -95,11 +112,11 @@ public void init(final ProcessorContext context) { final StreamsMetricsImpl metrics = (StreamsMetricsImpl) context.metrics(); droppedRecordsSensor = droppedRecordsSensor(Thread.currentThread().getName(), context.taskId().toString(), metrics); - otherWindowStore = context.getStateStore(otherWindowName); + otherWindowStore = context.getStateStore(otherWindowStoreFactory.storeName()); sharedTimeTracker = sharedTimeTrackerSupplier.get(context.taskId()); if (enableSpuriousResultFix) { - outerJoinStore = outerJoinWindowName.map(context::getStateStore); + outerJoinStore = outerJoinWindowStoreFactory.map(s -> context.getStateStore(s.storeName())); sharedTimeTracker.setEmitInterval( StreamsConfig.InternalConfig.getLong( @@ -154,7 +171,7 @@ public void process(final Record record) { // // This condition below allows us to process the out-of-order records without the need // to hold it in the temporary outer store - if (!outerJoinStore.isPresent() || timeTo < sharedTimeTracker.streamTime) { + if (outerJoinStore.isEmpty() || timeTo < sharedTimeTracker.streamTime) { context().forward(record.withValue(joiner.apply(record.key(), record.value(), null))); } else { sharedTimeTracker.updatedMinTime(inputRecordTimestamp); diff --git 
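KStreamKStreamJoin#stores() above always registers the other side's window store and adds the shared outer-join store only when the spurious-result fix is enabled, using a LinkedHashSet so topology descriptions stay deterministic. A small stand-alone sketch of that registration shape (the key-value builders below merely stand in for the real window-store builders):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.LinkedHashSet;
import java.util.Set;

public class OrderedStoreRegistration {

    // Shape of the join's stores(): always register the other side's window store,
    // add the shared outer-join store only when the spurious-result fix is on, and
    // use LinkedHashSet so Topology#describe() output stays deterministic.
    static Set<StoreBuilder<?>> stores(final boolean enableSpuriousResultFix) {
        final Set<StoreBuilder<?>> stores = new LinkedHashSet<>();
        stores.add(Stores.keyValueStoreBuilder(      // stand-in for the other window-store builder
            Stores.inMemoryKeyValueStore("other-window-store"), Serdes.String(), Serdes.String()));
        if (enableSpuriousResultFix) {
            stores.add(Stores.keyValueStoreBuilder(  // stand-in for the shared outer-join store builder
                Stores.inMemoryKeyValueStore("outer-shared-store"), Serdes.String(), Serdes.String()));
        }
        return stores;
    }

    public static void main(final String[] args) {
        System.out.println(stores(false).size()); // 1
        System.out.println(stores(true).size());  // 2
    }
}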
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinLeftSide.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinLeftSide.java index 2309033b23284..7629f89a3fa90 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinLeftSide.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinLeftSide.java @@ -19,6 +19,7 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.internals.KStreamImplJoin.TimeTrackerSupplier; import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.internals.LeftOrRightValue; import org.apache.kafka.streams.state.internals.TimestampedKeyAndJoinSide; @@ -26,14 +27,14 @@ class KStreamKStreamJoinLeftSide extends KStreamKStreamJoin { - KStreamKStreamJoinLeftSide(final String otherWindowName, - final JoinWindowsInternal windows, + KStreamKStreamJoinLeftSide(final JoinWindowsInternal windows, final ValueJoinerWithKey joiner, final boolean outer, - final Optional outerJoinWindowName, - final TimeTrackerSupplier sharedTimeTrackerSupplier) { - super(otherWindowName, windows, joiner, outer, outerJoinWindowName, windows.beforeMs, windows.afterMs, - sharedTimeTrackerSupplier); + final TimeTrackerSupplier sharedTimeTrackerSupplier, + final StoreFactory otherWindowStoreFactory, + final Optional outerJoinWindowStoreFactory) { + super(windows, joiner, outer, windows.beforeMs, windows.afterMs, + sharedTimeTrackerSupplier, otherWindowStoreFactory, outerJoinWindowStoreFactory); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinRightSide.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinRightSide.java index e9cb8b82ff100..7931853b8d0c4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinRightSide.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinRightSide.java @@ -19,6 +19,7 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.internals.KStreamImplJoin.TimeTrackerSupplier; import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.internals.LeftOrRightValue; import org.apache.kafka.streams.state.internals.TimestampedKeyAndJoinSide; @@ -26,14 +27,14 @@ class KStreamKStreamJoinRightSide extends KStreamKStreamJoin { - KStreamKStreamJoinRightSide(final String otherWindowName, - final JoinWindowsInternal windows, + KStreamKStreamJoinRightSide(final JoinWindowsInternal windows, final ValueJoinerWithKey joiner, final boolean outer, - final Optional outerJoinWindowName, - final TimeTrackerSupplier sharedTimeTrackerSupplier) { - super(otherWindowName, windows, joiner, outer, outerJoinWindowName, windows.afterMs, windows.beforeMs, - sharedTimeTrackerSupplier); + final TimeTrackerSupplier sharedTimeTrackerSupplier, + final StoreFactory otherWindowStoreFactory, + final Optional outerJoinWindowStoreFactory) { + super(windows, joiner, outer, windows.afterMs, windows.beforeMs, sharedTimeTrackerSupplier, + otherWindowStoreFactory, outerJoinWindowStoreFactory); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamSelfJoin.java 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamSelfJoin.java index b627a98ef4a49..adcc501effef4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamSelfJoin.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamSelfJoin.java @@ -25,19 +25,25 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.WindowStoreIterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; class KStreamKStreamSelfJoin implements ProcessorSupplier { private static final Logger LOG = LoggerFactory.getLogger(KStreamKStreamSelfJoin.class); - private final String windowName; + private final StoreFactory windowStoreFactory; private final long joinThisBeforeMs; private final long joinThisAfterMs; private final long joinOtherBeforeMs; @@ -45,13 +51,11 @@ class KStreamKStreamSelfJoin implements ProcessorSupplier joinerThis; - KStreamKStreamSelfJoin( - final String windowName, - final JoinWindowsInternal windows, - final ValueJoinerWithKey joinerThis, - final long retentionPeriod) { - - this.windowName = windowName; + KStreamKStreamSelfJoin(final StoreFactory windowStoreFactory, + final JoinWindowsInternal windows, + final ValueJoinerWithKey joinerThis, + final long retentionPeriod) { + this.windowStoreFactory = windowStoreFactory; this.joinThisBeforeMs = windows.beforeMs; this.joinThisAfterMs = windows.afterMs; this.joinOtherBeforeMs = windows.afterMs; @@ -60,6 +64,11 @@ class KStreamKStreamSelfJoin implements ProcessorSupplier> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(windowStoreFactory)); + } + @Override public Processor get() { return new KStreamKStreamSelfJoinProcessor(); @@ -76,7 +85,7 @@ public void init(final ProcessorContext context) { final StreamsMetricsImpl metrics = (StreamsMetricsImpl) context.metrics(); droppedRecordsSensor = droppedRecordsSensor(Thread.currentThread().getName(), context.taskId().toString(), metrics); - windowStore = context.getStateStore(windowName); + windowStore = context.getStateStore(windowStoreFactory.storeName()); } @SuppressWarnings("unchecked") diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoin.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoin.java index d1ea36a470c5b..8fc775e2b9ba6 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoin.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoin.java @@ -20,9 +20,13 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.state.StoreBuilder; import java.time.Duration; import java.util.Optional; +import 
java.util.Set; + +import static java.util.Collections.singleton; class KStreamKTableJoin implements ProcessorSupplier { @@ -32,18 +36,29 @@ class KStreamKTableJoin implements ProcessorSupplier gracePeriod; private final Optional storeName; - + private final Set> stores; KStreamKTableJoin(final KTableValueGetterSupplier valueGetterSupplier, final ValueJoinerWithKey joiner, final boolean leftJoin, final Optional gracePeriod, - final Optional storeName) { + final Optional> bufferStoreBuilder) { this.valueGetterSupplier = valueGetterSupplier; this.joiner = joiner; this.leftJoin = leftJoin; this.gracePeriod = gracePeriod; - this.storeName = storeName; + this.storeName = bufferStoreBuilder.map(StoreBuilder::name); + + if (bufferStoreBuilder.isEmpty()) { + this.stores = null; + } else { + this.stores = singleton(bufferStoreBuilder.get()); + } + } + + @Override + public Set> stores() { + return stores; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamReduce.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamReduce.java index 15528f5d150f4..2f04a8ea65eeb 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamReduce.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamReduce.java @@ -23,13 +23,19 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; @@ -40,15 +46,23 @@ public class KStreamReduce implements KStreamAggProcessorSupplier reducer; private boolean sendOldValues = false; - KStreamReduce(final String storeName, final Reducer reducer) { - this.storeName = storeName; + KStreamReduce(final StoreFactory storeFactory, final Reducer reducer) { + this.storeFactory = storeFactory; + this.storeName = storeFactory.storeName(); this.reducer = reducer; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + + @Override public Processor> get() { return new KStreamReduceProcessor(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java index 8f2c53c8a9a2a..f3ca9e6740af2 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregate.java @@ -33,16 +33,21 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; import 
org.apache.kafka.streams.processor.internals.InternalProcessorContext; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Set; import static org.apache.kafka.streams.StreamsConfig.InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION; import static org.apache.kafka.streams.processor.internals.metrics.ProcessorNodeMetrics.emitFinalLatencySensor; @@ -54,6 +59,7 @@ public class KStreamSessionWindowAggregate implements KStreamAgg private static final Logger LOG = LoggerFactory.getLogger(KStreamSessionWindowAggregate.class); private final String storeName; + private final StoreFactory storeFactory; private final SessionWindows windows; private final Initializer initializer; private final Aggregator aggregator; @@ -63,19 +69,25 @@ public class KStreamSessionWindowAggregate implements KStreamAgg private boolean sendOldValues = false; public KStreamSessionWindowAggregate(final SessionWindows windows, - final String storeName, + final StoreFactory storeFactory, final EmitStrategy emitStrategy, final Initializer initializer, final Aggregator aggregator, final Merger sessionMerger) { this.windows = windows; - this.storeName = storeName; + this.storeName = storeFactory.storeName(); + this.storeFactory = storeFactory; this.emitStrategy = emitStrategy; this.initializer = initializer; this.aggregator = aggregator; this.sessionMerger = sessionMerger; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor, Change> get() { return new KStreamSessionWindowAggregateProcessor(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java index 4a288bb0e83f6..93935cbc1f0c1 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java @@ -28,7 +28,10 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.state.KeyValueIterator; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.TimestampedWindowStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.WindowStoreIterator; @@ -36,6 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -46,6 +50,7 @@ public class KStreamSlidingWindowAggregate implements KStreamAgg private static final Logger log = 
LoggerFactory.getLogger(KStreamSlidingWindowAggregate.class); private final String storeName; + private final StoreFactory storeFactory; private final SlidingWindows windows; private final Initializer initializer; private final Aggregator aggregator; @@ -54,17 +59,23 @@ public class KStreamSlidingWindowAggregate implements KStreamAgg private boolean sendOldValues = false; public KStreamSlidingWindowAggregate(final SlidingWindows windows, - final String storeName, + final StoreFactory storeFactory, final EmitStrategy emitStrategy, final Initializer initializer, final Aggregator aggregator) { this.windows = windows; - this.storeName = storeName; + this.storeName = storeFactory.storeName(); + this.storeFactory = storeFactory; this.initializer = initializer; this.aggregator = aggregator; this.emitStrategy = emitStrategy; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor, Change> get() { return new KStreamSlidingWindowAggregateProcessor(storeName, emitStrategy, sendOldValues); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValues.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValues.java deleted file mode 100644 index 1b767ef396908..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValues.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
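KStreamSessionWindowAggregate, KStreamSlidingWindowAggregate, and (below) KStreamWindowAggregate now take a StoreFactory and expose it via stores(), but the DSL surface that drives them is unchanged. For orientation, a minimal windowed aggregation over that public surface (topic, store name, and window sizes are illustrative):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.WindowStore;

import java.time.Duration;

public class WindowedAggregationExample {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        final KStream<String, Long> purchases =
            builder.stream("purchases", Consumed.with(Serdes.String(), Serdes.Long()));

        // Tumbling 5-minute windows with a 1-minute grace period; internally the DSL
        // builds a windowed aggregate processor whose stores() contributes the window store.
        final KTable<Windowed<String>, Long> totals = purchases
            .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
            .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
            .aggregate(
                () -> 0L,                          // initializer
                (key, value, agg) -> agg + value,  // aggregator
                Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("purchase-totals")
                    .withValueSerde(Serdes.Long()));

        System.out.println(builder.build().describe());
    }
}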
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.processor.api.ContextualProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Set; - -public class KStreamTransformValues implements ProcessorSupplier { - - private final ValueTransformerWithKeySupplier valueTransformerSupplier; - - KStreamTransformValues(final ValueTransformerWithKeySupplier valueTransformerSupplier) { - this.valueTransformerSupplier = valueTransformerSupplier; - } - - @Override - public Processor get() { - return new KStreamTransformValuesProcessor<>(valueTransformerSupplier.get()); - } - - @Override - public Set> stores() { - return valueTransformerSupplier.stores(); - } - - public static class KStreamTransformValuesProcessor extends ContextualProcessor { - - private final ValueTransformerWithKey valueTransformer; - - KStreamTransformValuesProcessor(final ValueTransformerWithKey valueTransformer) { - this.valueTransformer = valueTransformer; - } - - @Override - public void init(final ProcessorContext context) { - super.init(context); - valueTransformer.init(new ForwardingDisabledProcessorContext((InternalProcessorContext) context)); - } - - @Override - public void process(final Record record) { - context().forward(record.withValue(valueTransformer.transform(record.key(), record.value()))); - } - - @Override - public void close() { - valueTransformer.close(); - } - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java index 340ce82d85670..adb174c4ccdc5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java @@ -29,13 +29,18 @@ import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.TimestampedWindowStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.Map; +import java.util.Set; import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; @@ -44,6 +49,7 @@ public class KStreamWindowAggregate implements private static final Logger log = LoggerFactory.getLogger(KStreamWindowAggregate.class); private final String storeName; + private final StoreFactory storeFactory; private final Windows windows; private final Initializer initializer; private final Aggregator aggregator; @@ -52,12 +58,13 @@ public class 
KStreamWindowAggregate implements private boolean sendOldValues = false; public KStreamWindowAggregate(final Windows windows, - final String storeName, + final StoreFactory storeFactory, final EmitStrategy emitStrategy, final Initializer initializer, final Aggregator aggregator) { this.windows = windows; - this.storeName = storeName; + this.storeName = storeFactory.storeName(); + this.storeFactory = storeFactory; this.emitStrategy = emitStrategy; this.initializer = initializer; this.aggregator = aggregator; @@ -70,6 +77,11 @@ public KStreamWindowAggregate(final Windows windows, } } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor, Change> get() { return new KStreamWindowAggregateProcessor(storeName, emitStrategy, sendOldValues); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java index f71143ff209da..cecb8048634e0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableAggregate.java @@ -16,15 +16,23 @@ */ package org.apache.kafka.streams.kstream.internals; +import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.Aggregator; import org.apache.kafka.streams.kstream.Initializer; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; +import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; import static org.apache.kafka.streams.state.internals.KeyValueStoreWrapper.PUT_RETURN_CODE_IS_LATEST; @@ -33,17 +41,19 @@ public class KTableAggregate implements KTableProcessorSupplier { private final String storeName; + private final StoreFactory storeFactory; private final Initializer initializer; private final Aggregator add; private final Aggregator remove; private boolean sendOldValues = false; - KTableAggregate(final String storeName, + KTableAggregate(final MaterializedInternal> materialized, final Initializer initializer, final Aggregator add, final Aggregator remove) { - this.storeName = storeName; + this.storeFactory = new KeyValueStoreMaterializer<>(materialized); + this.storeName = materialized.storeName(); this.initializer = initializer; this.add = add; this.remove = remove; @@ -56,6 +66,11 @@ public boolean enableSendingOldValues(final boolean forceMaterialization) { return true; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor, KIn, Change> get() { return new KTableAggregateProcessor(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableFilter.java 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableFilter.java index 475ea85db940a..91e2fac9411d0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableFilter.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableFilter.java @@ -20,9 +20,14 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; import static org.apache.kafka.streams.state.internals.KeyValueStoreWrapper.PUT_RETURN_CODE_IS_LATEST; @@ -34,17 +39,20 @@ public class KTableFilter implements KTableProcessorSupplier parent, final Predicate predicate, final boolean filterNot, - final String queryableName) { + final String queryableName, + final StoreFactory storeFactory) { this.parent = parent; this.predicate = predicate; this.filterNot = filterNot; this.queryableName = queryableName; // If upstream is already materialized, enable sending old values to avoid sending unnecessary tombstones: this.sendOldValues = parent.enableSendingOldValues(false); + this.storeFactory = storeFactory; } public void setUseVersionedSemantics(final boolean useVersionedSemantics) { @@ -61,6 +69,14 @@ public Processor, KIn, Change> get() { return new KTableFilterProcessor(); } + @Override + public Set> stores() { + if (storeFactory == null) { + return null; + } + return Collections.singleton(new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public boolean enableSendingOldValues(final boolean forceMaterialization) { if (queryableName != null) { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java index 09efdb780069c..817365c787f90 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableImpl.java @@ -38,6 +38,7 @@ import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.CombinedKey; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.CombinedKeySchema; +import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.ForeignKeyExtractor; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.ForeignTableJoinProcessorSupplier; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.ResponseJoinProcessorSupplier; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.SubscriptionJoinProcessorSupplier; @@ -53,11 +54,10 @@ import org.apache.kafka.streams.kstream.internals.graph.KTableKTableJoinNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorGraphNode; import org.apache.kafka.streams.kstream.internals.graph.ProcessorParameters; -import org.apache.kafka.streams.kstream.internals.graph.StatefulProcessorNode; +import 
org.apache.kafka.streams.kstream.internals.graph.ProcessorToStateConnectorNode; import org.apache.kafka.streams.kstream.internals.graph.StreamSinkNode; import org.apache.kafka.streams.kstream.internals.graph.StreamSourceNode; import org.apache.kafka.streams.kstream.internals.graph.TableFilterNode; -import org.apache.kafka.streams.kstream.internals.graph.TableProcessorNode; import org.apache.kafka.streams.kstream.internals.graph.TableRepartitionMapNode; import org.apache.kafka.streams.kstream.internals.graph.TableSuppressNode; import org.apache.kafka.streams.kstream.internals.suppress.FinalResultsSuppressionBuilder; @@ -68,8 +68,9 @@ import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopicProperties; import org.apache.kafka.streams.processor.internals.StaticTopicNameExtractor; -import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; +import org.apache.kafka.streams.processor.internals.StoreDelegatingProcessorSupplier; import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; @@ -86,6 +87,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Supplier; @@ -143,20 +145,6 @@ public class KTableImpl extends AbstractStream implements KTable< private boolean sendOldValues = false; - @SuppressWarnings("deprecation") // Old PAPI compatibility. - public KTableImpl(final String name, - final Serde keySerde, - final Serde valueSerde, - final Set subTopologySourceNodes, - final String queryableStoreName, - final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier, - final GraphNode graphNode, - final InternalStreamsBuilder builder) { - super(name, keySerde, valueSerde, subTopologySourceNodes, graphNode, builder); - this.processorSupplier = processorSupplier; - this.queryableStoreName = queryableStoreName; - } - public KTableImpl(final String name, final Serde keySerde, final Serde valueSerde, @@ -175,6 +163,7 @@ public String queryableStoreName() { return queryableStoreName; } + @SuppressWarnings("resource") private KTable doFilter(final Predicate predicate, final Named named, final MaterializedInternal> materializedInternal, @@ -207,17 +196,13 @@ private KTable doFilter(final Predicate predicate, final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, FILTER_NAME); final KTableProcessorSupplier processorSupplier = - new KTableFilter<>(this, predicate, filterNot, queryableStoreName); + new KTableFilter<>(this, predicate, filterNot, queryableStoreName, storeFactory); final ProcessorParameters processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType( new ProcessorParameters<>(processorSupplier, name) ); - final GraphNode tableNode = new TableFilterNode<>( - name, - processorParameters, - storeFactory - ); + final GraphNode tableNode = new TableFilterNode<>(name, processorParameters); maybeSetOutputVersioned(tableNode, materializedInternal); builder.addGraphNode(this.graphNode, tableNode); @@ -292,6 +277,7 @@ public KTable filterNot(final Predicate predicate, return doFilter(predicate, renamed, materializedInternal, true); } + @SuppressWarnings("resource") private KTable 
doMapValues(final ValueMapperWithKey mapper, final Named named, final MaterializedInternal> materializedInternal) { @@ -320,17 +306,16 @@ private KTable doMapValues(final ValueMapperWithKey processorSupplier = new KTableMapValues<>(this, mapper, queryableStoreName); + final KTableProcessorSupplier processorSupplier = new KTableMapValues<>(this, mapper, queryableStoreName, storeFactory); // leaving in calls to ITB until building topology with graph final ProcessorParameters processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType( new ProcessorParameters<>(processorSupplier, name) ); - final GraphNode tableNode = new TableProcessorNode<>( + final GraphNode tableNode = new ProcessorGraphNode<>( name, - processorParameters, - storeFactory + processorParameters ); maybeSetOutputVersioned(tableNode, materializedInternal); @@ -446,6 +431,7 @@ public KTable transformValues(final ValueTransformerWithKeySupplier< return doTransformValues(transformerSupplier, materializedInternal, new NamedInternal(named), stateStoreNames); } + @SuppressWarnings("resource") private KTable doTransformValues(final ValueTransformerWithKeySupplier transformerSupplier, final MaterializedInternal> materializedInternal, final NamedInternal namedInternal, @@ -454,7 +440,7 @@ private KTable doTransformValues(final ValueTransformerWithKeySuppli final Serde keySerde; final Serde valueSerde; final String queryableStoreName; - final StoreFactory storeFactory; + final Set> storeBuilder; if (materializedInternal != null) { // don't inherit parent value serde, since this operation may change the value type, more specifically: @@ -464,12 +450,13 @@ private KTable doTransformValues(final ValueTransformerWithKeySuppli valueSerde = materializedInternal.valueSerde(); queryableStoreName = materializedInternal.queryableStoreName(); // only materialize if materialized is specified and it has queryable name - storeFactory = queryableStoreName != null ? (new KeyValueStoreMaterializer<>(materializedInternal)) : null; + final StoreFactory storeFactory = queryableStoreName != null ? (new KeyValueStoreMaterializer<>(materializedInternal)) : null; + storeBuilder = Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); } else { keySerde = this.keySerde; valueSerde = null; queryableStoreName = null; - storeFactory = null; + storeBuilder = null; } final String name = namedInternal.orElseGenerateWithPrefix(builder, TRANSFORMVALUES_NAME); @@ -479,14 +466,18 @@ private KTable doTransformValues(final ValueTransformerWithKeySuppli transformerSupplier, queryableStoreName); - final ProcessorParameters processorParameters = unsafeCastProcessorParametersToCompletelyDifferentType( - new ProcessorParameters<>(processorSupplier, name) - ); + final ProcessorParameters processorParameters = + unsafeCastProcessorParametersToCompletelyDifferentType( + new ProcessorParameters<>( + new StoreDelegatingProcessorSupplier<>( + processorSupplier, + storeBuilder), + name + )); - final GraphNode tableNode = new TableProcessorNode<>( + final GraphNode tableNode = new ProcessorToStateConnectorNode<>( name, processorParameters, - storeFactory, stateStoreNames ); maybeSetOutputVersioned(tableNode, materializedInternal); @@ -564,12 +555,6 @@ public KTable suppress(final Suppressed suppressed) { final String storeName = suppressedInternal.name() != null ? 
suppressedInternal.name() + "-store" : builder.newStoreName(SUPPRESS_NAME); - final ProcessorSupplier, K, Change> suppressionSupplier = new KTableSuppressProcessorSupplier<>( - suppressedInternal, - storeName, - this - ); - final StoreBuilder>> storeBuilder; if (suppressedInternal.bufferConfig().isLoggingEnabled()) { @@ -587,11 +572,17 @@ public KTable suppress(final Suppressed suppressed) { .withLoggingDisabled(); } + final ProcessorSupplier, K, Change> suppressionSupplier = new KTableSuppressProcessorSupplier<>( + suppressedInternal, + storeBuilder, + this + ); + final ProcessorGraphNode> node = new TableSuppressNode<>( name, - new ProcessorParameters<>(suppressionSupplier, name), - new StoreBuilderWrapper(storeBuilder) + new ProcessorParameters<>(suppressionSupplier, name) ); + node.setOutputVersioned(false); builder.addGraphNode(graphNode, node); @@ -724,7 +715,7 @@ public KTable leftJoin(final KTable other, return doJoin(other, joiner, named, materializedInternal, true, false); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "resource"}) private KTable doJoin(final KTable other, final ValueJoiner joiner, final Named joinName, @@ -746,26 +737,6 @@ private KTable doJoin(final KTable other, ((KTableImpl) other).enableSendingOldValues(true); } - final KTableKTableAbstractJoin joinThis; - final KTableKTableAbstractJoin joinOther; - - if (!leftOuter) { // inner - joinThis = new KTableKTableInnerJoin<>(this, (KTableImpl) other, joiner); - joinOther = new KTableKTableInnerJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); - } else if (!rightOuter) { // left - joinThis = new KTableKTableLeftJoin<>(this, (KTableImpl) other, joiner); - joinOther = new KTableKTableRightJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); - } else { // outer - joinThis = new KTableKTableOuterJoin<>(this, (KTableImpl) other, joiner); - joinOther = new KTableKTableOuterJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); - } - - final String joinThisName = renamed.suffixWithOrElseGet("-join-this", builder, JOINTHIS_NAME); - final String joinOtherName = renamed.suffixWithOrElseGet("-join-other", builder, JOINOTHER_NAME); - - final ProcessorParameters, ?, ?> joinThisProcessorParameters = new ProcessorParameters<>(joinThis, joinThisName); - final ProcessorParameters, ?, ?> joinOtherProcessorParameters = new ProcessorParameters<>(joinOther, joinOtherName); - final Serde keySerde; final Serde valueSerde; final String queryableStoreName; @@ -786,19 +757,45 @@ private KTable doJoin(final KTable other, storeFactory = null; } + final KTableKTableAbstractJoin joinThis; + final KTableKTableAbstractJoin joinOther; + + if (!leftOuter) { // inner + joinThis = new KTableKTableInnerJoin<>(this, (KTableImpl) other, joiner); + joinOther = new KTableKTableInnerJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); + } else if (!rightOuter) { // left + joinThis = new KTableKTableLeftJoin<>(this, (KTableImpl) other, joiner); + joinOther = new KTableKTableRightJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); + } else { // outer + joinThis = new KTableKTableOuterJoin<>(this, (KTableImpl) other, joiner); + joinOther = new KTableKTableOuterJoin<>((KTableImpl) other, this, reverseJoiner(joiner)); + } + + final String joinThisName = renamed.suffixWithOrElseGet("-join-this", builder, JOINTHIS_NAME); + final String joinOtherName = renamed.suffixWithOrElseGet("-join-other", builder, JOINOTHER_NAME); + + final ProcessorParameters, ?, ?> joinThisProcessorParameters = new ProcessorParameters<>(joinThis, 
joinThisName); + final ProcessorParameters, ?, ?> joinOtherProcessorParameters = new ProcessorParameters<>(joinOther, joinOtherName); + final ProcessorParameters, ?, ?> joinMergeProcessorParameters = new ProcessorParameters<>( + KTableKTableJoinMerger.of( + (KTableProcessorSupplier) joinThisProcessorParameters.processorSupplier(), + (KTableProcessorSupplier) joinOtherProcessorParameters.processorSupplier(), + queryableStoreName, + storeFactory), + joinMergeName); + final KTableKTableJoinNode kTableKTableJoinNode = KTableKTableJoinNode.kTableKTableJoinNodeBuilder() .withNodeName(joinMergeName) .withJoinThisProcessorParameters(joinThisProcessorParameters) .withJoinOtherProcessorParameters(joinOtherProcessorParameters) + .withMergeProcessorParameters(joinMergeProcessorParameters) .withThisJoinSideNodeName(name) .withOtherJoinSideNodeName(((KTableImpl) other).name) .withJoinThisStoreNames(valueGetterSupplier().storeNames()) .withJoinOtherStoreNames(((KTableImpl) other).valueGetterSupplier().storeNames()) .withKeySerde(keySerde) .withValueSerde(valueSerde) - .withQueryableStoreName(queryableStoreName) - .withStoreBuilder(storeFactory) .build(); final boolean isOutputVersioned = materializedInternal != null @@ -906,9 +903,25 @@ boolean sendingOldValueEnabled() { public KTable join(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, + joiner, + TableJoined.with(null, null), + Materialized.with(null, null), + false + ); + } + + @Override + public KTable join(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, joiner, TableJoined.with(null, null), Materialized.with(null, null), @@ -921,9 +934,26 @@ public KTable join(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner, final TableJoined tableJoined) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, + joiner, + tableJoined, + Materialized.with(null, null), + false + ); + } + + @Override + public KTable join(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final TableJoined tableJoined) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, joiner, tableJoined, Materialized.with(null, null), @@ -936,7 +966,17 @@ public KTable join(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner, final Materialized> materialized) { - return doJoinOnForeignKey(other, foreignKeyExtractor, joiner, TableJoined.with(null, null), materialized, false); + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey(other, adaptedExtractor, joiner, TableJoined.with(null, null), materialized, false); + } + + @Override + public KTable join(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); + return 
doJoinOnForeignKey(other, adaptedExtractor, joiner, TableJoined.with(null, null), materialized, false); } @Override @@ -945,9 +985,27 @@ public KTable join(final KTable other, final ValueJoiner joiner, final TableJoined tableJoined, final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, + joiner, + tableJoined, + materialized, + false + ); + } + + @Override + public KTable join(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final TableJoined tableJoined, + final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, joiner, tableJoined, materialized, @@ -959,9 +1017,25 @@ public KTable join(final KTable other, public KTable leftJoin(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, + joiner, + TableJoined.with(null, null), + Materialized.with(null, null), + true + ); + } + + @Override + public KTable leftJoin(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, joiner, TableJoined.with(null, null), Materialized.with(null, null), @@ -974,9 +1048,26 @@ public KTable leftJoin(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner, final TableJoined tableJoined) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, + joiner, + tableJoined, + Materialized.with(null, null), + true + ); + } + + @Override + public KTable leftJoin(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final TableJoined tableJoined) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, joiner, tableJoined, Materialized.with(null, null), @@ -990,9 +1081,26 @@ public KTable leftJoin(final KTable other, final ValueJoiner joiner, final TableJoined tableJoined, final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey( + other, + adaptedExtractor, + joiner, + tableJoined, + materialized, + true); + } + + @Override + public KTable leftJoin(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final TableJoined tableJoined, + final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); return doJoinOnForeignKey( other, - foreignKeyExtractor, + adaptedExtractor, joiner, tableJoined, materialized, @@ -1004,11 +1112,21 @@ public KTable leftJoin(final KTable other, final Function foreignKeyExtractor, final ValueJoiner joiner, final Materialized> materialized) { - return doJoinOnForeignKey(other, foreignKeyExtractor, joiner, TableJoined.with(null, null), materialized, true); + 
final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromFunction(foreignKeyExtractor); + return doJoinOnForeignKey(other, adaptedExtractor, joiner, TableJoined.with(null, null), materialized, true); + } + + @Override + public KTable leftJoin(final KTable other, + final BiFunction foreignKeyExtractor, + final ValueJoiner joiner, + final Materialized> materialized) { + final ForeignKeyExtractor adaptedExtractor = ForeignKeyExtractor.fromBiFunction(foreignKeyExtractor); + return doJoinOnForeignKey(other, adaptedExtractor, joiner, TableJoined.with(null, null), materialized, true); } private final Function>, Optional>> getPartition = maybeMulticastPartitions -> { - if (!maybeMulticastPartitions.isPresent()) { + if (maybeMulticastPartitions.isEmpty()) { return Optional.empty(); } if (maybeMulticastPartitions.get().size() != 1) { @@ -1017,10 +1135,9 @@ public KTable leftJoin(final KTable other, return maybeMulticastPartitions; }; - - @SuppressWarnings({"unchecked", "deprecation"}) + @SuppressWarnings({"unchecked", "deprecation", "resource"}) private KTable doJoinOnForeignKey(final KTable foreignKeyTable, - final Function foreignKeyExtractor, + final ForeignKeyExtractor foreignKeyExtractor, final ValueJoiner joiner, final TableJoined tableJoined, final Materialized> materialized, @@ -1119,43 +1236,40 @@ private KTable doJoinOnForeignKey(final KTable forei final String subscriptionStoreName = renamed .suffixWithOrElseGet("-subscription-store", builder, FK_JOIN_STATE_STORE_NAME); - builder.addStateStore( - new SubscriptionStoreFactory<>(subscriptionStoreName, subscriptionWrapperSerde)); + final StoreFactory subscriptionStoreFactory = + new SubscriptionStoreFactory<>(subscriptionStoreName, subscriptionWrapperSerde); final String subscriptionReceiveName = renamed.suffixWithOrElseGet( "-subscription-receive", builder, SUBSCRIPTION_PROCESSOR); - final StatefulProcessorNode> subscriptionReceiveNode = - new StatefulProcessorNode<>( + final ProcessorGraphNode> subscriptionReceiveNode = + new ProcessorGraphNode<>( subscriptionReceiveName, new ProcessorParameters<>( - new SubscriptionReceiveProcessorSupplier<>(subscriptionStoreName, combinedKeySchema), - subscriptionReceiveName), - new String[]{subscriptionStoreName} + new SubscriptionReceiveProcessorSupplier<>(subscriptionStoreFactory, combinedKeySchema), + subscriptionReceiveName) ); builder.addGraphNode(subscriptionSource, subscriptionReceiveNode); final KTableValueGetterSupplier foreignKeyValueGetter = ((KTableImpl) foreignKeyTable).valueGetterSupplier(); - final StatefulProcessorNode, Change>>> subscriptionJoinNode = - new StatefulProcessorNode<>( + final ProcessorToStateConnectorNode, Change>>> subscriptionJoinNode = + new ProcessorToStateConnectorNode<>( new ProcessorParameters<>( new SubscriptionJoinProcessorSupplier<>( foreignKeyValueGetter ), renamed.suffixWithOrElseGet("-subscription-join-foreign", builder, SUBSCRIPTION_PROCESSOR) ), - Collections.emptySet(), Collections.singleton(foreignKeyValueGetter) ); builder.addGraphNode(subscriptionReceiveNode, subscriptionJoinNode); final String foreignTableJoinName = renamed .suffixWithOrElseGet("-foreign-join-subscription", builder, SUBSCRIPTION_PROCESSOR); - final StatefulProcessorNode> foreignTableJoinNode = new ForeignTableJoinNode<>( + final ProcessorGraphNode> foreignTableJoinNode = new ForeignTableJoinNode<>( new ProcessorParameters<>( - new ForeignTableJoinProcessorSupplier<>(subscriptionStoreName, combinedKeySchema), + new ForeignTableJoinProcessorSupplier<>(subscriptionStoreFactory, 
combinedKeySchema), foreignTableJoinName - ), - new String[]{subscriptionStoreName} + ) ); builder.addGraphNode(((KTableImpl) foreignKeyTable).graphNode, foreignTableJoinNode); @@ -1196,7 +1310,7 @@ private KTable doJoinOnForeignKey(final KTable forei builder.internalTopologyBuilder.copartitionSources(resultSourceNodes); final KTableValueGetterSupplier primaryKeyValueGetter = valueGetterSupplier(); - final StatefulProcessorNode> responseJoinNode = new StatefulProcessorNode<>( + final ProcessorToStateConnectorNode> responseJoinNode = new ProcessorToStateConnectorNode<>( new ProcessorParameters<>( new ResponseJoinProcessorSupplier<>( primaryKeyValueGetter, @@ -1207,7 +1321,6 @@ private KTable doJoinOnForeignKey(final KTable forei ), renamed.suffixWithOrElseGet("-subscription-response-resolver", builder, SUBSCRIPTION_RESPONSE_RESOLVER_PROCESSOR) ), - Collections.emptySet(), Collections.singleton(primaryKeyValueGetter) ); builder.addGraphNode(foreignResponseSource, responseJoinNode); @@ -1227,21 +1340,14 @@ private KTable doJoinOnForeignKey(final KTable forei materializedInternal.withKeySerde(keySerde); } - final KTableSource resultProcessorSupplier = new KTableSource<>( - materializedInternal.storeName(), - materializedInternal.queryableStoreName() - ); - - final StoreFactory resultStore = - new KeyValueStoreMaterializer<>(materializedInternal); + final KTableSource resultProcessorSupplier = new KTableSource<>(materializedInternal); - final TableProcessorNode resultNode = new TableProcessorNode<>( + final ProcessorGraphNode resultNode = new ProcessorGraphNode<>( resultProcessorName, new ProcessorParameters<>( resultProcessorSupplier, resultProcessorName - ), - resultStore + ) ); resultNode.setOutputVersioned(materializedInternal.storeSupplier() instanceof VersionedBytesStoreSupplier); builder.addGraphNode(responseJoinNode, resultNode); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableAbstractJoin.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableAbstractJoin.java index 21339c0b64966..6db3388c81cbb 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableAbstractJoin.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableAbstractJoin.java @@ -17,6 +17,9 @@ package org.apache.kafka.streams.kstream.internals; import org.apache.kafka.streams.kstream.ValueJoiner; +import org.apache.kafka.streams.state.StoreBuilder; + +import java.util.Set; public abstract class KTableKTableAbstractJoin implements KTableProcessorSupplier { @@ -49,6 +52,11 @@ public final boolean enableSendingOldValues(final boolean forceMaterialization) return true; } + @Override + public Set> stores() { + return null; + } + public void setUseVersionedSemantics(final boolean useVersionedSemantics) { this.useVersionedSemantics = useVersionedSemantics; } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableJoinMerger.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableJoinMerger.java index 7924f8ea85725..bf9ddef3356a9 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableJoinMerger.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableKTableJoinMerger.java @@ -20,6 +20,8 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; +import 
org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; import java.util.Collections; @@ -34,14 +36,17 @@ public class KTableKTableJoinMerger implements KTableProcessorSupplier parent1; private final KTableProcessorSupplier parent2; private final String queryableName; + private final StoreFactory storeFactory; private boolean sendOldValues = false; KTableKTableJoinMerger(final KTableProcessorSupplier parent1, final KTableProcessorSupplier parent2, - final String queryableName) { + final String queryableName, + final StoreFactory storeFactory) { this.parent1 = parent1; this.parent2 = parent2; this.queryableName = queryableName; + this.storeFactory = storeFactory; } public String queryableName() { @@ -53,6 +58,13 @@ public Processor, K, Change> get() { return new KTableKTableJoinMergeProcessor(); } + @Override + public Set> stores() { + return storeFactory == null + ? null + : Set.of(new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public KTableValueGetterSupplier view() { // if the result KTable is materialized, use the materialized store to return getter value; @@ -90,13 +102,14 @@ public boolean enableSendingOldValues(final boolean forceMaterialization) { public static KTableKTableJoinMerger of(final KTableProcessorSupplier parent1, final KTableProcessorSupplier parent2) { - return of(parent1, parent2, null); + return of(parent1, parent2, null, null); } public static KTableKTableJoinMerger of(final KTableProcessorSupplier parent1, final KTableProcessorSupplier parent2, - final String queryableName) { - return new KTableKTableJoinMerger<>(parent1, parent2, queryableName); + final String queryableName, + final StoreFactory stores) { + return new KTableKTableJoinMerger<>(parent1, parent2, queryableName, stores); } private class KTableKTableJoinMergeProcessor extends ContextualProcessor, K, Change> { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableMapValues.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableMapValues.java index c26488c12a178..af495c9b9a34a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableMapValues.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableMapValues.java @@ -20,9 +20,14 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; import static org.apache.kafka.streams.state.internals.KeyValueStoreWrapper.PUT_RETURN_CODE_IS_LATEST; @@ -33,13 +38,16 @@ class KTableMapValues implements KTableProcessorSupplier mapper; private final String queryableName; private boolean sendOldValues = false; + private final StoreFactory storeFactory; KTableMapValues(final KTableImpl parent, final ValueMapperWithKey mapper, - final String queryableName) { + final String queryableName, + final StoreFactory storeFactory) { 
this.parent = parent; this.mapper = mapper; this.queryableName = queryableName; + this.storeFactory = storeFactory; } @Override @@ -47,6 +55,14 @@ public Processor, KIn, Change> get() { return new KTableMapValuesProcessor(); } + @Override + public Set> stores() { + if (storeFactory == null) { + return null; + } + return Collections.singleton(new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public KTableValueGetterSupplier view() { // if the KTable is materialized, use the materialized store to return getter value; diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableReduce.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableReduce.java index c577d30d984b3..d0b35098abe1d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableReduce.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableReduce.java @@ -16,14 +16,22 @@ */ package org.apache.kafka.streams.kstream.internals; +import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.Reducer; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; +import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; +import java.util.Collections; +import java.util.Set; + import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; import static org.apache.kafka.streams.state.internals.KeyValueStoreWrapper.PUT_RETURN_CODE_IS_LATEST; @@ -31,13 +39,17 @@ public class KTableReduce implements KTableProcessorSupplier { private final String storeName; + private final StoreFactory storeFactory; private final Reducer addReducer; private final Reducer removeReducer; private boolean sendOldValues = false; - KTableReduce(final String storeName, final Reducer addReducer, final Reducer removeReducer) { - this.storeName = storeName; + KTableReduce(final MaterializedInternal> materialized, + final Reducer addReducer, + final Reducer removeReducer) { + this.storeFactory = new KeyValueStoreMaterializer<>(materialized); + this.storeName = materialized.storeName(); this.addReducer = addReducer; this.removeReducer = removeReducer; } @@ -49,6 +61,11 @@ public boolean enableSendingOldValues(final boolean forceMaterialization) { return true; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(storeFactory)); + } + @Override public Processor, K, Change> get() { return new KTableReduceProcessor(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableSource.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableSource.java index b29a4fa51f133..e41f2bf06dd6a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableSource.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableSource.java @@ -17,12 +17,16 @@ package 
org.apache.kafka.streams.kstream.internals; import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; +import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.KeyValueStoreWrapper; @@ -30,6 +34,7 @@ import org.slf4j.LoggerFactory; import java.util.Objects; +import java.util.Set; import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; import static org.apache.kafka.streams.state.VersionedKeyValueStore.PUT_RETURN_CODE_NOT_PUT; @@ -40,15 +45,17 @@ public class KTableSource implements ProcessorSupplier> materialized) { + this.storeName = materialized.storeName(); Objects.requireNonNull(storeName, "storeName can't be null"); - - this.storeName = storeName; - this.queryableName = queryableName; + this.queryableName = materialized.queryableStoreName(); this.sendOldValues = false; + this.storeFactory = new KeyValueStoreMaterializer<>(materialized); } public String queryableName() { @@ -60,6 +67,15 @@ public Processor> get() { return new KTableSourceProcessor(); } + @Override + public Set> stores() { + if (materialized()) { + return Set.of(new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory)); + } else { + return null; + } + } + // when source ktable requires sending old values, we just // need to set the queryable name as the store name to enforce materialization public void enableSendingOldValues() { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KeyValueStoreMaterializer.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KeyValueStoreMaterializer.java index 3927e95c25ba0..d59d34e0e90cc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KeyValueStoreMaterializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KeyValueStoreMaterializer.java @@ -17,7 +17,6 @@ package org.apache.kafka.streams.kstream.internals; import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.DslKeyValueParams; import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier; import org.apache.kafka.streams.state.KeyValueStore; @@ -44,7 +43,7 @@ public KeyValueStoreMaterializer( } @Override - public StateStore build() { + public StoreBuilder builder() { final KeyValueBytesStoreSupplier supplier = materialized.storeSupplier() == null ? 
dslStoreSuppliers().keyValueStore(new DslKeyValueParams(materialized.storeName(), true)) : (KeyValueBytesStoreSupplier) materialized.storeSupplier(); @@ -77,7 +76,7 @@ public StateStore build() { } - return builder.build(); + return builder; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedInternal.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedInternal.java index cf6ce76f8d563..d6cd130ba6db6 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedInternal.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedInternal.java @@ -39,12 +39,19 @@ public MaterializedInternal(final Materialized materialized) { public MaterializedInternal(final Materialized materialized, final InternalNameProvider nameProvider, final String generatedStorePrefix) { + this(materialized, nameProvider, generatedStorePrefix, false); + } + + public MaterializedInternal(final Materialized materialized, + final InternalNameProvider nameProvider, + final String generatedStorePrefix, + final boolean forceQueryable) { super(materialized); // if storeName is not provided, the corresponding KTable would never be queryable; // but we still need to provide an internal name for it in case we materialize. - queryable = storeName() != null; - if (!queryable && nameProvider != null) { + queryable = forceQueryable || storeName() != null; + if (storeName() == null && nameProvider != null) { storeName = nameProvider.newStoreName(generatedStorePrefix); } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedStoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedStoreFactory.java index 99bd2e848b732..83cb6606790c2 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedStoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/MaterializedStoreFactory.java @@ -16,8 +16,10 @@ */ package org.apache.kafka.streams.kstream.internals; +import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.state.StoreSupplier; import java.util.Map; @@ -39,10 +41,26 @@ public boolean loggingEnabled() { } @Override - public String name() { + public String storeName() { return materialized.storeName(); } + public String queryableStoreName() { + return materialized.queryableStoreName(); + } + + public Serde keySerde() { + return materialized.keySerde(); + } + + public Serde valueSerde() { + return materialized.valueSerde(); + } + + public StoreSupplier storeSupplier() { + return materialized.storeSupplier(); + } + @Override public Map logConfig() { return materialized.logConfig(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/OuterStreamJoinStoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/OuterStreamJoinStoreFactory.java index d864698408b31..645858d1a65d7 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/OuterStreamJoinStoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/OuterStreamJoinStoreFactory.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.kstream.JoinWindows; -import org.apache.kafka.streams.processor.StateStore; import 
org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.DslKeyValueParams; import org.apache.kafka.streams.state.DslStoreSuppliers; @@ -73,7 +72,7 @@ public OuterStreamJoinStoreFactory( } @Override - public StateStore build() { + public StoreBuilder builder() { final Duration retentionPeriod = Duration.ofMillis(retentionPeriod()); final Duration windowSize = Duration.ofMillis(windows.size()); final String rpMsgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod"); @@ -135,7 +134,7 @@ public StateStore build() { builder.withLoggingDisabled(); } - return builder.build(); + return builder; } @Override @@ -155,7 +154,7 @@ public boolean loggingEnabled() { } @Override - public String name() { + public String storeName() { return name; } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionStoreMaterializer.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionStoreMaterializer.java index 9f63b3fc27991..a5317f488809e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionStoreMaterializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionStoreMaterializer.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.EmitStrategy; import org.apache.kafka.streams.kstream.SessionWindows; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.DslSessionParams; import org.apache.kafka.streams.state.SessionBytesStoreSupplier; import org.apache.kafka.streams.state.SessionStore; @@ -58,7 +57,7 @@ public SessionStoreMaterializer( } @Override - public StateStore build() { + public StoreBuilder builder() { final SessionBytesStoreSupplier supplier = materialized.storeSupplier() == null ? 
dslStoreSuppliers().sessionStore(new DslSessionParams( materialized.storeName(), @@ -85,7 +84,7 @@ public StateStore build() { builder.withCachingDisabled(); } - return builder.build(); + return builder; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java index d8f3770b79aab..0c0f557b5c9b5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImpl.java @@ -32,6 +32,7 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.WindowedSerdes; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.SessionStore; import java.util.Objects; @@ -108,12 +109,16 @@ private KTable, Long> doCount(final Named named, } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); - return aggregateBuilder.build( + final StoreFactory storeFactory = new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy); + final long gracePeriod = windows.gracePeriodMs() + windows.inactivityGap(); + + return aggregateBuilder.buildWindowed( new NamedInternal(aggregateName), - new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + gracePeriod, new KStreamSessionWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator, @@ -158,12 +163,16 @@ public KTable, V> reduce(final Reducer reducer, } final String reduceName = new NamedInternal(named).orElseGenerateWithPrefix(builder, REDUCE_NAME); - return aggregateBuilder.build( + final StoreFactory storeFactory = new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy); + final long gracePeriod = windows.gracePeriodMs() + windows.inactivityGap(); + + return aggregateBuilder.buildWindowed( new NamedInternal(reduceName), - new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + gracePeriod, new KStreamSessionWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, aggregateBuilder.reduceInitializer, reduceAggregator, @@ -216,13 +225,16 @@ public KTable, VR> aggregate(final Initializer initializer, } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final StoreFactory storeFactory = new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy); + final long gracePeriod = windows.gracePeriodMs() + windows.inactivityGap(); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(aggregateName), - new SessionStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + gracePeriod, new KStreamSessionWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, initializer, aggregator, diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowStoreMaterializer.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowStoreMaterializer.java index cea18f96d378b..0aca2643be712 100644 
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowStoreMaterializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowStoreMaterializer.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.EmitStrategy; import org.apache.kafka.streams.kstream.SlidingWindows; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.DslWindowParams; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; @@ -58,7 +57,7 @@ public SlidingWindowStoreMaterializer( } @Override - public StateStore build() { + public StoreBuilder builder() { final WindowBytesStoreSupplier supplier = materialized.storeSupplier() == null ? dslStoreSuppliers().windowStore(new DslWindowParams( materialized.storeName(), @@ -91,7 +90,7 @@ public StateStore build() { builder.withCachingDisabled(); } - return builder.build(); + return builder; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImpl.java index 3cb7b3f29bdcc..c2af4652f8fbc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImpl.java @@ -30,6 +30,7 @@ import org.apache.kafka.streams.kstream.TimeWindowedKStream; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.WindowStore; import java.util.Objects; @@ -90,11 +91,13 @@ private KTable, Long> doCount(final Named named, } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final StoreFactory storeFactory = new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(aggregateName), - new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), - new KStreamSlidingWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), + storeFactory.storeName(), + windows.gracePeriodMs(), + new KStreamSlidingWindowAggregate<>(windows, storeFactory, emitStrategy, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? 
new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.timeDifferenceMs()) : null, materializedInternal.valueSerde(), @@ -135,11 +138,13 @@ public KTable, VR> aggregate(final Initializer initializer, materializedInternal.withKeySerde(keySerde); } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final StoreFactory storeFactory = new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(aggregateName), - new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), - new KStreamSlidingWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, initializer, aggregator), + storeFactory.storeName(), + windows.gracePeriodMs(), + new KStreamSlidingWindowAggregate<>(windows, storeFactory, emitStrategy, initializer, aggregator), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.timeDifferenceMs()) : null, materializedInternal.valueSerde(), @@ -181,11 +186,13 @@ public KTable, V> reduce(final Reducer reducer, } final String reduceName = new NamedInternal(named).orElseGenerateWithPrefix(builder, REDUCE_NAME); + final StoreFactory storeFactory = new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(reduceName), - new SlidingWindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), - new KStreamSlidingWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, aggregateBuilder.reduceInitializer, aggregatorForReducer(reducer)), + storeFactory.storeName(), + windows.gracePeriodMs(), + new KStreamSlidingWindowAggregate<>(windows, storeFactory, emitStrategy, aggregateBuilder.reduceInitializer, aggregatorForReducer(reducer)), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.timeDifferenceMs()) : null, materializedInternal.valueSerde(), diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamJoinedStoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamJoinedStoreFactory.java index b6e969572c85d..4da99a71d61fa 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamJoinedStoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamJoinedStoreFactory.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.streams.kstream.EmitStrategy; import org.apache.kafka.streams.kstream.JoinWindows; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.DslWindowParams; import org.apache.kafka.streams.state.StoreBuilder; @@ -81,7 +80,7 @@ public StreamJoinedStoreFactory( } @Override - public StateStore build() { + public StoreBuilder builder() { final WindowBytesStoreSupplier supplier = storeSupplier == null ? 
dslStoreSuppliers().windowStore(new DslWindowParams( this.name, @@ -106,7 +105,7 @@ public StateStore build() { builder.withLoggingDisabled(); } - return builder.build(); + return builder; } @Override @@ -126,7 +125,7 @@ public boolean loggingEnabled() { } @Override - public String name() { + public String storeName() { return name; } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamStreamJoinUtil.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamStreamJoinUtil.java index ab23663bbc732..3e2c97f6e24a5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamStreamJoinUtil.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/StreamStreamJoinUtil.java @@ -26,7 +26,7 @@ public final class StreamStreamJoinUtil { - private StreamStreamJoinUtil(){ + private StreamStreamJoinUtil() { } public static boolean skipRecord( diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SubscriptionStoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SubscriptionStoreFactory.java index f3c424efb3e10..10c8a5e110c5d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SubscriptionStoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/SubscriptionStoreFactory.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.SubscriptionWrapper; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.DslKeyValueParams; import org.apache.kafka.streams.state.StoreBuilder; @@ -45,7 +44,7 @@ public SubscriptionStoreFactory( } @Override - public StateStore build() { + public StoreBuilder builder() { StoreBuilder builder; builder = Stores.timestampedKeyValueStoreBuilder( dslStoreSuppliers().keyValueStore(new DslKeyValueParams(name, true)), @@ -58,7 +57,7 @@ public StateStore build() { builder = builder.withLoggingDisabled(); } builder = builder.withCachingDisabled(); - return builder.build(); + return builder; } @Override @@ -78,7 +77,7 @@ public boolean loggingEnabled() { } @Override - public String name() { + public String storeName() { return name; } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java index b615e20714b58..5240f6f0ef095 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java @@ -33,6 +33,7 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.Windows; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.WindowStore; import java.util.Objects; @@ -102,13 +103,15 @@ private KTable, Long> doCount(final Named named, } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final StoreFactory storeFactory = new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new 
NamedInternal(aggregateName), - new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + windows.gracePeriodMs(), new KStreamWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), @@ -154,13 +157,15 @@ public KTable, VR> aggregate(final Initializer initializer, } final String aggregateName = new NamedInternal(named).orElseGenerateWithPrefix(builder, AGGREGATE_NAME); + final StoreFactory storeFactory = new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(aggregateName), - new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + windows.gracePeriodMs(), new KStreamWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, initializer, aggregator), @@ -205,13 +210,15 @@ public KTable, V> reduce(final Reducer reducer, } final String reduceName = new NamedInternal(named).orElseGenerateWithPrefix(builder, REDUCE_NAME); + final StoreFactory storeFactory = new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy); - return aggregateBuilder.build( + return aggregateBuilder.buildWindowed( new NamedInternal(reduceName), - new WindowStoreMaterializer<>(materializedInternal, windows, emitStrategy), + storeFactory.storeName(), + windows.gracePeriodMs(), new KStreamWindowAggregate<>( windows, - materializedInternal.storeName(), + storeFactory, emitStrategy, aggregateBuilder.reduceInitializer, aggregatorForReducer(reducer)), diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/WindowStoreMaterializer.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/WindowStoreMaterializer.java index eabce874f701c..2b9f3d3381481 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/WindowStoreMaterializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/WindowStoreMaterializer.java @@ -19,7 +19,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.EmitStrategy; import org.apache.kafka.streams.kstream.Windows; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.DslWindowParams; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; @@ -56,7 +55,7 @@ public WindowStoreMaterializer( } @Override - public StateStore build() { + public StoreBuilder builder() { final WindowBytesStoreSupplier supplier = materialized.storeSupplier() == null ? dslStoreSuppliers().windowStore(new DslWindowParams( materialized.storeName(), @@ -85,7 +84,7 @@ public StateStore build() { builder.withCachingEnabled(); } - return builder.build(); + return builder; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignKeyExtractor.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignKeyExtractor.java new file mode 100644 index 0000000000000..481182ee174e6 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignKeyExtractor.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
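Aside on the hunks above: the windowed DSL implementations now hand `aggregateBuilder.buildWindowed(...)` a store name and grace period resolved from the `StoreFactory` (the `WindowStoreMaterializer` / `SlidingWindowStoreMaterializer`) instead of passing the materializer itself. A minimal sketch of the public DSL call that flows through this path; the topic and store names are illustrative and not taken from this patch.

```java
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.WindowStore;

import java.time.Duration;

public class WindowedCountSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        // The grace period declared here is what TimeWindowedKStreamImpl now passes to
        // aggregateBuilder.buildWindowed(...) as windows.gracePeriodMs(), alongside the
        // store name resolved from the StoreFactory ("click-counts" in this sketch).
        final KTable<Windowed<String>, Long> counts = builder
                .<String, String>stream("clicks")
                .groupByKey()
                .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)))
                .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("click-counts"));

        System.out.println(builder.build().describe());
    }
}
```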
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.kstream.internals.foreignkeyjoin; + +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * An interface for extracting foreign keys from input records during foreign key joins in Kafka Streams. + * This extractor is used to determine the key of the foreign table to join with based on the primary + * table's record key and value. + *

+ * <p>
+ * The interface provides two factory methods:
+ * <ul>
+ *     <li>{@link #fromFunction(Function)} - when the foreign key depends only on the value</li>
+ *     <li>{@link #fromBiFunction(BiFunction)} - when the foreign key depends on both key and value</li>
+ * </ul>
              + * + * @param Type of primary table's key + * @param Type of primary table's value + * @param Type of the foreign key to extract + */ +@FunctionalInterface +public interface ForeignKeyExtractor { + KO extract(K key, V value); + + static ForeignKeyExtractor fromFunction(Function function) { + return (key, value) -> function.apply(value); + } + + static ForeignKeyExtractor fromBiFunction(BiFunction biFunction) { + return biFunction::apply; + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplier.java index 7c3e982a8ede3..a8b8228ed552f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplier.java @@ -28,8 +28,11 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.TaskMetrics; import org.apache.kafka.streams.state.KeyValueIterator; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.TimestampedKeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; @@ -37,22 +40,27 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Set; public class ForeignTableJoinProcessorSupplier implements ProcessorSupplier, K, SubscriptionResponseWrapper> { private static final Logger LOG = LoggerFactory.getLogger(ForeignTableJoinProcessorSupplier.class); - private final String storeName; + private final StoreFactory subscriptionStoreFactory; private final CombinedKeySchema keySchema; private boolean useVersionedSemantics = false; - public ForeignTableJoinProcessorSupplier( - final String storeName, - final CombinedKeySchema keySchema) { - - this.storeName = storeName; + public ForeignTableJoinProcessorSupplier(final StoreFactory subscriptionStoreFactory, + final CombinedKeySchema keySchema) { + this.subscriptionStoreFactory = subscriptionStoreFactory; this.keySchema = keySchema; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(subscriptionStoreFactory)); + } + @Override public Processor, K, SubscriptionResponseWrapper> get() { return new KTableKTableJoinProcessor(); @@ -80,7 +88,7 @@ public void init(final ProcessorContext> cont internalProcessorContext.taskId().toString(), internalProcessorContext.metrics() ); - subscriptionStore = internalProcessorContext.getStateStore(storeName); + subscriptionStore = internalProcessorContext.getStateStore(subscriptionStoreFactory.storeName()); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplier.java index a935797ad180e..825283e98f581 100644 --- 
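To make the contract of the new internal `ForeignKeyExtractor` concrete, here is a small sketch of its two factory methods, assuming the generic parameters `<K, V, KO>` implied by `KO extract(K key, V value)`. The `Order` record and all names are hypothetical and only for illustration; the interface itself lives in an internals package and is not public API.

```java
import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.ForeignKeyExtractor;

public class ForeignKeyExtractorSketch {

    // Hypothetical value type, for illustration only.
    record Order(String customerId) { }

    public static void main(final String[] args) {
        // Foreign key derived from the value alone (the behaviour of the old Function-based extractor).
        final ForeignKeyExtractor<String, Order, String> byValue =
                ForeignKeyExtractor.fromFunction(Order::customerId);

        // Foreign key derived from both the primary key and the value (the new capability).
        final ForeignKeyExtractor<String, Order, String> byKeyAndValue =
                ForeignKeyExtractor.fromBiFunction((orderId, order) -> order.customerId() + "#" + orderId);

        // SubscriptionSendProcessorSupplier now calls extract(key, value) rather than Function#apply(value).
        System.out.println(byValue.extract("order-1", new Order("cust-42")));        // cust-42
        System.out.println(byKeyAndValue.extract("order-1", new Order("cust-42")));  // cust-42#order-1
    }
}
```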
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplier.java @@ -28,32 +28,42 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.api.RecordMetadata; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.processor.internals.StoreFactory.FactoryWrappingStoreBuilder; import org.apache.kafka.streams.processor.internals.metrics.TaskMetrics; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.TimestampedKeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; +import java.util.Set; + public class SubscriptionReceiveProcessorSupplier implements ProcessorSupplier, CombinedKey, Change>>> { private static final Logger LOG = LoggerFactory.getLogger(SubscriptionReceiveProcessorSupplier.class); - private final String storeName; + private final StoreFactory subscriptionStoreFactory; private final CombinedKeySchema keySchema; - public SubscriptionReceiveProcessorSupplier( - final String storeName, - final CombinedKeySchema keySchema) { + public SubscriptionReceiveProcessorSupplier(final StoreFactory subscriptionStoreFactory, + final CombinedKeySchema keySchema) { - this.storeName = storeName; + this.subscriptionStoreFactory = subscriptionStoreFactory; this.keySchema = keySchema; } + @Override + public Set> stores() { + return Collections.singleton(new FactoryWrappingStoreBuilder<>(subscriptionStoreFactory)); + } + @Override public Processor, CombinedKey, Change>>> get() { - return new ContextualProcessor, CombinedKey, Change>>>() { + return new ContextualProcessor<>() { private TimestampedKeyValueStore> store; private Sensor droppedRecordsSensor; @@ -68,7 +78,7 @@ public void init(final ProcessorContext, Change implements ProcessorSupplier, KO, SubscriptionWrapper> { private static final Logger LOG = LoggerFactory.getLogger(SubscriptionSendProcessorSupplier.class); - private final Function foreignKeyExtractor; + private final ForeignKeyExtractor foreignKeyExtractor; private final Supplier foreignKeySerdeTopicSupplier; private final Supplier valueSerdeTopicSupplier; private final boolean leftJoin; @@ -55,7 +54,7 @@ public class SubscriptionSendProcessorSupplier implements ProcessorSup private Serializer valueSerializer; private boolean useVersionedSemantics; - public SubscriptionSendProcessorSupplier(final Function foreignKeyExtractor, + public SubscriptionSendProcessorSupplier(final ForeignKeyExtractor foreignKeyExtractor, final Supplier foreignKeySerdeTopicSupplier, final Supplier valueSerdeTopicSupplier, final Serde foreignKeySerde, @@ -129,27 +128,27 @@ public void process(final Record> record) { private void leftJoinInstructions(final Record> record) { if (record.value().oldValue != null) { - final KO oldForeignKey = foreignKeyExtractor.apply(record.value().oldValue); - final KO newForeignKey = record.value().newValue == null ? null : foreignKeyExtractor.apply(record.value().newValue); + final KO oldForeignKey = foreignKeyExtractor.extract(record.key(), record.value().oldValue); + final KO newForeignKey = record.value().newValue == null ? 
null : foreignKeyExtractor.extract(record.key(), record.value().newValue); if (oldForeignKey != null && !Arrays.equals(serialize(newForeignKey), serialize(oldForeignKey))) { forward(record, oldForeignKey, DELETE_KEY_AND_PROPAGATE); } forward(record, newForeignKey, PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE); } else if (record.value().newValue != null) { - final KO newForeignKey = foreignKeyExtractor.apply(record.value().newValue); + final KO newForeignKey = foreignKeyExtractor.extract(record.key(), record.value().newValue); forward(record, newForeignKey, PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE); } } private void defaultJoinInstructions(final Record> record) { if (record.value().oldValue != null) { - final KO oldForeignKey = record.value().oldValue == null ? null : foreignKeyExtractor.apply(record.value().oldValue); + final KO oldForeignKey = record.value().oldValue == null ? null : foreignKeyExtractor.extract(record.key(), record.value().oldValue); if (oldForeignKey == null) { logSkippedRecordDueToNullForeignKey(); return; } if (record.value().newValue != null) { - final KO newForeignKey = record.value().newValue == null ? null : foreignKeyExtractor.apply(record.value().newValue); + final KO newForeignKey = record.value().newValue == null ? null : foreignKeyExtractor.extract(record.key(), record.value().newValue); if (newForeignKey == null) { logSkippedRecordDueToNullForeignKey(); return; @@ -167,7 +166,7 @@ private void defaultJoinInstructions(final Record> record) { forward(record, oldForeignKey, DELETE_KEY_AND_PROPAGATE); } } else if (record.value().newValue != null) { - final KO newForeignKey = foreignKeyExtractor.apply(record.value().newValue); + final KO newForeignKey = foreignKeyExtractor.extract(record.key(), record.value().newValue); if (newForeignKey == null) { logSkippedRecordDueToNullForeignKey(); } else { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/BaseJoinProcessorNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/BaseJoinProcessorNode.java index 0c0dcb3bec9df..128de320f2bbb 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/BaseJoinProcessorNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/BaseJoinProcessorNode.java @@ -32,7 +32,6 @@ abstract class BaseJoinProcessorNode extends GraphNode { private final String thisJoinSideNodeName; private final String otherJoinSideNodeName; - BaseJoinProcessorNode(final String nodeName, final ValueJoinerWithKey valueJoiner, final ProcessorParameters joinThisProcessorParameters, diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignJoinSubscriptionSendNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignJoinSubscriptionSendNode.java index 4efbd9b29f1c1..afd9ee1e64d8d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignJoinSubscriptionSendNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignJoinSubscriptionSendNode.java @@ -22,7 +22,7 @@ public class ForeignJoinSubscriptionSendNode extends ProcessorGraphNode implements VersionedSemanticsGraphNode { public ForeignJoinSubscriptionSendNode(final ProcessorParameters processorParameters) { - super(processorParameters); + super(processorParameters.processorName(), processorParameters); } @SuppressWarnings("unchecked") diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignTableJoinNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignTableJoinNode.java index 16f096e820805..f187ea6305603 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignTableJoinNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ForeignTableJoinNode.java @@ -19,14 +19,12 @@ import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.ForeignTableJoinProcessorSupplier; import org.apache.kafka.streams.processor.api.ProcessorSupplier; -public class ForeignTableJoinNode extends StatefulProcessorNode implements VersionedSemanticsGraphNode { +public class ForeignTableJoinNode extends ProcessorGraphNode implements VersionedSemanticsGraphNode { - public ForeignTableJoinNode(final ProcessorParameters processorParameters, - final String[] storeNames) { - super(processorParameters.processorName(), processorParameters, storeNames); + public ForeignTableJoinNode(final ProcessorParameters processorParameters) { + super(processorParameters.processorName(), processorParameters); } - @SuppressWarnings("unchecked") @Override public void enableVersionedSemantics(final boolean useVersionedSemantics, final String parentNodeName) { final ProcessorSupplier processorSupplier = processorParameters().processorSupplier(); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GlobalStoreNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GlobalStoreNode.java index df6e7c263e6f1..a9093ad47701a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GlobalStoreNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GlobalStoreNode.java @@ -20,8 +20,11 @@ import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; +import org.apache.kafka.streams.processor.internals.StoreDelegatingProcessorSupplier; import org.apache.kafka.streams.processor.internals.StoreFactory; +import java.util.Set; + public class GlobalStoreNode extends StateStoreNode { private final String sourceName; @@ -52,15 +55,16 @@ public GlobalStoreNode(final StoreFactory storeBuilder, @Override public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { storeBuilder.withLoggingDisabled(); - topologyBuilder.addGlobalStore(storeBuilder, - sourceName, + topologyBuilder.addGlobalStore(sourceName, consumed.timestampExtractor(), consumed.keyDeserializer(), consumed.valueDeserializer(), topic, processorName, - stateUpdateSupplier, - reprocessOnRestore); + new StoreDelegatingProcessorSupplier<>( + stateUpdateSupplier, + Set.of(new StoreFactory.FactoryWrappingStoreBuilder<>(storeBuilder)) + ), reprocessOnRestore); } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GracePeriodGraphNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GracePeriodGraphNode.java new file mode 100644 index 0000000000000..c6ed537fd0bf9 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GracePeriodGraphNode.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.streams.kstream.internals.graph; + +/** + * Represents a stateful {@link ProcessorGraphNode} where a semantic grace period is defined for the processor + * and its state. + */ +public class GracePeriodGraphNode extends ProcessorGraphNode { + + private final long gracePeriod; + + public GracePeriodGraphNode(final String nodeName, + final ProcessorParameters processorParameters, + final long gracePeriod) { + super(nodeName, processorParameters); + this.gracePeriod = gracePeriod; + } + + public long gracePeriod() { + return gracePeriod; + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtil.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtil.java index 66ffdc003ae30..09ed36284a811 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtil.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtil.java @@ -17,13 +17,6 @@ package org.apache.kafka.streams.kstream.internals.graph; import org.apache.kafka.streams.errors.TopologyException; -import org.apache.kafka.streams.kstream.SessionWindows; -import org.apache.kafka.streams.kstream.SlidingWindows; -import org.apache.kafka.streams.kstream.Windows; -import org.apache.kafka.streams.kstream.internals.KStreamSessionWindowAggregate; -import org.apache.kafka.streams.kstream.internals.KStreamSlidingWindowAggregate; -import org.apache.kafka.streams.kstream.internals.KStreamWindowAggregate; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; public final class GraphGraceSearchUtil { private GraphGraceSearchUtil() {} @@ -32,6 +25,7 @@ public static long findAndVerifyWindowGrace(final GraphNode graphNode) { return findAndVerifyWindowGrace(graphNode, ""); } + @SuppressWarnings("rawtypes") private static long findAndVerifyWindowGrace(final GraphNode graphNode, final String chain) { // error base case: we traversed off the end of the graph without finding a window definition if (graphNode == null) { @@ -40,11 +34,8 @@ private static long findAndVerifyWindowGrace(final GraphNode graphNode, final St ); } // base case: return if this node defines a grace period. - { - final Long gracePeriod = extractGracePeriod(graphNode); - if (gracePeriod != null) { - return gracePeriod; - } + if (graphNode instanceof GracePeriodGraphNode) { + return ((GracePeriodGraphNode) graphNode).gracePeriod(); } final String newChain = chain.equals("") ? 
graphNode.nodeName() : graphNode.nodeName() + "->" + chain; @@ -70,27 +61,4 @@ private static long findAndVerifyWindowGrace(final GraphNode graphNode, final St return inheritedGrace; } - @SuppressWarnings("rawtypes") - private static Long extractGracePeriod(final GraphNode node) { - if (node instanceof StatefulProcessorNode) { - final ProcessorSupplier processorSupplier = ((StatefulProcessorNode) node).processorParameters().processorSupplier(); - if (processorSupplier instanceof KStreamWindowAggregate) { - final KStreamWindowAggregate kStreamWindowAggregate = (KStreamWindowAggregate) processorSupplier; - final Windows windows = kStreamWindowAggregate.windows(); - return windows.gracePeriodMs(); - } else if (processorSupplier instanceof KStreamSessionWindowAggregate) { - final KStreamSessionWindowAggregate kStreamSessionWindowAggregate = (KStreamSessionWindowAggregate) processorSupplier; - final SessionWindows windows = kStreamSessionWindowAggregate.windows(); - return windows.gracePeriodMs() + windows.inactivityGap(); - } else if (processorSupplier instanceof KStreamSlidingWindowAggregate) { - final KStreamSlidingWindowAggregate kStreamSlidingWindowAggregate = (KStreamSlidingWindowAggregate) processorSupplier; - final SlidingWindows windows = kStreamSlidingWindowAggregate.windows(); - return windows.gracePeriodMs(); - } else { - return null; - } - } else { - return null; - } - } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/KTableKTableJoinNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/KTableKTableJoinNode.java index 6cfdd53784cca..275a88b767668 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/KTableKTableJoinNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/KTableKTableJoinNode.java @@ -21,10 +21,8 @@ import org.apache.kafka.streams.kstream.internals.Change; import org.apache.kafka.streams.kstream.internals.KTableKTableAbstractJoin; import org.apache.kafka.streams.kstream.internals.KTableKTableJoinMerger; -import org.apache.kafka.streams.kstream.internals.KTableProcessorSupplier; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; import java.util.Arrays; @@ -37,7 +35,6 @@ public class KTableKTableJoinNode extends BaseJoinProcessorNode valueSerde; private final String[] joinThisStoreNames; private final String[] joinOtherStoreNames; - private final StoreFactory storeFactory; KTableKTableJoinNode(final String nodeName, final ProcessorParameters, ?, ?> joinThisProcessorParameters, @@ -48,8 +45,7 @@ public class KTableKTableJoinNode extends BaseJoinProcessorNode keySerde, final Serde valueSerde, final String[] joinThisStoreNames, - final String[] joinOtherStoreNames, - final StoreFactory storeFactory) { + final String[] joinOtherStoreNames) { super(nodeName, null, @@ -63,7 +59,6 @@ public class KTableKTableJoinNode extends BaseJoinProcessorNode keySerde() { @@ -120,30 +115,13 @@ private void enableVersionedSemantics(final ProcessorParameters proc public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { final String thisProcessorName = thisProcessorParameters().processorName(); final String otherProcessorName = otherProcessorParameters().processorName(); - final String mergeProcessorName = mergeProcessorParameters().processorName(); - topologyBuilder.addProcessor( - 
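For reference, a sketch of where the grace values now carried by `GracePeriodGraphNode` originate in the public API. The session-window arithmetic mirrors the removed `extractGracePeriod()` helper, which added `inactivityGap()` to `gracePeriodMs()`; which node records that combined value for session aggregations is not shown in these hunks, so that part is an assumption.

```java
import org.apache.kafka.streams.kstream.SessionWindows;
import org.apache.kafka.streams.kstream.TimeWindows;

import java.time.Duration;

public class GracePeriodSketch {
    public static void main(final String[] args) {
        // Time/sliding-style windows: the grace period is used as-is.
        final TimeWindows timeWindows =
                TimeWindows.ofSizeAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1));
        System.out.println(timeWindows.gracePeriodMs());   // 60000

        // Session windows: the removed extractGracePeriod() helper returned
        // gracePeriodMs() + inactivityGap(), and the session aggregation node is
        // presumably expected to record that combined value on its GracePeriodGraphNode.
        final SessionWindows sessionWindows =
                SessionWindows.ofInactivityGapAndGrace(Duration.ofSeconds(30), Duration.ofSeconds(10));
        System.out.println(sessionWindows.gracePeriodMs() + sessionWindows.inactivityGap());  // 40000
    }
}
```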
thisProcessorName, - thisProcessorParameters().processorSupplier(), - thisJoinSideNodeName()); - - topologyBuilder.addProcessor( - otherProcessorName, - otherProcessorParameters().processorSupplier(), - otherJoinSideNodeName()); - - topologyBuilder.addProcessor( - mergeProcessorName, - mergeProcessorParameters().processorSupplier(), - thisProcessorName, - otherProcessorName); + thisProcessorParameters().addProcessorTo(topologyBuilder, thisJoinSideNodeName()); + otherProcessorParameters().addProcessorTo(topologyBuilder, otherJoinSideNodeName()); + mergeProcessorParameters().addProcessorTo(topologyBuilder, thisProcessorName, otherProcessorName); topologyBuilder.connectProcessorAndStateStores(thisProcessorName, joinOtherStoreNames); topologyBuilder.connectProcessorAndStateStores(otherProcessorName, joinThisStoreNames); - - if (storeFactory != null) { - topologyBuilder.addStateStore(storeFactory, mergeProcessorName); - } } @Override @@ -168,8 +146,8 @@ public static final class KTableKTableJoinNodeBuilder { private Serde valueSerde; private String[] joinThisStoreNames; private String[] joinOtherStoreNames; - private String queryableStoreName; - private StoreFactory storeFactory; + private ProcessorParameters, ?, ?> + joinMergeProcessorParameters; private KTableKTableJoinNodeBuilder() { } @@ -219,35 +197,23 @@ public KTableKTableJoinNodeBuilder withJoinOtherStoreNames(final return this; } - public KTableKTableJoinNodeBuilder withQueryableStoreName(final String queryableStoreName) { - this.queryableStoreName = queryableStoreName; - return this; - } - - public KTableKTableJoinNodeBuilder withStoreBuilder(final StoreFactory storeFactory) { - this.storeFactory = storeFactory; + public KTableKTableJoinNodeBuilder withMergeProcessorParameters(final ProcessorParameters, ?, ?> joinMergeProcessorParameters) { + this.joinMergeProcessorParameters = joinMergeProcessorParameters; return this; } - @SuppressWarnings("unchecked") public KTableKTableJoinNode build() { return new KTableKTableJoinNode<>( nodeName, joinThisProcessorParameters, joinOtherProcessorParameters, - new ProcessorParameters<>( - KTableKTableJoinMerger.of( - (KTableProcessorSupplier) joinThisProcessorParameters.processorSupplier(), - (KTableProcessorSupplier) joinOtherProcessorParameters.processorSupplier(), - queryableStoreName), - nodeName), + joinMergeProcessorParameters, thisJoinSide, otherJoinSide, keySerde, valueSerde, joinThisStoreNames, - joinOtherStoreNames, - storeFactory + joinOtherStoreNames ); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorGraphNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorGraphNode.java index 1c8e8cace2b30..514676af1f6d4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorGraphNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorGraphNode.java @@ -28,13 +28,6 @@ public class ProcessorGraphNode extends GraphNode { private final ProcessorParameters processorParameters; - public ProcessorGraphNode(final ProcessorParameters processorParameters) { - - super(processorParameters.processorName()); - - this.processorParameters = processorParameters; - } - public ProcessorGraphNode(final String nodeName, final ProcessorParameters processorParameters) { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorParameters.java 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorParameters.java index 563dad8897a7d..7cfbc94533e0a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorParameters.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorParameters.java @@ -17,12 +17,14 @@ package org.apache.kafka.streams.kstream.internals.graph; +import org.apache.kafka.streams.internals.ApiUtils; import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.ProcessorAdapter; import org.apache.kafka.streams.state.StoreBuilder; +import java.util.Set; + /** * Class used to represent a {@link ProcessorSupplier} or {@link FixedKeyProcessorSupplier} and the name * used to register it with the {@link org.apache.kafka.streams.processor.internals.InternalTopologyBuilder} @@ -35,26 +37,12 @@ */ public class ProcessorParameters { - // During the transition to KIP-478, we capture arguments passed from the old API to simplify - // the performance of casts that we still need to perform. This will eventually be removed. - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. - private final org.apache.kafka.streams.processor.ProcessorSupplier oldProcessorSupplier; private final ProcessorSupplier processorSupplier; private final FixedKeyProcessorSupplier fixedKeyProcessorSupplier; private final String processorName; - @SuppressWarnings("deprecation") // Old PAPI compatibility. - public ProcessorParameters(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier, - final String processorName) { - oldProcessorSupplier = processorSupplier; - this.processorSupplier = () -> ProcessorAdapter.adapt(processorSupplier.get()); - fixedKeyProcessorSupplier = null; - this.processorName = processorName; - } - public ProcessorParameters(final ProcessorSupplier processorSupplier, final String processorName) { - oldProcessorSupplier = null; this.processorSupplier = processorSupplier; fixedKeyProcessorSupplier = null; this.processorName = processorName; @@ -62,7 +50,6 @@ public ProcessorParameters(final ProcessorSupplier process public ProcessorParameters(final FixedKeyProcessorSupplier processorSupplier, final String processorName) { - oldProcessorSupplier = null; this.processorSupplier = null; fixedKeyProcessorSupplier = processorSupplier; this.processorName = processorName; @@ -76,32 +63,36 @@ public FixedKeyProcessorSupplier fixedKeyProcessorSupplier() { return fixedKeyProcessorSupplier; } - public void addProcessorTo(final InternalTopologyBuilder topologyBuilder, final String[] parentNodeNames) { + public void addProcessorTo(final InternalTopologyBuilder topologyBuilder, final String... 
parentNodeNames) { if (processorSupplier != null) { - topologyBuilder.addProcessor(processorName, processorSupplier, parentNodeNames); - if (processorSupplier.stores() != null) { - for (final StoreBuilder storeBuilder : processorSupplier.stores()) { + ApiUtils.checkSupplier(processorSupplier); + + final ProcessorSupplier wrapped = + topologyBuilder.wrapProcessorSupplier(processorName, processorSupplier); + + topologyBuilder.addProcessor(processorName, wrapped, parentNodeNames); + final Set> stores = wrapped.stores(); + if (stores != null) { + for (final StoreBuilder storeBuilder : stores) { topologyBuilder.addStateStore(storeBuilder, processorName); } } } if (fixedKeyProcessorSupplier != null) { - topologyBuilder.addProcessor(processorName, fixedKeyProcessorSupplier, parentNodeNames); - if (fixedKeyProcessorSupplier.stores() != null) { - for (final StoreBuilder storeBuilder : fixedKeyProcessorSupplier.stores()) { + ApiUtils.checkSupplier(fixedKeyProcessorSupplier); + + final FixedKeyProcessorSupplier wrapped = + topologyBuilder.wrapFixedKeyProcessorSupplier(processorName, fixedKeyProcessorSupplier); + + topologyBuilder.addProcessor(processorName, wrapped, parentNodeNames); + final Set> stores = wrapped.stores(); + if (stores != null) { + for (final StoreBuilder storeBuilder : stores) { topologyBuilder.addStateStore(storeBuilder, processorName); } } } - - // temporary hack until KIP-478 is fully implemented - // Old PAPI. Needs to be migrated. - if (oldProcessorSupplier != null && oldProcessorSupplier.stores() != null) { - for (final StoreBuilder storeBuilder : oldProcessorSupplier.stores()) { - topologyBuilder.addStateStore(storeBuilder, processorName); - } - } } public String processorName() { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorToStateConnectorNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorToStateConnectorNode.java new file mode 100644 index 0000000000000..b476d6a7731b2 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/ProcessorToStateConnectorNode.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.kstream.internals.graph; + +import org.apache.kafka.streams.kstream.internals.KTableValueGetterSupplier; +import org.apache.kafka.streams.processor.ConnectedStoreProvider; +import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; + +import java.util.Arrays; +import java.util.Set; + +/** + * Used for stateful processors that need to be manually connected to the state store(s) + * they need to access. 
This should only be used in cases where the stores) cannot + * be connected automatically by implementing the {@link ConnectedStoreProvider#stores()} method + * and returning the store directly. Generally this will only apply to DSL operators that utilize + * value getters to access another processor's state store(s), and the process/processValues + * operator where the user's custom processor supplier doesn't implement the #stores method + * and has to be connected to the store when compiling the topology. + */ +public class ProcessorToStateConnectorNode extends ProcessorGraphNode { + + private final String[] storeNames; + + /** + * Create a node representing a stateful processor that uses value getters to access stores, and needs to + * be connected with those stores + */ + public ProcessorToStateConnectorNode(final ProcessorParameters processorParameters, + final Set> valueGetterSuppliers) { + super(processorParameters.processorName(), processorParameters); + storeNames = valueGetterSuppliers.stream().flatMap(s -> Arrays.stream(s.storeNames())).toArray(String[]::new); + } + + /** + * Create a node representing a stateful processor, which needs to be connected to the provided stores + */ + public ProcessorToStateConnectorNode(final String nodeName, + final ProcessorParameters processorParameters, + final String[] storeNames) { + super(nodeName, processorParameters); + this.storeNames = storeNames; + } + + @Override + public String toString() { + return "ProcessorNode{" + + "storeNames=" + Arrays.toString(storeNames) + + "} " + super.toString(); + } + + @Override + public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { + processorParameters().addProcessorTo(topologyBuilder, parentNodeNames()); + + if (storeNames != null && storeNames.length > 0) { + topologyBuilder.connectProcessorAndStateStores(processorParameters().processorName(), storeNames); + } + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StateStoreNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StateStoreNode.java index fb3cec2dde453..05375d35efe83 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StateStoreNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StateStoreNode.java @@ -25,7 +25,7 @@ public class StateStoreNode extends GraphNode { protected final StoreFactory storeBuilder; public StateStoreNode(final StoreFactory storeBuilder) { - super(storeBuilder.name()); + super(storeBuilder.storeName()); this.storeBuilder = storeBuilder; } @@ -38,7 +38,7 @@ public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { @Override public String toString() { return "StateStoreNode{" + - " name='" + storeBuilder.name() + '\'' + + " name='" + storeBuilder.storeName() + '\'' + ", logConfig=" + storeBuilder.logConfig() + ", loggingEnabled='" + storeBuilder.loggingEnabled() + '\'' + "} "; diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StatefulProcessorNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StatefulProcessorNode.java deleted file mode 100644 index ec6c6583b3efe..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StatefulProcessorNode.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
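Since `ProcessorParameters#addProcessorTo` now pulls state stores from the (possibly wrapped) supplier's `stores()` method, and `ProcessorToStateConnectorNode` is reserved for value-getter style connections, a processor that owns its store can simply expose it via `stores()`. A minimal sketch under that assumption, with hypothetical store and topic-agnostic names:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.Set;

// Sketch of a supplier that exposes its store via stores() (ConnectedStoreProvider). With the
// ProcessorParameters#addProcessorTo change above, such stores are read off the (possibly wrapped)
// supplier and registered with the topology automatically; the store name is illustrative.
public class CountingProcessorSupplier implements ProcessorSupplier<String, String, String, Long> {

    static final String STORE_NAME = "counts-store";

    @Override
    public Set<StoreBuilder<?>> stores() {
        return Set.of(Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore(STORE_NAME), Serdes.String(), Serdes.Long()));
    }

    @Override
    public Processor<String, String, String, Long> get() {
        return new Processor<>() {
            private ProcessorContext<String, Long> context;
            private KeyValueStore<String, Long> store;

            @Override
            public void init(final ProcessorContext<String, Long> context) {
                this.context = context;
                // The store is available here because stores() connected it to this processor.
                this.store = context.getStateStore(STORE_NAME);
            }

            @Override
            public void process(final Record<String, String> record) {
                final Long previous = store.get(record.key());
                final long updated = (previous == null ? 0L : previous) + 1L;
                store.put(record.key(), updated);
                context.forward(record.withValue(updated));
            }
        };
    }
}
```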
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.kstream.internals.graph; - -import org.apache.kafka.streams.kstream.internals.KTableValueGetterSupplier; -import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Arrays; -import java.util.Set; -import java.util.stream.Stream; - -public class StatefulProcessorNode extends ProcessorGraphNode { - - private final String[] storeNames; - private final StoreFactory storeFactory; - - /** - * Create a node representing a stateful processor, where the named stores have already been registered. - */ - public StatefulProcessorNode(final ProcessorParameters processorParameters, - final Set> preRegisteredStores, - final Set> valueGetterSuppliers) { - super(processorParameters.processorName(), processorParameters); - final Stream registeredStoreNames = preRegisteredStores.stream().map(StoreBuilder::name); - final Stream valueGetterStoreNames = valueGetterSuppliers.stream().flatMap(s -> Arrays.stream(s.storeNames())); - storeNames = Stream.concat(registeredStoreNames, valueGetterStoreNames).toArray(String[]::new); - storeFactory = null; - } - - /** - * Create a node representing a stateful processor, where the named stores have already been registered. - */ - public StatefulProcessorNode(final String nodeName, - final ProcessorParameters processorParameters, - final String[] storeNames) { - super(nodeName, processorParameters); - - this.storeNames = storeNames; - this.storeFactory = null; - } - - - /** - * Create a node representing a stateful processor, - * where the store needs to be built and registered as part of building this node. 
- */ - public StatefulProcessorNode(final String nodeName, - final ProcessorParameters processorParameters, - final StoreFactory materializedKTableStoreBuilder) { - super(nodeName, processorParameters); - - this.storeNames = null; - this.storeFactory = materializedKTableStoreBuilder; - } - - @Override - public String toString() { - return "StatefulProcessorNode{" + - "storeNames=" + Arrays.toString(storeNames) + - ", storeBuilder=" + storeFactory + - "} " + super.toString(); - } - - @Override - public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { - processorParameters().addProcessorTo(topologyBuilder, parentNodeNames()); - - if (storeNames != null && storeNames.length > 0) { - topologyBuilder.connectProcessorAndStateStores(processorParameters().processorName(), storeNames); - } - - if (storeFactory != null) { - topologyBuilder.addStateStore(storeFactory, processorParameters().processorName()); - } - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamSourceNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamSourceNode.java index 97b686eaff632..d343bdb1bb586 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamSourceNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamSourceNode.java @@ -17,7 +17,7 @@ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.streams.Topology.AutoOffsetReset; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.errors.TopologyException; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamStreamJoinNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamStreamJoinNode.java index f9cf9164d2065..7448498a44286 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamStreamJoinNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamStreamJoinNode.java @@ -17,24 +17,16 @@ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.streams.kstream.Joined; import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; -import java.util.Optional; /** * Too much information to generalize, so Stream-Stream joins are represented by a specific node. 
*/ public class StreamStreamJoinNode extends BaseJoinProcessorNode { - private final ProcessorParameters thisWindowedStreamProcessorParameters; - private final ProcessorParameters otherWindowedStreamProcessorParameters; - private final StoreFactory thisWindowStoreBuilder; - private final StoreFactory otherWindowStoreBuilder; - private final Optional outerJoinWindowStoreBuilder; - private final Joined joined; - private final boolean enableSpuriousResultFix; + private final String thisWindowedStreamProcessorName; + private final String otherWindowedStreamProcessorName; private final ProcessorParameters selfJoinProcessorParameters; private boolean isSelfJoin; @@ -43,14 +35,9 @@ private StreamStreamJoinNode(final String nodeName, final ProcessorParameters joinThisProcessorParameters, final ProcessorParameters joinOtherProcessParameters, final ProcessorParameters joinMergeProcessorParameters, - final ProcessorParameters thisWindowedStreamProcessorParameters, - final ProcessorParameters otherWindowedStreamProcessorParameters, - final StoreFactory thisStoreFactory, - final StoreFactory otherStoreFactory, - final Optional outerJoinStoreFactory, - final Joined joined, - final boolean enableSpuriousResultFix, - final ProcessorParameters selfJoinProcessorParameters) { + final ProcessorParameters selfJoinProcessorParameters, + final String thisWindowedStreamProcessorName, + final String otherWindowedStreamProcessorName) { super(nodeName, valueJoiner, @@ -60,26 +47,16 @@ private StreamStreamJoinNode(final String nodeName, null, null); - this.thisWindowStoreBuilder = thisStoreFactory; - this.otherWindowStoreBuilder = otherStoreFactory; - this.joined = joined; - this.thisWindowedStreamProcessorParameters = thisWindowedStreamProcessorParameters; - this.otherWindowedStreamProcessorParameters = otherWindowedStreamProcessorParameters; - this.outerJoinWindowStoreBuilder = outerJoinStoreFactory; - this.enableSpuriousResultFix = enableSpuriousResultFix; + this.thisWindowedStreamProcessorName = thisWindowedStreamProcessorName; + this.otherWindowedStreamProcessorName = otherWindowedStreamProcessorName; this.selfJoinProcessorParameters = selfJoinProcessorParameters; } - @Override public String toString() { return "StreamStreamJoinNode{" + - "thisWindowedStreamProcessorParameters=" + thisWindowedStreamProcessorParameters + - ", otherWindowedStreamProcessorParameters=" + otherWindowedStreamProcessorParameters + - ", thisWindowStoreBuilder=" + thisWindowStoreBuilder + - ", otherWindowStoreBuilder=" + otherWindowStoreBuilder + - ", outerJoinWindowStoreBuilder=" + outerJoinWindowStoreBuilder + - ", joined=" + joined + + "thisWindowedStreamProcessorName=" + thisWindowedStreamProcessorName + + ", otherWindowedStreamProcessorName=" + otherWindowedStreamProcessorName + "} " + super.toString(); } @@ -89,22 +66,14 @@ public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { final String thisProcessorName = thisProcessorParameters().processorName(); final String otherProcessorName = otherProcessorParameters().processorName(); - final String thisWindowedStreamProcessorName = thisWindowedStreamProcessorParameters.processorName(); - final String otherWindowedStreamProcessorName = otherWindowedStreamProcessorParameters.processorName(); if (isSelfJoin) { - topologyBuilder.addProcessor(selfJoinProcessorParameters.processorName(), selfJoinProcessorParameters.processorSupplier(), thisWindowedStreamProcessorName); - topologyBuilder.addStateStore(thisWindowStoreBuilder, thisWindowedStreamProcessorName, 
selfJoinProcessorParameters.processorName()); + selfJoinProcessorParameters.addProcessorTo(topologyBuilder, new String[]{thisWindowedStreamProcessorName}); } else { - topologyBuilder.addProcessor(thisProcessorName, thisProcessorParameters().processorSupplier(), thisWindowedStreamProcessorName); - topologyBuilder.addProcessor(otherProcessorName, otherProcessorParameters().processorSupplier(), otherWindowedStreamProcessorName); - topologyBuilder.addProcessor(mergeProcessorParameters().processorName(), mergeProcessorParameters().processorSupplier(), thisProcessorName, otherProcessorName); - topologyBuilder.addStateStore(thisWindowStoreBuilder, thisWindowedStreamProcessorName, otherProcessorName); - topologyBuilder.addStateStore(otherWindowStoreBuilder, otherWindowedStreamProcessorName, thisProcessorName); - - if (enableSpuriousResultFix) { - outerJoinWindowStoreBuilder.ifPresent(builder -> topologyBuilder.addStateStore(builder, thisProcessorName, otherProcessorName)); - } + thisProcessorParameters().addProcessorTo(topologyBuilder, new String[]{thisWindowedStreamProcessorName}); + otherProcessorParameters().addProcessorTo(topologyBuilder, new String[]{otherWindowedStreamProcessorName}); + + mergeProcessorParameters().addProcessorTo(topologyBuilder, new String[]{thisProcessorName, otherProcessorName}); } } @@ -116,12 +85,12 @@ public boolean getSelfJoin() { return isSelfJoin; } - public ProcessorParameters thisWindowedStreamProcessorParameters() { - return thisWindowedStreamProcessorParameters; + public String thisWindowedStreamProcessorName() { + return thisWindowedStreamProcessorName; } - public ProcessorParameters otherWindowedStreamProcessorParameters() { - return otherWindowedStreamProcessorParameters; + public String otherWindowedStreamProcessorName() { + return otherWindowedStreamProcessorName; } public static StreamStreamJoinNodeBuilder streamStreamJoinNodeBuilder() { @@ -135,14 +104,9 @@ public static final class StreamStreamJoinNodeBuilder { private ProcessorParameters joinThisProcessorParameters; private ProcessorParameters joinOtherProcessorParameters; private ProcessorParameters joinMergeProcessorParameters; - private ProcessorParameters thisWindowedStreamProcessorParameters; - private ProcessorParameters otherWindowedStreamProcessorParameters; - private StoreFactory thisStoreFactory; - private StoreFactory otherStoreFactory; - private Optional outerJoinStoreFactory; - private Joined joined; - private boolean enableSpuriousResultFix = false; private ProcessorParameters selfJoinProcessorParameters; + private String thisWindowedStreamProcessorName; + private String otherWindowedStreamProcessorName; private StreamStreamJoinNodeBuilder() { } @@ -167,50 +131,24 @@ public StreamStreamJoinNodeBuilder withJoinOtherProcessorParamete return this; } - public StreamStreamJoinNodeBuilder withJoinMergeProcessorParameters(final ProcessorParameters joinMergeProcessorParameters) { - this.joinMergeProcessorParameters = joinMergeProcessorParameters; - return this; - } - - public StreamStreamJoinNodeBuilder withThisWindowedStreamProcessorParameters(final ProcessorParameters thisWindowedStreamProcessorParameters) { - this.thisWindowedStreamProcessorParameters = thisWindowedStreamProcessorParameters; - return this; - } - - public StreamStreamJoinNodeBuilder withOtherWindowedStreamProcessorParameters( - final ProcessorParameters otherWindowedStreamProcessorParameters) { - this.otherWindowedStreamProcessorParameters = otherWindowedStreamProcessorParameters; - return this; - } - - public 
StreamStreamJoinNodeBuilder withThisWindowStoreBuilder(final StoreFactory thisStoreFactory) { - this.thisStoreFactory = thisStoreFactory; - return this; - } - - public StreamStreamJoinNodeBuilder withOtherWindowStoreBuilder(final StoreFactory otherStoreFactory) { - this.otherStoreFactory = otherStoreFactory; - return this; - } - - public StreamStreamJoinNodeBuilder withOuterJoinWindowStoreBuilder(final Optional outerJoinWindowStoreBuilder) { - this.outerJoinStoreFactory = outerJoinWindowStoreBuilder; + public StreamStreamJoinNodeBuilder withSelfJoinProcessorParameters( + final ProcessorParameters selfJoinProcessorParameters) { + this.selfJoinProcessorParameters = selfJoinProcessorParameters; return this; } - public StreamStreamJoinNodeBuilder withJoined(final Joined joined) { - this.joined = joined; + public StreamStreamJoinNodeBuilder withJoinMergeProcessorParameters(final ProcessorParameters joinMergeProcessorParameters) { + this.joinMergeProcessorParameters = joinMergeProcessorParameters; return this; } - public StreamStreamJoinNodeBuilder withSpuriousResultFixEnabled() { - this.enableSpuriousResultFix = true; + public StreamStreamJoinNodeBuilder withThisWindowedStreamProcessorName(final String thisWindowedStreamProcessorName) { + this.thisWindowedStreamProcessorName = thisWindowedStreamProcessorName; return this; } - public StreamStreamJoinNodeBuilder withSelfJoinProcessorParameters( - final ProcessorParameters selfJoinProcessorParameters) { - this.selfJoinProcessorParameters = selfJoinProcessorParameters; + public StreamStreamJoinNodeBuilder withOtherWindowedStreamProcessorName(final String otherWindowedStreamProcessorName) { + this.otherWindowedStreamProcessorName = otherWindowedStreamProcessorName; return this; } @@ -221,14 +159,9 @@ public StreamStreamJoinNode build() { joinThisProcessorParameters, joinOtherProcessorParameters, joinMergeProcessorParameters, - thisWindowedStreamProcessorParameters, - otherWindowedStreamProcessorParameters, - thisStoreFactory, - otherStoreFactory, - outerJoinStoreFactory, - joined, - enableSpuriousResultFix, - selfJoinProcessorParameters); + selfJoinProcessorParameters, + thisWindowedStreamProcessorName, + otherWindowedStreamProcessorName); } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamTableJoinNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamTableJoinNode.java index ad6083cbbcfb9..b73a8caaa552f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamTableJoinNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamTableJoinNode.java @@ -17,12 +17,10 @@ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; import java.time.Duration; import java.util.Arrays; -import java.util.Optional; /** * Represents a join between a KStream and a KTable or GlobalKTable @@ -34,15 +32,12 @@ public class StreamTableJoinNode extends GraphNode { private final ProcessorParameters processorParameters; private final String otherJoinSideNodeName; private final Duration gracePeriod; - private final Optional bufferName; - public StreamTableJoinNode(final String nodeName, final ProcessorParameters processorParameters, final String[] storeNames, final String otherJoinSideNodeName, - final Duration gracePeriod, - final Optional bufferName) { + final Duration gracePeriod) { 
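For context on the slimmed-down `StreamStreamJoinNode`: the node now only wires processors by name and no longer calls `addStateStore` itself, so the join window stores are presumably attached through the join processors' `stores()` implementations (via `FactoryWrappingStoreBuilder`, as elsewhere in this patch). A sketch of the public DSL join that exercises this node, with illustrative topic names:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;

import java.time.Duration;

public class StreamStreamJoinSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> left = builder.stream("left-topic");
        final KStream<String, String> right = builder.stream("right-topic");

        // The window stores backing this join are no longer registered by
        // StreamStreamJoinNode#writeToTopology but through the join processors themselves.
        left.join(
                right,
                (leftValue, rightValue) -> leftValue + "/" + rightValue,
                JoinWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(5), Duration.ofMinutes(1)),
                StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()))
            .to("joined-topic");

        System.out.println(builder.build().describe());
    }
}
```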
super(nodeName); // in the case of Stream-Table join the state stores associated with the KTable @@ -50,7 +45,6 @@ public StreamTableJoinNode(final String nodeName, this.processorParameters = processorParameters; this.otherJoinSideNodeName = otherJoinSideNodeName; this.gracePeriod = gracePeriod; - this.bufferName = bufferName; } @Override @@ -65,15 +59,13 @@ public String toString() { @Override public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { final String processorName = processorParameters.processorName(); - final ProcessorSupplier processorSupplier = processorParameters.processorSupplier(); // Stream - Table join (Global or KTable) - topologyBuilder.addProcessor(processorName, processorSupplier, parentNodeNames()); + processorParameters.addProcessorTo(topologyBuilder, parentNodeNames()); // Steam - KTable join only if (otherJoinSideNodeName != null) { topologyBuilder.connectProcessorAndStateStores(processorName, storeNames); - bufferName.ifPresent(s -> topologyBuilder.connectProcessorAndStateStores(processorName, s)); if (gracePeriod != null) { for (final String storeName : storeNames) { if (!topologyBuilder.isStoreVersioned(storeName)) { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamToTableNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamToTableNode.java index a6c825be0c8ae..08c171e824aae 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamToTableNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/StreamToTableNode.java @@ -17,13 +17,7 @@ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.kstream.internals.KTableSource; -import org.apache.kafka.streams.kstream.internals.KeyValueStoreMaterializer; -import org.apache.kafka.streams.kstream.internals.MaterializedInternal; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; -import org.apache.kafka.streams.state.KeyValueStore; /** * Represents a KTable convert From KStream @@ -31,37 +25,20 @@ public class StreamToTableNode extends GraphNode { private final ProcessorParameters processorParameters; - private final MaterializedInternal materializedInternal; public StreamToTableNode(final String nodeName, - final ProcessorParameters processorParameters, - final MaterializedInternal materializedInternal) { + final ProcessorParameters processorParameters) { super(nodeName); this.processorParameters = processorParameters; - this.materializedInternal = materializedInternal; } @Override public String toString() { - return "StreamToTableNode{" + - ", processorParameters=" + processorParameters + - ", materializedInternal=" + materializedInternal + - "} " + super.toString(); + return "StreamToTableNode{" + super.toString() + "}"; } - @SuppressWarnings("unchecked") @Override public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { - final StoreFactory storeFactory = - new KeyValueStoreMaterializer<>((MaterializedInternal>) materializedInternal); - - final String processorName = processorParameters.processorName(); - final KTableSource tableSource = processorParameters.processorSupplier() instanceof KTableSource ? 
- (KTableSource) processorParameters.processorSupplier() : null; - topologyBuilder.addProcessor(processorName, processorParameters.processorSupplier(), parentNodeNames()); - - if (storeFactory != null && tableSource.materialized()) { - topologyBuilder.addStateStore(storeFactory, processorName); - } + processorParameters.addProcessorTo(topologyBuilder, parentNodeNames()); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableFilterNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableFilterNode.java index 1874bd807ed47..a921dab0d1a06 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableFilterNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableFilterNode.java @@ -19,14 +19,12 @@ import org.apache.kafka.streams.kstream.internals.KTableFilter; import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.internals.StoreFactory; -public class TableFilterNode extends TableProcessorNode implements VersionedSemanticsGraphNode { +public class TableFilterNode extends ProcessorGraphNode implements VersionedSemanticsGraphNode { public TableFilterNode(final String nodeName, - final ProcessorParameters processorParameters, - final StoreFactory storeFactory) { - super(nodeName, processorParameters, storeFactory); + final ProcessorParameters processorParameters) { + super(nodeName, processorParameters); } @SuppressWarnings("unchecked") diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNode.java deleted file mode 100644 index ccd87855a0744..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNode.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.streams.kstream.internals.graph; - -import org.apache.kafka.streams.kstream.internals.KTableSource; -import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; - -import java.util.Arrays; -import java.util.Objects; - -public class TableProcessorNode extends GraphNode { - - private final ProcessorParameters processorParameters; - private final StoreFactory storeFactory; - private final String[] storeNames; - - public TableProcessorNode(final String nodeName, - final ProcessorParameters processorParameters, - final StoreFactory storeFactory) { - this(nodeName, processorParameters, storeFactory, null); - } - - public TableProcessorNode(final String nodeName, - final ProcessorParameters processorParameters, - final StoreFactory storeFactory, - final String[] storeNames) { - super(nodeName); - this.processorParameters = processorParameters; - this.storeFactory = storeFactory; - this.storeNames = storeNames != null ? storeNames : new String[] {}; - } - - public ProcessorParameters processorParameters() { - return processorParameters; - } - - @Override - public String toString() { - return "TableProcessorNode{" + - ", processorParameters=" + processorParameters + - ", storeFactory=" + (storeFactory == null ? "null" : storeFactory.name()) + - ", storeNames=" + Arrays.toString(storeNames) + - "} " + super.toString(); - } - - @SuppressWarnings("unchecked") - @Override - public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { - final String processorName = processorParameters.processorName(); - topologyBuilder.addProcessor(processorName, processorParameters.processorSupplier(), parentNodeNames()); - - if (storeNames.length > 0) { - topologyBuilder.connectProcessorAndStateStores(processorName, storeNames); - } - - final KTableSource tableSource = processorParameters.processorSupplier() instanceof KTableSource ? 
- (KTableSource) processorParameters.processorSupplier() : null; - if (tableSource != null) { - if (tableSource.materialized()) { - topologyBuilder.addStateStore(Objects.requireNonNull(storeFactory, "storeFactory was null"), - processorName); - } - } else if (storeFactory != null) { - topologyBuilder.addStateStore(storeFactory, processorName); - } - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNode.java index f0f8e0dcb4a94..5e776a5c733d0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNode.java @@ -17,15 +17,10 @@ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.kstream.internals.KTableSource; -import org.apache.kafka.streams.kstream.internals.KeyValueStoreMaterializer; -import org.apache.kafka.streams.kstream.internals.MaterializedInternal; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.StoreFactory; -import org.apache.kafka.streams.state.KeyValueStore; import java.util.Collections; import java.util.Iterator; @@ -36,7 +31,6 @@ */ public class TableSourceNode extends SourceGraphNode { - private final MaterializedInternal materializedInternal; private final ProcessorParameters processorParameters; private final String sourceName; private final boolean isGlobalKTable; @@ -46,7 +40,6 @@ private TableSourceNode(final String nodeName, final String sourceName, final String topic, final ConsumedInternal consumedInternal, - final MaterializedInternal materializedInternal, final ProcessorParameters processorParameters, final boolean isGlobalKTable) { @@ -57,7 +50,6 @@ private TableSourceNode(final String nodeName, this.sourceName = sourceName; this.isGlobalKTable = isGlobalKTable; this.processorParameters = processorParameters; - this.materializedInternal = materializedInternal; } @@ -68,7 +60,6 @@ public void reuseSourceTopicForChangeLog(final boolean shouldReuseSourceTopicFor @Override public String toString() { return "TableSourceNode{" + - "materializedInternal=" + materializedInternal + ", processorParameters=" + processorParameters + ", sourceName='" + sourceName + '\'' + ", isGlobalKTable=" + isGlobalKTable + @@ -93,12 +84,8 @@ public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { throw new IllegalStateException("A table source node must have a single topic as input"); } - final StoreFactory storeFactory = - new KeyValueStoreMaterializer<>((MaterializedInternal>) materializedInternal); - if (isGlobalKTable) { topologyBuilder.addGlobalStore( - storeFactory, sourceName, consumedInternal().timestampExtractor(), consumedInternal().keyDeserializer(), @@ -116,16 +103,16 @@ public void writeToTopology(final InternalTopologyBuilder topologyBuilder) { consumedInternal().valueDeserializer(), topicName); - topologyBuilder.addProcessor(processorParameters.processorName(), processorParameters.processorSupplier(), sourceName); + processorParameters.addProcessorTo(topologyBuilder, new String[] {sourceName}); - // only add state store if the source KTable should be materialized + // if the 
KTableSource should not be materialized, stores will be null or empty final KTableSource tableSource = (KTableSource) processorParameters.processorSupplier(); - if (tableSource.materialized()) { - topologyBuilder.addStateStore(storeFactory, nodeName()); - + if (tableSource.stores() != null) { if (shouldReuseSourceTopicForChangelog) { - storeFactory.withLoggingDisabled(); - topologyBuilder.connectSourceStoreAndTopic(storeFactory.name(), topicName); + tableSource.stores().forEach(store -> { + store.withLoggingDisabled(); + topologyBuilder.connectSourceStoreAndTopic(store.name(), topicName); + }); } } } @@ -138,7 +125,6 @@ public static final class TableSourceNodeBuilder { private String sourceName; private String topic; private ConsumedInternal consumedInternal; - private MaterializedInternal materializedInternal; private ProcessorParameters processorParameters; private boolean isGlobalKTable = false; @@ -155,11 +141,6 @@ public TableSourceNodeBuilder withTopic(final String topic) { return this; } - public TableSourceNodeBuilder withMaterializedInternal(final MaterializedInternal materializedInternal) { - this.materializedInternal = materializedInternal; - return this; - } - public TableSourceNodeBuilder withConsumedInternal(final ConsumedInternal consumedInternal) { this.consumedInternal = consumedInternal; return this; @@ -185,7 +166,6 @@ public TableSourceNode build() { sourceName, topic, consumedInternal, - materializedInternal, processorParameters, isGlobalKTable); } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSuppressNode.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSuppressNode.java index 88e55f37a25ef..ac4a3f25c37e8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSuppressNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/graph/TableSuppressNode.java @@ -16,12 +16,12 @@ */ package org.apache.kafka.streams.kstream.internals.graph; -import org.apache.kafka.streams.processor.internals.StoreFactory; - -public class TableSuppressNode extends StatefulProcessorNode { +/** + * Marker interface to identify suppression nodes since they have some special requirements + */ +public class TableSuppressNode extends ProcessorGraphNode { public TableSuppressNode(final String nodeName, - final ProcessorParameters processorParameters, - final StoreFactory materializedKTableStoreBuilder) { - super(nodeName, processorParameters, materializedKTableStoreBuilder); + final ProcessorParameters processorParameters) { + super(nodeName, processorParameters); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/FinalResultsSuppressionBuilder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/FinalResultsSuppressionBuilder.java index e917556c8736e..9aff6e61b8477 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/FinalResultsSuppressionBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/FinalResultsSuppressionBuilder.java @@ -22,7 +22,7 @@ import java.time.Duration; import java.util.Objects; -public class FinalResultsSuppressionBuilder implements Suppressed, NamedSuppressed { +public class FinalResultsSuppressionBuilder> implements Suppressed, NamedSuppressed { private final String name; private final StrictBufferConfig bufferConfig; diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorSupplier.java index 0b0c6ca15e9f7..eaf6b681e740e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorSupplier.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorSupplier.java @@ -33,23 +33,26 @@ import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.SerdeGetter; import org.apache.kafka.streams.processor.internals.metrics.ProcessorNodeMetrics; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.Maybe; import org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer; +import java.util.Set; + import static java.util.Objects.requireNonNull; public class KTableSuppressProcessorSupplier implements KTableProcessorSupplier { private final SuppressedInternal suppress; - private final String storeName; + private final StoreBuilder storeBuilder; private final KTableImpl parentKTable; public KTableSuppressProcessorSupplier(final SuppressedInternal suppress, - final String storeName, + final StoreBuilder storeBuilder, final KTableImpl parentKTable) { this.suppress = suppress; - this.storeName = storeName; + this.storeBuilder = storeBuilder; this.parentKTable = parentKTable; // The suppress buffer requires seeing the old values, to support the prior value view. parentKTable.enableSendingOldValues(true); @@ -57,25 +60,30 @@ public KTableSuppressProcessorSupplier(final SuppressedInternal suppress, @Override public Processor, K, Change> get() { - return new KTableSuppressProcessor<>(suppress, storeName); + return new KTableSuppressProcessor<>(suppress, storeBuilder.name()); + } + + @Override + public Set> stores() { + return Set.of(storeBuilder); } @Override public KTableValueGetterSupplier view() { final KTableValueGetterSupplier parentValueGetterSupplier = parentKTable.valueGetterSupplier(); - return new KTableValueGetterSupplier() { + return new KTableValueGetterSupplier<>() { @Override public KTableValueGetter get() { final KTableValueGetter parentGetter = parentValueGetterSupplier.get(); - return new KTableValueGetter() { + return new KTableValueGetter<>() { private TimeOrderedKeyValueBuffer> buffer; @Override public void init(final ProcessorContext context) { parentGetter.init(context); // the main processor is responsible for the buffer's lifecycle - buffer = requireNonNull(context.getStateStore(storeName)); + buffer = requireNonNull(context.getStateStore(storeBuilder.name())); } @Override @@ -107,7 +115,7 @@ public String[] storeNames() { final String[] parentStores = parentValueGetterSupplier.storeNames(); final String[] stores = new String[1 + parentStores.length]; System.arraycopy(parentStores, 0, stores, 1, parentStores.length); - stores[0] = storeName; + stores[0] = storeBuilder.name(); return stores; } }; @@ -166,7 +174,7 @@ public void process(final Record> record) { } private void buffer(final Record> record) { - final long bufferTime = bufferTimeDefinition.time(internalProcessorContext, record.key()); + final long bufferTime = bufferTimeDefinition.time(internalProcessorContext.recordContext(), record.key()); buffer.put(bufferTime, record, internalProcessorContext.recordContext()); } 
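The hunk above replaces KTableSuppressProcessorSupplier's bare store name with a StoreBuilder that is exposed through stores(), so the topology attaches and connects the state store when the processor is added, rather than the graph node calling addStateStore separately. A minimal sketch of the same pattern against the public Processor API, with an illustrative supplier class and store name that are not part of this patch:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.Set;

// Illustrative supplier: it owns its store via stores(), so the topology builder
// attaches and connects the store automatically when the processor is added.
public class CountingProcessorSupplier implements ProcessorSupplier<String, String, String, Long> {

    private final StoreBuilder<KeyValueStore<String, Long>> storeBuilder =
        Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore("counts-store"), // illustrative store name
            Serdes.String(),
            Serdes.Long());

    @Override
    public Set<StoreBuilder<?>> stores() {
        return Set.of(storeBuilder);
    }

    @Override
    public Processor<String, String, String, Long> get() {
        return new Processor<>() {
            private ProcessorContext<String, Long> context;
            private KeyValueStore<String, Long> store;

            @Override
            public void init(final ProcessorContext<String, Long> context) {
                this.context = context;
                this.store = context.getStateStore(storeBuilder.name());
            }

            @Override
            public void process(final Record<String, String> record) {
                final Long previous = store.get(record.key());
                final long updated = previous == null ? 1L : previous + 1L;
                store.put(record.key(), updated);
                context.forward(record.withValue(updated));
            }
        };
    }
}

Because the supplier owns its StoreBuilder, the processor and its store travel as one unit, which is what lets the graph nodes in this patch drop their StoreFactory plumbing.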
diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/SuppressedInternal.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/SuppressedInternal.java index 51307bba9f5d7..89b07a9808b30 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/SuppressedInternal.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/SuppressedInternal.java @@ -27,7 +27,7 @@ public class SuppressedInternal implements Suppressed, NamedSuppressed private static final StrictBufferConfigImpl DEFAULT_BUFFER_CONFIG = (StrictBufferConfigImpl) BufferConfig.unbounded(); private final String name; - private final BufferConfigInternal bufferConfig; + private final BufferConfigInternal bufferConfig; private final Duration timeToWaitForMoreEvents; private final TimeDefinition timeDefinition; private final boolean safeToDropTombstones; @@ -39,7 +39,7 @@ public class SuppressedInternal implements Suppressed, NamedSuppressed * idempotent and correct). We decided that the unnecessary tombstones would not be * desirable in the output stream, though, hence the ability to drop them. * - * A alternative is to remember whether a result has previously been emitted + *
              A alternative is to remember whether a result has previously been emitted * for a key and drop tombstones in that case, but it would be a little complicated to * figure out when to forget the fact that we have emitted some result (currently, the * buffer immediately forgets all about a key when we emit, which helps to keep it @@ -47,13 +47,13 @@ public class SuppressedInternal implements Suppressed, NamedSuppressed */ public SuppressedInternal(final String name, final Duration suppressionTime, - final BufferConfig bufferConfig, + final BufferConfig bufferConfig, final TimeDefinition timeDefinition, final boolean safeToDropTombstones) { this.name = name; this.timeToWaitForMoreEvents = suppressionTime == null ? DEFAULT_SUPPRESSION_TIME : suppressionTime; this.timeDefinition = timeDefinition == null ? TimeDefinitions.RecordTimeDefinition.instance() : timeDefinition; - this.bufferConfig = bufferConfig == null ? DEFAULT_BUFFER_CONFIG : (BufferConfigInternal) bufferConfig; + this.bufferConfig = bufferConfig == null ? DEFAULT_BUFFER_CONFIG : (BufferConfigInternal) bufferConfig; this.safeToDropTombstones = safeToDropTombstones; } @@ -69,7 +69,7 @@ public String name() { @SuppressWarnings("unchecked") public > BufferConfigInternal bufferConfig() { - return bufferConfig; + return (BufferConfigInternal) bufferConfig; } TimeDefinition timeDefinition() { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/TimeDefinitions.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/TimeDefinitions.java index 640965fdd6a21..c4a38e23c97c8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/TimeDefinitions.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/TimeDefinitions.java @@ -17,63 +17,47 @@ package org.apache.kafka.streams.kstream.internals.suppress; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.RecordContext; final class TimeDefinitions { private TimeDefinitions() {} - enum TimeDefinitionType { - RECORD_TIME, WINDOW_END_TIME - } - /** * This interface should never be instantiated outside of this class. 
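The suppression classes touched above (FinalResultsSuppressionBuilder, SuppressedInternal, TimeDefinitions) sit behind the public Suppressed API; for orientation, a typical final-results topology that exercises this code path looks roughly like the following sketch (class and method names are illustrative):

import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

import java.time.Duration;

public class FinalCountExample {

    // Emit exactly one count per key and 5-minute window, once the window has closed.
    static KTable<Windowed<String>, Long> finalCounts(final KStream<String, String> events) {
        return events
            .groupByKey()
            .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(5)))
            .count()
            .suppress(Suppressed.untilWindowCloses(Suppressed.BufferConfig.unbounded()));
    }
}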
*/ interface TimeDefinition { - long time(final ProcessorContext context, final K key); - - TimeDefinitionType type(); + long time(final RecordContext context, final K key); } - public static class RecordTimeDefinition implements TimeDefinition { - private static final RecordTimeDefinition INSTANCE = new RecordTimeDefinition(); + static class RecordTimeDefinition implements TimeDefinition { + private static final RecordTimeDefinition INSTANCE = new RecordTimeDefinition<>(); private RecordTimeDefinition() {} @SuppressWarnings("unchecked") - public static RecordTimeDefinition instance() { - return RecordTimeDefinition.INSTANCE; + static RecordTimeDefinition instance() { + return (RecordTimeDefinition) RecordTimeDefinition.INSTANCE; } @Override - public long time(final ProcessorContext context, final K key) { + public long time(final RecordContext context, final K key) { return context.timestamp(); } - - @Override - public TimeDefinitionType type() { - return TimeDefinitionType.RECORD_TIME; - } } - public static class WindowEndTimeDefinition implements TimeDefinition { - private static final WindowEndTimeDefinition INSTANCE = new WindowEndTimeDefinition(); + static class WindowEndTimeDefinition> implements TimeDefinition { + private static final WindowEndTimeDefinition INSTANCE = new WindowEndTimeDefinition<>(); private WindowEndTimeDefinition() {} @SuppressWarnings("unchecked") - public static WindowEndTimeDefinition instance() { - return WindowEndTimeDefinition.INSTANCE; + static > WindowEndTimeDefinition instance() { + return (WindowEndTimeDefinition) WindowEndTimeDefinition.INSTANCE; } @Override - public long time(final ProcessorContext context, final K key) { + public long time(final RecordContext context, final K key) { return key.window().end(); } - - @Override - public TimeDefinitionType type() { - return TimeDefinitionType.WINDOW_END_TIME; - } } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/AbstractProcessor.java b/streams/src/main/java/org/apache/kafka/streams/processor/AbstractProcessor.java deleted file mode 100644 index 52a213d1a9050..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/processor/AbstractProcessor.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.processor; - -/** - * An abstract implementation of {@link Processor} that manages the {@link ProcessorContext} instance and provides default no-op - * implementation of {@link #close()}. - * - * @param the type of keys - * @param the type of values - * @deprecated Since 3.0. Use {@link org.apache.kafka.streams.processor.api.Processor} or - * {@link org.apache.kafka.streams.processor.api.ContextualProcessor} instead. 
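The deprecation note on the removed AbstractProcessor points to org.apache.kafka.streams.processor.api.Processor and ContextualProcessor as the replacements; a minimal sketch of the equivalent processor written against ContextualProcessor, with an illustrative class name:

import org.apache.kafka.streams.processor.api.ContextualProcessor;
import org.apache.kafka.streams.processor.api.Record;

// Old style: extend AbstractProcessor<K, V> and implement process(key, value).
// New style: extend ContextualProcessor<KIn, VIn, KOut, VOut>, which keeps the typed
// context for you, and implement process(Record<KIn, VIn>).
public class UpperCaseProcessor extends ContextualProcessor<String, String, String, String> {

    @Override
    public void process(final Record<String, String> record) {
        final String value = record.value();
        // context() is available once init() has been called by the framework
        context().forward(record.withValue(value == null ? null : value.toUpperCase()));
    }
}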
- */ -@Deprecated -public abstract class AbstractProcessor implements Processor { - - protected ProcessorContext context; - - protected AbstractProcessor() {} - - @Override - public void init(final ProcessorContext context) { - this.context = context; - } - - /** - * Close this processor and clean up any resources. - *
              - * This method does nothing by default; if desired, subclasses should override it with custom functionality. - *
              - */ - @Override - public void close() { - // do nothing - } - - /** - * Get the processor's context set during {@link #init(ProcessorContext) initialization}. - * - * @return the processor context; null only when called prior to {@link #init(ProcessorContext) initialization}. - */ - protected final ProcessorContext context() { - return context; - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/Cancellable.java b/streams/src/main/java/org/apache/kafka/streams/processor/Cancellable.java index 2acb7625ec1d2..abd141ac399b5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/Cancellable.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/Cancellable.java @@ -19,7 +19,7 @@ import java.time.Duration; /** - * Cancellable interface returned in {@link ProcessorContext#schedule(Duration, PunctuationType, Punctuator)}. + * Cancellable interface returned in {@link org.apache.kafka.streams.processor.api.ProcessorContext#schedule(Duration, PunctuationType, Punctuator)}. * * @see Punctuator */ diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java b/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java index 91824d5a5b817..ad3a834257d1b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java @@ -19,7 +19,7 @@ import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Named; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; +import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; import org.apache.kafka.streams.state.StoreBuilder; import java.util.Set; @@ -91,14 +91,8 @@ * @see Topology#addProcessor(String, org.apache.kafka.streams.processor.api.ProcessorSupplier, String...) * @see KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, String...) * @see KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, Named, String...) - * @see KStream#transformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, String...) - * @see KStream#transformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, Named, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, String...) - * @see KStream#transformValues(ValueTransformerWithKeySupplier, Named, String...) - * @see KStream#flatTransformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, String...) - * @see KStream#flatTransformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, Named, String...) - * @see KStream#flatTransformValues(ValueTransformerWithKeySupplier, String...) - * @see KStream#flatTransformValues(ValueTransformerWithKeySupplier, Named, String...) + * @see KStream#processValues(FixedKeyProcessorSupplier, String...) + * @see KStream#processValues(FixedKeyProcessorSupplier, Named, String...) 
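The @see references above now point to KStream#processValues, the replacement for the removed transformValues/flatTransformValues overloads. A hedged sketch of the migration, using a FixedKeyProcessor so the key stays fixed; names and value types are illustrative:

import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.FixedKeyRecord;

public class ProcessValuesExample {

    // Replacement for a ValueTransformerWithKey: the key is readable but cannot be changed.
    static KStream<String, Long> valueLengths(final KStream<String, String> input) {
        final FixedKeyProcessorSupplier<String, String, Long> toLength =
            () -> new FixedKeyProcessor<String, String, Long>() {
                private FixedKeyProcessorContext<String, Long> context;

                @Override
                public void init(final FixedKeyProcessorContext<String, Long> context) {
                    this.context = context;
                }

                @Override
                public void process(final FixedKeyRecord<String, String> record) {
                    final String value = record.value();
                    // only the value may be replaced; the key is fixed by the framework
                    context.forward(record.withValue(value == null ? 0L : (long) value.length()));
                }
            };
        return input.processValues(toLength);
    }
}

If the old transformer also changed keys, KStream#process with a full ProcessorSupplier is the replacement instead, as the ForwardingDisabledProcessorContext message later in this patch also suggests.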
*/ public interface ConnectedStoreProvider { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/Processor.java b/streams/src/main/java/org/apache/kafka/streams/processor/Processor.java deleted file mode 100644 index 9d724ec1378d4..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/processor/Processor.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.processor; - -import java.time.Duration; - -/** - * A processor of key-value pair records. - * - * @param the type of keys - * @param the type of values - * @deprecated Since 3.0. Use {@link org.apache.kafka.streams.processor.api.Processor} instead. - */ -@Deprecated -public interface Processor { - - /** - * Initialize this processor with the given context. The framework ensures this is called once per processor when the topology - * that contains it is initialized. When the framework is done with the processor, {@link #close()} will be called on it; the - * framework may later re-use the processor by calling {@code #init()} again. - *
              - * The provided {@link ProcessorContext context} can be used to access topology and record meta data, to - * {@link ProcessorContext#schedule(Duration, PunctuationType, Punctuator) schedule} a method to be - * {@link Punctuator#punctuate(long) called periodically} and to access attached {@link StateStore}s. - * - * @param context the context; may not be null - */ - void init(ProcessorContext context); - - /** - * Process the record with the given key and value. - * - * @param key the key for the record - * @param value the value for the record - */ - void process(K key, V value); - - /** - * Close this processor and clean up any resources. Be aware that {@code #close()} is called after an internal cleanup. - * Thus, it is not possible to write anything to Kafka as underlying clients are already closed. The framework may - * later re-use this processor by calling {@code #init()} on it again. - *
              - * Note: Do not close any streams managed resources, like {@link StateStore}s here, as they are managed by the library. - */ - void close(); -} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorContext.java b/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorContext.java index fd99985e6f8a9..3d057c5ce2b47 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorContext.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorContext.java @@ -35,7 +35,13 @@ /** * Processor context interface. */ -@SuppressWarnings("deprecation") // Not deprecating the old context, since it is used by Transformers. See KAFKA-10603. +/* This interface was technically deprecated via KIP-478 (AK 2.7), but we did not mark it as deprecated yet, + * as it's used on many other places + * + * We need to clean this all up (https://issues.apache.org/jira/browse/KAFKA-17131) and mark the interface + * deprecated afterward. + */ +@SuppressWarnings("deprecation") public interface ProcessorContext { /** @@ -104,9 +110,10 @@ void register(final StateStore store, S getStateStore(final String name); /** - * Schedule a periodic operation for processors. A processor may call this method during - * {@link Processor#init(ProcessorContext) initialization} or - * {@link Processor#process(Object, Object) processing} to + * Schedule a periodic operation for processors. A processor may call this method during a + * {@link org.apache.kafka.streams.kstream.KTable#transformValues(ValueTransformerWithKeySupplier, String...)}'s + * {@link org.apache.kafka.streams.kstream.ValueTransformerWithKey#init(ProcessorContext) initialization} or + * {@link org.apache.kafka.streams.kstream.ValueTransformerWithKey#transform(Object, Object) processing} to * schedule a periodic callback — called a punctuation — to {@link Punctuator#punctuate(long)}. * The type parameter controls what notion of time is used for punctuation: *
                @@ -239,8 +246,8 @@ Cancellable schedule(final Duration interval, *
                If it is triggered while processing a record streamed from the source processor, * timestamp is defined as the timestamp of the current input record; the timestamp is extracted from * {@link org.apache.kafka.clients.consumer.ConsumerRecord ConsumerRecord} by {@link TimestampExtractor}. - * Note, that an upstream {@link Processor} might have set a new timestamp by calling - * {@link ProcessorContext#forward(Object, Object, To) forward(..., To.all().withTimestamp(...))}. + * Note, that an upstream {@link org.apache.kafka.streams.processor.api.Processor} might have set a new timestamp by calling + * {@link org.apache.kafka.streams.processor.api.ProcessorContext#forward(org.apache.kafka.streams.processor.api.Record)}. * In particular, some Kafka Streams DSL operators set result record timestamps explicitly, * to guarantee deterministic results. * diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorSupplier.java deleted file mode 100644 index e53a63a4215f7..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/processor/ProcessorSupplier.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.processor; - -import org.apache.kafka.streams.Topology; - -import java.util.function.Supplier; - -/** - * A processor supplier that can create one or more {@link Processor} instances. - *
                - * It is used in {@link Topology} for adding new processor operators, whose generated - * topology can then be replicated (and thus creating one or more {@link Processor} instances) - * and distributed to multiple stream threads. - *
                - * The supplier should always generate a new instance each time {@link ProcessorSupplier#get()} gets called. Creating - * a single {@link Processor} object and returning the same object reference in {@link ProcessorSupplier#get()} would be - * a violation of the supplier pattern and leads to runtime exceptions. - * - * @param the type of keys - * @param the type of values - * @deprecated Since 3.0. Use {@link org.apache.kafka.streams.processor.api.ProcessorSupplier} instead. - */ -@Deprecated -public interface ProcessorSupplier extends ConnectedStoreProvider, Supplier> { - - /** - * Return a newly constructed {@link Processor} instance. - * The supplier should always generate a new instance each time {@link ProcessorSupplier#get()} gets called. - *
                - * Creating a single {@link Processor} object and returning the same object reference in {@link ProcessorSupplier#get()} - * is a violation of the supplier pattern and leads to runtime exceptions. - * - * @return a newly constructed {@link Processor} instance - */ - Processor get(); -} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/PunctuationType.java b/streams/src/main/java/org/apache/kafka/streams/processor/PunctuationType.java index 32965e815dead..2e50c17053082 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/PunctuationType.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/PunctuationType.java @@ -19,7 +19,8 @@ import java.time.Duration; /** - * Controls what notion of time is used for punctuation scheduled via {@link ProcessorContext#schedule(Duration, PunctuationType, Punctuator)} schedule}: + * Controls what notion of time is used for punctuation scheduled via + * {@link org.apache.kafka.streams.processor.api.ProcessorContext#schedule(Duration, PunctuationType, Punctuator) schedule}: *
                  *
                • STREAM_TIME - uses "stream time", which is advanced by the processing of messages * in accordance with the timestamp as extracted by the {@link TimestampExtractor} in use. diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/RecordContext.java b/streams/src/main/java/org/apache/kafka/streams/processor/RecordContext.java index 66b1f8dff05ac..6b6fd91c85355 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/RecordContext.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/RecordContext.java @@ -21,7 +21,7 @@ /** * The context associated with the current record being processed by - * a {@link org.apache.kafka.streams.processor.Processor} + * a {@link org.apache.kafka.streams.processor.api.Processor} */ public interface RecordContext { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/StateStore.java b/streams/src/main/java/org/apache/kafka/streams/processor/StateStore.java index db7d65570aaac..38a3e23e28a1e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/StateStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/StateStore.java @@ -56,7 +56,7 @@ public interface StateStore { /** * Initializes this state store. *
                  - * The implementation of this function must register the root store in the context via the + * The implementation of this function must register the root store in the stateStoreContext via the * {@link StateStoreContext#register(StateStore, StateRestoreCallback, CommitCallback)} function, where the * first {@link StateStore} parameter should always be the passed-in {@code root} object, and * the second parameter should be an object of user's implementation @@ -69,7 +69,7 @@ public interface StateStore { * @throws IllegalStateException If store gets registered after initialized is already finished * @throws StreamsException if the store's change log does not contain the partition */ - void init(final StateStoreContext context, final StateStore root); + void init(final StateStoreContext stateStoreContext, final StateStore root); /** * Flush any cached data diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/TaskId.java b/streams/src/main/java/org/apache/kafka/streams/processor/TaskId.java index af742e1a4e5b0..6b909ec72747f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/TaskId.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/TaskId.java @@ -24,7 +24,7 @@ import java.util.Objects; /** - * The task ID representation composed as subtopology (aka topicGroupId) plus the assigned partition ID. + * The task ID representation composed as subtopology plus the assigned partition ID. */ public class TaskId implements Comparable { @@ -32,20 +32,20 @@ public class TaskId implements Comparable { public static final String NAMED_TOPOLOGY_DELIMITER = "__"; - /** The ID of the subtopology, aka topicGroupId. */ - private final int topicGroupId; + /** The ID of the subtopology. */ + private final int subtopology; /** The ID of the partition. */ private final int partition; /** The namedTopology that this task belongs to, or null if it does not belong to one */ private final String topologyName; - public TaskId(final int topicGroupId, final int partition) { - this(topicGroupId, partition, null); + public TaskId(final int subtopology, final int partition) { + this(subtopology, partition, null); } - public TaskId(final int topicGroupId, final int partition, final String topologyName) { - this.topicGroupId = topicGroupId; + public TaskId(final int subtopology, final int partition, final String topologyName) { + this.subtopology = subtopology; this.partition = partition; if (topologyName != null && topologyName.length() == 0) { LOG.warn("Empty string passed in for task's namedTopology, since NamedTopology name cannot be empty, we " @@ -57,7 +57,7 @@ public TaskId(final int topicGroupId, final int partition, final String topology } public int subtopology() { - return topicGroupId; + return subtopology; } public int partition() { @@ -73,7 +73,7 @@ public String topologyName() { @Override public String toString() { - return topologyName != null ? topologyName + NAMED_TOPOLOGY_DELIMITER + topicGroupId + "_" + partition : topicGroupId + "_" + partition; + return topologyName != null ? 
topologyName + NAMED_TOPOLOGY_DELIMITER + subtopology + "_" + partition : subtopology + "_" + partition; } /** @@ -115,7 +115,7 @@ public boolean equals(final Object o) { } final TaskId taskId = (TaskId) o; - if (topicGroupId != taskId.topicGroupId || partition != taskId.partition) { + if (subtopology != taskId.subtopology || partition != taskId.partition) { return false; } @@ -128,7 +128,7 @@ public boolean equals(final Object o) { @Override public int hashCode() { - return Objects.hash(topicGroupId, partition, topologyName); + return Objects.hash(subtopology, partition, topologyName); } @Override @@ -142,7 +142,7 @@ public int compareTo(final TaskId other) { LOG.error("Tried to compare this = {} with other = {}, but only one had a valid named topology", this, other); throw new IllegalStateException("Can't compare a TaskId with a namedTopology to one without"); } - final int comparingTopicGroupId = Integer.compare(this.topicGroupId, other.topicGroupId); + final int comparingTopicGroupId = Integer.compare(this.subtopology, other.subtopology); return comparingTopicGroupId != 0 ? comparingTopicGroupId : Integer.compare(this.partition, other.partition); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/api/ProcessorWrapper.java b/streams/src/main/java/org/apache/kafka/streams/processor/api/ProcessorWrapper.java new file mode 100644 index 0000000000000..22b80a35ecb3e --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/processor/api/ProcessorWrapper.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.streams.processor.api; + +import org.apache.kafka.common.Configurable; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.TopologyConfig; +import org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper.WrappedFixedKeyProcessorSupplierImpl; +import org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper.WrappedProcessorSupplierImpl; + +import java.util.Map; + +/** + * Wrapper class that can be used to inject custom wrappers around the processors of their application topology. + * The returned instance should wrap the supplied {@code ProcessorSupplier} and the {@code Processor} it supplies + * to avoid disrupting the regular processing of the application, although this is not required and any processor + * implementation can be substituted in to replace the original processor entirely (which may be useful for example + * while testing or debugging an application topology). + *
                  + * NOTE: in order to use this feature, you must set the {@link StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG} config and pass it + * in as a {@link TopologyConfig} when creating the {@link StreamsBuilder} or {@link Topology} by using the + * appropriate constructor (ie {@link StreamsBuilder#StreamsBuilder(TopologyConfig)} or {@link Topology#Topology(TopologyConfig)}) + *
                  + * Can be configured, if desired, by implementing the {@link #configure(Map)} method. This will be invoked when + * the {@code ProcessorWrapper} is instantiated, and will provide it with the TopologyConfigs that were passed in + * to the {@link StreamsBuilder} or {@link Topology} constructor. + */ +public interface ProcessorWrapper extends Configurable { + + @Override + default void configure(final Map configs) { + // do nothing + } + + /** + * Wrap or replace the provided {@link ProcessorSupplier} and return a {@link WrappedProcessorSupplier} + * To convert a {@link ProcessorSupplier} instance into a {@link WrappedProcessorSupplier}, + * use the {@link ProcessorWrapper#asWrapped(ProcessorSupplier)} method + */ + WrappedProcessorSupplier wrapProcessorSupplier(final String processorName, + final ProcessorSupplier processorSupplier); + + /** + * Wrap or replace the provided {@link FixedKeyProcessorSupplier} and return a {@link WrappedFixedKeyProcessorSupplier} + * To convert a {@link FixedKeyProcessorSupplier} instance into a {@link WrappedFixedKeyProcessorSupplier}, + * use the {@link ProcessorWrapper#asWrappedFixedKey(FixedKeyProcessorSupplier)} method + */ + WrappedFixedKeyProcessorSupplier wrapFixedKeyProcessorSupplier(final String processorName, + final FixedKeyProcessorSupplier processorSupplier); + + /** + * Use to convert a {@link ProcessorSupplier} instance into a {@link WrappedProcessorSupplier} + */ + static WrappedProcessorSupplier asWrapped( + final ProcessorSupplier processorSupplier + ) { + return new WrappedProcessorSupplierImpl<>(processorSupplier); + } + + /** + * Use to convert a {@link FixedKeyProcessorSupplier} instance into a {@link WrappedFixedKeyProcessorSupplier} + */ + static WrappedFixedKeyProcessorSupplier asWrappedFixedKey( + final FixedKeyProcessorSupplier processorSupplier + ) { + return new WrappedFixedKeyProcessorSupplierImpl<>(processorSupplier); + } +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.java new file mode 100644 index 0000000000000..c030832415c8e --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedFixedKeyProcessorSupplier.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.streams.processor.api; + +/** + * Marker interface for classes implementing {@link FixedKeyProcessorSupplier} + * that have been wrapped via a {@link ProcessorWrapper}. + *
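The new ProcessorWrapper interface above is the hook for intercepting every processor (and the stores its supplier declares) as the topology is built. The sketch below assumes the wrap methods are generic per supplier, since the angle-bracketed type parameters are stripped by the rendering of this diff, and it uses an illustrative class name; registration follows the javadoc above via StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG and a TopologyConfig-accepting StreamsBuilder constructor:

import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.ProcessorWrapper;
import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier;

// Illustrative wrapper: logs the name of every processor added to the topology and
// otherwise passes the original suppliers through unchanged via the asWrapped helpers.
public class LoggingProcessorWrapper implements ProcessorWrapper {

    @Override
    public <KIn, VIn, KOut, VOut> WrappedProcessorSupplier<KIn, VIn, KOut, VOut> wrapProcessorSupplier(
            final String processorName,
            final ProcessorSupplier<KIn, VIn, KOut, VOut> processorSupplier) {
        System.out.println("Adding processor: " + processorName);
        return ProcessorWrapper.asWrapped(processorSupplier);
    }

    @Override
    public <KIn, VIn, VOut> WrappedFixedKeyProcessorSupplier<KIn, VIn, VOut> wrapFixedKeyProcessorSupplier(
            final String processorName,
            final FixedKeyProcessorSupplier<KIn, VIn, VOut> processorSupplier) {
        System.out.println("Adding fixed-key processor: " + processorName);
        return ProcessorWrapper.asWrappedFixedKey(processorSupplier);
    }

    // Registration sketch, per the javadoc above (other required Streams configs such as
    // application.id and bootstrap.servers are omitted):
    //   props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, LoggingProcessorWrapper.class);
    //   final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props)));
}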
                  + * To convert a {@link FixedKeyProcessorSupplier} instance into a {@link WrappedFixedKeyProcessorSupplier}, + * use the {@link ProcessorWrapper#asWrappedFixedKey(FixedKeyProcessorSupplier)} method + */ +public interface WrappedFixedKeyProcessorSupplier extends FixedKeyProcessorSupplier { + +} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.java new file mode 100644 index 0000000000000..8a2ab9e905573 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/processor/api/WrappedProcessorSupplier.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.streams.processor.api; + +/** + * Marker interface for classes implementing {@link ProcessorSupplier} + * that have been wrapped via a {@link ProcessorWrapper}. + *
                  + * To convert a {@link ProcessorSupplier} instance into a {@link WrappedProcessorSupplier}, + * use the {@link ProcessorWrapper#asWrapped(ProcessorSupplier)} method + */ +public interface WrappedProcessorSupplier extends ProcessorSupplier { + +} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.java b/streams/src/main/java/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.java index ad9128bde209d..2261e27c367f5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/assignment/TaskAssignmentUtils.java @@ -657,12 +657,12 @@ private static boolean canPerformRackAwareOptimization(final ApplicationState ap return false; } - if (!assignmentConfigs.rackAwareTrafficCost().isPresent()) { + if (assignmentConfigs.rackAwareTrafficCost().isEmpty()) { LOG.warn("Rack aware task assignment optimization unavailable: must configure {}", StreamsConfig.RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG); return false; } - if (!assignmentConfigs.rackAwareNonOverlapCost().isPresent()) { + if (assignmentConfigs.rackAwareNonOverlapCost().isEmpty()) { LOG.warn("Rack aware task assignment optimization unavailable: must configure {}", StreamsConfig.RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG); return false; } @@ -695,7 +695,7 @@ private static boolean hasValidRackInformation(final ApplicationState applicatio } private static boolean hasValidRackInformation(final KafkaStreamsState state) { - if (!state.rackId().isPresent()) { + if (state.rackId().isEmpty()) { LOG.error("KafkaStreams client {} doesn't have a rack id configured.", state.processId().id()); return false; } @@ -710,7 +710,7 @@ private static boolean hasValidRackInformation(final TaskInfo task, for (final TaskTopicPartition topicPartition : topicPartitions) { final Optional> racks = topicPartition.rackIds(); - if (!racks.isPresent() || racks.get().isEmpty()) { + if (racks.isEmpty() || racks.get().isEmpty()) { LOG.error("Topic partition {} for task {} does not have racks configured.", topicPartition, task.id()); return false; } @@ -1043,4 +1043,4 @@ public TagStatistics(final ApplicationState applicationState) { this.tagEntryToClients = tagEntryToClients; } } -} \ No newline at end of file +} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadOnlyDecorator.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadOnlyDecorator.java index 2f00803b7ee75..12326ef572114 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadOnlyDecorator.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadOnlyDecorator.java @@ -49,7 +49,7 @@ public void flush() { } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { throw new UnsupportedOperationException(ERROR_MESSAGE); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadWriteDecorator.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadWriteDecorator.java index 353c26ec25389..d9772027cb2d8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadWriteDecorator.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/AbstractReadWriteDecorator.java @@ -43,7 +43,7 @@ 
private AbstractReadWriteDecorator(final T inner) { } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { throw new UnsupportedOperationException(ERROR_MESSAGE); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java index 6c973e096fc27..1dffc4ebbd3ff 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java @@ -119,7 +119,8 @@ private Producer producer() { } public void reInitializeProducer() { - streamsProducer.resetProducer(producer()); + if (!streamsProducer.isClosed()) + streamsProducer.resetProducer(producer()); } StreamsProducer streamsProducer() { diff --git a/core/src/main/scala/kafka/server/TopicKey.scala b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ConfigurableStore.java similarity index 78% rename from core/src/main/scala/kafka/server/TopicKey.scala rename to streams/src/main/java/org/apache/kafka/streams/processor/internals/ConfigurableStore.java index 86b4f505b93b5..296bf100536e7 100644 --- a/core/src/main/scala/kafka/server/TopicKey.scala +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ConfigurableStore.java @@ -14,12 +14,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package kafka.server +package org.apache.kafka.streams.processor.internals; -import org.apache.kafka.server.purgatory.DelayedOperationKey +import org.apache.kafka.streams.StreamsConfig; -/* used by delayed-topic operations */ -case class TopicKey(topic: String) extends DelayedOperationKey { +public interface ConfigurableStore { + + void configure(final StreamsConfig config); - override def keyLabel: String = topic } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdater.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdater.java index addef5a9f1565..306f4691e2f9a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdater.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdater.java @@ -320,7 +320,7 @@ private KafkaFutureImpl restoreConsumerInstanceId(final Duration timeout) private void handleRuntimeException(final RuntimeException runtimeException) { - log.error("An unexpected error occurred within the state updater thread: " + runtimeException); + log.error("An unexpected error occurred within the state updater thread: {}", String.valueOf(runtimeException)); addToExceptionsAndFailedTasksThenClearUpdatingAndPausedTasks(runtimeException); isRunning.set(false); } @@ -926,21 +926,21 @@ public boolean hasExceptionsAndFailedTasks() { public Set updatingStandbyTasks() { return stateUpdaterThread != null - ? Collections.unmodifiableSet(new HashSet<>(stateUpdaterThread.updatingStandbyTasks())) + ? Set.copyOf(stateUpdaterThread.updatingStandbyTasks()) : Collections.emptySet(); } @Override public Set updatingTasks() { return stateUpdaterThread != null - ? Collections.unmodifiableSet(new HashSet<>(stateUpdaterThread.updatingTasks())) + ? 
Set.copyOf(stateUpdaterThread.updatingTasks()) : Collections.emptySet(); } public Set restoredActiveTasks() { restoredActiveTasksLock.lock(); try { - return Collections.unmodifiableSet(new HashSet<>(restoredActiveTasks)); + return Set.copyOf(restoredActiveTasks); } finally { restoredActiveTasksLock.unlock(); } @@ -949,19 +949,19 @@ public Set restoredActiveTasks() { public List exceptionsAndFailedTasks() { exceptionsAndFailedTasksLock.lock(); try { - return Collections.unmodifiableList(new ArrayList<>(exceptionsAndFailedTasks)); + return List.copyOf(exceptionsAndFailedTasks); } finally { exceptionsAndFailedTasksLock.unlock(); } } public Set removedTasks() { - return Collections.unmodifiableSet(new HashSet<>(removedTasks)); + return Set.copyOf(removedTasks); } public Set pausedTasks() { return stateUpdaterThread != null - ? Collections.unmodifiableSet(new HashSet<>(stateUpdaterThread.pausedTasks())) + ? Set.copyOf(stateUpdaterThread.pausedTasks()) : Collections.emptySet(); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ForwardingDisabledProcessorContext.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ForwardingDisabledProcessorContext.java index 17028abe34b36..5091074d70b5c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ForwardingDisabledProcessorContext.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ForwardingDisabledProcessorContext.java @@ -42,8 +42,7 @@ public final class ForwardingDisabledProcessorContext implements ProcessorContex private static final String EXPLANATION = "ProcessorContext#forward() is not supported from this context, " + "as the framework must ensure the key is not changed (#forward allows changing the key on " - + "messages which are sent). Try another function, which doesn't allow the key to be changed " - + "(for example - #transformValues)."; + + "messages which are sent). 
Use KStream.process() if you need to change the key."; public ForwardingDisabledProcessorContext(final ProcessorContext delegate) { this.delegate = Objects.requireNonNull(delegate, "delegate"); diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManager.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManager.java index 479fd1f1853e7..f470254142ede 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManager.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManager.java @@ -22,7 +22,7 @@ public interface GlobalStateManager extends StateManager { - void setGlobalProcessorContext(final InternalProcessorContext processorContext); + void setGlobalProcessorContext(final InternalProcessorContext processorContext); /** * @throws IllegalStateException If store gets registered after initialized is already finished diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java index ad53634386ee1..53064da37328a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImpl.java @@ -35,8 +35,8 @@ import org.apache.kafka.streams.processor.StateRestoreCallback; import org.apache.kafka.streams.processor.StateRestoreListener; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; @@ -83,7 +83,7 @@ public class GlobalStateManagerImpl implements GlobalStateManager { private final Set globalStoreNames = new HashSet<>(); private final Set globalNonPersistentStoresTopics = new HashSet<>(); private final FixedOrderMap> globalStores = new FixedOrderMap<>(); - private InternalProcessorContext globalProcessorContext; + private InternalProcessorContext globalProcessorContext; private DeserializationExceptionHandler deserializationExceptionHandler; public GlobalStateManagerImpl(final LogContext logContext, @@ -126,7 +126,7 @@ public GlobalStateManagerImpl(final LogContext logContext, } @Override - public void setGlobalProcessorContext(final InternalProcessorContext globalProcessorContext) { + public void setGlobalProcessorContext(final InternalProcessorContext globalProcessorContext) { this.globalProcessorContext = globalProcessorContext; } @@ -142,7 +142,7 @@ public Set initialize() { for (final StateStore stateStore : topology.globalStateStores()) { final String sourceTopic = storeToChangelogTopic.get(stateStore.name()); changelogTopics.add(sourceTopic); - stateStore.init((StateStoreContext) globalProcessorContext, stateStore); + stateStore.init(globalProcessorContext, stateStore); } // make sure each topic-partition from checkpointFileCache is associated with a global state store @@ -259,13 +259,13 @@ public void setDeserializationExceptionHandler(final DeserializationExceptionHan this.deserializationExceptionHandler = deserializationExceptionHandler; } - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", 
"unchecked", "resource"}) private void reprocessState(final List topicPartitions, final Map highWatermarks, - final InternalTopologyBuilder.ReprocessFactory reprocessFactory, + final InternalTopologyBuilder.ReprocessFactory reprocessFactory, final String storeName) { - final Processor source = reprocessFactory.processorSupplier().get(); - source.init(globalProcessorContext); + final Processor source = reprocessFactory.processorSupplier().get(); + source.init((ProcessorContext) globalProcessorContext); for (final TopicPartition topicPartition : topicPartitions) { long currentDeadline = NO_DEADLINE; @@ -312,14 +312,17 @@ private void reprocessState(final List topicPartitions, try { if (record.key() != null) { - source.process(new Record<>( + source.process(new Record( reprocessFactory.keyDeserializer().deserialize(record.topic(), record.key()), reprocessFactory.valueDeserializer().deserialize(record.topic(), record.value()), record.timestamp(), record.headers())); restoreCount++; } - } catch (final RuntimeException deserializationException) { + } catch (final Exception deserializationException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages handleDeserializationFailure( deserializationExceptionHandler, globalProcessorContext, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicManager.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicManager.java index 2af238726152c..a8a044edb6b3c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicManager.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicManager.java @@ -659,7 +659,7 @@ private Set validateTopics(final Set topicsToValidate, final Set topicsToCreate = new HashSet<>(); for (final String topicName : topicsToValidate) { final Optional numberOfPartitions = topicsMap.get(topicName).numberOfPartitions(); - if (!numberOfPartitions.isPresent()) { + if (numberOfPartitions.isEmpty()) { log.error("Found undefined number of partitions for topic {}", topicName); throw new StreamsException("Topic " + topicName + " number of partitions not defined"); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java index 1d9e595c47e1d..2ff87ef19fa6b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java @@ -16,21 +16,25 @@ */ package org.apache.kafka.streams.processor.internals; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.TopologyConfig; import org.apache.kafka.streams.errors.TopologyException; import org.apache.kafka.streams.internals.ApiUtils; +import 
org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StreamPartitioner; import org.apache.kafka.streams.processor.TimestampExtractor; import org.apache.kafka.streams.processor.TopicNameExtractor; import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.ProcessorWrapper; +import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier; import org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology; import org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology; import org.apache.kafka.streams.state.StoreBuilder; @@ -39,6 +43,7 @@ import org.slf4j.LoggerFactory; import java.io.Serializable; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -52,25 +57,39 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Properties; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.regex.Pattern; import java.util.stream.Collectors; -import static org.apache.kafka.clients.consumer.OffsetResetStrategy.EARLIEST; -import static org.apache.kafka.clients.consumer.OffsetResetStrategy.LATEST; -import static org.apache.kafka.clients.consumer.OffsetResetStrategy.NONE; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; public class InternalTopologyBuilder { public InternalTopologyBuilder() { this.topologyName = null; + this.processorWrapper = new NoOpProcessorWrapper(); } public InternalTopologyBuilder(final TopologyConfig topologyConfigs) { this.topologyConfigs = topologyConfigs; this.topologyName = topologyConfigs.topologyName; + + try { + processorWrapper = topologyConfigs.getConfiguredInstance( + PROCESSOR_WRAPPER_CLASS_CONFIG, + ProcessorWrapper.class, + topologyConfigs.originals() + ); + } catch (final Exception e) { + final String errorMessage = String.format( + "Unable to instantiate ProcessorWrapper from value of config %s. Please provide a valid class " + + "that implements the ProcessorWrapper interface.", PROCESSOR_WRAPPER_CLASS_CONFIG); + log.error(errorMessage, e); + throw new ConfigException(errorMessage, e); + } } private static final Logger log = LoggerFactory.getLogger(InternalTopologyBuilder.class); @@ -130,19 +149,29 @@ public InternalTopologyBuilder(final TopologyConfig topologyConfigs) { // all global topics private final Set globalTopics = new HashSet<>(); + private final Set noneResetTopics = new HashSet<>(); + private final Set earliestResetTopics = new HashSet<>(); private final Set latestResetTopics = new HashSet<>(); + private final Map durationResetTopics = new HashMap<>(); + + private final Set noneResetPatterns = new HashSet<>(); + private final Set earliestResetPatterns = new HashSet<>(); private final Set latestResetPatterns = new HashSet<>(); + private final Map durationResetPatterns = new HashMap<>(); + private final QuickUnion nodeGrouper = new QuickUnion<>(); // Used to capture subscribed topics via Patterns discovered during the partition assignment process. 
private final Set subscriptionUpdates = new HashSet<>(); + private final ProcessorWrapper processorWrapper; + private String applicationId = null; // keyed by subtopology id @@ -313,7 +342,7 @@ private boolean isMatch(final String topic) { @Override Source describe() { - return new Source(name, topics.size() == 0 ? null : new HashSet<>(topics), pattern); + return new Source(name, topics.isEmpty() ? null : new HashSet<>(topics), pattern); } } @@ -367,7 +396,11 @@ public final InternalTopologyBuilder setApplicationId(final String applicationId public final synchronized void setStreamsConfig(final StreamsConfig applicationConfig) { Objects.requireNonNull(applicationConfig, "config can't be null"); - topologyConfigs = new TopologyConfig(applicationConfig); + + final Properties topologyOverrides = topologyConfigs == null + ? new Properties() + : topologyConfigs.topologyOverrides; + topologyConfigs = new TopologyConfig(topologyName, applicationConfig, topologyOverrides); } @SuppressWarnings("deprecation") @@ -408,13 +441,13 @@ public final synchronized InternalTopologyBuilder rewriteTopology(final StreamsC // build global state stores for (final StoreFactory storeFactory : globalStateBuilders.values()) { storeFactory.configure(config); - globalStateStores.put(storeFactory.name(), storeFactory.build()); + globalStateStores.put(storeFactory.storeName(), storeFactory.builder().build()); } return this; } - public final void addSource(final Topology.AutoOffsetReset offsetReset, + public final void addSource(final AutoOffsetResetInternal offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, @@ -431,7 +464,7 @@ public final void addSource(final Topology.AutoOffsetReset offsetReset, for (final String topic : topics) { Objects.requireNonNull(topic, "topic names cannot be null"); validateTopicNotAlreadyRegistered(topic); - maybeAddToResetList(earliestResetTopics, latestResetTopics, offsetReset, topic); + maybeAddToResetList(noneResetTopics, earliestResetTopics, latestResetTopics, durationResetTopics, offsetReset, topic); rawSourceTopicNames.add(topic); } @@ -441,7 +474,7 @@ public final void addSource(final Topology.AutoOffsetReset offsetReset, nodeGroups = null; } - public final void addSource(final Topology.AutoOffsetReset offsetReset, + public final void addSource(final AutoOffsetResetInternal offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, @@ -460,7 +493,7 @@ public final void addSource(final Topology.AutoOffsetReset offsetReset, } } - maybeAddToResetList(earliestResetPatterns, latestResetPatterns, offsetReset, topicPattern); + maybeAddToResetList(noneResetPatterns, earliestResetPatterns, latestResetPatterns, durationResetPatterns, offsetReset, topicPattern); nodeFactories.put(name, new SourceNodeFactory<>(name, null, topicPattern, timestampExtractor, keyDeserializer, valDeserializer)); nodeToSourcePatterns.put(name, topicPattern); @@ -583,7 +616,7 @@ public final void addProcessor(final String name, public final void addStateStore(final StoreBuilder storeBuilder, final String... processorNames) { - addStateStore(new StoreBuilderWrapper(storeBuilder), false, processorNames); + addStateStore(StoreBuilderWrapper.wrapStoreBuilder(storeBuilder), false, processorNames); } public final void addStateStore(final StoreFactory storeFactory, @@ -595,27 +628,26 @@ public final void addStateStore(final StoreFactory storeFactory, final boolean allowOverride, final String... 
processorNames) { Objects.requireNonNull(storeFactory, "stateStoreFactory can't be null"); - final StoreFactory stateFactory = stateFactories.get(storeFactory.name()); + final StoreFactory stateFactory = stateFactories.get(storeFactory.storeName()); if (!allowOverride && stateFactory != null && !stateFactory.isCompatibleWith(storeFactory)) { - throw new TopologyException("A different StateStore has already been added with the name " + storeFactory.name()); + throw new TopologyException("A different StateStore has already been added with the name " + storeFactory.storeName()); } - if (globalStateBuilders.containsKey(storeFactory.name())) { - throw new TopologyException("A different GlobalStateStore has already been added with the name " + storeFactory.name()); + if (globalStateBuilders.containsKey(storeFactory.storeName())) { + throw new TopologyException("A different GlobalStateStore has already been added with the name " + storeFactory.storeName()); } - stateFactories.put(storeFactory.name(), storeFactory); + stateFactories.put(storeFactory.storeName(), storeFactory); if (processorNames != null) { for (final String processorName : processorNames) { Objects.requireNonNull(processorName, "processor name must not be null"); - connectProcessorAndStateStore(processorName, storeFactory.name()); + connectProcessorAndStateStore(processorName, storeFactory.storeName()); } } nodeGroups = null; } - public final void addGlobalStore(final StoreFactory storeFactory, - final String sourceName, + public final void addGlobalStore(final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, @@ -623,13 +655,20 @@ public final void addGlobalStore(final StoreFactory storeFactory, final String processorName, final ProcessorSupplier stateUpdateSupplier, final boolean reprocessOnRestore) { - Objects.requireNonNull(storeFactory, "store builder must not be null"); ApiUtils.checkSupplier(stateUpdateSupplier); + final Set> stores = stateUpdateSupplier.stores(); + if (stores == null || stores.size() != 1) { + throw new IllegalArgumentException( + "Global stores must pass in suppliers with exactly one store but got " + + (stores != null ? stores.size() : 0)); + } + final StoreFactory storeFactory = + StoreBuilderWrapper.wrapStoreBuilder(stores.iterator().next()); validateGlobalStoreArguments(sourceName, topic, processorName, stateUpdateSupplier, - storeFactory.name(), + storeFactory.storeName(), storeFactory.loggingEnabled()); validateTopicNotAlreadyRegistered(topic); @@ -651,18 +690,18 @@ public final void addGlobalStore(final StoreFactory storeFactory, keyDeserializer, valueDeserializer) ); - storeNameToReprocessOnRestore.put(storeFactory.name(), + storeNameToReprocessOnRestore.put(storeFactory.storeName(), reprocessOnRestore ? 
Optional.of(new ReprocessFactory<>(stateUpdateSupplier, keyDeserializer, valueDeserializer)) : Optional.empty()); nodeToSourceTopics.put(sourceName, Arrays.asList(topics)); nodeGrouper.add(sourceName); - nodeFactory.addStateStore(storeFactory.name()); + nodeFactory.addStateStore(storeFactory.storeName()); nodeFactories.put(processorName, nodeFactory); nodeGrouper.add(processorName); nodeGrouper.unite(processorName, predecessors); - globalStateBuilders.put(storeFactory.name(), storeFactory); - connectSourceStoreAndTopic(storeFactory.name(), topic); + globalStateBuilders.put(storeFactory.storeName(), storeFactory); + connectSourceStoreAndTopic(storeFactory.storeName(), topic); nodeGroups = null; } @@ -882,21 +921,28 @@ private void connectStateStoreNameToSourceTopicsOrPattern Collections.unmodifiableSet(sourcePatterns) ); } - } - private void maybeAddToResetList(final Collection earliestResets, + private void maybeAddToResetList(final Collection noneResets, + final Collection earliestResets, final Collection latestResets, - final Topology.AutoOffsetReset offsetReset, + final Map durationReset, + final AutoOffsetResetInternal offsetReset, final T item) { if (offsetReset != null) { - switch (offsetReset) { + switch (offsetReset.offsetResetStrategy()) { + case NONE: + noneResets.add(item); + break; case EARLIEST: earliestResets.add(item); break; case LATEST: latestResets.add(item); break; + case BY_DURATION: + durationReset.put(item, offsetReset.duration()); + break; default: throw new TopologyException(String.format("Unrecognized reset format %s", offsetReset)); } @@ -1127,7 +1173,7 @@ private void buildProcessorNode(final Map> pro if (topologyConfigs != null) { storeFactory.configure(topologyConfigs.applicationConfigs); } - store = storeFactory.build(); + store = storeFactory.builder().build(); stateStoreMap.put(stateStoreName, store); } else { store = globalStateStores.get(stateStoreName); @@ -1227,8 +1273,8 @@ public synchronized Map subtopologyToTopicsInfo() { // if the node is connected to a state store whose changelog topics are not predefined, // add to the changelog topics for (final StoreFactory stateFactory : stateFactories.values()) { - if (stateFactory.connectedProcessorNames().contains(node) && storeToChangelogTopic.containsKey(stateFactory.name())) { - final String topicName = storeToChangelogTopic.get(stateFactory.name()); + if (stateFactory.connectedProcessorNames().contains(node) && storeToChangelogTopic.containsKey(stateFactory.storeName())) { + final String topicName = storeToChangelogTopic.get(stateFactory.storeName()); if (!stateChangelogTopics.containsKey(topicName)) { final InternalTopicConfig internalTopicConfig = createChangelogTopicConfig(stateFactory, topicName); @@ -1314,19 +1360,30 @@ private InternalTopicConfig createChangelogTopicConfig(fi } public boolean hasOffsetResetOverrides() { - return !(earliestResetTopics.isEmpty() && earliestResetPatterns.isEmpty() - && latestResetTopics.isEmpty() && latestResetPatterns.isEmpty()); + return noneResetTopics.size() + noneResetPatterns.size() + + earliestResetTopics.size() + earliestResetPatterns.size() + + latestResetTopics.size() + latestResetPatterns.size() + + durationResetTopics.size() + durationResetPatterns.size() > 0; } - public OffsetResetStrategy offsetResetStrategy(final String topic) { - if (maybeDecorateInternalSourceTopics(earliestResetTopics).contains(topic) || + public AutoOffsetResetStrategy offsetResetStrategy(final String topic) { + final Optional resetDuration; + + if 
(maybeDecorateInternalSourceTopics(noneResetTopics).contains(topic) || + noneResetPatterns.stream().anyMatch(p -> p.matcher(topic).matches())) { + return AutoOffsetResetStrategy.NONE; + } else if (maybeDecorateInternalSourceTopics(earliestResetTopics).contains(topic) || earliestResetPatterns.stream().anyMatch(p -> p.matcher(topic).matches())) { - return EARLIEST; + return AutoOffsetResetStrategy.EARLIEST; } else if (maybeDecorateInternalSourceTopics(latestResetTopics).contains(topic) || latestResetPatterns.stream().anyMatch(p -> p.matcher(topic).matches())) { - return LATEST; + return AutoOffsetResetStrategy.LATEST; + } else if (maybeDecorateInternalSourceTopics(durationResetTopics.keySet()).contains(topic)) { + return AutoOffsetResetStrategy.fromString("by_duration:" + durationResetTopics.get(topic).toString()); + } else if ((resetDuration = findDuration(topic)).isPresent()) { + return AutoOffsetResetStrategy.fromString("by_duration:" + resetDuration.get()); } else if (containsTopic(topic)) { - return NONE; + return null; } else { throw new IllegalStateException(String.format( "Unable to lookup offset reset strategy for the following topic as it does not exist in the topology%s: %s", @@ -1336,6 +1393,19 @@ public OffsetResetStrategy offsetResetStrategy(final String topic) { } } + private Optional findDuration(final String topic) { + final List resetDuration = durationResetPatterns.entrySet().stream() + .filter(e -> e.getKey().matcher(topic).matches()) + .map(Map.Entry::getValue) + .collect(Collectors.toList()); + + if (resetDuration.size() > 1) { + throw new IllegalStateException("Found more than one reset duration for topic: " + topic); + } + + return resetDuration.isEmpty() ? Optional.empty() : resetDuration.stream().findAny(); + } + /** * @return map from state store name to full names (including application id/topology name prefix) * of all source topics whose processors are connected to it @@ -1389,7 +1459,7 @@ public synchronized Collection> copartitionGroups() { } } final Set> uniqueCopartitionGroups = new HashSet<>(topicsToCopartitionGroup.values()); - return Collections.unmodifiableList(new ArrayList<>(uniqueCopartitionGroups)); + return List.copyOf(uniqueCopartitionGroups); } private List maybeDecorateInternalSourceTopics(final Collection sourceTopics) { @@ -1417,9 +1487,10 @@ private String decorateTopic(final String topic) { + "applicationId hasn't been set. Call " + "setApplicationId first"); } - final String prefix = topologyConfigs == null ? - applicationId : - ProcessorContextUtils.topicNamePrefix(topologyConfigs.applicationConfigs.originals(), applicationId); + + final String prefix = topologyConfigs == null + ? 
applicationId + : ProcessorContextUtils.topicNamePrefix(topologyConfigs.applicationConfigs.originals(), applicationId); if (hasNamedTopology()) { return prefix + "-" + topologyName + "-" + topic; @@ -1428,7 +1499,6 @@ private String decorateTopic(final String topic) { } } - void initializeSubscription() { if (usesPatternSubscription()) { log.debug("Found pattern subscribed source topics, initializing consumer's subscription pattern."); @@ -2081,8 +2151,8 @@ public int compare(final TopologyDescription.Subtopology subtopology1, private static final SubtopologyComparator SUBTOPOLOGY_COMPARATOR = new SubtopologyComparator(); public static final class TopologyDescription implements org.apache.kafka.streams.TopologyDescription { - private final TreeSet subtopologies = new TreeSet<>(SUBTOPOLOGY_COMPARATOR); - private final TreeSet globalStores = new TreeSet<>(GLOBALSTORE_COMPARATOR); + private final TreeSet subtopologies = new TreeSet<>(SUBTOPOLOGY_COMPARATOR); + private final TreeSet globalStores = new TreeSet<>(GLOBALSTORE_COMPARATOR); private final String namedTopology; public TopologyDescription() { @@ -2093,21 +2163,21 @@ public TopologyDescription(final String namedTopology) { this.namedTopology = namedTopology; } - public void addSubtopology(final TopologyDescription.Subtopology subtopology) { + public void addSubtopology(final Subtopology subtopology) { subtopologies.add(subtopology); } - public void addGlobalStore(final TopologyDescription.GlobalStore globalStore) { + public void addGlobalStore(final GlobalStore globalStore) { globalStores.add(globalStore); } @Override - public Set subtopologies() { + public Set subtopologies() { return Collections.unmodifiableSet(subtopologies); } @Override - public Set globalStores() { + public Set globalStores() { return Collections.unmodifiableSet(globalStores); } @@ -2120,17 +2190,17 @@ public String toString() { } else { sb.append("Topology: ").append(namedTopology).append(":\n "); } - final TopologyDescription.Subtopology[] sortedSubtopologies = - subtopologies.descendingSet().toArray(new TopologyDescription.Subtopology[0]); - final TopologyDescription.GlobalStore[] sortedGlobalStores = + final Subtopology[] sortedSubtopologies = + subtopologies.descendingSet().toArray(new Subtopology[0]); + final GlobalStore[] sortedGlobalStores = globalStores.descendingSet().toArray(new GlobalStore[0]); int expectedId = 0; int subtopologiesIndex = sortedSubtopologies.length - 1; int globalStoresIndex = sortedGlobalStores.length - 1; while (subtopologiesIndex != -1 && globalStoresIndex != -1) { sb.append(" "); - final TopologyDescription.Subtopology subtopology = sortedSubtopologies[subtopologiesIndex]; - final TopologyDescription.GlobalStore globalStore = sortedGlobalStores[globalStoresIndex]; + final Subtopology subtopology = sortedSubtopologies[subtopologiesIndex]; + final GlobalStore globalStore = sortedGlobalStores[globalStoresIndex]; if (subtopology.id() == expectedId) { sb.append(subtopology); subtopologiesIndex--; @@ -2141,13 +2211,13 @@ public String toString() { expectedId++; } while (subtopologiesIndex != -1) { - final TopologyDescription.Subtopology subtopology = sortedSubtopologies[subtopologiesIndex]; + final Subtopology subtopology = sortedSubtopologies[subtopologiesIndex]; sb.append(" "); sb.append(subtopology); subtopologiesIndex--; } while (globalStoresIndex != -1) { - final TopologyDescription.GlobalStore globalStore = sortedGlobalStores[globalStoresIndex]; + final GlobalStore globalStore = sortedGlobalStores[globalStoresIndex]; sb.append(" 
"); sb.append(globalStore); globalStoresIndex--; @@ -2245,4 +2315,22 @@ public boolean hasNamedTopology() { public synchronized Map stateStores() { return stateFactories; } + + public WrappedFixedKeyProcessorSupplier wrapFixedKeyProcessorSupplier( + final String name, + final FixedKeyProcessorSupplier processorSupplier + ) { + return ProcessorWrapper.asWrappedFixedKey( + processorWrapper.wrapFixedKeyProcessorSupplier(name, processorSupplier) + ); + } + + public WrappedProcessorSupplier wrapProcessorSupplier( + final String name, + final ProcessorSupplier processorSupplier + ) { + return ProcessorWrapper.asWrapped( + processorWrapper.wrapProcessorSupplier(name, processorSupplier) + ); + } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/NoOpProcessorWrapper.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/NoOpProcessorWrapper.java new file mode 100644 index 0000000000000..7a969731b06f8 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/NoOpProcessorWrapper.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.streams.processor.internals; + +import org.apache.kafka.streams.processor.api.FixedKeyProcessor; +import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.ProcessorWrapper; +import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier; +import org.apache.kafka.streams.state.StoreBuilder; + +import java.util.Set; + +public class NoOpProcessorWrapper implements ProcessorWrapper { + + @Override + public WrappedProcessorSupplier wrapProcessorSupplier(final String processorName, + final ProcessorSupplier processorSupplier) { + return ProcessorWrapper.asWrapped(processorSupplier); + } + + @Override + public WrappedFixedKeyProcessorSupplier wrapFixedKeyProcessorSupplier(final String processorName, + final FixedKeyProcessorSupplier processorSupplier) { + return ProcessorWrapper.asWrappedFixedKey(processorSupplier); + } + + public static class WrappedProcessorSupplierImpl implements WrappedProcessorSupplier { + + private final ProcessorSupplier delegate; + + public WrappedProcessorSupplierImpl(final ProcessorSupplier delegate) { + this.delegate = delegate; + } + + @Override + public Set> stores() { + return delegate.stores(); + } + + @Override + public Processor get() { + return delegate.get(); + } + } + + public static class WrappedFixedKeyProcessorSupplierImpl implements WrappedFixedKeyProcessorSupplier { + + private final FixedKeyProcessorSupplier delegate; + + public WrappedFixedKeyProcessorSupplierImpl(final FixedKeyProcessorSupplier delegate) { + this.delegate = delegate; + } + + @Override + public Set> stores() { + return delegate.stores(); + } + + @Override + public FixedKeyProcessor get() { + return delegate.get(); + } + } +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/PartitionGroup.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/PartitionGroup.java index 5e57efb962898..5fb313ff6052d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/PartitionGroup.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/PartitionGroup.java @@ -250,10 +250,11 @@ StampedRecord nextRecord(final RecordInfo info, final long wallClockTime) { if (queue != null) { // get the first record from this queue. + final int oldSize = queue.size(); record = queue.poll(wallClockTime); if (record != null) { - --totalBuffered; + totalBuffered -= oldSize - queue.size(); if (queue.isEmpty()) { // if a certain queue has been drained, reset the flag diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorAdapter.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorAdapter.java deleted file mode 100644 index 79db3847cfb06..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorAdapter.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.processor.internals; - - -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.Record; - -@SuppressWarnings("deprecation") // Old PAPI compatibility -public final class ProcessorAdapter implements Processor { - private final org.apache.kafka.streams.processor.Processor delegate; - private InternalProcessorContext context; - - public static Processor adapt(final org.apache.kafka.streams.processor.Processor delegate) { - if (delegate == null) { - return null; - } else { - return new ProcessorAdapter<>(delegate); - } - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - public static Processor adaptRaw(final org.apache.kafka.streams.processor.Processor delegate) { - if (delegate == null) { - return null; - } else { - return new ProcessorAdapter<>(delegate); - } - } - - private ProcessorAdapter(final org.apache.kafka.streams.processor.Processor delegate) { - this.delegate = delegate; - } - - @Override - public void init(final ProcessorContext context) { - // It only makes sense to use this adapter internally to Streams, in which case - // all contexts are implementations of InternalProcessorContext. - // This would fail if someone were to use this adapter in a unit test where - // the context only implements api.ProcessorContext. - this.context = (InternalProcessorContext) context; - delegate.init((org.apache.kafka.streams.processor.ProcessorContext) context); - } - - @Override - public void process(final Record record) { - final ProcessorRecordContext processorRecordContext = context.recordContext(); - try { - context.setRecordContext(new ProcessorRecordContext( - record.timestamp(), - context.offset(), - context.partition(), - context.topic(), - record.headers() - )); - delegate.process(record.key(), record.value()); - } finally { - context.setRecordContext(processorRecordContext); - } - } - - @Override - public void close() { - delegate.close(); - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextImpl.java index e526f89d78ef1..6a53afd07b3d0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextImpl.java @@ -174,7 +174,7 @@ public S getStateStore(final String name) { " as the store is not connected to the processor. If you add stores manually via '.addStateStore()' " + "make sure to connect the added store to the processor by providing the processor name to " + "'.addStateStore()' or connect them via '.connectProcessorAndStateStores()'. 
" + - "DSL users need to provide the store name to '.process()', '.transform()', or '.transformValues()' " + + "DSL users need to provide the store name to '.process()', '.processValues()', or '.transformValues()' " + "to connect the store to the corresponding operator, or they can provide a StoreBuilder by implementing " + "the stores() method on the Supplier itself. If you do not add stores manually, " + "please file a bug report at https://issues.apache.org/jira/projects/KAFKA."); @@ -236,8 +236,8 @@ public void forward(final Record record, final String childName) { final ProcessorNode previousNode = currentNode(); if (previousNode == null) { throw new StreamsException("Current node is unknown. This can happen if 'forward()' is called " + - "in an illegal scope. The root cause could be that a 'Processor' or 'Transformer' instance" + - " is shared. To avoid this error, make sure that your suppliers return new instances " + + "in an illegal scope. The root cause could be that a 'Processor' instance " + + "is shared. To avoid this error, make sure that your suppliers return new instances " + "each time 'get()' of Supplier is called and do not return the same object reference " + "multiple times."); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextUtils.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextUtils.java index ae404b5c7ffd4..906d129aa4984 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorContextUtils.java @@ -51,7 +51,7 @@ public static StreamsMetricsImpl metricsImpl(final StateStoreContext context) { public static String changelogFor(final StateStoreContext context, final String storeName, final Boolean newChangelogTopic) { final String prefix = topicNamePrefix(context.appConfigs(), context.applicationId()); if (context instanceof InternalProcessorContext && !newChangelogTopic) { - final String changelogTopic = ((InternalProcessorContext) context).changelogFor(storeName); + final String changelogTopic = ((InternalProcessorContext) context).changelogFor(storeName); if (changelogTopic != null) return changelogTopic; @@ -81,9 +81,10 @@ public static InternalProcessorContext asInternalProcessorContext(final Processo } } - public static InternalProcessorContext asInternalProcessorContext(final StateStoreContext context) { + @SuppressWarnings("unchecked") + public static InternalProcessorContext asInternalProcessorContext(final StateStoreContext context) { if (context instanceof InternalProcessorContext) { - return (InternalProcessorContext) context; + return (InternalProcessorContext) context; } else { throw new IllegalArgumentException( "This component requires internal features of Kafka Streams and must be disabled for unit tests." 
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorNode.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorNode.java index 9e2a79f0f2593..2bb58eb6b82bd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorNode.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorNode.java @@ -203,7 +203,10 @@ public void process(final Record record) { } catch (final FailedProcessingException | TaskCorruptedException | TaskMigratedException e) { // Rethrow exceptions that should not be handled here throw e; - } catch (final RuntimeException processingException) { + } catch (final Exception processingException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext( null, // only required to pass for DeserializationExceptionHandler internalProcessorContext.topic(), @@ -220,7 +223,10 @@ public void process(final Record record) { processingExceptionHandler.handle(errorHandlerContext, record, processingException), "Invalid ProductionExceptionHandler response." ); - } catch (final RuntimeException fatalUserException) { + } catch (final Exception fatalUserException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages log.error( "Processing error callback failed after processing error for record: {}", errorHandlerContext, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java index f4702c469f8f3..3506845d288af 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ProcessorStateManager.java @@ -24,11 +24,11 @@ import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.errors.TaskCorruptedException; import org.apache.kafka.streams.errors.TaskMigratedException; +import org.apache.kafka.streams.errors.internals.FailedProcessingException; import org.apache.kafka.streams.processor.CommitCallback; import org.apache.kafka.streams.processor.StateRestoreCallback; import org.apache.kafka.streams.processor.StateRestoreListener; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.state.internals.CachedStateStore; @@ -44,7 +44,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -62,12 +61,12 @@ * ProcessorStateManager is the source of truth for the current offset for each state store, * which is either the read offset during restoring, or the written offset during normal processing. * - * The offset is initialized as null when the state store is registered, and then it can be updated by + *
<p>The offset is initialized as null when the state store is registered, and then it can be updated by * loading checkpoint file, restore state stores, or passing from the record collector's written offsets. * - * When checkpointing, if the offset is not null it would be written to the file. + * <p>When checkpointing, if the offset is not null it would be written to the file. * - * The manager is also responsible for restoring state stores via their registered restore callback, + * <p>The manager is also responsible for restoring state stores via their registered restore callback, * which is used for both updating standby tasks as well as restoring active tasks. */ public class ProcessorStateManager implements StateManager {
@@ -233,8 +232,9 @@ static ProcessorStateManager createStartupTaskStateManager(final TaskId taskId, final LogContext logContext, final StateDirectory stateDirectory, final Map storeToChangelogTopic, + final Set sourcePartitions, final boolean stateUpdaterEnabled) { - return new ProcessorStateManager(taskId, TaskType.STANDBY, eosEnabled, logContext, stateDirectory, null, storeToChangelogTopic, new HashSet<>(0), stateUpdaterEnabled); + return new ProcessorStateManager(taskId, TaskType.STANDBY, eosEnabled, logContext, stateDirectory, null, storeToChangelogTopic, sourcePartitions, stateUpdaterEnabled); } /**
@@ -255,7 +255,7 @@ void assignToStreamThread(final LogContext logContext, this.sourcePartitions.addAll(sourcePartitions); } - void registerStateStores(final List allStores, final InternalProcessorContext processorContext) { + void registerStateStores(final List allStores, final InternalProcessorContext processorContext) { processorContext.uninitialize(); for (final StateStore store : allStores) { if (stores.containsKey(store.name())) {
@@ -263,7 +263,7 @@ void registerStateStores(final List allStores, final InternalProcess maybeRegisterStoreWithChangelogReader(store.name()); } } else { - store.init((StateStoreContext) processorContext, store); + store.init(processorContext, store); } log.trace("Registered state store {}", store.name()); }
@@ -538,13 +538,20 @@ public void flush() { } catch (final RuntimeException exception) { if (firstException == null) { // do NOT wrap the error if it is actually caused by Streams itself - if (exception instanceof StreamsException) + // In case of FailedProcessingException Do not keep the failed processing exception in the stack trace + if (exception instanceof FailedProcessingException) + firstException = new ProcessorStateException( + format("%sFailed to flush state store %s", logPrefix, store.name()), + exception.getCause()); + else if (exception instanceof StreamsException) firstException = exception; else firstException = new ProcessorStateException( format("%sFailed to flush state store %s", logPrefix, store.name()), exception); + log.error("Failed to flush state store {}: ", store.name(), firstException); + } else { + log.error("Failed to flush state store {}: ", store.name(), exception); } - log.error("Failed to flush state store {}: ", store.name(), exception); } } }
@@ -567,13 +574,18 @@ public void flushCache() { if (store instanceof TimeOrderedKeyValueBuffer) { store.flush(); } else if (store instanceof CachedStateStore) { - ((CachedStateStore) store).flushCache(); + ((CachedStateStore) store).flushCache(); } log.trace("Flushed cache or buffer {}", store.name()); } catch (final RuntimeException exception) { if (firstException == null) { // do NOT wrap the error if it is actually caused by Streams itself - if (exception instanceof StreamsException) { + // In case of FailedProcessingException Do not keep the failed processing exception in the stack trace + if (exception instanceof FailedProcessingException) { + firstException = new ProcessorStateException( + format("%sFailed to flush cache of store %s", logPrefix, store.name()), + exception.getCause()); + } else if (exception instanceof StreamsException) { firstException = exception; } else { firstException = new 
ProcessorStateException( @@ -581,8 +593,10 @@ public void flushCache() { exception ); } + log.error("Failed to flush cache of store {}: ", store.name(), firstException); + } else { + log.error("Failed to flush cache of store {}: ", store.name(), exception); } - log.error("Failed to flush cache of store {}: ", store.name(), exception); } } } @@ -618,13 +632,20 @@ public void close() throws ProcessorStateException { } catch (final RuntimeException exception) { if (firstException == null) { // do NOT wrap the error if it is actually caused by Streams itself - if (exception instanceof StreamsException) + // In case of FailedProcessingException Do not keep the failed processing exception in the stack trace + if (exception instanceof FailedProcessingException) + firstException = new ProcessorStateException( + format("%sFailed to close state store %s", logPrefix, store.name()), + exception.getCause()); + else if (exception instanceof StreamsException) firstException = exception; else firstException = new ProcessorStateException( format("%sFailed to close state store %s", logPrefix, store.name()), exception); + log.error("Failed to close state store {}: ", store.name(), firstException); + } else { + log.error("Failed to close state store {}: ", store.name(), exception); } - log.error("Failed to close state store {}: ", store.name(), exception); } } @@ -657,7 +678,7 @@ void recycle() { final StateStore store = metadata.stateStore; if (store instanceof CachedStateStore) { - ((CachedStateStore) store).clearCache(); + ((CachedStateStore) store).clearCache(); } log.trace("Cleared cache {}", store.name()); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java index 8dedeb7dad41e..d47db7ea94261 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java @@ -33,6 +33,7 @@ import org.apache.kafka.common.errors.SecurityDisabledException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.TransactionAbortedException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; @@ -160,7 +161,7 @@ public void send(final String topic, } if (!partitions.isEmpty()) { final Optional> maybeMulticastPartitions = partitioner.partitions(topic, key, value, partitions.size()); - if (!maybeMulticastPartitions.isPresent()) { + if (maybeMulticastPartitions.isEmpty()) { // A null//empty partition indicates we should use the default partitioner send(topic, key, value, headers, null, timestamp, keySerializer, valueSerializer, processorNodeId, context); } else { @@ -210,7 +211,10 @@ public void send(final String topic, key, keySerializer, exception); - } catch (final RuntimeException serializationException) { + } catch (final Exception serializationException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages handleException( ProductionExceptionHandler.SerializationExceptionOrigin.KEY, topic, @@ -221,7 +225,8 @@ public void send(final String topic, timestamp, 
processorNodeId, context, - serializationException); + serializationException + ); return; } @@ -234,7 +239,10 @@ public void send(final String topic, value, valueSerializer, exception); - } catch (final RuntimeException serializationException) { + } catch (final Exception serializationException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages handleException( ProductionExceptionHandler.SerializationExceptionOrigin.VALUE, topic, @@ -312,7 +320,7 @@ private void handleException(final ProductionExceptionHandler.Serializati final Long timestamp, final String processorNodeId, final InternalProcessorContext context, - final RuntimeException serializationException) { + final Exception serializationException) { log.debug(String.format("Error serializing record for topic %s", topic), serializationException); final ProducerRecord record = new ProducerRecord<>(topic, partition, timestamp, key, value, headers); @@ -328,7 +336,10 @@ private void handleException(final ProductionExceptionHandler.Serializati ), "Invalid ProductionExceptionHandler response." ); - } catch (final RuntimeException fatalUserException) { + } catch (final Exception fatalUserException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages log.error( String.format( "Production error callback failed after serialization error for record %s: %s", @@ -431,6 +442,11 @@ private void recordSendError(final String topic, errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since the producer is fenced, " + "indicating the task may be migrated out"; sendException.set(new TaskMigratedException(errorMessage, productionException)); + } else if (productionException instanceof TransactionAbortedException) { + // swallow silently + // + // TransactionAbortedException is only thrown after `abortTransaction()` was called, + // so it's only a followup error, and Kafka Streams is already handling the original error } else { final ProductionExceptionHandlerResponse response; try { @@ -442,7 +458,10 @@ private void recordSendError(final String topic, ), "Invalid ProductionExceptionHandler response." 
); - } catch (final RuntimeException fatalUserException) { + } catch (final Exception fatalUserException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages log.error( "Production error callback failed after production error for record {}", serializedRecord, @@ -557,7 +576,7 @@ private void removeAllProducedSensors() { @Override public Map offsets() { - return Collections.unmodifiableMap(new HashMap<>(offsets)); + return Map.copyOf(offsets); } private void checkForException() { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordDeserializer.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordDeserializer.java index 5ddafe654e98e..6f9fe989552f8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordDeserializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordDeserializer.java @@ -70,7 +70,10 @@ ConsumerRecord deserialize(final ProcessorContext processo rawRecord.headers(), rawRecord.leaderEpoch() ); - } catch (final RuntimeException deserializationException) { + } catch (final Exception deserializationException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages handleDeserializationFailure(deserializationExceptionHandler, processorContext, deserializationException, rawRecord, log, droppedRecordsSensor, sourceNode().name()); return null; // 'handleDeserializationFailure' would either throw or swallow -- if we swallow we need to skip the record by returning 'null' } @@ -78,7 +81,7 @@ ConsumerRecord deserialize(final ProcessorContext processo public static void handleDeserializationFailure(final DeserializationExceptionHandler deserializationExceptionHandler, final ProcessorContext processorContext, - final RuntimeException deserializationException, + final Exception deserializationException, final ConsumerRecord rawRecord, final Logger log, final Sensor droppedRecordsSensor, @@ -100,7 +103,10 @@ public static void handleDeserializationFailure(final DeserializationExceptionHa deserializationExceptionHandler.handle(errorHandlerContext, rawRecord, deserializationException), "Invalid DeserializationExceptionHandler response." 
); - } catch (final RuntimeException fatalUserException) { + } catch (final Exception fatalUserException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages log.error( "Deserialization error callback failed after deserialization error for record {}", rawRecord, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RepartitionTopics.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RepartitionTopics.java index ff217925f9d04..2a0358a866b3d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RepartitionTopics.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RepartitionTopics.java @@ -217,7 +217,7 @@ private void setRepartitionSourceTopicPartitionCount(final Map repartitionSourceTopicPartitionCount = repartitionTopicMetadata.get(repartitionSourceTopic).numberOfPartitions(); - if (!repartitionSourceTopicPartitionCount.isPresent()) { + if (repartitionSourceTopicPartitionCount.isEmpty()) { final Integer numPartitions = computePartitionCount( repartitionTopicMetadata, topicGroups, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SerdeGetter.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SerdeGetter.java index 72bfc99804b9f..74665bc38774e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/SerdeGetter.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/SerdeGetter.java @@ -18,37 +18,34 @@ import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.streams.processor.StateStoreContext; +import org.apache.kafka.streams.processor.api.ProcessorContext; + +import java.util.function.Supplier; /** * Allows serde access across different context types. */ public class SerdeGetter { - private final org.apache.kafka.streams.processor.ProcessorContext oldProcessorContext; - private final org.apache.kafka.streams.processor.api.ProcessorContext newProcessorContext; - private final StateStoreContext stateStorecontext; - public SerdeGetter(final org.apache.kafka.streams.processor.ProcessorContext context) { - oldProcessorContext = context; - newProcessorContext = null; - stateStorecontext = null; - } - public SerdeGetter(final org.apache.kafka.streams.processor.api.ProcessorContext context) { - oldProcessorContext = null; - newProcessorContext = context; - stateStorecontext = null; + private final Supplier> keySerdeSupplier; + private final Supplier> valueSerdeSupplier; + + public SerdeGetter(final ProcessorContext context) { + keySerdeSupplier = context::keySerde; + valueSerdeSupplier = context::valueSerde; } + public SerdeGetter(final StateStoreContext context) { - oldProcessorContext = null; - newProcessorContext = null; - stateStorecontext = context; + keySerdeSupplier = context::keySerde; + valueSerdeSupplier = context::valueSerde; } - public Serde keySerde() { - return oldProcessorContext != null ? oldProcessorContext.keySerde() : - newProcessorContext != null ? newProcessorContext.keySerde() : stateStorecontext.keySerde(); + + public Serde keySerde() { + return keySerdeSupplier.get(); } - public Serde valueSerde() { - return oldProcessorContext != null ? oldProcessorContext.valueSerde() : - newProcessorContext != null ? 
newProcessorContext.valueSerde() : stateStorecontext.valueSerde(); + + public Serde valueSerde() { + return valueSerdeSupplier.get(); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StateDirectory.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StateDirectory.java index 1423fb934e737..97525b8972d82 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StateDirectory.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StateDirectory.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.streams.processor.internals; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; @@ -48,6 +49,7 @@ import java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -215,12 +217,17 @@ public void initializeStartupTasks(final TopologyMetadata topologyMetadata, // because it's possible that the topology has changed since that data was written, and is now stateless // this therefore prevents us from creating unnecessary Tasks just because of some left-over state if (subTopology.hasStateWithChangelogs()) { + final Set inputPartitions = topologyMetadata.nodeToSourceTopics(id).values().stream() + .flatMap(Collection::stream) + .map(t -> new TopicPartition(t, id.partition())) + .collect(Collectors.toSet()); final ProcessorStateManager stateManager = ProcessorStateManager.createStartupTaskStateManager( id, eosEnabled, logContext, this, subTopology.storeToChangelogTopic(), + inputPartitions, stateUpdaterEnabled ); @@ -234,7 +241,7 @@ public void initializeStartupTasks(final TopologyMetadata topologyMetadata, final Task task = new StandbyTask( id, - new HashSet<>(), + inputPartitions, subTopology, topologyMetadata.taskConfig(id), streamsMetrics, @@ -475,7 +482,6 @@ synchronized boolean lock(final TaskId taskId) { throw new IllegalStateException("The state directory has been deleted"); } else { lockedTasksToOwner.put(taskId, Thread.currentThread()); - // make sure the task directory actually exists, and create it if not return true; } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreBuilderWrapper.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreBuilderWrapper.java index b8522b8e2cd96..be915a8dfb637 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreBuilderWrapper.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreBuilderWrapper.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.processor.internals; -import org.apache.kafka.streams.processor.StateStore; +import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.internals.SessionStoreBuilder; import org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder; @@ -35,16 +35,32 @@ */ public class StoreBuilderWrapper implements StoreFactory { + private final StoreBuilder builder; private final Set connectedProcessorNames = new HashSet<>(); - public StoreBuilderWrapper(final StoreBuilder builder) { + public static StoreFactory wrapStoreBuilder(final StoreBuilder builder) { + if (builder instanceof FactoryWrappingStoreBuilder) { + return 
((FactoryWrappingStoreBuilder) builder).storeFactory(); + } else { + return new StoreBuilderWrapper(builder); + } + } + + private StoreBuilderWrapper(final StoreBuilder builder) { this.builder = builder; } @Override - public StateStore build() { - return builder.build(); + public void configure(final StreamsConfig config) { + if (builder instanceof ConfigurableStore) { + ((ConfigurableStore) builder).configure(config); + } + } + + @Override + public StoreBuilder builder() { + return builder; } @Override @@ -82,7 +98,7 @@ public boolean loggingEnabled() { } @Override - public String name() { + public String storeName() { return builder.name(); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreDelegatingProcessorSupplier.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreDelegatingProcessorSupplier.java new file mode 100644 index 0000000000000..cce8281e15eb3 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreDelegatingProcessorSupplier.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.processor.internals; + +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.state.StoreBuilder; + +import java.util.Set; + +public class StoreDelegatingProcessorSupplier implements ProcessorSupplier { + + private final ProcessorSupplier delegate; + private final Set> stores; + + public StoreDelegatingProcessorSupplier( + final ProcessorSupplier delegate, + final Set> stores + ) { + this.delegate = delegate; + this.stores = stores; + } + + @Override + public Set> stores() { + return stores; + } + + @Override + public Processor get() { + return delegate.get(); + } +} diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreFactory.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreFactory.java index b05c334c27f54..2abdad5998677 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreFactory.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StoreFactory.java @@ -19,6 +19,7 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.TopologyConfig; import org.apache.kafka.streams.processor.StateStore; +import org.apache.kafka.streams.state.StoreBuilder; import java.util.Map; import java.util.Set; @@ -45,13 +46,9 @@ * to {@link org.apache.kafka.streams.StreamsBuilder#StreamsBuilder(TopologyConfig)}

                 *
                */ -public interface StoreFactory { +public interface StoreFactory extends ConfigurableStore { - default void configure(final StreamsConfig config) { - // do nothing - } - - StateStore build(); + StoreBuilder builder(); long retentionPeriod(); @@ -61,7 +58,7 @@ default void configure(final StreamsConfig config) { boolean loggingEnabled(); - String name(); + String storeName(); boolean isWindowStore(); @@ -75,4 +72,83 @@ default void configure(final StreamsConfig config) { boolean isCompatibleWith(StoreFactory storeFactory); + class FactoryWrappingStoreBuilder implements StoreBuilder, ConfigurableStore { + + private final StoreFactory storeFactory; + + public FactoryWrappingStoreBuilder(final StoreFactory storeFactory) { + this.storeFactory = storeFactory; + } + + public StoreFactory storeFactory() { + return storeFactory; + } + + public void configure(final StreamsConfig config) { + storeFactory.configure(config); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final FactoryWrappingStoreBuilder that = (FactoryWrappingStoreBuilder) o; + + return storeFactory.isCompatibleWith(that.storeFactory); + } + + @Override + public int hashCode() { + return storeFactory.hashCode(); + } + + @Override + public StoreBuilder withCachingEnabled() { + throw new IllegalStateException("Should not try to modify StoreBuilder wrapper"); + } + + @Override + public StoreBuilder withCachingDisabled() { + storeFactory.withCachingDisabled(); + return this; + } + + @Override + public StoreBuilder withLoggingEnabled(final Map config) { + throw new IllegalStateException("Should not try to modify StoreBuilder wrapper"); + } + + @Override + public StoreBuilder withLoggingDisabled() { + storeFactory.withLoggingDisabled(); + return this; + } + + @SuppressWarnings("unchecked") + @Override + public T build() { + return (T) storeFactory.builder().build(); + } + + @Override + public Map logConfig() { + return storeFactory.logConfig(); + } + + @Override + public boolean loggingEnabled() { + return storeFactory.loggingEnabled(); + } + + @Override + public String name() { + return storeFactory.storeName(); + } + } + } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java index 92772c686af82..5000522ed0d26 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java @@ -52,8 +52,6 @@ import org.apache.kafka.streams.state.internals.ThreadCache; import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -744,7 +742,7 @@ public boolean isProcessable(final long wallClockTime) { } final boolean readyToProcess = partitionGroup.readyToProcess(wallClockTime); if (!readyToProcess) { - if (!timeCurrentIdlingStarted.isPresent()) { + if (timeCurrentIdlingStarted.isEmpty()) { timeCurrentIdlingStarted = Optional.of(wallClockTime); } } else { @@ -788,7 +786,7 @@ record = partitionGroup.nextRecord(recordInfo, wallClockTime); // after processing this record, if its partition queue's buffered size has been // decreased to the threshold, we can then resume the consumption on this partition - if (recordInfo.queue().size() == maxBufferedSize) { + if 
(recordInfo.queue().size() <= maxBufferedSize) { partitionsToResume.add(partition); } @@ -883,18 +881,6 @@ public void recordProcessTimeRatioAndBufferSize(final long allTaskProcessMs, fin processTimeMs = 0L; } - private String getStacktraceString(final Throwable e) { - String stacktrace = null; - try (final StringWriter stringWriter = new StringWriter(); - final PrintWriter printWriter = new PrintWriter(stringWriter)) { - e.printStackTrace(printWriter); - stacktrace = stringWriter.toString(); - } catch (final IOException ioe) { - log.error("Encountered error extracting stacktrace from this exception", ioe); - } - return stacktrace; - } - /** * @throws IllegalStateException if the current node is not null * @throws TaskMigratedException if the task producer got fenced (EOS only) @@ -937,7 +923,10 @@ record = null; throw createStreamsException(node.name(), e.getCause()); } catch (final TaskCorruptedException | TaskMigratedException e) { throw e; - } catch (final RuntimeException processingException) { + } catch (final Exception processingException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext( null, recordContext.topic(), @@ -955,7 +944,10 @@ record = null; processingExceptionHandler.handle(errorHandlerContext, null, processingException), "Invalid ProcessingExceptionHandler response." ); - } catch (final RuntimeException fatalUserException) { + } catch (final Exception fatalUserException) { + // while Java distinguishes checked vs unchecked exceptions, other languages + // like Scala or Kotlin do not, and thus we need to catch `Exception` + // (instead of `RuntimeException`) to work well with those languages log.error( "Processing error callback failed after processing error for record: {}", errorHandlerContext, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java index 88e599a92cd7c..d2ff9ff22eae8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java @@ -23,7 +23,9 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.InvalidOffsetException; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.OffsetAndTimestamp; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Metric; @@ -309,6 +311,7 @@ public boolean isStartingRunningOrPartitionAssigned() { private long lastLogSummaryMs = -1L; private long totalRecordsProcessedSinceLastSummary = 0L; private long totalPunctuatorsSinceLastSummary = 0L; + private long totalPolledSinceLastSummary = 0L; private long totalCommittedSinceLastSummary = 0L; private long now; @@ -369,10 +372,15 @@ public static StreamThread create(final TopologyMetadata topologyMetadata, final Runnable shutdownErrorHook, final BiConsumer streamsUncaughtExceptionHandler) { + final boolean 
stateUpdaterEnabled = InternalConfig.stateUpdaterEnabled(config.originals()); + final String threadId = clientId + THREAD_ID_SUBSTRING + threadIdx; + final String stateUpdaterId = threadId.replace(THREAD_ID_SUBSTRING, STATE_UPDATER_ID_SUBSTRING); + final String restorationThreadId = stateUpdaterEnabled ? stateUpdaterId : threadId; final String logPrefix = String.format("stream-thread [%s] ", threadId); final LogContext logContext = new LogContext(logPrefix); + final LogContext restorationLogContext = stateUpdaterEnabled ? new LogContext(String.format("state-updater [%s] ", restorationThreadId)) : logContext; final Logger log = logContext.logger(StreamThread.class); final ReferenceContainer referenceContainer = new ReferenceContainer(); @@ -382,13 +390,13 @@ public static StreamThread create(final TopologyMetadata topologyMetadata, referenceContainer.clientTags = config.getClientTags(); log.info("Creating restore consumer client"); - final Map restoreConsumerConfigs = config.getRestoreConsumerConfigs(restoreConsumerClientId(threadId)); + final Map restoreConsumerConfigs = config.getRestoreConsumerConfigs(restoreConsumerClientId(restorationThreadId)); final Consumer restoreConsumer = clientSupplier.getRestoreConsumer(restoreConsumerConfigs); final StoreChangelogReader changelogReader = new StoreChangelogReader( time, config, - logContext, + restorationLogContext, adminClient, restoreConsumer, userStateRestoreListener, @@ -397,7 +405,6 @@ public static StreamThread create(final TopologyMetadata topologyMetadata, final ThreadCache cache = new ThreadCache(logContext, cacheSizeBytes, streamsMetrics); - final boolean stateUpdaterEnabled = InternalConfig.stateUpdaterEnabled(config.originals()); final boolean proceessingThreadsEnabled = InternalConfig.processingThreadsEnabled(config.originals()); final ActiveTaskCreator activeTaskCreator = new ActiveTaskCreator( topologyMetadata, @@ -475,7 +482,6 @@ public static StreamThread create(final TopologyMetadata topologyMetadata, taskManager.setMainConsumer(mainConsumer); referenceContainer.mainConsumer = mainConsumer; - final String stateUpdaterId = threadId.replace(THREAD_ID_SUBSTRING, STATE_UPDATER_ID_SUBSTRING); final StreamsThreadMetricsDelegatingReporter reporter = new StreamsThreadMetricsDelegatingReporter(mainConsumer, threadId, stateUpdaterId); streamsMetrics.metricsRegistry().addReporter(reporter); @@ -611,6 +617,14 @@ public StreamThread(final Time time, streamsMetrics, time.milliseconds() ); + ThreadMetrics.addThreadStateTelemetryMetric( + threadId, + streamsMetrics, + (metricConfig, now) -> this.state().ordinal()); + ThreadMetrics.addThreadStateMetric( + threadId, + streamsMetrics, + (metricConfig, now) -> this.state()); ThreadMetrics.addThreadBlockedTimeMetric( threadId, new StreamThreadTotalBlockedTime( @@ -947,6 +961,7 @@ void runOnceWithoutProcessingThreads() { final long pollLatency; taskManager.resumePollingForPartitionsWithAvailableSpace(); pollLatency = pollPhase(); + totalPolledSinceLastSummary += 1; // Shutdown hook could potentially be triggered and transit the thread state to PENDING_SHUTDOWN during #pollRequests(). // The task manager internal states could be uninitialized if the state transition happens during #onPartitionsAssigned(). 
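Note on the thread-state gauges registered above: ThreadMetrics wires a Gauge whose value is re-evaluated on every metrics read, so the reported state is always current without explicit update calls. Below is a minimal, self-contained sketch of that same pattern against the plain org.apache.kafka.common.metrics.Metrics registry; the metric name, group, and class name are illustrative only and not part of this patch.

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.Metrics;

public final class ThreadStateGaugeSketch {

    public static void main(final String[] args) throws Exception {
        final Thread monitored = Thread.currentThread();

        try (Metrics metrics = new Metrics()) {
            // Illustrative name/group; the real Streams metric lives in the
            // thread-level group and carries thread-id tags.
            final MetricName name = metrics.metricName(
                "thread-state",
                "example-thread-metrics",
                "The current state of the thread");

            // The Gauge lambda is evaluated lazily on every read, so the
            // registry always reports the thread's state at read time.
            metrics.addMetric(name, (Gauge<Integer>) (config, now) -> monitored.getState().ordinal());

            System.out.println(metrics.metrics().get(name).metricValue());
        }
    }
}

Because the lambda captures the thread rather than a snapshot of its state, nothing needs to be recorded when the state changes; readers always see the latest value.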
@@ -1062,12 +1077,14 @@ void runOnceWithoutProcessingThreads() { pollRatioSensor.record((double) pollLatency / runOnceLatency, now); commitRatioSensor.record((double) totalCommitLatency / runOnceLatency, now); - if (logSummaryIntervalMs > 0 && now - lastLogSummaryMs > logSummaryIntervalMs) { - log.info("Processed {} total records, ran {} punctuators, and committed {} total tasks since the last update", - totalRecordsProcessedSinceLastSummary, totalPunctuatorsSinceLastSummary, totalCommittedSinceLastSummary); + final long timeSinceLastLog = now - lastLogSummaryMs; + if (logSummaryIntervalMs > 0 && timeSinceLastLog > logSummaryIntervalMs) { + log.info("Processed {} total records, ran {} punctuators, polled {} times and committed {} total tasks since the last update {}ms ago", + totalRecordsProcessedSinceLastSummary, totalPunctuatorsSinceLastSummary, totalPolledSinceLastSummary, totalCommittedSinceLastSummary, timeSinceLastLog); totalRecordsProcessedSinceLastSummary = 0L; totalPunctuatorsSinceLastSummary = 0L; + totalPolledSinceLastSummary = 0L; totalCommittedSinceLastSummary = 0L; lastLogSummaryMs = now; } @@ -1280,32 +1297,79 @@ private void resetOffsets(final Set partitions, final Exception final Set loggedTopics = new HashSet<>(); final Set seekToBeginning = new HashSet<>(); final Set seekToEnd = new HashSet<>(); + final Map seekByDuration = new HashMap<>(); final Set notReset = new HashSet<>(); for (final TopicPartition partition : partitions) { - final OffsetResetStrategy offsetResetStrategy = topologyMetadata.offsetResetStrategy(partition.topic()); + final Optional offsetResetStrategy = topologyMetadata.offsetResetStrategy(partition.topic()); - // This may be null if the task we are currently processing was apart of a named topology that was just removed. - // TODO KAFKA-13713: keep the StreamThreads and TopologyMetadata view of named topologies in sync until final thread has acked + // TODO + // This may be null if the task we are currently processing was part of a named topology that was just removed. 
+ // After named topologies are removed, we can update `topologyMetadata.offsetResetStrateg()` so it + // will not return null any longer, and we can remove this check if (offsetResetStrategy != null) { - switch (offsetResetStrategy) { - case EARLIEST: - addToResetList(partition, seekToBeginning, "Setting topic '{}' to consume from {} offset", "earliest", loggedTopics); - break; - case LATEST: - addToResetList(partition, seekToEnd, "Setting topic '{}' to consume from {} offset", "latest", loggedTopics); - break; - case NONE: - if ("earliest".equals(originalReset)) { - addToResetList(partition, seekToBeginning, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "earliest", loggedTopics); - } else if ("latest".equals(originalReset)) { - addToResetList(partition, seekToEnd, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "latest", loggedTopics); - } else { - notReset.add(partition); - } - break; - default: - throw new IllegalStateException("Unable to locate topic " + partition.topic() + " in the topology"); + if (offsetResetStrategy.isPresent()) { + final AutoOffsetResetStrategy resetPolicy = offsetResetStrategy.get(); + + if (resetPolicy == AutoOffsetResetStrategy.NONE) { + notReset.add(partition); + } else if (resetPolicy == AutoOffsetResetStrategy.EARLIEST) { + addToResetList( + partition, + seekToBeginning, + "Setting topic '{}' to consume from earliest offset", + loggedTopics + ); + } else if (resetPolicy == AutoOffsetResetStrategy.LATEST) { + addToResetList( + partition, + seekToEnd, + "Setting topic '{}' to consume from latest offset", + loggedTopics + ); + } else if (resetPolicy.type() == AutoOffsetResetStrategy.StrategyType.BY_DURATION) { + addToResetList( + partition, + seekByDuration, + resetPolicy.duration().get(), + "Setting topic '{}' to consume from by_duration:{}", + resetPolicy.duration().get().toString(), + loggedTopics + ); + } else { + throw new IllegalStateException("Unknown reset policy " + resetPolicy); + } + } else { + final AutoOffsetResetStrategy resetPolicy = AutoOffsetResetStrategy.fromString(originalReset); + + if (resetPolicy == AutoOffsetResetStrategy.NONE) { + notReset.add(partition); + } else if (resetPolicy == AutoOffsetResetStrategy.EARLIEST) { + addToResetList( + partition, + seekToBeginning, + "No custom setting defined for topic '{}' using original config 'earliest' for offset reset", + loggedTopics + ); + } else if (resetPolicy == AutoOffsetResetStrategy.LATEST) { + addToResetList( + partition, + seekToEnd, + "No custom setting defined for topic '{}' using original config 'latest' for offset reset", + loggedTopics + ); + } else if (resetPolicy.type() == AutoOffsetResetStrategy.StrategyType.BY_DURATION) { + addToResetList( + partition, + seekByDuration, + resetPolicy.duration().get(), + "No custom setting defined for topic '{}' using original config 'by_duration:{}' for offset reset", + resetPolicy.duration().get().toString(), + loggedTopics + ); + } else { + throw new IllegalStateException("Unknown reset policy " + resetPolicy); + } } } } @@ -1318,6 +1382,46 @@ private void resetOffsets(final Set partitions, final Exception if (!seekToEnd.isEmpty()) { mainConsumer.seekToEnd(seekToEnd); } + + if (!seekByDuration.isEmpty()) { + final long nowMs = time.milliseconds(); + final Map seekToTimestamps = seekByDuration.entrySet().stream() + .map(e -> { + long seekMs = nowMs - e.getValue().toMillis(); + if (seekMs < 0L) { + log.debug("Cannot reset offset to negative timestamp {} for 
partition {}. Seeking to timestamp 0 instead.", seekMs, e.getKey()); + seekMs = 0L; + } + return Map.entry(e.getKey(), seekMs); + }) + .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), Map::putAll); + + try { + for (final Map.Entry partitionAndOffset : mainConsumer.offsetsForTimes(seekToTimestamps).entrySet()) { + final TopicPartition partition = partitionAndOffset.getKey(); + final OffsetAndTimestamp seekOffset = partitionAndOffset.getValue(); + if (seekOffset != null) { + mainConsumer.seek(partition, new OffsetAndMetadata(seekOffset.offset())); + } else { + log.debug( + "Cannot reset offset to non-existing timestamp {} (larger than timestamp of last record)" + + " for partition {}. Seeking to end instead.", + seekToTimestamps.get(partition), + partition + ); + mainConsumer.seekToEnd(Collections.singleton(partitionAndOffset.getKey())); + } + } + } catch (final TimeoutException timeoutException) { + taskManager.maybeInitTaskTimeoutsOrThrow(seekByDuration.keySet(), timeoutException, now); + log.debug( + String.format( + "Could not reset offset for %s due to the following exception; will retry.", + seekByDuration.keySet()), + timeoutException + ); + } + } } else { final String notResetString = notReset.stream() @@ -1341,14 +1445,34 @@ private void resetOffsets(final Set partitions, final Exception } } - private void addToResetList(final TopicPartition partition, final Set partitions, final String logMessage, final String resetPolicy, final Set loggedTopics) { + private void addToResetList( + final TopicPartition partition, + final Set partitions, + final String resetPolicy, + final Set loggedTopics + ) { final String topic = partition.topic(); if (loggedTopics.add(topic)) { - log.info(logMessage, topic, resetPolicy); + log.info("Setting topic '{}' to consume from {} offset", topic, resetPolicy); } partitions.add(partition); } + private void addToResetList( + final TopicPartition partition, + final Map durationForPartitions, + final Duration durationTime, + final String logMessage, + final String durationString, + final Set loggedTopics + ) { + final String topic = partition.topic(); + if (loggedTopics.add(topic)) { + log.info(logMessage, topic, durationString); + } + durationForPartitions.put(partition, durationTime); + } + // This method is added for usage in tests where mocking the underlying native call is not possible. 
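The resetOffsets() changes above translate a by_duration reset into a timestamp (now minus the configured duration, clamped at zero), resolve that timestamp via offsetsForTimes(), and fall back to seeking to the end when no record exists at or after it. A minimal sketch of that lookup-and-seek pattern against a plain Consumer handle follows; the helper class, method name, and signature are illustrative and not part of this patch.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public final class ByDurationResetSketch {

    private ByDurationResetSketch() {}

    // Seeks each partition to the first offset whose timestamp is at least
    // (nowMs - duration); partitions with no such record are seeked to the end.
    public static void resetByDuration(final Consumer<byte[], byte[]> consumer,
                                       final Set<TopicPartition> partitions,
                                       final Duration duration,
                                       final long nowMs) {
        final Map<TopicPartition, Long> seekToTimestamps = new HashMap<>();
        for (final TopicPartition partition : partitions) {
            // clamp at zero, mirroring the negative-timestamp guard above
            seekToTimestamps.put(partition, Math.max(0L, nowMs - duration.toMillis()));
        }

        for (final Map.Entry<TopicPartition, OffsetAndTimestamp> entry :
                consumer.offsetsForTimes(seekToTimestamps).entrySet()) {
            final OffsetAndTimestamp found = entry.getValue();
            if (found != null) {
                consumer.seek(entry.getKey(), found.offset());
            } else {
                // no record at or after the requested timestamp: start from the end
                consumer.seekToEnd(Collections.singleton(entry.getKey()));
            }
        }
    }
}

As in the hunk above, a null OffsetAndTimestamp means the partition has no record at or after the requested timestamp, so seeking to the end keeps the behaviour consistent with "read only data newer than the duration".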
public boolean isThreadAlive() { return isAlive(); diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java index ca8c7accdaf73..ce019036e11b4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java @@ -463,7 +463,7 @@ private List rebuildMetadataForSingleTopology(final Map>, Integer> getPartition = maybeMulticastPartitions -> { - if (!maybeMulticastPartitions.isPresent()) { + if (maybeMulticastPartitions.isEmpty()) { return null; } if (maybeMulticastPartitions.get().size() != 1) { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java index 1048b5a2ecfdf..546186b2dbd15 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java @@ -70,6 +70,7 @@ public class StreamsProducer { private Producer producer; private boolean transactionInFlight = false; private boolean transactionInitialized = false; + private boolean closed = false; private double oldProducerTotalBlockedTime = 0; // we have a single `StreamsProducer` per thread, and thus a single `sendException` instance, // which we share across all tasks, ie, all `RecordCollectorImpl` @@ -98,6 +99,10 @@ boolean transactionInFlight() { return transactionInFlight; } + boolean isClosed() { + return closed; + } + /** * @throws IllegalStateException if EOS is disabled */ @@ -320,6 +325,7 @@ void flush() { void close() { producer.close(); + closed = true; transactionInFlight = false; transactionInitialized = false; } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java index 1ca98aeba5de6..037ff941105a4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TaskManager.java @@ -337,7 +337,7 @@ private Map> assignStartupTasks(final Map inputPartitions = entry.getValue(); task.stateManager().assignToStreamThread(new LogContext(threadLogPrefix), changelogReader, inputPartitions); - task.updateInputPartitions(inputPartitions, topologyMetadata.nodeToSourceTopics(taskId)); + updateInputPartitionsOfStandbyTaskIfTheyChanged(task, inputPartitions); assignedTasks.put(task, inputPartitions); } @@ -1076,7 +1076,8 @@ private void addTaskToStateUpdater(final Task task) { } catch (final LockException lockException) { // The state directory may still be locked by another thread, when the rebalance just happened. // Retry in the next iteration. - log.info("Encountered lock exception. Reattempting locking the state in the next iteration.", lockException); + log.info("Encountered lock exception. Reattempting locking the state in the next iteration. 
Error message was: {}", + lockException.getMessage()); tasks.addPendingTasksToInit(Collections.singleton(task)); updateOrCreateBackoffRecord(task.id(), nowMs); } @@ -1885,6 +1886,18 @@ void updateNextOffsets(final Map nextOffsets) } } + void maybeInitTaskTimeoutsOrThrow( + final Collection partitions, + final TimeoutException timeoutException, + final long nowMs + ) { + for (final TopicPartition partition : partitions) { + final Task task = getActiveTask(partition); + task.maybeInitTaskTimeoutOrThrow(nowMs, timeoutException); + stateUpdater.add(task); + } + } + private Task getActiveTask(final TopicPartition partition) { final Task activeTask = tasks.activeTasksForInputPartition(partition); diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java index b1f6af32c9124..fe04f2c4613f2 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.processor.internals; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.internals.KafkaFutureImpl; @@ -427,10 +427,10 @@ public boolean hasOffsetResetOverrides() { return hasNamedTopologies() || evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::hasOffsetResetOverrides); } - public OffsetResetStrategy offsetResetStrategy(final String topic) { + public Optional offsetResetStrategy(final String topic) { for (final InternalTopologyBuilder builder : builders.values()) { if (builder.containsTopic(topic)) { - return builder.offsetResetStrategy(topic); + return Optional.ofNullable(builder.offsetResetStrategy(topic)); } } log.warn("Unable to look up offset reset strategy for topic {} " + @@ -439,6 +439,9 @@ public OffsetResetStrategy offsetResetStrategy(final String topic) { "persist or appear frequently.", topic, namedTopologiesView() ); + // returning `null` for an Optional return type triggers spotbugs + // we added an exception for NP_OPTIONAL_RETURN_NULL for this method + // when we remove NamedTopologies, we can remove this exception return null; } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/AssignorConfiguration.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/AssignorConfiguration.java index ad6aca2bac8f5..bc2324044da08 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/AssignorConfiguration.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/AssignorConfiguration.java @@ -130,6 +130,7 @@ public RebalanceProtocol rebalanceProtocol() { case UPGRADE_FROM_36: case UPGRADE_FROM_37: case UPGRADE_FROM_38: + case UPGRADE_FROM_39: // we need to add new version when new "upgrade.from" values become available // This config is for explicitly sending FK response to a requested partition @@ -192,6 +193,7 @@ public int configuredMetadataVersion(final int priorVersion) { case UPGRADE_FROM_36: case UPGRADE_FROM_37: case UPGRADE_FROM_38: + case UPGRADE_FROM_39: // we need to add new version when new "upgrade.from" values become available // This config is for explicitly 
sending FK response to a requested partition diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/DefaultKafkaStreamsState.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/DefaultKafkaStreamsState.java index f2d3e1b6af2ca..06f5c9ce96947 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/DefaultKafkaStreamsState.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/DefaultKafkaStreamsState.java @@ -99,7 +99,7 @@ public SortedSet previousStandbyTasks() { @Override public long lagFor(final TaskId task) { - if (!taskLagTotals.isPresent()) { + if (taskLagTotals.isEmpty()) { LOG.error("lagFor was called on a KafkaStreamsState {} that does not support lag computations.", processId); throw new UnsupportedOperationException("Lag computation was not requested for KafkaStreamsState with process " + processId); } @@ -115,7 +115,7 @@ public long lagFor(final TaskId task) { @Override public SortedSet prevTasksByLag(final String consumerClientId) { - if (!taskLagTotals.isPresent()) { + if (taskLagTotals.isEmpty()) { LOG.error("prevTasksByLag was called on a KafkaStreamsState {} that does not support lag computations.", processId); throw new UnsupportedOperationException("Lag computation was not requested for KafkaStreamsState with process " + processId); } @@ -139,7 +139,7 @@ public SortedSet prevTasksByLag(final String consumerClientId) { @Override public Map statefulTasksToLagSums() { - if (!taskLagTotals.isPresent()) { + if (taskLagTotals.isEmpty()) { LOG.error("statefulTasksToLagSums was called on a KafkaStreamsState {} that does not support lag computations.", processId); throw new UnsupportedOperationException("Lag computation was not requested for KafkaStreamsState with process " + processId); } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/RackAwareTaskAssignor.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/RackAwareTaskAssignor.java index 05af50dec003a..455afd0af701f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/RackAwareTaskAssignor.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/assignment/RackAwareTaskAssignor.java @@ -241,7 +241,7 @@ public static boolean validateClientRack(final Map previousRackInfo = null; for (final Map.Entry> rackEntry : entry.getValue().entrySet()) { - if (!rackEntry.getValue().isPresent()) { + if (rackEntry.getValue().isEmpty()) { if (!StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE.equals(assignmentConfigs.rackAwareAssignmentStrategy())) { log.error( String.format("RackId doesn't exist for process %s and consumer %s", diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java index b61a4fb3e27d6..ca26b70283956 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java @@ -22,6 +22,7 @@ import java.util.Map; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.AVG_LATENCY_DESCRIPTION; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.LATENCY_DESCRIPTION_SUFFIX; import static 
org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.LATENCY_SUFFIX; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.MAX_LATENCY_DESCRIPTION; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.PROCESSOR_NODE_LEVEL_GROUP; @@ -68,8 +69,8 @@ private ProcessorNodeMetrics() {} private static final String EMIT_FINAL_LATENCY = EMITTED_RECORDS + LATENCY_SUFFIX; private static final String EMIT_FINAL_DESCRIPTION = "calls to emit final"; - private static final String EMIT_FINAL_AVG_LATENCY_DESCRIPTION = AVG_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION; - private static final String EMIT_FINAL_MAX_LATENCY_DESCRIPTION = MAX_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION; + private static final String EMIT_FINAL_AVG_LATENCY_DESCRIPTION = AVG_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION + LATENCY_DESCRIPTION_SUFFIX; + private static final String EMIT_FINAL_MAX_LATENCY_DESCRIPTION = MAX_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION + LATENCY_DESCRIPTION_SUFFIX; public static Sensor suppressionEmitSensor(final String threadId, final String taskId, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/StreamsMetricsImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/StreamsMetricsImpl.java index af693d14bc331..670c2ef639a14 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/StreamsMetricsImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/StreamsMetricsImpl.java @@ -151,6 +151,7 @@ public int hashCode() { public static final String RATIO_DESCRIPTION = "The fraction of time the thread spent on "; public static final String AVG_LATENCY_DESCRIPTION = "The average latency of "; public static final String MAX_LATENCY_DESCRIPTION = "The maximum latency of "; + public static final String LATENCY_DESCRIPTION_SUFFIX = " in milliseconds"; public static final String RATE_DESCRIPTION_PREFIX = "The average number of "; public static final String RATE_DESCRIPTION_SUFFIX = " per second"; diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetrics.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetrics.java index 0793e56b61e1f..a5ba7894c46ca 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetrics.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetrics.java @@ -16,8 +16,10 @@ */ package org.apache.kafka.streams.processor.internals.metrics; +import org.apache.kafka.common.metrics.Gauge; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.Sensor.RecordingLevel; +import org.apache.kafka.streams.processor.internals.StreamThread; import org.apache.kafka.streams.processor.internals.StreamThreadTotalBlockedTime; import java.util.Map; @@ -44,7 +46,9 @@ private ThreadMetrics() {} private static final String CREATE_TASK = "task-created"; private static final String CLOSE_TASK = "task-closed"; private static final String BLOCKED_TIME = "blocked-time-ns-total"; + private static final String STATE = "state"; private static final String THREAD_START_TIME = "thread-start-time"; + private static final String THREAD_STATE = "thread-state"; private static final String COMMIT_DESCRIPTION = "calls to commit"; private static final String COMMIT_TOTAL_DESCRIPTION = TOTAL_DESCRIPTION + COMMIT_DESCRIPTION; @@ -88,6 
+92,8 @@ private ThreadMetrics() {} "The total time the thread spent blocked on kafka in nanoseconds"; private static final String THREAD_START_TIME_DESCRIPTION = "The time that the thread was started"; + private static final String THREAD_STATE_DESCRIPTION = + "The current state of the thread"; public static Sensor createTaskSensor(final String threadId, final StreamsMetricsImpl streamsMetrics) { @@ -290,6 +296,30 @@ public static void addThreadStartTimeMetric(final String threadId, ); } + public static void addThreadStateTelemetryMetric(final String threadId, + final StreamsMetricsImpl streamsMetrics, + final Gauge threadStateProvider) { + streamsMetrics.addThreadLevelMutableMetric( + THREAD_STATE, + THREAD_STATE_DESCRIPTION, + threadId, + threadStateProvider + ); + } + + public static void addThreadStateMetric(final String threadId, + final StreamsMetricsImpl streamsMetrics, + final Gauge threadStateProvider) { + streamsMetrics.addThreadLevelMutableMetric( + STATE, + THREAD_STATE_DESCRIPTION, + threadId, + threadStateProvider + ); + } + + + public static void addThreadBlockedTimeMetric(final String threadId, final StreamThreadTotalBlockedTime blockedTime, final StreamsMetricsImpl streamsMetrics) { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/namedtopology/KafkaStreamsNamedTopologyWrapper.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/namedtopology/KafkaStreamsNamedTopologyWrapper.java index 977e4ad97bf1b..943f00dc2e57b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/namedtopology/KafkaStreamsNamedTopologyWrapper.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/namedtopology/KafkaStreamsNamedTopologyWrapper.java @@ -215,7 +215,7 @@ public RemoveNamedTopologyResult removeNamedTopology(final String topologyToRemo removeTopologyFuture.completeExceptionally( new IllegalStateException("Cannot remove a NamedTopology while the state is " + super.state) ); - } else if (!getTopologyByName(topologyToRemove).isPresent()) { + } else if (getTopologyByName(topologyToRemove).isEmpty()) { log.error("Attempted to remove unknown topology {}. 
This application currently contains the" + "following topologies: {}.", topologyToRemove, topologyMetadata.namedTopologiesView() ); @@ -431,7 +431,7 @@ public KeyQueryMetadata queryMetadataForKey(final String storeName, * See {@link KafkaStreams#allLocalStorePartitionLags()} */ public Map> allLocalStorePartitionLagsForTopology(final String topologyName) { - if (!getTopologyByName(topologyName).isPresent()) { + if (getTopologyByName(topologyName).isEmpty()) { log.error("Can't get local store partition lags since topology {} does not exist in this application", topologyName); throw new UnknownTopologyException("Can't get local store partition lags", topologyName); diff --git a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryRequest.java b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryRequest.java index f37d8067453ec..5cdbf8bc8ce7e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryRequest.java +++ b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryRequest.java @@ -18,8 +18,6 @@ import org.apache.kafka.common.annotation.InterfaceStability.Evolving; -import java.util.Collections; -import java.util.HashSet; import java.util.Optional; import java.util.Set; @@ -102,7 +100,7 @@ public StateQueryRequest withPartitions(final Set partitions) { return new StateQueryRequest<>( storeName, position, - Optional.of(Collections.unmodifiableSet(new HashSet<>(partitions))), + Optional.of(Set.copyOf(partitions)), query, executionInfoEnabled, requireActive @@ -166,7 +164,7 @@ public Query getQuery() { * Whether this request should fetch from all locally available partitions. */ public boolean isAllPartitions() { - return !partitions.isPresent(); + return partitions.isEmpty(); } /** @@ -175,7 +173,7 @@ public boolean isAllPartitions() { * @throws IllegalStateException if this is a request for all partitions */ public Set getPartitions() { - if (!partitions.isPresent()) { + if (partitions.isEmpty()) { throw new IllegalStateException( "Cannot list partitions of an 'all partitions' request"); } else { diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractMergedSortedCacheStoreIterator.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractMergedSortedCacheStoreIterator.java index 819c58ca20e99..834b18f6491c0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractMergedSortedCacheStoreIterator.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractMergedSortedCacheStoreIterator.java @@ -59,14 +59,28 @@ private boolean isDeletedCacheEntry(final KeyValue nextFro public boolean hasNext() { // skip over items deleted from cache, and corresponding store items if they have the same key while (cacheIterator.hasNext() && isDeletedCacheEntry(cacheIterator.peekNext())) { - if (storeIterator.hasNext()) { - final KS nextStoreKey = storeIterator.peekNextKey(); - // advance the store iterator if the key is the same as the deleted cache key - if (compare(cacheIterator.peekNextKey(), nextStoreKey) == 0) { - storeIterator.next(); - } + if (!storeIterator.hasNext()) { + // if storeIterator is exhausted, we can just skip over every tombstone + // in the cache since they don't shadow any valid key + cacheIterator.next(); + continue; + } + + final KS nextStoreKey = storeIterator.peekNextKey(); + final int compare = compare(cacheIterator.peekNextKey(), nextStoreKey); + + if (compare == 0) { + // next cache entry is a valid tombstone for the next store key + 
storeIterator.next(); + cacheIterator.next(); + } else if (compare < 0) { + // cache has a tombstone for an entry that doesn't exist in the store + cacheIterator.next(); + } else { + // store iterator has a valid entry, but we should not advance the cache + // iterator because it may still shadow a future store key + return true; } - cacheIterator.next(); } return cacheIterator.hasNext() || storeIterator.hasNext(); diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractSegments.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractSegments.java index 5611fe99d24e5..4f7ca5e59ae9c 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractSegments.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractSegments.java @@ -17,7 +17,7 @@ package org.apache.kafka.streams.state.internals; import org.apache.kafka.streams.errors.ProcessorStateException; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.query.Position; import org.slf4j.Logger; @@ -81,7 +81,7 @@ public S segmentForTimestamp(final long timestamp) { @Override public S getOrCreateSegmentIfLive(final long segmentId, - final ProcessorContext context, + final StateStoreContext context, final long streamTime) { final long minLiveTimestamp = streamTime - retentionPeriod; final long minLiveSegment = segmentId(minLiveTimestamp); @@ -95,7 +95,7 @@ public S getOrCreateSegmentIfLive(final long segmentId, } @Override - public void openExisting(final ProcessorContext context, final long streamTime) { + public void openExisting(final StateStoreContext context, final long streamTime) { try { final File dir = new File(context.stateDir(), name); if (dir.exists()) { diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java index 3af62e4c59439..b5f05b5c4752e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingKeyValueStore.java @@ -60,7 +60,7 @@ public class CachingKeyValueStore private CacheFlushListener flushListener; private boolean sendOldValues; private String cacheName; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private Thread streamThread; private final ReadWriteLock lock = new ReentrantReadWriteLock(); private final Position position; @@ -95,13 +95,12 @@ QueryResult apply( } @Override - public void init(final StateStoreContext stateStoreContext, - final StateStore root) { - this.context = asInternalProcessorContext(stateStoreContext); - this.cacheName = ThreadCache.nameSpaceFromTaskIdAndStore(context.taskId().toString(), name()); - this.context.registerCacheFlushListener(cacheName, entries -> { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + internalContext = asInternalProcessorContext(stateStoreContext); + cacheName = ThreadCache.nameSpaceFromTaskIdAndStore(internalContext.taskId().toString(), name()); + internalContext.registerCacheFlushListener(cacheName, entries -> { for (final ThreadCache.DirtyEntry entry : entries) { - putAndMaybeForward(entry, context); + putAndMaybeForward(entry, internalContext); } }); super.init(stateStoreContext, root); @@ -138,7 +137,7 @@ public QueryResult 
query(final Query query, result = wrapped().query(query, positionBound, config); } else { - final int partition = context.taskId().partition(); + final int partition = internalContext.taskId().partition(); final Lock lock = this.lock.readLock(); lock.lock(); try { @@ -183,8 +182,8 @@ private QueryResult runKeyQuery(final Query query, final Bytes key = keyQuery.getKey(); synchronized (mergedPosition) { - if (context.cache() != null) { - final LRUCacheEntry lruCacheEntry = context.cache().get(cacheName, key); + if (internalContext.cache() != null) { + final LRUCacheEntry lruCacheEntry = internalContext.cache().get(cacheName, key); if (lruCacheEntry != null) { final byte[] rawValue; if (timestampedSchema && !WrappedStateStore.isTimestamped(wrapped()) && !StoreQueryUtils.isAdapter(wrapped())) { @@ -268,19 +267,19 @@ public void put(final Bytes key, private void putInternal(final Bytes key, final byte[] value) { synchronized (position) { - context.cache().put( + internalContext.cache().put( cacheName, key, new LRUCacheEntry( value, - context.headers(), + internalContext.headers(), true, - context.offset(), - context.timestamp(), - context.partition(), - context.topic())); + internalContext.offset(), + internalContext.timestamp(), + internalContext.partition(), + internalContext.topic())); - StoreQueryUtils.updatePosition(position, context); + StoreQueryUtils.updatePosition(position, internalContext); } } @@ -357,8 +356,8 @@ public byte[] get(final Bytes key) { private byte[] getInternal(final Bytes key) { LRUCacheEntry entry = null; - if (context.cache() != null) { - entry = context.cache().get(cacheName, key); + if (internalContext.cache() != null) { + entry = internalContext.cache().get(cacheName, key); } if (entry == null) { final byte[] rawValue = wrapped().get(key); @@ -368,7 +367,7 @@ private byte[] getInternal(final Bytes key) { // only update the cache if this call is on the streamThread // as we don't want other threads to trigger an eviction/flush if (Thread.currentThread().equals(streamThread)) { - context.cache().put(cacheName, key, new LRUCacheEntry(rawValue)); + internalContext.cache().put(cacheName, key, new LRUCacheEntry(rawValue)); } return rawValue; } else { @@ -389,7 +388,7 @@ public KeyValueIterator range(final Bytes from, validateStoreOpen(); final KeyValueIterator storeIterator = wrapped().range(from, to); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().range(cacheName, from, to); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().range(cacheName, from, to); return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true); } @@ -406,7 +405,7 @@ public KeyValueIterator reverseRange(final Bytes from, validateStoreOpen(); final KeyValueIterator storeIterator = wrapped().reverseRange(from, to); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().reverseRange(cacheName, from, to); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseRange(cacheName, from, to); return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, false); } @@ -414,7 +413,7 @@ public KeyValueIterator reverseRange(final Bytes from, public KeyValueIterator all() { validateStoreOpen(); final KeyValueIterator storeIterator = wrapped().all(); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().all(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = 
internalContext.cache().all(cacheName); return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true); } @@ -424,7 +423,7 @@ public , P> KeyValueIterator prefixScan( final KeyValueIterator storeIterator = wrapped().prefixScan(prefix, prefixKeySerializer); final Bytes from = Bytes.wrap(prefixKeySerializer.serialize(null, prefix)); final Bytes to = Bytes.increment(from); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().range(cacheName, from, to, false); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().range(cacheName, from, to, false); return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true); } @@ -432,7 +431,7 @@ public , P> KeyValueIterator prefixScan( public KeyValueIterator reverseAll() { validateStoreOpen(); final KeyValueIterator storeIterator = wrapped().reverseAll(); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().reverseAll(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName); return new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, false); } @@ -454,7 +453,7 @@ public void flush() { lock.writeLock().lock(); try { validateStoreOpen(); - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); wrapped().flush(); } finally { lock.writeLock().unlock(); @@ -467,7 +466,7 @@ public void flushCache() { lock.writeLock().lock(); try { validateStoreOpen(); - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); } finally { lock.writeLock().unlock(); } @@ -479,7 +478,7 @@ public void clearCache() { lock.writeLock().lock(); try { validateStoreOpen(); - context.cache().clear(cacheName); + internalContext.cache().clear(cacheName); } finally { lock.writeLock().unlock(); } @@ -490,8 +489,8 @@ public void close() { lock.writeLock().lock(); try { final LinkedList suppressed = executeAll( - () -> context.cache().flush(cacheName), - () -> context.cache().close(cacheName), + () -> internalContext.cache().flush(cacheName), + () -> internalContext.cache().close(cacheName), wrapped()::close ); if (!suppressed.isEmpty()) { diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingSessionStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingSessionStore.java index f8c57d2b94fc5..c863050f94d85 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingSessionStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingSessionStore.java @@ -55,7 +55,7 @@ class CachingSessionStore "Note that the built-in numerical serdes do not follow this for negative numbers"; private String cacheName; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private CacheFlushListener flushListener; private boolean sendOldValues; @@ -71,12 +71,11 @@ class CachingSessionStore @Override public void init(final StateStoreContext stateStoreContext, final StateStore root) { - this.context = asInternalProcessorContext(stateStoreContext); - - cacheName = context.taskId() + "-" + name(); - context.registerCacheFlushListener(cacheName, entries -> { + internalContext = asInternalProcessorContext(stateStoreContext); + cacheName = internalContext.taskId() + "-" + name(); + internalContext.registerCacheFlushListener(cacheName, entries -> { for (final ThreadCache.DirtyEntry entry : entries) { - 
putAndMaybeForward(entry, context); + putAndMaybeForward(entry, internalContext); } }); super.init(stateStoreContext, root); @@ -136,13 +135,13 @@ public void put(final Windowed key, final byte[] value) { final LRUCacheEntry entry = new LRUCacheEntry( value, - context.headers(), + internalContext.headers(), true, - context.offset(), - context.timestamp(), - context.partition(), - context.topic()); - context.cache().put(cacheName, cacheFunction.cacheKey(binaryKey), entry); + internalContext.offset(), + internalContext.timestamp(), + internalContext.partition(), + internalContext.topic()); + internalContext.cache().put(cacheName, cacheFunction.cacheKey(binaryKey), entry); maxObservedTimestamp = Math.max(keySchema.segmentTimestamp(binaryKey), maxObservedTimestamp); } @@ -161,7 +160,7 @@ public KeyValueIterator, byte[]> findSessions(final Bytes key, final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? new CacheIteratorWrapper(key, earliestSessionEndTime, latestSessionStartTime, true) : - context.cache().range(cacheName, + internalContext.cache().range(cacheName, cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, earliestSessionEndTime)), cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, latestSessionStartTime)) ); @@ -187,7 +186,7 @@ public KeyValueIterator, byte[]> backwardFindSessions(final Byte final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? new CacheIteratorWrapper(key, earliestSessionEndTime, latestSessionStartTime, false) : - context.cache().reverseRange( + internalContext.cache().reverseRange( cacheName, cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, earliestSessionEndTime)), cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, latestSessionStartTime) @@ -225,7 +224,7 @@ public KeyValueIterator, byte[]> findSessions(final Bytes keyFro final Bytes cacheKeyFrom = keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, earliestSessionEndTime)); final Bytes cacheKeyTo = keyTo == null ? null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, latestSessionStartTime)); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); final KeyValueIterator, byte[]> storeIterator = wrapped().findSessions( keyFrom, keyTo, earliestSessionEndTime, latestSessionStartTime @@ -254,7 +253,7 @@ public KeyValueIterator, byte[]> backwardFindSessions(final Byte final Bytes cacheKeyFrom = keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, earliestSessionEndTime)); final Bytes cacheKeyTo = keyTo == null ? 
null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, latestSessionStartTime)); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); final KeyValueIterator, byte[]> storeIterator = wrapped().backwardFindSessions(keyFrom, keyTo, earliestSessionEndTime, latestSessionStartTime); @@ -274,13 +273,13 @@ public KeyValueIterator, byte[]> backwardFindSessions(final Byte public byte[] fetchSession(final Bytes key, final long earliestSessionEndTime, final long latestSessionStartTime) { Objects.requireNonNull(key, "key cannot be null"); validateStoreOpen(); - if (context.cache() == null) { + if (internalContext.cache() == null) { return wrapped().fetchSession(key, earliestSessionEndTime, latestSessionStartTime); } else { final Bytes bytesKey = SessionKeySchema.toBinary(key, earliestSessionEndTime, latestSessionStartTime); final Bytes cacheKey = cacheFunction.cacheKey(bytesKey); - final LRUCacheEntry entry = context.cache().get(cacheName, cacheKey); + final LRUCacheEntry entry = internalContext.cache().get(cacheName, cacheKey); if (entry == null) { return wrapped().fetchSession(key, earliestSessionEndTime, latestSessionStartTime); } else { @@ -314,24 +313,24 @@ public KeyValueIterator, byte[]> backwardFetch(final Bytes keyFr } public void flush() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); wrapped().flush(); } @Override public void flushCache() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); } @Override public void clearCache() { - context.cache().clear(cacheName); + internalContext.cache().clear(cacheName); } public void close() { final LinkedList suppressed = executeAll( - () -> context.cache().flush(cacheName), - () -> context.cache().close(cacheName), + () -> internalContext.cache().flush(cacheName), + () -> internalContext.cache().close(cacheName), wrapped()::close ); if (!suppressed.isEmpty()) { @@ -381,13 +380,13 @@ private CacheIteratorWrapper(final Bytes keyFrom, this.lastSegmentId = cacheFunction.segmentId(maxObservedTimestamp); setCacheKeyRange(earliestSessionEndTime, currentSegmentLastTime()); - this.current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { this.lastSegmentId = cacheFunction.segmentId(earliestSessionEndTime); this.currentSegmentId = cacheFunction.segmentId(maxObservedTimestamp); setCacheKeyRange(currentSegmentBeginTime(), Math.min(latestSessionStartTime, maxObservedTimestamp)); - this.current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } @@ -461,7 +460,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { --currentSegmentId; @@ -474,7 +473,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingWindowStore.java 
b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingWindowStore.java index e581dac0c5387..dff0ac70749af 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/CachingWindowStore.java @@ -56,7 +56,7 @@ class CachingWindowStore private String cacheName; private boolean sendOldValues; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private StateSerdes bytesSerdes; private CacheFlushListener flushListener; @@ -74,16 +74,16 @@ class CachingWindowStore @Override public void init(final StateStoreContext stateStoreContext, final StateStore root) { final String changelogTopic = ProcessorContextUtils.changelogFor(stateStoreContext, name(), Boolean.TRUE); - this.context = asInternalProcessorContext(stateStoreContext); + internalContext = asInternalProcessorContext(stateStoreContext); bytesSerdes = new StateSerdes<>( changelogTopic, Serdes.Bytes(), Serdes.ByteArray()); - cacheName = context.taskId() + "-" + name(); + cacheName = internalContext.taskId() + "-" + name(); - context.registerCacheFlushListener(cacheName, entries -> { + internalContext.registerCacheFlushListener(cacheName, entries -> { for (final ThreadCache.DirtyEntry entry : entries) { - putAndMaybeForward(entry, context); + putAndMaybeForward(entry, internalContext); } }); @@ -153,13 +153,13 @@ public synchronized void put(final Bytes key, final LRUCacheEntry entry = new LRUCacheEntry( value, - context.headers(), + internalContext.headers(), true, - context.offset(), - context.timestamp(), - context.partition(), - context.topic()); - context.cache().put(cacheName, cacheFunction.cacheKey(keyBytes), entry); + internalContext.offset(), + internalContext.timestamp(), + internalContext.partition(), + internalContext.topic()); + internalContext.cache().put(cacheName, cacheFunction.cacheKey(keyBytes), entry); maxObservedTimestamp.set(Math.max(keySchema.segmentTimestamp(keyBytes), maxObservedTimestamp.get())); } @@ -170,10 +170,10 @@ public byte[] fetch(final Bytes key, validateStoreOpen(); final Bytes bytesKey = WindowKeySchema.toStoreKeyBinary(key, timestamp, 0); final Bytes cacheKey = cacheFunction.cacheKey(bytesKey); - if (context.cache() == null) { + if (internalContext.cache() == null) { return wrapped().fetch(key, timestamp); } - final LRUCacheEntry entry = context.cache().get(cacheName, cacheKey); + final LRUCacheEntry entry = internalContext.cache().get(cacheName, cacheKey); if (entry == null) { return wrapped().fetch(key, timestamp); } else { @@ -190,13 +190,13 @@ public synchronized WindowStoreIterator fetch(final Bytes key, validateStoreOpen(); final WindowStoreIterator underlyingIterator = wrapped().fetch(key, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? 
new CacheIteratorWrapper(key, timeFrom, timeTo, true) : - context.cache().range( + internalContext.cache().range( cacheName, cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, timeFrom)), cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, timeTo)) @@ -218,13 +218,13 @@ public synchronized WindowStoreIterator backwardFetch(final Bytes key, validateStoreOpen(); final WindowStoreIterator underlyingIterator = wrapped().backwardFetch(key, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? new CacheIteratorWrapper(key, timeFrom, timeTo, false) : - context.cache().reverseRange( + internalContext.cache().reverseRange( cacheName, cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(key, timeFrom)), cacheFunction.cacheKey(keySchema.upperRangeFixedSize(key, timeTo)) @@ -256,13 +256,13 @@ public KeyValueIterator, byte[]> fetch(final Bytes keyFrom, final KeyValueIterator, byte[]> underlyingIterator = wrapped().fetch(keyFrom, keyTo, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? new CacheIteratorWrapper(keyFrom, keyTo, timeFrom, timeTo, true) : - context.cache().range( + internalContext.cache().range( cacheName, keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, timeFrom)), keyTo == null ? null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, timeTo)) @@ -300,13 +300,13 @@ public KeyValueIterator, byte[]> backwardFetch(final Bytes keyFr final KeyValueIterator, byte[]> underlyingIterator = wrapped().backwardFetch(keyFrom, keyTo, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } final PeekingKeyValueIterator cacheIterator = wrapped().persistent() ? new CacheIteratorWrapper(keyFrom, keyTo, timeFrom, timeTo, false) : - context.cache().reverseRange( + internalContext.cache().reverseRange( cacheName, keyFrom == null ? null : cacheFunction.cacheKey(keySchema.lowerRange(keyFrom, timeFrom)), keyTo == null ? 
null : cacheFunction.cacheKey(keySchema.upperRange(keyTo, timeTo)) @@ -332,7 +332,7 @@ public KeyValueIterator, byte[]> fetchAll(final long timeFrom, validateStoreOpen(); final KeyValueIterator, byte[]> underlyingIterator = wrapped().fetchAll(timeFrom, timeTo); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().all(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().all(cacheName); final HasNextCondition hasNextCondition = keySchema.hasNextCondition(null, null, timeFrom, timeTo, true); final PeekingKeyValueIterator filteredCacheIterator = @@ -353,7 +353,7 @@ public KeyValueIterator, byte[]> backwardFetchAll(final long tim validateStoreOpen(); final KeyValueIterator, byte[]> underlyingIterator = wrapped().backwardFetchAll(timeFrom, timeTo); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().reverseAll(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName); final HasNextCondition hasNextCondition = keySchema.hasNextCondition(null, null, timeFrom, timeTo, false); final PeekingKeyValueIterator filteredCacheIterator = @@ -374,7 +374,7 @@ public KeyValueIterator, byte[]> all() { validateStoreOpen(); final KeyValueIterator, byte[]> underlyingIterator = wrapped().all(); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().all(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().all(cacheName); return new MergedSortedCacheWindowStoreKeyValueIterator( cacheIterator, @@ -391,7 +391,7 @@ public KeyValueIterator, byte[]> backwardAll() { validateStoreOpen(); final KeyValueIterator, byte[]> underlyingIterator = wrapped().backwardAll(); - final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = context.cache().reverseAll(cacheName); + final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = internalContext.cache().reverseAll(cacheName); return new MergedSortedCacheWindowStoreKeyValueIterator( cacheIterator, @@ -405,25 +405,25 @@ public KeyValueIterator, byte[]> backwardAll() { @Override public synchronized void flush() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); wrapped().flush(); } @Override public void flushCache() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); } @Override public void clearCache() { - context.cache().clear(cacheName); + internalContext.cache().clear(cacheName); } @Override public synchronized void close() { final LinkedList suppressed = executeAll( - () -> context.cache().flush(cacheName), - () -> context.cache().close(cacheName), + () -> internalContext.cache().flush(cacheName), + () -> internalContext.cache().close(cacheName), wrapped()::close ); if (!suppressed.isEmpty()) { @@ -472,13 +472,13 @@ private CacheIteratorWrapper(final Bytes keyFrom, this.currentSegmentId = cacheFunction.segmentId(timeFrom); setCacheKeyRange(timeFrom, currentSegmentLastTime()); - this.current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { this.currentSegmentId = cacheFunction.segmentId(Math.min(timeTo, maxObservedTimestamp.get())); this.lastSegmentId = cacheFunction.segmentId(timeFrom); setCacheKeyRange(currentSegmentBeginTime(), Math.min(timeTo, maxObservedTimestamp.get())); - this.current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = 
internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } @@ -553,7 +553,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { --currentSegmentId; @@ -567,7 +567,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } @@ -576,7 +576,7 @@ private void setCacheKeyRange(final long lowerRangeEndTime, final long upperRang throw new IllegalStateException("Error iterating over segments: segment interval has changed"); } - if (keyFrom != null && keyTo != null && keyFrom.equals(keyTo)) { + if (keyFrom != null && keyFrom.equals(keyTo)) { cacheKeyFrom = cacheFunction.cacheKey(segmentLowerRangeFixedSize(keyFrom, lowerRangeEndTime)); cacheKeyTo = cacheFunction.cacheKey(segmentUpperRangeFixedSize(keyTo, upperRangeEndTime)); } else { diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStore.java index a0262a35a844e..78bcbd83a0bfe 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStore.java @@ -33,17 +33,17 @@ public class ChangeLoggingKeyValueBytesStore extends WrappedStateStore, byte[], byte[]> implements KeyValueStore { - InternalProcessorContext context; + InternalProcessorContext internalContext; ChangeLoggingKeyValueBytesStore(final KeyValueStore inner) { super(inner); } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { - this.context = asInternalProcessorContext(context); - super.init(context, root); + internalContext = asInternalProcessorContext(stateStoreContext); + super.init(stateStoreContext, root); maybeSetEvictionListener(); } @@ -52,7 +52,7 @@ private void maybeSetEvictionListener() { if (wrapped() instanceof MemoryLRUCache) { ((MemoryLRUCache) wrapped()).setWhenEldestRemoved((key, value) -> { // pass null to indicate removal - log(key, null, context.timestamp()); + log(key, null, internalContext.timestamp()); }); } } @@ -66,7 +66,7 @@ public long approximateNumEntries() { public void put(final Bytes key, final byte[] value) { wrapped().put(key, value); - log(key, value, context.timestamp()); + log(key, value, internalContext.timestamp()); } @Override @@ -75,7 +75,7 @@ public byte[] putIfAbsent(final Bytes key, final byte[] previous = wrapped().putIfAbsent(key, value); if (previous == null) { // then it was absent - log(key, value, context.timestamp()); + log(key, value, internalContext.timestamp()); } return previous; } @@ -84,7 +84,7 @@ public byte[] putIfAbsent(final Bytes key, public void putAll(final List> entries) { wrapped().putAll(entries); for (final KeyValue entry : entries) { - log(entry.key, entry.value, context.timestamp()); + log(entry.key, entry.value, internalContext.timestamp()); } } @@ -97,7 +97,7 @@ public , P> KeyValueIterator prefixScan( @Override public byte[] delete(final Bytes key) { final byte[] oldValue = wrapped().delete(key); - log(key, null, context.timestamp()); + log(key, null, 
internalContext.timestamp()); return oldValue; } @@ -129,6 +129,6 @@ public KeyValueIterator reverseAll() { } void log(final Bytes key, final byte[] value, final long timestamp) { - context.logChange(name(), key, value, timestamp, wrapped().getPosition()); + internalContext.logChange(name(), key, value, timestamp, wrapped().getPosition()); } } \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingListValueBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingListValueBytesStore.java index be5805c53fced..9070fc8da5f10 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingListValueBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingListValueBytesStore.java @@ -32,9 +32,9 @@ public void put(final Bytes key, final byte[] value) { // we need to log the full new list and thus call get() on the inner store below // if the value is a tombstone, we delete the whole list and thus can save the get call if (value == null) { - log(key, null, context.timestamp()); + log(key, null, internalContext.timestamp()); } else { - log(key, wrapped().get(key), context.timestamp()); + log(key, wrapped().get(key), internalContext.timestamp()); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStore.java index 76b998af2dcb2..06097aa7a714f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStore.java @@ -34,16 +34,16 @@ public class ChangeLoggingSessionBytesStore extends WrappedStateStore, byte[], byte[]> implements SessionStore { - private InternalProcessorContext context; + private InternalProcessorContext internalContext; ChangeLoggingSessionBytesStore(final SessionStore bytesStore) { super(bytesStore); } @Override - public void init(final StateStoreContext context, final StateStore root) { - this.context = asInternalProcessorContext(context); - super.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + internalContext = asInternalProcessorContext(stateStoreContext); + super.init(stateStoreContext, root); } @Override @@ -73,13 +73,13 @@ public KeyValueIterator, byte[]> backwardFindSessions(final Byte @Override public void remove(final Windowed sessionKey) { wrapped().remove(sessionKey); - context.logChange(name(), SessionKeySchema.toBinary(sessionKey), null, context.timestamp(), wrapped().getPosition()); + internalContext.logChange(name(), SessionKeySchema.toBinary(sessionKey), null, internalContext.timestamp(), wrapped().getPosition()); } @Override public void put(final Windowed sessionKey, final byte[] aggregate) { wrapped().put(sessionKey, aggregate); - context.logChange(name(), SessionKeySchema.toBinary(sessionKey), aggregate, context.timestamp(), wrapped().getPosition()); + internalContext.logChange(name(), SessionKeySchema.toBinary(sessionKey), aggregate, internalContext.timestamp(), wrapped().getPosition()); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStore.java index 
efb265ee44d16..916c9547184a4 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStore.java @@ -35,7 +35,7 @@ public class ChangeLoggingTimestampedKeyValueBytesStore extends ChangeLoggingKey public void put(final Bytes key, final byte[] valueAndTimestamp) { wrapped().put(key, valueAndTimestamp); - log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp)); + log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? internalContext.timestamp() : timestamp(valueAndTimestamp)); } @Override @@ -44,7 +44,7 @@ public byte[] putIfAbsent(final Bytes key, final byte[] previous = wrapped().putIfAbsent(key, valueAndTimestamp); if (previous == null) { // then it was absent - log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp)); + log(key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? internalContext.timestamp() : timestamp(valueAndTimestamp)); } return previous; } @@ -54,7 +54,7 @@ public void putAll(final List> entries) { wrapped().putAll(entries); for (final KeyValue entry : entries) { final byte[] valueAndTimestamp = entry.value; - log(entry.key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? context.timestamp() : timestamp(valueAndTimestamp)); + log(entry.key, rawValue(valueAndTimestamp), valueAndTimestamp == null ? internalContext.timestamp() : timestamp(valueAndTimestamp)); } } } \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStore.java index aea8417073343..2bf87f9d2a8b5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStore.java @@ -32,11 +32,11 @@ class ChangeLoggingTimestampedWindowBytesStore extends ChangeLoggingWindowBytesS @Override void log(final Bytes key, final byte[] valueAndTimestamp) { - context.logChange( + internalContext.logChange( name(), key, rawValue(valueAndTimestamp), - valueAndTimestamp != null ? timestamp(valueAndTimestamp) : context.timestamp(), + valueAndTimestamp != null ? 
timestamp(valueAndTimestamp) : internalContext.timestamp(), wrapped().getPosition() ); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStore.java index f9f3e79b54e33..94bfdc7ee79c8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStore.java @@ -57,7 +57,7 @@ public byte[] delete(final Bytes key, final long timestamp) { @Override public void log(final Bytes key, final byte[] value, final long timestamp) { - context.logChange( + internalContext.logChange( name(), key, value, diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStore.java index cd61ae70b9280..d5857d0456e66 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStore.java @@ -41,7 +41,7 @@ interface ChangeLoggingKeySerializer { } private final boolean retainDuplicates; - InternalProcessorContext context; + InternalProcessorContext internalContext; private int seqnum = 0; private final ChangeLoggingKeySerializer keySerializer; @@ -54,10 +54,10 @@ interface ChangeLoggingKeySerializer { } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { - this.context = asInternalProcessorContext(context); - super.init(context, root); + internalContext = asInternalProcessorContext(stateStoreContext); + super.init(stateStoreContext, root); } @Override @@ -129,7 +129,7 @@ public void put(final Bytes key, } void log(final Bytes key, final byte[] value) { - context.logChange(name(), key, value, context.timestamp(), wrapped().getPosition()); + internalContext.logChange(name(), key, value, internalContext.timestamp(), wrapped().getPosition()); } private int maybeUpdateSeqnumForDups() { diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueSegments.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueSegments.java index 304d77e82597d..a18d901b83f5f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueSegments.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueSegments.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.state.internals; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorContextUtils; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder; @@ -37,7 +37,7 @@ class KeyValueSegments extends AbstractSegments { @Override public KeyValueSegment getOrCreateSegment(final long segmentId, - final ProcessorContext context) { + final StateStoreContext context) { if (segments.containsKey(segmentId)) { return segments.get(segmentId); } else { @@ -55,7 +55,7 @@ public KeyValueSegment getOrCreateSegment(final long segmentId, @Override public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId, - final ProcessorContext context, + final 
StateStoreContext context, final long streamTime) { final KeyValueSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime); cleanupExpiredSegments(streamTime); @@ -63,7 +63,7 @@ public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId, } @Override - public void openExisting(final ProcessorContext context, final long streamTime) { + public void openExisting(final StateStoreContext context, final long streamTime) { metricsRecorder.init(ProcessorContextUtils.metricsImpl(context), context.taskId()); super.openExisting(context, streamTime); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapper.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapper.java index 43efc9159d53a..dce28444d85ec 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapper.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapper.java @@ -47,7 +47,7 @@ public class KeyValueStoreWrapper implements StateStore { // same as either timestampedStore or versionedStore above. kept merely as a convenience // to simplify implementation for methods which do not depend on store type. - private StateStore store = null; + private StateStore store; public KeyValueStoreWrapper(final ProcessorContext context, final String storeName) { try { @@ -122,8 +122,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - store.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + store.init(stateStoreContext, root); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueToTimestampedKeyValueByteStoreAdapter.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueToTimestampedKeyValueByteStoreAdapter.java index 8e5d1e8cc2d81..8e79a86bc2b8a 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueToTimestampedKeyValueByteStoreAdapter.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/KeyValueToTimestampedKeyValueByteStoreAdapter.java @@ -95,8 +95,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - store.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + store.init(stateStoreContext, root); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegment.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegment.java index 6b9cd747de245..18b371048c3f1 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegment.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegment.java @@ -137,7 +137,7 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { throw new UnsupportedOperationException("cannot initialize a logical segment"); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegments.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegments.java index bcbeb4689b388..c46a2c2788c4b 100644 --- 
a/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegments.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegments.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.state.internals; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorContextUtils; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder; @@ -29,8 +29,8 @@ * Regular segments with {@code segmentId >= 0} expire according to the specified * retention period. "Reserved" segments with {@code segmentId < 0} do not expire * and are completely separate from regular segments in that methods such as - * {@link #segmentForTimestamp(long)}, {@link #getOrCreateSegment(long, ProcessorContext)}, - * {@link #getOrCreateSegmentIfLive(long, ProcessorContext, long)}, + * {@link #segmentForTimestamp(long)}, {@link #getOrCreateSegment(long, StateStoreContext)}, + * {@link #getOrCreateSegmentIfLive(long, StateStoreContext, long)}, * {@link #segments(long, long, boolean)}, and {@link #allSegments(boolean)} * only return regular segments and not reserved segments. The methods {@link #flush()} * and {@link #close()} flush and close both regular and reserved segments, due to @@ -62,7 +62,7 @@ public void setPosition(final Position position) { @Override public LogicalKeyValueSegment getOrCreateSegment(final long segmentId, - final ProcessorContext context) { + final StateStoreContext context) { if (segments.containsKey(segmentId)) { return segments.get(segmentId); } else { @@ -103,7 +103,7 @@ LogicalKeyValueSegment getReservedSegment(final long segmentId) { } @Override - public void openExisting(final ProcessorContext context, final long streamTime) { + public void openExisting(final StateStoreContext context, final long streamTime) { metricsRecorder.init(ProcessorContextUtils.metricsImpl(context), context.taskId()); physicalStore.openDB(context.appConfigs(), context.stateDir()); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/MemoryLRUCache.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/MemoryLRUCache.java index f648d2ada62f6..dc77be5da01dd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/MemoryLRUCache.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/MemoryLRUCache.java @@ -84,14 +84,14 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean( - context.appConfigs(), + stateStoreContext.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false ); // register the store - context.register( + stateStoreContext.register( root, (RecordBatchingStateRestoreCallback) records -> { restoring = true; @@ -108,7 +108,7 @@ public void init(final StateStoreContext context, final StateStore root) { restoring = false; } ); - this.context = context; + this.context = stateStoreContext; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStore.java index e8e613acdbf4b..991b9b365d713 100644 --- 
a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStore.java @@ -52,6 +52,7 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.LongAdder; @@ -91,7 +92,7 @@ public class MeteredKeyValueStore private Sensor flushSensor; private Sensor e2eLatencySensor; protected Sensor iteratorDurationSensor; - protected InternalProcessorContext context; + protected InternalProcessorContext internalContext; private StreamsMetricsImpl streamsMetrics; private TaskId taskId; @@ -124,19 +125,19 @@ public class MeteredKeyValueStore } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { - this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext) context : null; - taskId = context.taskId(); - initStoreSerde(context); - streamsMetrics = (StreamsMetricsImpl) context.metrics(); + internalContext = stateStoreContext instanceof InternalProcessorContext ? (InternalProcessorContext) stateStoreContext : null; + taskId = stateStoreContext.taskId(); + initStoreSerde(stateStoreContext); + streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics(); registerMetrics(); final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics); // register and possibly restore the state from the logs - maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor); + maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor); } private void registerMetrics() { @@ -154,7 +155,13 @@ private void registerMetrics() { StateStoreMetrics.addNumOpenIteratorsGauge(taskId.toString(), metricsScope, name(), streamsMetrics, (config, now) -> numOpenIterators.sum()); StateStoreMetrics.addOldestOpenIteratorGauge(taskId.toString(), metricsScope, name(), streamsMetrics, - (config, now) -> openIterators.isEmpty() ? null : openIterators.first().startTimestamp() + (config, now) -> { + try { + return openIterators.isEmpty() ? null : openIterators.first().startTimestamp(); + } catch (final NoSuchElementException ignored) { + return null; + } + } ); } @@ -421,9 +428,9 @@ private List> innerEntries(final List> fr protected void maybeRecordE2ELatency() { // Context is null if the provided context isn't an implementation of InternalProcessorContext. // In that case, we _can't_ get the current timestamp, so we don't record anything. 
- if (e2eLatencySensor.shouldRecord() && context != null) { + if (e2eLatencySensor.shouldRecord() && internalContext != null) { final long currentTime = time.milliseconds(); - final long e2eLatency = currentTime - context.timestamp(); + final long e2eLatency = currentTime - internalContext.timestamp(); e2eLatencySensor.record(e2eLatency, currentTime); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredSessionStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredSessionStore.java index a6546c1edb5e1..7fb7276bcfc34 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredSessionStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredSessionStore.java @@ -46,6 +46,7 @@ import java.util.Comparator; import java.util.Map; import java.util.NavigableSet; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.LongAdder; @@ -70,7 +71,7 @@ public class MeteredSessionStore private Sensor removeSensor; private Sensor e2eLatencySensor; private Sensor iteratorDurationSensor; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private TaskId taskId; private final LongAdder numOpenIterators = new LongAdder(); @@ -99,19 +100,19 @@ public class MeteredSessionStore } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { - this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext) context : null; - taskId = context.taskId(); - initStoreSerde(context); - streamsMetrics = (StreamsMetricsImpl) context.metrics(); + internalContext = stateStoreContext instanceof InternalProcessorContext ? (InternalProcessorContext) stateStoreContext : null; + taskId = stateStoreContext.taskId(); + initStoreSerde(stateStoreContext); + streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics(); registerMetrics(); final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics); // register and possibly restore the state from the logs - maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor); + maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor); } private void registerMetrics() { @@ -124,7 +125,13 @@ private void registerMetrics() { StateStoreMetrics.addNumOpenIteratorsGauge(taskId.toString(), metricsScope, name(), streamsMetrics, (config, now) -> numOpenIterators.sum()); StateStoreMetrics.addOldestOpenIteratorGauge(taskId.toString(), metricsScope, name(), streamsMetrics, - (config, now) -> openIterators.isEmpty() ? null : openIterators.first().startTimestamp() + (config, now) -> { + try { + return openIterators.isEmpty() ? null : openIterators.first().startTimestamp(); + } catch (final NoSuchElementException ignored) { + return null; + } + } ); } @@ -488,9 +495,9 @@ private Bytes keyBytes(final K key) { private void maybeRecordE2ELatency() { // Context is null if the provided context isn't an implementation of InternalProcessorContext. // In that case, we _can't_ get the current timestamp, so we don't record anything. 
- if (e2eLatencySensor.shouldRecord() && context != null) { + if (e2eLatencySensor.shouldRecord() && internalContext != null) { final long currentTime = time.milliseconds(); - final long e2eLatency = currentTime - context.timestamp(); + final long e2eLatency = currentTime - internalContext.timestamp(); e2eLatencySensor.record(e2eLatency, currentTime); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStore.java index de9efa7eb6a84..66eb3206deffa 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStore.java @@ -101,7 +101,7 @@ private class MeteredVersionedKeyValueStoreInternal private final Serde plainValueSerde; private StateSerdes plainValueSerdes; - private final Map queryHandlers = + private final Map, QueryHandler> queryHandlers = mkMap( mkEntry( RangeQuery.class, @@ -201,6 +201,7 @@ public QueryResult query(final Query query, final PositionBound positi return result; } + @SuppressWarnings("unused") private QueryResult runRangeQuery(final Query query, final PositionBound positionBound, final QueryConfig config) { @@ -209,6 +210,7 @@ private QueryResult runRangeQuery(final Query query, throw new UnsupportedOperationException("Versioned stores do not support RangeQuery queries at this time."); } + @SuppressWarnings("unused") private QueryResult runKeyQuery(final Query query, final PositionBound positionBound, final QueryConfig config) { @@ -262,7 +264,7 @@ private QueryResult runMultiVersionedKeyQuery(final Query query, final final QueryResult> rawResult = wrapped().query(rawKeyQuery, positionBound, config); if (rawResult.isSuccess()) { final MeteredMultiVersionedKeyQueryIterator typedResult = - new MeteredMultiVersionedKeyQueryIterator( + new MeteredMultiVersionedKeyQueryIterator<>( rawResult.getResult(), iteratorDurationSensor, time, @@ -340,8 +342,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - internal.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + internal.init(stateStoreContext, root); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredWindowStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredWindowStore.java index e59665fb2eb04..ed3d31e86d01f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/MeteredWindowStore.java @@ -49,6 +49,7 @@ import java.util.Comparator; import java.util.Map; import java.util.NavigableSet; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.LongAdder; @@ -73,7 +74,7 @@ public class MeteredWindowStore private Sensor flushSensor; private Sensor e2eLatencySensor; private Sensor iteratorDurationSensor; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private TaskId taskId; private final LongAdder numOpenIterators = new LongAdder(); @@ -115,19 +116,19 @@ public class MeteredWindowStore } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext 
stateStoreContext, final StateStore root) { - this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext) context : null; - taskId = context.taskId(); - initStoreSerde(context); - streamsMetrics = (StreamsMetricsImpl) context.metrics(); + internalContext = stateStoreContext instanceof InternalProcessorContext ? (InternalProcessorContext) stateStoreContext : null; + taskId = stateStoreContext.taskId(); + initStoreSerde(stateStoreContext); + streamsMetrics = (StreamsMetricsImpl) stateStoreContext.metrics(); registerMetrics(); final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics); // register and possibly restore the state from the logs - maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor); + maybeMeasureLatency(() -> super.init(stateStoreContext, root), time, restoreSensor); } protected Serde prepareValueSerde(final Serde valueSerde, final SerdeGetter getter) { return WrappingNullableUtils.prepareValueSerde(valueSerde, getter); @@ -142,7 +143,13 @@ private void registerMetrics() { StateStoreMetrics.addNumOpenIteratorsGauge(taskId.toString(), metricsScope, name(), streamsMetrics, (config, now) -> numOpenIterators.sum()); StateStoreMetrics.addOldestOpenIteratorGauge(taskId.toString(), metricsScope, name(), streamsMetrics, - (config, now) -> openIterators.isEmpty() ? null : openIterators.first().startTimestamp() + (config, now) -> { + try { + return openIterators.isEmpty() ? null : openIterators.first().startTimestamp(); + } catch (final NoSuchElementException ignored) { + return null; + } + } ); } @@ -504,9 +511,9 @@ protected V outerValue(final byte[] value) { private void maybeRecordE2ELatency() { // Context is null if the provided context isn't an implementation of InternalProcessorContext. // In that case, we _can't_ get the current timestamp, so we don't record anything. 
- if (e2eLatencySensor.shouldRecord() && context != null) { + if (e2eLatencySensor.shouldRecord() && internalContext != null) { final long currentTime = time.milliseconds(); - final long e2eLatency = currentTime - context.timestamp(); + final long e2eLatency = currentTime - internalContext.timestamp(); e2eLatencySensor.record(e2eLatency, currentTime); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java index b7b611f8be0d3..5b2f1e06b344f 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java @@ -22,7 +22,6 @@ import org.rocksdb.AbstractEventListener; import org.rocksdb.AbstractSlice; import org.rocksdb.AbstractWalFilter; -import org.rocksdb.AccessHint; import org.rocksdb.BuiltinComparator; import org.rocksdb.Cache; import org.rocksdb.ColumnFamilyOptions; @@ -37,6 +36,7 @@ import org.rocksdb.DbPath; import org.rocksdb.Env; import org.rocksdb.InfoLogLevel; +import org.rocksdb.LoggerInterface; import org.rocksdb.MemTableConfig; import org.rocksdb.MergeOperator; import org.rocksdb.Options; @@ -332,14 +332,6 @@ public Statistics statistics() { return dbOptions.statistics(); } - @Deprecated - public int baseBackgroundCompactions() { - final String message = "This method has been removed from the underlying RocksDB. " + - "It is currently a no-op method which returns a default value of -1."; - log.warn(message); - return -1; - } - @Override public Options setMaxSubcompactions(final int maxSubcompactions) { dbOptions.setMaxSubcompactions(maxSubcompactions); @@ -571,34 +563,6 @@ public long dbWriteBufferSize() { return dbOptions.dbWriteBufferSize(); } - @Override - public Options setAccessHintOnCompactionStart(final AccessHint accessHint) { - dbOptions.setAccessHintOnCompactionStart(accessHint); - return this; - } - - @Override - public AccessHint accessHintOnCompactionStart() { - return dbOptions.accessHintOnCompactionStart(); - } - - @Deprecated - public Options setNewTableReaderForCompactionInputs(final boolean newTableReaderForCompactionInputs) { - final String message = "This method has been removed from the underlying RocksDB. " + - "It was not affecting compaction even in earlier versions. " + - "It is currently a no-op method."; - log.warn(message); - return this; - } - - @Deprecated - public boolean newTableReaderForCompactionInputs() { - final String message = "This method has been removed from the underlying RocksDB. 
" + - "It is now a method which always returns false."; - log.warn(message); - return false; - } - @Override public Options setCompactionReadaheadSize(final long compactionReadaheadSize) { dbOptions.setCompactionReadaheadSize(compactionReadaheadSize); @@ -843,7 +807,7 @@ public Options setSstFileManager(final SstFileManager sstFileManager) { } @Override - public Options setLogger(final org.rocksdb.Logger logger) { + public Options setLogger(final LoggerInterface logger) { dbOptions.setLogger(logger); return this; } @@ -914,6 +878,16 @@ public Options setCompressionType(final CompressionType compressionType) { return this; } + @Override + public Options setMemtableMaxRangeDeletions(final int n) { + columnFamilyOptions.setMemtableMaxRangeDeletions(n); + return this; + } + + @Override + public int memtableMaxRangeDeletions() { + return columnFamilyOptions.memtableMaxRangeDeletions(); + } @Override public Options setBottommostCompressionType(final CompressionType bottommostCompressionType) { @@ -1464,26 +1438,6 @@ public boolean allowIngestBehind() { return dbOptions.allowIngestBehind(); } - @Deprecated - public Options setPreserveDeletes(final boolean preserveDeletes) { - final String message = "This method has been removed from the underlying RocksDB. " + - "It was marked for deprecation in earlier versions. " + - "The behaviour can be replicated by using user-defined timestamps. " + - "It is currently a no-op method."; - log.warn(message); - // no-op - return this; - } - - @Deprecated - public boolean preserveDeletes() { - final String message = "This method has been removed from the underlying RocksDB. " + - "It was marked for deprecation in earlier versions. " + - "It is currently a no-op method with a default value of false."; - log.warn(message); - return false; - } - @Override public Options setTwoWriteQueues(final boolean twoWriteQueues) { dbOptions.setTwoWriteQueues(twoWriteQueues); diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBSessionStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBSessionStore.java index 7c72b2667bfc0..80af230c30002 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBSessionStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBSessionStore.java @@ -39,9 +39,9 @@ public class RocksDBSessionStore } @Override - public void init(final StateStoreContext context, final StateStore root) { - wrapped().init(context, root); - this.stateStoreContext = context; + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + wrapped().init(stateStoreContext, root); + this.stateStoreContext = stateStoreContext; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java index 52c193c86e0c7..6457a2c8fabfe 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java @@ -155,26 +155,26 @@ public RocksDBStore(final String name, } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { // open the DB dir - metricsRecorder.init(metricsImpl(context), context.taskId()); - openDB(context.appConfigs(), context.stateDir()); + metricsRecorder.init(metricsImpl(stateStoreContext), 
stateStoreContext.taskId()); + openDB(stateStoreContext.appConfigs(), stateStoreContext.stateDir()); - final File positionCheckpointFile = new File(context.stateDir(), name() + ".position"); + final File positionCheckpointFile = new File(stateStoreContext.stateDir(), name() + ".position"); this.positionCheckpoint = new OffsetCheckpoint(positionCheckpointFile); this.position = StoreQueryUtils.readPositionFromCheckpoint(positionCheckpoint); // value getter should always read directly from rocksDB // since it is only for values that are already flushed - this.context = context; - context.register( + this.context = stateStoreContext; + stateStoreContext.register( root, (RecordBatchingStateRestoreCallback) this::restoreBatch, () -> StoreQueryUtils.checkpointPosition(positionCheckpoint, position) ); consistencyEnabled = StreamsConfig.InternalConfig.getBoolean( - context.appConfigs(), + stateStoreContext.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java index 306d6bf9cfb8d..26065bf0fe318 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBuffer.java @@ -58,19 +58,29 @@ public class RocksDBTimeOrderedKeyValueBuffer implements TimeOrderedKeyVal private final boolean loggingEnabled; private int partition; private String changelogTopic; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private boolean minValid; public static class Builder implements StoreBuilder> { private final String storeName; + private final Serde keySerde; + private final Serde valueSerde; private boolean loggingEnabled = true; private Map logConfig = new HashMap<>(); private final Duration grace; private final String topic; - public Builder(final String storeName, final Duration grace, final String topic) { + public Builder( + final String storeName, + final Serde keySerde, + final Serde valueSerde, + final Duration grace, + final String topic + ) { this.storeName = storeName; + this.keySerde = keySerde; + this.valueSerde = valueSerde; this.grace = grace; this.topic = topic; } @@ -115,6 +125,8 @@ public StoreBuilder> withLoggingDisabled() { public TimeOrderedKeyValueBuffer build() { return new RocksDBTimeOrderedKeyValueBuffer<>( new RocksDBTimeOrderedKeyValueBytesStoreSupplier(storeName).get(), + keySerde, + valueSerde, grace, topic, loggingEnabled); @@ -138,10 +150,14 @@ public String name() { public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueBytesStore store, + final Serde keySerde, + final Serde valueSerde, final Duration gracePeriod, final String topic, final boolean loggingEnabled) { this.store = store; + this.keySerde = keySerde; + this.valueSerde = valueSerde; this.gracePeriod = gracePeriod.toMillis(); minTimestamp = store.minTimestamp(); minValid = false; @@ -156,7 +172,7 @@ public RocksDBTimeOrderedKeyValueBuffer(final RocksDBTimeOrderedKeyValueBytesSto @Override public void setSerdesIfNull(final SerdeGetter getter) { keySerde = keySerde == null ? (Serde) getter.keySerde() : keySerde; - valueSerde = valueSerde == null ? getter.valueSerde() : valueSerde; + valueSerde = valueSerde == null ?
(Serde) getter.valueSerde() : valueSerde; } private long observedStreamTime() { @@ -169,12 +185,12 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - store.init(context, root); - this.context = ProcessorContextUtils.asInternalProcessorContext(context); - partition = context.taskId().partition(); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + store.init(stateStoreContext, root); + internalContext = ProcessorContextUtils.asInternalProcessorContext(stateStoreContext); + partition = stateStoreContext.taskId().partition(); if (loggingEnabled) { - changelogTopic = ProcessorContextUtils.changelogFor(context, name(), Boolean.TRUE); + changelogTopic = ProcessorContextUtils.changelogFor(stateStoreContext, name(), Boolean.TRUE); } } @@ -314,7 +330,7 @@ private void logValue(final Bytes key, final BufferKey bufferKey, final BufferVa final ByteBuffer buffer = value.serialize(sizeOfBufferTime); buffer.putLong(bufferKey.time()); final byte[] array = buffer.array(); - ((RecordCollector.Supplier) context).recordCollector().send( + ((RecordCollector.Supplier) internalContext).recordCollector().send( changelogTopic, key, array, @@ -328,7 +344,7 @@ private void logValue(final Bytes key, final BufferKey bufferKey, final BufferVa } private void logTombstone(final Bytes key) { - ((RecordCollector.Supplier) context).recordCollector().send( + ((RecordCollector.Supplier) internalContext).recordCollector().send( changelogTopic, key, null, @@ -341,4 +357,4 @@ private void logTombstone(final Bytes key) { null); } -} \ No newline at end of file +} diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedSessionStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedSessionStore.java index 2bdd3b013eec0..cf46e2d5f2d39 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedSessionStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedSessionStore.java @@ -42,9 +42,9 @@ public class RocksDBTimeOrderedSessionStore } @Override - public void init(final StateStoreContext context, final StateStore root) { - wrapped().init(context, root); - this.stateStoreContext = context; + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + wrapped().init(stateStoreContext, root); + this.stateStoreContext = stateStoreContext; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java index aa203e6948c7e..ba39d2548e8d3 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java @@ -55,9 +55,9 @@ public class RocksDBTimeOrderedWindowStore } @Override - public void init(final StateStoreContext context, final StateStore root) { - stateStoreContext = context; - wrapped().init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + wrapped().init(stateStoreContext, root); + this.stateStoreContext = stateStoreContext; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStore.java
b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStore.java index eaaed6f30e373..54580a26a1bde 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStore.java @@ -23,7 +23,6 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.InvalidStateStoreException; import org.apache.kafka.streams.errors.ProcessorStateException; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper; @@ -102,7 +101,7 @@ public class RocksDBVersionedStore implements VersionedKeyValueStore internalProcessorContext; private Sensor expiredRecordSensor; private long observedStreamTime = ConsumerRecord.NO_TIMESTAMP; private boolean consistencyEnabled = false; @@ -489,7 +488,7 @@ interface VersionedStoreClient { /** * @return the segment with the provided id, or {@code null} if the segment is expired */ - T getOrCreateSegmentIfLive(long segmentId, ProcessorContext context, long streamTime); + T getOrCreateSegmentIfLive(long segmentId, StateStoreContext context, long streamTime); /** * @return all segments in the store which contain timestamps at least the provided @@ -525,7 +524,7 @@ public void deleteLatestValue(final Bytes key) { } @Override - public LogicalKeyValueSegment getOrCreateSegmentIfLive(final long segmentId, final ProcessorContext context, final long streamTime) { + public LogicalKeyValueSegment getOrCreateSegmentIfLive(final long segmentId, final StateStoreContext context, final long streamTime) { return segmentStores.getOrCreateSegmentIfLive(segmentId, context, streamTime); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreRestoreWriteBuffer.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreRestoreWriteBuffer.java index bd82465ec4907..e39c1193b48dc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreRestoreWriteBuffer.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreRestoreWriteBuffer.java @@ -18,7 +18,7 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.state.internals.RocksDBVersionedStore.RocksDBVersionedStoreClient; import org.apache.kafka.streams.state.internals.RocksDBVersionedStore.VersionedStoreClient; import org.apache.kafka.streams.state.internals.RocksDBVersionedStore.VersionedStoreSegment; @@ -91,7 +91,7 @@ void flush() throws RocksDBException { // older segments/stores before later ones try (final WriteBatch segmentsBatch = new WriteBatch()) { final List allSegments = restoreClient.reversedSegments(Long.MIN_VALUE); - if (allSegments.size() > 0) { + if (!allSegments.isEmpty()) { // collect entries into write batch for (final WriteBufferSegmentWithDbFallback bufferSegment : allSegments) { final LogicalKeyValueSegment dbSegment = bufferSegment.dbSegment(); @@ -206,7 +206,7 @@ public void deleteLatestValue(final Bytes key) { } @Override - public WriteBufferSegmentWithDbFallback getOrCreateSegmentIfLive(final long segmentId, final 
ProcessorContext context, final long streamTime) { + public WriteBufferSegmentWithDbFallback getOrCreateSegmentIfLive(final long segmentId, final StateStoreContext context, final long streamTime) { if (segmentsWriteBuffer.containsKey(segmentId)) { return segmentsWriteBuffer.get(segmentId); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBWindowStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBWindowStore.java index 61212048285d1..61823c3f2617b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBWindowStore.java @@ -48,9 +48,9 @@ public class RocksDBWindowStore } @Override - public void init(final StateStoreContext context, final StateStore root) { - wrapped().init(context, root); - this.stateStoreContext = context; + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + wrapped().init(stateStoreContext, root); + this.stateStoreContext = stateStoreContext; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/Segments.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/Segments.java index a3ef2426c3cc7..18086a5441b65 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/Segments.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/Segments.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.state.internals; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import java.util.List; @@ -28,11 +28,11 @@ interface Segments { S segmentForTimestamp(final long timestamp); - S getOrCreateSegmentIfLive(final long segmentId, final ProcessorContext context, final long streamTime); + S getOrCreateSegmentIfLive(final long segmentId, final StateStoreContext context, final long streamTime); - S getOrCreateSegment(final long segmentId, final ProcessorContext context); + S getOrCreateSegment(final long segmentId, final StateStoreContext context); - void openExisting(final ProcessorContext context, final long streamTime); + void openExisting(final StateStoreContext context, final long streamTime); List segments(final long timeFrom, final long timeTo, final boolean forward); diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreQueryUtils.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreQueryUtils.java index bc7a52a48141b..4bcbe0089c9ae 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreQueryUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreQueryUtils.java @@ -205,11 +205,11 @@ private static QueryResult runRangeQuery( final ResultOrder order = rangeQuery.resultOrder(); final KeyValueIterator iterator; try { - if (!lowerRange.isPresent() && !upperRange.isPresent() && !order.equals(ResultOrder.DESCENDING)) { + if (lowerRange.isEmpty() && upperRange.isEmpty() && !order.equals(ResultOrder.DESCENDING)) { iterator = kvStore.all(); } else if (!order.equals(ResultOrder.DESCENDING)) { iterator = kvStore.range(lowerRange.orElse(null), upperRange.orElse(null)); - } else if (!lowerRange.isPresent() && !upperRange.isPresent()) { + } else if (lowerRange.isEmpty() && upperRange.isEmpty()) { iterator = kvStore.reverseAll(); } else { iterator = kvStore.reverseRange(lowerRange.orElse(null), upperRange.orElse(null)); @@ 
-500,4 +500,4 @@ private static String parseStoreException(final Exception e, final StateStor printWriter.flush(); return stringWriter.toString(); } -} \ No newline at end of file +} diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializer.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializer.java index cf44ca19bb1d7..813024fe7c13b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializer.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializer.java @@ -20,13 +20,11 @@ import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.internals.WrappingNullableUtils; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.SerdeGetter; import org.apache.kafka.streams.state.StateSerdes; - public class StoreSerdeInitializer { static StateSerdes prepareStoreSerde(final StateStoreContext context, final String storeName, @@ -41,19 +39,6 @@ static StateSerdes prepareStoreSerde(final StateStoreContext contex ); } - static StateSerdes prepareStoreSerde(final ProcessorContext context, - final String storeName, - final String changelogTopic, - final Serde keySerde, - final Serde valueSerde, - final PrepareFunc prepareValueSerdeFunc) { - return new StateSerdes<>( - changelogTopic, - prepareSerde(WrappingNullableUtils::prepareKeySerde, storeName, keySerde, new SerdeGetter(context), true, context.taskId()), - prepareSerde(prepareValueSerdeFunc, storeName, valueSerde, new SerdeGetter(context), false, context.taskId()) - ); - } - private static Serde prepareSerde(final PrepareFunc prepare, final String storeName, final Serde serde, diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingWindowStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingWindowStore.java index fadc7eafe24ae..47cbfde4c40bd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingWindowStore.java @@ -71,7 +71,7 @@ class TimeOrderedCachingWindowStore private String cacheName; private boolean hasIndex; private boolean sendOldValues; - private InternalProcessorContext context; + private InternalProcessorContext internalContext; private StateSerdes bytesSerdes; private CacheFlushListener flushListener; @@ -102,31 +102,29 @@ private RocksDBTimeOrderedWindowStore getWrappedStore(final StateStore wrapped) return (RocksDBTimeOrderedWindowStore) wrapped; } if (wrapped instanceof WrappedStateStore) { - return getWrappedStore(((WrappedStateStore) wrapped).wrapped()); + return getWrappedStore(((WrappedStateStore) wrapped).wrapped()); } return null; } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { final String prefix = StreamsConfig.InternalConfig.getString( - context.appConfigs(), + stateStoreContext.appConfigs(), StreamsConfig.InternalConfig.TOPIC_PREFIX_ALTERNATIVE, - context.applicationId() + stateStoreContext.applicationId() ); - this.context = asInternalProcessorContext(context); - final String topic 
= ProcessorStateManager.storeChangelogTopic(prefix, name(), context.taskId().topologyName()); + internalContext = asInternalProcessorContext(stateStoreContext); + final String topic = ProcessorStateManager.storeChangelogTopic(prefix, name(), stateStoreContext.taskId().topologyName()); bytesSerdes = new StateSerdes<>( topic, Serdes.Bytes(), Serdes.ByteArray()); - cacheName = context.taskId() + "-" + name(); + cacheName = stateStoreContext.taskId() + "-" + name(); - this.context.registerCacheFlushListener(cacheName, entries -> { - putAndMaybeForward(entries, this.context); - }); - super.init(context, root); + internalContext.registerCacheFlushListener(cacheName, entries -> putAndMaybeForward(entries, internalContext)); + super.init(stateStoreContext, root); } private void putAndMaybeForward(final List entries, @@ -257,12 +255,12 @@ public synchronized void put(final Bytes key, final LRUCacheEntry entry = new LRUCacheEntry( value, - context.headers(), + internalContext.headers(), true, - context.offset(), - context.timestamp(), - context.partition(), - context.topic()); + internalContext.offset(), + internalContext.timestamp(), + internalContext.partition(), + internalContext.topic()); // Put to index first so that base can be evicted later if (hasIndex) { @@ -270,20 +268,20 @@ public synchronized void put(final Bytes key, // it could be evicted when we are putting base key. In that case, base key is not yet // in cache so we can't store key/value to store when index is evicted. Then if we fetch // using index, we can't find it in either store or cache - context.cache().put(cacheName, baseKeyCacheFunction.cacheKey(baseKeyBytes), entry); + internalContext.cache().put(cacheName, baseKeyCacheFunction.cacheKey(baseKeyBytes), entry); final LRUCacheEntry emptyEntry = new LRUCacheEntry( new byte[0], new RecordHeaders(), true, - context.offset(), - context.timestamp(), - context.partition(), + internalContext.offset(), + internalContext.timestamp(), + internalContext.partition(), ""); final Bytes indexKey = KeyFirstWindowKeySchema.toStoreKeyBinary(key, windowStartTimestamp, 0); - context.cache().put(cacheName, indexKeyCacheFunction.cacheKey(indexKey), emptyEntry); + internalContext.cache().put(cacheName, indexKeyCacheFunction.cacheKey(indexKey), emptyEntry); } else { - context.cache().put(cacheName, baseKeyCacheFunction.cacheKey(baseKeyBytes), entry); + internalContext.cache().put(cacheName, baseKeyCacheFunction.cacheKey(baseKeyBytes), entry); } maxObservedTimestamp.set(Math.max(windowStartTimestamp, maxObservedTimestamp.get())); } @@ -292,14 +290,14 @@ public synchronized void put(final Bytes key, public byte[] fetch(final Bytes key, final long timestamp) { validateStoreOpen(); - if (context.cache() == null) { + if (internalContext.cache() == null) { return wrapped().fetch(key, timestamp); } final Bytes baseBytesKey = TimeFirstWindowKeySchema.toStoreKeyBinary(key, timestamp, 0); final Bytes cacheKey = baseKeyCacheFunction.cacheKey(baseBytesKey); - final LRUCacheEntry entry = context.cache().get(cacheName, cacheKey); + final LRUCacheEntry entry = internalContext.cache().get(cacheName, cacheKey); if (entry == null) { return wrapped().fetch(key, timestamp); } else { @@ -316,7 +314,7 @@ public synchronized WindowStoreIterator fetch(final Bytes key, validateStoreOpen(); final WindowStoreIterator underlyingIterator = wrapped().fetch(key, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } @@ -332,7 +330,7 @@ public synchronized 
WindowStoreIterator backwardFetch(final Bytes key, validateStoreOpen(); final WindowStoreIterator underlyingIterator = wrapped().backwardFetch(key, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } @@ -377,7 +375,7 @@ public KeyValueIterator, byte[]> fetch(final Bytes keyFrom, final KeyValueIterator, byte[]> underlyingIterator = wrapped().fetch(keyFrom, keyTo, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } @@ -402,7 +400,7 @@ public KeyValueIterator, byte[]> backwardFetch(final Bytes keyFr final KeyValueIterator, byte[]> underlyingIterator = wrapped().backwardFetch(keyFrom, keyTo, timeFrom, timeTo); - if (context.cache() == null) { + if (internalContext.cache() == null) { return underlyingIterator; } @@ -500,25 +498,25 @@ public KeyValueIterator, byte[]> backwardAll() { @Override public synchronized void flush() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); wrapped().flush(); } @Override public void flushCache() { - context.cache().flush(cacheName); + internalContext.cache().flush(cacheName); } @Override public void clearCache() { - context.cache().clear(cacheName); + internalContext.cache().clear(cacheName); } @Override public synchronized void close() { final LinkedList suppressed = executeAll( - () -> context.cache().flush(cacheName), - () -> context.cache().close(cacheName), + () -> internalContext.cache().flush(cacheName), + () -> internalContext.cache().close(cacheName), wrapped()::close ); if (!suppressed.isEmpty()) { @@ -575,13 +573,13 @@ private CacheIteratorWrapper(final Bytes keyFrom, this.currentSegmentId = cacheFunction.segmentId(timeFrom); setCacheKeyRange(timeFrom, currentSegmentLastTime()); - this.current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { this.currentSegmentId = cacheFunction.segmentId(Math.min(timeTo, maxObservedTimestamp.get())); this.lastSegmentId = cacheFunction.segmentId(timeFrom); setCacheKeyRange(currentSegmentBeginTime(), Math.min(timeTo, maxObservedTimestamp.get())); - this.current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + this.current = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } @@ -599,7 +597,7 @@ public boolean hasNext() { final Bytes indexKey = indexKeyCacheFunction.key(cacheIndexKey); final Bytes baseKey = indexKeyToBaseKey(indexKey); final Bytes cachedBaseKey = baseKeyCacheFunction.cacheKey(baseKey); - cachedBaseValue = context.cache().get(cacheName, cachedBaseKey); + cachedBaseValue = internalContext.cache().get(cacheName, cachedBaseKey); if (cachedBaseValue != null) { return true; } @@ -683,7 +681,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().range(cacheName, cacheKeyFrom, cacheKeyTo); } else { --currentSegmentId; @@ -697,7 +695,7 @@ private void getNextSegmentIterator() { current.close(); - current = context.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); + current = internalContext.cache().reverseRange(cacheName, cacheKeyFrom, cacheKeyTo); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/TimestampedSegments.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/TimestampedSegments.java index 
597d7bf0ce064..70fae5030606d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/TimestampedSegments.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/TimestampedSegments.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.state.internals; -import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorContextUtils; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder; @@ -37,7 +37,7 @@ class TimestampedSegments extends AbstractSegments { @Override public TimestampedSegment getOrCreateSegment(final long segmentId, - final ProcessorContext context) { + final StateStoreContext context) { if (segments.containsKey(segmentId)) { return segments.get(segmentId); } else { @@ -55,7 +55,7 @@ public TimestampedSegment getOrCreateSegment(final long segmentId, @Override public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId, - final ProcessorContext context, + final StateStoreContext context, final long streamTime) { final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime); cleanupExpiredSegments(streamTime); @@ -63,7 +63,7 @@ public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId, } @Override - public void openExisting(final ProcessorContext context, final long streamTime) { + public void openExisting(final StateStoreContext context, final long streamTime) { metricsRecorder.init(ProcessorContextUtils.metricsImpl(context), context.taskId()); super.openExisting(context, streamTime); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/VersionedKeyValueToBytesStoreAdapter.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/VersionedKeyValueToBytesStoreAdapter.java index b91fb97b2c5b4..5daa6ed1815dd 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/VersionedKeyValueToBytesStoreAdapter.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/VersionedKeyValueToBytesStoreAdapter.java @@ -88,8 +88,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - inner.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + inner.init(stateStoreContext, root); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/WindowToTimestampedWindowByteStoreAdapter.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/WindowToTimestampedWindowByteStoreAdapter.java index b46a14c563f49..eec2e2ff1d8de 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/WindowToTimestampedWindowByteStoreAdapter.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/WindowToTimestampedWindowByteStoreAdapter.java @@ -156,8 +156,8 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { - store.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + store.init(stateStoreContext, root); } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java index 99141c804fafe..adbb7568c87c5 100644 --- 
a/streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/WrappedStateStore.java @@ -36,7 +36,7 @@ public static boolean isTimestamped(final StateStore stateStore) { if (stateStore instanceof TimestampedBytesStore) { return true; } else if (stateStore instanceof WrappedStateStore) { - return isTimestamped(((WrappedStateStore) stateStore).wrapped()); + return isTimestamped(((WrappedStateStore) stateStore).wrapped()); } else { return false; } @@ -46,7 +46,7 @@ public static boolean isVersioned(final StateStore stateStore) { if (stateStore instanceof VersionedBytesStore) { return true; } else if (stateStore instanceof WrappedStateStore) { - return isVersioned(((WrappedStateStore) stateStore).wrapped()); + return isVersioned(((WrappedStateStore) stateStore).wrapped()); } else { return false; } @@ -59,8 +59,8 @@ public WrappedStateStore(final S wrapped) { } @Override - public void init(final StateStoreContext context, final StateStore root) { - wrapped.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + wrapped.init(stateStoreContext, root); } @SuppressWarnings("unchecked") @@ -76,14 +76,14 @@ public boolean setFlushListener(final CacheFlushListener listener, @Override public void flushCache() { if (wrapped instanceof CachedStateStore) { - ((CachedStateStore) wrapped).flushCache(); + ((CachedStateStore) wrapped).flushCache(); } } @Override public void clearCache() { if (wrapped instanceof CachedStateStore) { - ((CachedStateStore) wrapped).clearCache(); + ((CachedStateStore) wrapped).clearCache(); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorder.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorder.java index fd7adab1b5728..10e8cb804fece 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorder.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorder.java @@ -135,7 +135,7 @@ public TaskId taskId() { public void init(final StreamsMetricsImpl streamsMetrics, final TaskId taskId) { Objects.requireNonNull(streamsMetrics, "Streams metrics must not be null"); - Objects.requireNonNull(streamsMetrics, "task ID must not be null"); + Objects.requireNonNull(taskId, "task ID must not be null"); if (this.taskId != null && !this.taskId.equals(taskId)) { throw new IllegalStateException("Metrics recorder is re-initialised with different task: previous task is " + this.taskId + " whereas current task is " + taskId + ". This is a bug in Kafka Streams. 
" + @@ -462,8 +462,7 @@ public void record(final long now) { writeStallDuration += valueProviders.statistics.getAndResetTickerCount(TickerType.STALL_MICROS); bytesWrittenDuringCompaction += valueProviders.statistics.getAndResetTickerCount(TickerType.COMPACT_WRITE_BYTES); bytesReadDuringCompaction += valueProviders.statistics.getAndResetTickerCount(TickerType.COMPACT_READ_BYTES); - numberOfOpenFiles += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_OPENS) - - valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_CLOSES); + numberOfOpenFiles = -1; numberOfFileErrors += valueProviders.statistics.getAndResetTickerCount(TickerType.NO_FILE_ERRORS); final HistogramData memtableFlushTimeData = valueProviders.statistics.getHistogramData(HistogramType.FLUSH_TIME); memtableFlushTimeSum += memtableFlushTimeData.getSum(); diff --git a/streams/src/main/resources/common/message/SubscriptionInfoData.json b/streams/src/main/resources/common/message/SubscriptionInfoData.json index 6304478739bac..0405130bb3c92 100644 --- a/streams/src/main/resources/common/message/SubscriptionInfoData.json +++ b/streams/src/main/resources/common/message/SubscriptionInfoData.json @@ -21,55 +21,65 @@ { "name": "version", "versions": "1+", - "type": "int32" + "type": "int32", + "about": "The version of the subscription info data." }, { "name": "latestSupportedVersion", "versions": "3+", "default": "-1", - "type": "int32" + "type": "int32", + "about": "The latest supported version of the subscription info data." }, { "name": "processId", "versions": "1+", - "type": "uuid" + "type": "uuid", + "about": "The process id of the client that sent the request." }, /***** Protocol version 1-6 only (after 6 this is encoded in task offset sum map) *****/ { "name": "prevTasks", "versions": "1-6", - "type": "[]TaskId" + "type": "[]TaskId", + "about": "The previous tasks that were assigned to the client." }, { "name": "standbyTasks", "versions": "1-6", - "type": "[]TaskId" + "type": "[]TaskId", + "about": "The standby tasks that were assigned to the client." }, /***************/ { "name": "userEndPoint", "versions": "2+", - "type": "bytes" + "type": "bytes", + "about": "The user end point of the client that sent the request." }, { "name": "taskOffsetSums", "versions": "7+", - "type": "[]TaskOffsetSum" + "type": "[]TaskOffsetSum", + "about": "The task offset sums that were assigned to the client." }, { "name": "uniqueField", "versions": "8+", - "type": "int8" + "type": "int8", + "about": "A unique field that is used to identify the client that sent the request." }, { "name": "errorCode", "versions": "9+", - "type": "int32" + "type": "int32", + "about": "The error code of the request." }, { "name": "clientTags", "versions": "11+", - "type": "[]ClientTag" + "type": "[]ClientTag", + "about": "The client tags that were assigned to the client." } ], "commonStructs": [ @@ -81,12 +91,14 @@ { "name": "topicGroupId", "versions": "1-6", - "type": "int32" + "type": "int32", + "about": "The topic group id of the task." }, { "name": "partition", "versions": "1-6", - "type": "int32" + "type": "int32", + "about": "The partition of the task." } ] }, @@ -97,31 +109,36 @@ { "name": "topicGroupId", "versions": "7+", - "type": "int32" + "type": "int32", + "about": "The topic group id of the task." }, // Prior to version 10, in 7-9, the below fields (partition and offsetSum) were encoded via the nested // partitionToOffsetSum struct. 
In 10+ all fields are encoded directly in the TaskOffsetSum struct { "name": "partition", "versions": "10+", - "type": "int32" + "type": "int32", + "about": "The partition of the task." }, { "name": "offsetSum", "versions": "10+", - "type": "int64" + "type": "int64", + "about": "The offset sum of the task." }, { "name": "namedTopology", "versions": "10+", "nullableVersions": "10+", "ignorable": "false", // namedTopology is not ignorable because if you do, a TaskId may not be unique - "type": "string" + "type": "string", + "about": "The named topology of the task." }, { "name": "partitionToOffsetSum", "versions": "7-9", - "type": "[]PartitionToOffsetSum" + "type": "[]PartitionToOffsetSum", + "about": "The partition to offset sum of the task." } ] }, @@ -133,12 +150,14 @@ { "name": "partition", "versions": "7-9", - "type": "int32" + "type": "int32", + "about": "The partition of the task." }, { "name": "offsetSum", "versions": "7-9", - "type": "int64" + "type": "int64", + "about": "The offset sum of the task." } ] }, @@ -149,12 +168,14 @@ { "name": "key", "versions": "11+", - "type": "bytes" + "type": "bytes", + "about": "The key of the client tag." }, { "name": "value", "versions": "11+", - "type": "bytes" + "type": "bytes", + "about": "The value of the client tag." } ] } diff --git a/streams/src/test/java/org/apache/kafka/streams/AutoOffsetResetTest.java b/streams/src/test/java/org/apache/kafka/streams/AutoOffsetResetTest.java new file mode 100644 index 0000000000000..fb4d9738c9337 --- /dev/null +++ b/streams/src/test/java/org/apache/kafka/streams/AutoOffsetResetTest.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.streams; + +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class AutoOffsetResetTest { + + @Test + void shouldThrowExceptionOnDurationForNoneReset() { + final AutoOffsetResetInternal none = new AutoOffsetResetInternal(AutoOffsetReset.none()); + assertThrows(IllegalStateException.class, none::duration, "None should not have a duration."); + } + + @Test + void shouldThrowExceptionOnDurationForEarliestReset() { + final AutoOffsetResetInternal earliest = new AutoOffsetResetInternal(AutoOffsetReset.earliest()); + assertThrows(IllegalStateException.class, earliest::duration, "Earliest should not have a duration."); + } + + @Test + void shouldThrowExceptionOnDurationForLatestReset() { + final AutoOffsetResetInternal latest = new AutoOffsetResetInternal(AutoOffsetReset.latest()); + assertThrows(IllegalStateException.class, latest::duration, "Latest should not have a duration."); + } + + @Test + void customDurationShouldMatchExpectedValue() { + final Duration duration = Duration.ofSeconds(10L); + final AutoOffsetResetInternal custom = new AutoOffsetResetInternal(AutoOffsetReset.byDuration(duration)); + assertEquals(10L, custom.duration().toSeconds(), "Duration should match the specified value in seconds."); + } + + @Test + void shouldThrowExceptionIfDurationIsNegative() { + final IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> AutoOffsetReset.byDuration(Duration.ofSeconds(-1)), + "Creating an AutoOffsetReset with a negative duration should throw an IllegalArgumentException." 
+ ); + assertEquals("Duration cannot be negative", exception.getMessage(), "Exception message should indicate the duration cannot be negative."); + } + + @Test + void twoInstancesCreatedAtTheSameTimeWithSameOptionsShouldBeEqual() { + final AutoOffsetReset latest1 = AutoOffsetReset.latest(); + final AutoOffsetReset latest2 = AutoOffsetReset.latest(); + final AutoOffsetReset earliest1 = AutoOffsetReset.earliest(); + final AutoOffsetReset earliest2 = AutoOffsetReset.earliest(); + final AutoOffsetReset custom1 = AutoOffsetReset.byDuration(Duration.ofSeconds(5)); + final AutoOffsetReset custom2 = AutoOffsetReset.byDuration(Duration.ofSeconds(5)); + final AutoOffsetReset customDifferent = AutoOffsetReset.byDuration(Duration.ofSeconds(10)); + + // Equals + assertEquals(latest1, latest2, "Two latest instances should be equal."); + assertEquals(earliest1, earliest2, "Two earliest instances should be equal."); + assertEquals(custom1, custom2, "Two custom instances with the same duration should be equal."); + assertNotEquals(latest1, earliest1, "Latest and earliest should not be equal."); + assertNotEquals(custom1, customDifferent, "Custom instances with different durations should not be equal."); + + // HashCode + assertEquals(latest1.hashCode(), latest2.hashCode(), "HashCode for equal instances should be the same."); + assertEquals(custom1.hashCode(), custom2.hashCode(), "HashCode for equal custom instances should be the same."); + assertNotEquals(custom1.hashCode(), customDifferent.hashCode(), "HashCode for different custom instances should not match."); + } +} diff --git a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java index ab35530abd11a..1516dfb5eda5a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java @@ -89,6 +89,7 @@ import java.util.Properties; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; @@ -145,7 +146,7 @@ public class KafkaStreamsTest { private Properties props; private MockAdminClient adminClient; private StateListenerStub streamsStateListener; - + @Mock private StreamThread streamThreadOne; @Mock @@ -344,13 +345,49 @@ private void prepareThreadState(final StreamThread thread, final AtomicReference }).when(thread).start(); } + private CountDownLatch terminableThreadBlockingLatch = new CountDownLatch(1); + private void prepareTerminableThread(final StreamThread thread) throws InterruptedException { doAnswer(invocation -> { - Thread.sleep(2000L); + terminableThreadBlockingLatch.await(); return null; }).when(thread).join(); } + private class KafkaStreamsWithTerminableThread extends KafkaStreams { + + KafkaStreamsWithTerminableThread(final Topology topology, + final Properties props, + final KafkaClientSupplier clientSupplier, + final Time time) { + super(topology, props, clientSupplier, time); + } + + + KafkaStreamsWithTerminableThread(final Topology topology, + final Properties props, + final KafkaClientSupplier clientSupplier) { + super(topology, props, clientSupplier); + } + + KafkaStreamsWithTerminableThread(final Topology topology, + final StreamsConfig applicationConfigs) { + super(topology, applicationConfigs); + } + + KafkaStreamsWithTerminableThread(final Topology topology, + final StreamsConfig 
applicationConfigs, + final KafkaClientSupplier clientSupplier) { + super(topology, applicationConfigs, clientSupplier); + } + + @Override + public void close() { + terminableThreadBlockingLatch.countDown(); + super.close(); + } + } + @Test public void testShouldTransitToNotRunningIfCloseRightAfterCreated() { prepareStreams(); @@ -947,7 +984,7 @@ public void shouldThrowOnCleanupWhileShuttingDown() throws Exception { prepareThreadState(streamThreadOne, state1); prepareThreadState(streamThreadTwo, state2); prepareTerminableThread(streamThreadOne); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { streams.start(); waitForCondition( () -> streams.state() == KafkaStreams.State.RUNNING, @@ -972,7 +1009,7 @@ public void shouldThrowOnCleanupWhileShuttingDownStreamClosedWithCloseOptionLeav when(mockClientSupplier.getAdmin(any())).thenReturn(adminClient); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier, time)) { streams.start(); waitForCondition( () -> streams.state() == KafkaStreams.State.RUNNING, @@ -997,7 +1034,7 @@ public void shouldThrowOnCleanupWhileShuttingDownStreamClosedWithCloseOptionLeav prepareThreadState(streamThreadOne, state1); prepareThreadState(streamThreadTwo, state2); prepareTerminableThread(streamThreadOne); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { streams.start(); waitForCondition( () -> streams.state() == KafkaStreams.State.RUNNING, @@ -1154,7 +1191,7 @@ public void shouldReturnFalseOnCloseWhenThreadsHaventTerminated() throws Excepti prepareTerminableThread(streamThreadOne); // do not use mock time so that it can really elapse - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) { assertFalse(streams.close(Duration.ofMillis(10L))); } } @@ -1166,7 +1203,7 @@ public void shouldThrowOnNegativeTimeoutForClose() throws Exception { prepareStreamThread(streamThreadTwo, 2); prepareTerminableThread(streamThreadOne); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { assertThrows(IllegalArgumentException.class, () -> streams.close(Duration.ofMillis(-1L))); } } @@ -1178,7 +1215,7 @@ public void shouldNotBlockInCloseForZeroDuration() throws Exception { prepareStreamThread(streamThreadTwo, 2); prepareTerminableThread(streamThreadOne); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { // with mock time that does not elapse, close would not return if it ever waits on the state transition assertFalse(streams.close(Duration.ZERO)); } @@ -1193,7 +1230,7 @@ public 
void shouldReturnFalseOnCloseWithCloseOptionWithLeaveGroupFalseWhenThread final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ofMillis(10L)); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) { assertFalse(streams.close(closeOptions)); } } @@ -1207,7 +1244,7 @@ public void shouldThrowOnNegativeTimeoutForCloseWithCloseOptionLeaveGroupFalse() final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ofMillis(-1L)); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { assertThrows(IllegalArgumentException.class, () -> streams.close(closeOptions)); } } @@ -1221,7 +1258,7 @@ public void shouldNotBlockInCloseWithCloseOptionLeaveGroupFalseForZeroDuration() final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ZERO); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier)) { assertFalse(streams.close(closeOptions)); } } @@ -1240,7 +1277,7 @@ public void shouldReturnFalseOnCloseWithCloseOptionWithLeaveGroupTrueWhenThreads final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ofMillis(10L)); closeOptions.leaveGroup(true); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier)) { assertFalse(streams.close(closeOptions)); } } @@ -1258,7 +1295,7 @@ public void shouldThrowOnNegativeTimeoutForCloseWithCloseOptionLeaveGroupTrue() final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ofMillis(-1L)); closeOptions.leaveGroup(true); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier, time)) { assertThrows(IllegalArgumentException.class, () -> streams.close(closeOptions)); } } @@ -1277,7 +1314,7 @@ public void shouldNotBlockInCloseWithCloseOptionLeaveGroupTrueForZeroDuration() final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ZERO); closeOptions.leaveGroup(true); - try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, mockClientSupplier)) { assertFalse(streams.close(closeOptions)); } } @@ -1300,7 +1337,7 @@ public void shouldTriggerRecordingOfRocksDBMetricsIfRecordingLevelIsDebug() thro builder.table("topic", Materialized.as("store")); props.setProperty(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, RecordingLevel.DEBUG.name()); - try (final KafkaStreams streams = new 
KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) { + try (final KafkaStreams streams = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), props, supplier, time)) { streams.start(); } @@ -1324,7 +1361,7 @@ public void shouldGetClientSupplierFromConfigForConstructor() throws Exception { final StreamsConfig mockConfig = spy(config); when(mockConfig.getKafkaClientSupplier()).thenReturn(supplier); - try (final KafkaStreams ignored = new KafkaStreams(getBuilderWithSource().build(), mockConfig)) { + try (final KafkaStreams ignored = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), mockConfig)) { // no-op } // It's called once in above when mock @@ -1361,7 +1398,7 @@ public void shouldUseProvidedClientSupplier() throws Exception { final StreamsConfig config = new StreamsConfig(props); final StreamsConfig mockConfig = spy(config); - try (final KafkaStreams ignored = new KafkaStreams(getBuilderWithSource().build(), mockConfig, supplier)) { + try (final KafkaStreams ignored = new KafkaStreamsWithTerminableThread(getBuilderWithSource().build(), mockConfig, supplier)) { // no-op } // It's called once in above when mock diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java index aa646e873cf3f..08e413703c13a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java @@ -28,6 +28,7 @@ import org.apache.kafka.streams.kstream.Grouped; import org.apache.kafka.streams.kstream.JoinWindows; import org.apache.kafka.streams.kstream.Joined; +import org.apache.kafka.streams.kstream.KGroupedStream; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; @@ -35,10 +36,15 @@ import org.apache.kafka.streams.kstream.Printed; import org.apache.kafka.streams.kstream.Produced; import org.apache.kafka.streams.kstream.SessionWindows; +import org.apache.kafka.streams.kstream.SlidingWindows; import org.apache.kafka.streams.kstream.StreamJoined; +import org.apache.kafka.streams.kstream.Suppressed; +import org.apache.kafka.streams.kstream.TableJoined; +import org.apache.kafka.streams.kstream.TimeWindows; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; import org.apache.kafka.streams.processor.internals.ProcessorNode; @@ -46,6 +52,7 @@ import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.SessionStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.internals.InMemoryKeyValueStore; @@ -54,32 +61,43 @@ import org.apache.kafka.streams.state.internals.RocksDBStore; import org.apache.kafka.streams.state.internals.RocksDBWindowStore; import org.apache.kafka.streams.state.internals.WrappedStateStore; +import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper; +import 
org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper.WrapperRecorder; +import org.apache.kafka.test.MockApiFixedKeyProcessorSupplier; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockMapper; import org.apache.kafka.test.MockPredicate; import org.apache.kafka.test.MockValueJoiner; -import org.apache.kafka.test.NoopValueTransformer; -import org.apache.kafka.test.NoopValueTransformerWithKey; import org.apache.kafka.test.StreamsTestUtils; +import org.hamcrest.CoreMatchers; +import org.hamcrest.Matchers; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import java.time.Duration; import java.time.Instant; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.Set; import java.util.regex.Pattern; import static java.util.Arrays.asList; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; +import static org.apache.kafka.streams.StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG; import static org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_0; import static org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_1; +import static org.apache.kafka.streams.state.Stores.inMemoryKeyValueStore; +import static org.apache.kafka.streams.state.Stores.timestampedKeyValueStoreBuilder; +import static org.apache.kafka.streams.utils.TestUtils.PROCESSOR_WRAPPER_COUNTER_CONFIG; +import static org.apache.kafka.streams.utils.TestUtils.dummyStreamsConfigMap; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; @@ -114,7 +132,7 @@ public void shouldAddGlobalStore() { final StreamsBuilder builder = new StreamsBuilder(); builder.addGlobalStore( Stores.keyValueStoreBuilder( - Stores.inMemoryKeyValueStore("store"), + inMemoryKeyValueStore("store"), Serdes.String(), Serdes.String() ), @@ -845,24 +863,6 @@ public void shouldUseSpecifiedNameForForEachOperation() { assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); } - @Test - @SuppressWarnings("deprecation") - public void shouldUseSpecifiedNameForTransformValues() { - builder.stream(STREAM_TOPIC).transformValues(() -> new NoopValueTransformer<>(), Named.as(STREAM_OPERATION_NAME)); - builder.build(); - final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldUseSpecifiedNameForTransformValuesWithKey() { - builder.stream(STREAM_TOPIC).transformValues(() -> new NoopValueTransformerWithKey<>(), Named.as(STREAM_OPERATION_NAME)); - builder.build(); - final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); - } - @Test public void shouldUseSpecifiedNameForSplitOperation() { builder.stream(STREAM_TOPIC) @@ -1317,29 +1317,21 @@ public void shouldUseSpecifiedNameForProcessOperation() { } @Test - public void shouldUseSpecifiedNameForPrintOperation() { - 
builder.stream(STREAM_TOPIC).print(Printed.toSysOut().withName("print-processor")); - builder.build(); - final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "print-processor"); - } + public void shouldUseSpecifiedNameForProcessValuesOperation() { + builder.stream(STREAM_TOPIC) + .processValues(new MockApiFixedKeyProcessorSupplier<>(), Named.as("test-fixed-key-processor")); - @Test - @SuppressWarnings("deprecation") - public void shouldUseSpecifiedNameForFlatTransformValueOperation() { - builder.stream(STREAM_TOPIC).flatTransformValues(() -> new NoopValueTransformer<>(), Named.as(STREAM_OPERATION_NAME)); builder.build(); final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); + assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "test-fixed-key-processor"); } @Test - @SuppressWarnings({"unchecked", "rawtypes", "deprecation"}) - public void shouldUseSpecifiedNameForFlatTransformValueWithKeyOperation() { - builder.stream(STREAM_TOPIC).flatTransformValues(() -> new NoopValueTransformerWithKey(), Named.as(STREAM_OPERATION_NAME)); + public void shouldUseSpecifiedNameForPrintOperation() { + builder.stream(STREAM_TOPIC).print(Printed.toSysOut().withName("print-processor")); builder.build(); final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); + assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "print-processor"); } @Test @@ -1392,7 +1384,7 @@ public void shouldUseSpecifiedNameForAggregateOperationGivenTable() { @Test public void shouldUseSpecifiedNameForGlobalStoreProcessor() { builder.addGlobalStore(Stores.keyValueStoreBuilder( - Stores.inMemoryKeyValueStore("store"), + inMemoryKeyValueStore("store"), Serdes.String(), Serdes.String() ), @@ -1409,7 +1401,7 @@ public void shouldUseSpecifiedNameForGlobalStoreProcessor() { @Test public void shouldUseDefaultNameForGlobalStoreProcessor() { builder.addGlobalStore(Stores.keyValueStoreBuilder( - Stores.inMemoryKeyValueStore("store"), + inMemoryKeyValueStore("store"), Serdes.String(), Serdes.String() ), @@ -1423,6 +1415,868 @@ public void shouldUseDefaultNameForGlobalStoreProcessor() { assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "KTABLE-SOURCE-0000000001"); } + @Test + public void shouldWrapProcessorsForProcess() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + // Add a bit of randomness to the lambda-created processors to avoid them being + // optimized into a shared instance that will cause the ApiUtils#checkSupplier + // call to fail + final Random random = new Random(); + + final StoreBuilder store = timestampedKeyValueStoreBuilder(inMemoryKeyValueStore("store"), Serdes.String(), Serdes.String()); + builder.stream("input", Consumed.as("source")) + .process( + new ProcessorSupplier<>() { + @Override + public Processor get() { + return record -> 
System.out.println("Processing: " + random.nextInt()); + } + + @Override + public Set> stores() { + return Collections.singleton(store); + } + }, + Named.as("stateful-process-1")) + .process( + new ProcessorSupplier<>() { + @Override + public Processor get() { + return record -> System.out.println("Processing: " + random.nextInt()); + } + + @Override + public Set> stores() { + return Collections.singleton(store); + } + }, + Named.as("stateful-process-2")) + .processValues( + () -> record -> System.out.println("Processing values: " + random.nextInt()), + Named.as("stateless-processValues")) + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(3)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "stateful-process-1", "stateful-process-2", "stateless-processValues")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForStreamReduce() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupBy(KeyValue::new, Grouped.as("groupBy")) // wrapped 1 & 2 (implicit selectKey & repartition) + .reduce((l, r) -> l, Named.as("reduce"), Materialized.as("store")) // wrapped 3 + .toStream(Named.as("toStream"))// wrapped 4 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "groupBy", "groupBy-repartition-filter", "reduce", "toStream")); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(4)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForStreamAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupByKey() + .count(Named.as("count")) // wrapped 1 + .toStream(Named.as("toStream"))// wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("count", "toStream")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForSuppress() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupByKey() + .count(Named.as("count"))// wrapped 1 + 
.suppress(Suppressed.untilTimeLimit(Duration.ofSeconds(10), Suppressed.BufferConfig.unbounded()).withName("suppressed")) // wrapped 2 + .toStream(Named.as("toStream"))// wrapped 3 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(3)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("count", "toStream", "suppressed")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForTimeWindowStreamAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupByKey() + .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofDays(1))) + .count(Named.as("count")) // wrapped 1 + .toStream(Named.as("toStream"))// wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("count", "toStream")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForSlidingWindowStreamAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupByKey() + .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(Duration.ofDays(1))) + .count(Named.as("count")) // wrapped 1 + .toStream(Named.as("toStream"))// wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("count", "toStream")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForSessionWindowStreamAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .groupByKey() + .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(Duration.ofDays(1))) + .count(Named.as("count")) // wrapped 1 + .toStream(Named.as("toStream"))// wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("count", "toStream")); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), 
CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForCoGroupedStreamAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("one", Consumed.as("source-1")); + final KStream stream2 = builder.stream("two", Consumed.as("source-2")); + + final KGroupedStream grouped1 = stream1.groupByKey(Grouped.as("groupByKey-1")); + final KGroupedStream grouped2 = stream2.groupByKey(Grouped.as("groupByKey-2")); + + grouped1 + .cogroup((k, v, a) -> a + v) // wrapped 1 + .cogroup(grouped2, (k, v, a) -> a + v) // wrapped 2 + .aggregate(() -> "", Named.as("aggregate"), Materialized.as("store")) // wrapped 3, store 1 + .toStream(Named.as("toStream"))// wrapped 4 + .to("output", Produced.as("sink")); + + builder.build(); + + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "aggregate-cogroup-agg-0", "aggregate-cogroup-agg-1", "aggregate-cogroup-merge", "toStream" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(4)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForMapValuesWithMaterializedStore() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source-table")) + .mapValues(v -> null, Named.as("map-values"), Materialized.as("map-values-store")) + .toStream(Named.as("to-stream")) + .to("output-topic", Produced.as("sink")); + builder.build(); + + assertThat(counter.wrappedProcessorNames(), + Matchers.containsInAnyOrder("source-table", "map-values", "to-stream")); + assertThat(counter.numUniqueStateStores(), is(1)); + assertThat(counter.numConnectedStateStores(), is(1)); + } + + @Test + public void shouldWrapProcessorAndStoreForFilterTable() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source-table")) + .filter((k, v) -> true, Named.as("filter"), Materialized.as("filter")) + .toStream(Named.as("to-stream")) + .to("output-topic", Produced.as("sink")); + builder.build(); + + assertThat(counter.wrappedProcessorNames(), + Matchers.containsInAnyOrder("source-table", "filter", "to-stream")); + assertThat(counter.numUniqueStateStores(), is(1)); + assertThat(counter.numConnectedStateStores(), is(1)); + } + + @Test + public void shouldWrapProcessorsForTableAggregate() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new 
StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source-table")) // wrapped 1, store 1 + .groupBy(KeyValue::new, Grouped.as("groupBy")) // wrapped 2 (implicit selectKey) + .count(Named.as("count")) // wrapped 3, store 2 + .toStream(Named.as("toStream"))// wrapped 4 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "source-table", "groupBy", "count", "toStream" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(4)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForTableReduce() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source-table")) // wrapped 1, store 1 + .groupBy(KeyValue::new, Grouped.as("groupBy")) // wrapped 2 (implicit selectKey) + .reduce((l, r) -> "", (l, r) -> "", Named.as("reduce"), Materialized.as("store")) // wrapped 3, store 2 + .toStream(Named.as("toStream"))// wrapped 4 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "source-table", "groupBy", "reduce", "toStream" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(4)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForStatelessOperators() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .filter((k, v) -> true, Named.as("filter-stream")) // wrapped 1 + .map(KeyValue::new, Named.as("map")) // wrapped 2 + .selectKey((k, v) -> k, Named.as("selectKey")) // wrapped 3 + .peek((k, v) -> { }, Named.as("peek")) // wrapped 4 + .flatMapValues(e -> new ArrayList<>(), Named.as("flatMap")) // wrapped 5 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "filter-stream", "map", "selectKey", "peek", "flatMap" + )); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(0)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(0)); + } + + @Test + public void shouldWrapProcessorsWhenMultipleTableOperators() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.stream("input", Consumed.as("source")) + .toTable(Named.as("to-table")) + .mapValues(v -> v, Named.as("map-values")) + .mapValues(v -> 
v, Named.as("map-values-stateful"), Materialized.as("map-values-stateful")) + .filter((k, v) -> true, Named.as("filter-table")) + .filter((k, v) -> true, Named.as("filter-table-stateful"), Materialized.as("filter-table-stateful")) + .toStream(Named.as("to-stream")) + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(6)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "to-table", "map-values", "map-values-stateful", + "filter-table", "filter-table-stateful", "to-stream" + )); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForUnmaterializedSourceTable() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source")) // wrapped 1 + .toStream(Named.as("toStream")) // wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "source", "toStream" + )); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(0)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(0)); + } + + @Test + public void shouldWrapProcessorsForMaterializedSourceTable() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + builder.table("input", Consumed.as("source"), Materialized.as("store")) // wrapped 1 + .toStream(Named.as("toStream")) // wrapped 2 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "source", "toStream" + )); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForStreamStreamInnerJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input-1", Consumed.as("source-1")); + final KStream stream2 = builder.stream("input-2", Consumed.as("source-2")); + + stream1.join( + stream2, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.ofTimeDifferenceAndGrace(Duration.ofDays(1), Duration.ofDays(1)), + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-JOINTHIS-0000000004", 
"KSTREAM-JOINOTHER-0000000005", + "KSTREAM-WINDOWED-0000000003", "KSTREAM-WINDOWED-0000000002", + "KSTREAM-MERGE-0000000006" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(4)); + } + + @Test + public void shouldWrapProcessorsForStreamStreamLeftJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input-1", Consumed.as("source-1")); + final KStream stream2 = builder.stream("input-2", Consumed.as("source-2")); + + stream1.leftJoin( + stream2, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.ofTimeDifferenceAndGrace(Duration.ofDays(1), Duration.ofDays(1)), + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-JOINTHIS-0000000004", "KSTREAM-OUTEROTHER-0000000005", + "KSTREAM-WINDOWED-0000000003", "KSTREAM-WINDOWED-0000000002", + "KSTREAM-MERGE-0000000006" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + + // 1 additional store due to spurious results fix for left/outer joins + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(3)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(6)); + } + + @Test + public void shouldWrapProcessorsForStreamStreamOuterJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input-1", Consumed.as("source-1")); + final KStream stream2 = builder.stream("input-2", Consumed.as("source-2")); + + stream1.outerJoin( + stream2, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.ofTimeDifferenceAndGrace(Duration.ofDays(1), Duration.ofDays(1)), + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-OUTERTHIS-0000000004", "KSTREAM-OUTEROTHER-0000000005", + "KSTREAM-WINDOWED-0000000003", "KSTREAM-WINDOWED-0000000002", + "KSTREAM-MERGE-0000000006" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + + // 1 additional store due to spurious results fix for left/outer joins + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(3)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(6)); + } + + @SuppressWarnings("deprecation") + @Test + public void shouldWrapProcessorsForStreamStreamOuterJoinWithoutSpuriousResultsFix() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + 
+ final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input-1", Consumed.as("source-1")); + final KStream stream2 = builder.stream("input-2", Consumed.as("source-2")); + + stream1.outerJoin( + stream2, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.of(Duration.ofDays(1)), // intentionally uses deprecated version of this API! + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-OUTERTHIS-0000000004", "KSTREAM-OUTEROTHER-0000000005", + "KSTREAM-WINDOWED-0000000003", "KSTREAM-WINDOWED-0000000002", + "KSTREAM-MERGE-0000000006" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(4)); + } + + @Test + public void shouldWrapProcessorsForStreamStreamSelfJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input", Consumed.as("source")); + + stream1.join( + stream1, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofDays(1)), + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-JOINTHIS-0000000003", "KSTREAM-JOINOTHER-0000000004", + "KSTREAM-WINDOWED-0000000001", "KSTREAM-WINDOWED-0000000002", + "KSTREAM-MERGE-0000000005" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(5)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(4)); + } + + @Test + public void shouldWrapProcessorsForStreamStreamSelfJoinWithSharedStoreOptimization() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + props.put(TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream1 = builder.stream("input", Consumed.as("source")); + + stream1.join( + stream1, + MockValueJoiner.TOSTRING_JOINER, + JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofDays(1)), + StreamJoined.as("ss-join")) + .to("output", Produced.as("sink")); + + final Properties properties = new Properties(); + properties.putAll(props); + builder.build(properties); + + // TODO: fix these names once we address https://issues.apache.org/jira/browse/KAFKA-18191 + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "KSTREAM-WINDOWED-0000000001", "KSTREAM-MERGE-0000000005" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + // only 1 store when topology optimizations enabled due to sharing 
self-join store + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForStreamTableJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream = builder.stream("input", Consumed.as("source-stream")); + final KTable table = builder.table("input-table", Consumed.as("source-table")); + + stream.join( + table, + MockValueJoiner.TOSTRING_JOINER, + Joined.as("st-join")) + .to("output", Produced.as("sink")); + + builder.build(); + + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "source-table", "st-join" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(1)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(1)); + } + + @Test + public void shouldWrapProcessorsForStreamTableJoinWithGracePeriod() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KStream stream = builder.stream("input", Consumed.as("source-stream")); + final KTable table = builder.table( + "input-table", + Consumed.as("versioned-source-table"), + Materialized.as(Stores.persistentVersionedKeyValueStore("table-store", Duration.ofDays(1))) + ); + + stream.join( + table, + MockValueJoiner.TOSTRING_JOINER, + Joined.as("st-join").withGracePeriod(Duration.ofDays(1))) + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "versioned-source-table", "st-join" + )); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(2)); + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(2)); + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(2)); + } + + @Test + public void shouldWrapProcessorsForTableTableInnerJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KTable t1 = builder.table("input1", Consumed.as("input1")); // 1 + final KTable t2 = builder.table("input2", Consumed.as("input2")); // 2 + + t1.join(t2, (v1, v2) -> v1 + v2, Named.as("join-processor"), Materialized.as("the_join")) // 3 (this), 4 (other), 5 (merger) + .toStream(Named.as("toStream")) // 6 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(6)); + assertThat(counter.wrappedProcessorNames().toString(), counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "input1", + "input2", + "join-processor-join-this", + "join-processor-join-other", + "join-processor", + "toStream" + )); + + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(3)); // 
one for join this, one for join that + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(3)); + } + + @Test + public void shouldWrapProcessorsForTableTableLeftJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KTable t1 = builder.table("input1", Consumed.as("input1")); // 1 + final KTable t2 = builder.table("input2", Consumed.as("input2")); // 2 + + t1.leftJoin(t2, (v1, v2) -> v1 + v2, Named.as("join-processor"), Materialized.as("the_join")) // 3 (this), 4 (other), 5 (merger) + .toStream(Named.as("toStream")) // 6 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(6)); + assertThat(counter.wrappedProcessorNames().toString(), counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "input1", + "input2", + "join-processor-join-this", + "join-processor-join-other", + "join-processor", + "toStream" + )); + + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(3)); // table1, table2, join materialized + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(3)); + } + + @Test + public void shouldWrapProcessorsForTableTableOuterJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KTable t1 = builder.table("input1", Consumed.as("input1")); // 1 + final KTable t2 = builder.table("input2", Consumed.as("input2")); // 2 + + t1.outerJoin(t2, (v1, v2) -> v1 + v2, Named.as("join-processor"), Materialized.as("the_join")) // 3 (this), 4 (other), 5 (merger) + .toStream(Named.as("toStream")) // 6 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(6)); + assertThat(counter.wrappedProcessorNames().toString(), counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "input1", + "input2", + "join-processor-join-this", + "join-processor-join-other", + "join-processor", + "toStream" + )); + + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(3)); // table1, table2, join materialized + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(3)); + } + + @Test + public void shouldWrapProcessorsForForeignKeyInnerJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KTable left = builder.table("input1", Consumed.as("input1")); + final KTable right = builder.table("input2", Consumed.as("input2")); + + left.join(right, + value -> value, + (v1, v2) -> v1 + v2, + TableJoined.as("join"), + Materialized.>as("materialized-store").withValueSerde(Serdes.String())) + .toStream(Named.as("toStream")) + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(9)); + 
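// A foreign-key join compiles into a subscription/response sub-topology, so the nine wrapped
// processors are the two source tables, toStream, and six internal FK-join processors; the four
// unique state stores are the two source table stores, the subscription store, and the join materialization.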
assertThat(counter.wrappedProcessorNames().toString(), counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "input1", + "input2", + "join-foreign-join-subscription", + "join-subscription-join-foreign", + "join-subscription-registration-processor", + "join-subscription-receive", + "join-result", + "join-subscription-response-resolver", + "toStream" + )); + + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(4)); // table1, table2, subscription store, and join materialized + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(5)); + } + + @Test + public void shouldWrapProcessorsForForeignKeyLeftJoin() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props))); + + final KTable left = builder.table("input1", Consumed.as("input1")); + final KTable right = builder.table("input2", Consumed.as("input2")); + + left.leftJoin(right, + value -> value, + (v1, v2) -> v1 + v2, + TableJoined.as("l-join"), + Materialized.>as("materialized-store").withValueSerde(Serdes.String())) + .toStream(Named.as("toStream")) // 6 + .to("output", Produced.as("sink")); + + builder.build(); + assertThat(counter.numWrappedProcessors(), CoreMatchers.is(9)); + assertThat(counter.wrappedProcessorNames().toString(), counter.wrappedProcessorNames(), Matchers.containsInAnyOrder( + "input1", + "input2", + "l-join-foreign-join-subscription", + "l-join-subscription-join-foreign", + "l-join-subscription-registration-processor", + "l-join-subscription-receive", + "l-join-result", + "l-join-subscription-response-resolver", + "toStream" + )); + + assertThat(counter.numUniqueStateStores(), CoreMatchers.is(4)); // table1, table2, subscription store, and join materialized + assertThat(counter.numConnectedStateStores(), CoreMatchers.is(5)); + } + @Test public void shouldAllowStreamsFromSameTopic() { builder.stream("topic"); diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java index 31e755c3b1f8e..5895d3a632a74 100644 --- a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java @@ -38,11 +38,13 @@ import org.apache.kafka.streams.processor.FailOnInvalidTimestamp; import org.apache.kafka.streams.processor.TimestampExtractor; import org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier; +import org.apache.kafka.streams.processor.internals.NoOpProcessorWrapper; import org.apache.kafka.streams.processor.internals.RecordCollectorTest; import org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor; import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; +import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -69,6 +71,7 @@ import static org.apache.kafka.streams.StreamsConfig.EXACTLY_ONCE_V2; import static org.apache.kafka.streams.StreamsConfig.MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH; import static org.apache.kafka.streams.StreamsConfig.MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH; +import static 
org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; import static org.apache.kafka.streams.StreamsConfig.RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG; import static org.apache.kafka.streams.StreamsConfig.RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG; import static org.apache.kafka.streams.StreamsConfig.STATE_DIR_CONFIG; @@ -1219,6 +1222,24 @@ public void shouldThrowOnInvalidClientSupplier() { assertThrows(ConfigException.class, () -> new StreamsConfig(props)); } + @Test + public void shouldReturnDefaultProcessorWrapperClass() { + final String defaultWrapperClassName = streamsConfig.getClass(PROCESSOR_WRAPPER_CLASS_CONFIG).getName(); + assertThat(defaultWrapperClassName, equalTo(NoOpProcessorWrapper.class.getName())); + } + + @Test + public void shouldAllowConfiguringProcessorWrapperWithClass() { + props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + new StreamsConfig(props); + } + + @Test + public void shouldAllowConfiguringProcessorWrapperWithClassName() { + props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class.getName()); + new StreamsConfig(props); + } + @Test public void shouldSupportAllUpgradeFromValues() { for (final UpgradeFromValues upgradeFrom : UpgradeFromValues.values()) { diff --git a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java index 7d886ffa23345..0dc0179c6e59a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java @@ -45,12 +45,15 @@ import org.apache.kafka.streams.state.WindowBytesStoreSupplier; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.internals.KeyValueStoreBuilder; +import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper; +import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper.WrapperRecorder; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockKeyValueStore; import org.apache.kafka.test.MockProcessorSupplier; import org.apache.kafka.test.MockValueJoiner; import org.apache.kafka.test.StreamsTestUtils; +import org.hamcrest.Matchers; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -66,11 +69,16 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.Set; import java.util.regex.Pattern; import static java.time.Duration.ofMillis; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; +import static org.apache.kafka.streams.utils.TestUtils.PROCESSOR_WRAPPER_COUNTER_CONFIG; +import static org.apache.kafka.streams.utils.TestUtils.dummyStreamsConfigMap; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -2249,7 +2257,7 @@ private Topology topologyWithStaticTopicName() { private TopologyDescription.Source addSource(final String sourceName, final String... 
sourceTopic) { - topology.addSource(null, sourceName, null, null, null, sourceTopic); + topology.addSource((Topology.AutoOffsetReset) null, sourceName, null, null, null, sourceTopic); final StringBuilder allSourceTopics = new StringBuilder(sourceTopic[0]); for (int i = 1; i < sourceTopic.length; ++i) { allSourceTopics.append(", ").append(sourceTopic[i]); @@ -2259,7 +2267,7 @@ private TopologyDescription.Source addSource(final String sourceName, private TopologyDescription.Source addSource(final String sourceName, final Pattern sourcePattern) { - topology.addSource(null, sourceName, null, null, null, sourcePattern); + topology.addSource((Topology.AutoOffsetReset) null, sourceName, null, null, null, sourcePattern); return new InternalTopologyBuilder.Source(sourceName, null, sourcePattern); } @@ -2415,6 +2423,41 @@ public void readOnlyStateStoresShouldNotLog() { assertThat(stateStoreFactory.loggingEnabled(), equalTo(false)); } + @Test + public void shouldWrapProcessors() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class); + + final WrapperRecorder counter = new WrapperRecorder(); + props.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, counter); + + final Topology topology = new Topology(new TopologyConfig(new StreamsConfig(props))); + + // Add a bit of randomness to the lambda-created processors to avoid them being + // optimized into a shared instance that will cause the ApiUtils#checkSupplier + // call to fail + final Random random = new Random(); + + topology.addSource("source", "topic"); + topology.addProcessor( + "p1", + () -> (Processor) record -> System.out.println("Processing: " + random.nextInt()), + "source" + ); + topology.addProcessor( + "p2", + () -> (Processor) record -> System.out.println("Processing: " + random.nextInt()), + "p1" + ); + topology.addProcessor( + "p3", + () -> (Processor) record -> System.out.println("Processing: " + random.nextInt()), + "p2" + ); + assertThat(counter.numWrappedProcessors(), is(3)); + assertThat(counter.wrappedProcessorNames(), Matchers.containsInAnyOrder("p1", "p2", "p3")); + } + @SuppressWarnings("deprecation") private TopologyConfig overrideDefaultStore(final String defaultStore) { final Properties topologyOverrides = new Properties(); diff --git a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/ClientMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/ClientMetricsTest.java index c5ab1a2e9c9de..21e65ce892ef2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/ClientMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/ClientMetricsTest.java @@ -111,6 +111,32 @@ public void shouldAddAliveStreamThreadsMetric() { ); } + @Test + public void shouldAddClientStateTelemetryMetric() { + final String name = "client-state"; + final String description = "The state of the Kafka Streams client"; + final Gauge stateProvider = (config, now) -> State.RUNNING.ordinal(); + setUpAndVerifyMutableMetric( + name, + description, + stateProvider, + () -> ClientMetrics.addClientStateTelemetryMetric(streamsMetrics, stateProvider) + ); + } + + @Test + public void shouldAddRecordingLevelMetric() { + final String name = "recording-level"; + final String description = "The metrics recording level of the Kafka Streams client"; + final int recordingLevel = 1; + setUpAndVerifyImmutableMetric( + name, + description, + recordingLevel, + () -> ClientMetrics.addClientRecordingLevelMetric(streamsMetrics, 
recordingLevel) + ); + } + @Test public void shouldGetFailedStreamThreadsSensor() { final String name = "failed-stream-threads"; @@ -159,4 +185,19 @@ private void setUpAndVerifyImmutableMetric(final String name, eq(value) ); } + + private void setUpAndVerifyImmutableMetric(final String name, + final String description, + final int value, + final Runnable metricAdder) { + + metricAdder.run(); + + verify(streamsMetrics).addClientLevelImmutableMetric( + eq(name), + eq(description), + eq(RecordingLevel.INFO), + eq(value) + ); + } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsClientMetricsDelegatingReporterTest.java b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsClientMetricsDelegatingReporterTest.java index 032911b47900c..113e96cf0ae62 100644 --- a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsClientMetricsDelegatingReporterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsClientMetricsDelegatingReporterTest.java @@ -26,7 +26,6 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import java.util.Arrays; @@ -75,26 +74,25 @@ public void tearDown() { } @Test - @DisplayName("Should register metrics from init method") public void shouldInitMetrics() { final List metrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree, kafkaMetricWithThreadIdTag); streamsClientMetricsDelegatingReporter.init(metrics); final List expectedMetrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree); - assertEquals(expectedMetrics, mockAdminClient.addedMetrics()); + assertEquals(expectedMetrics, mockAdminClient.addedMetrics(), + "Should register metrics from init method"); } @Test - @DisplayName("Should register client instance metrics only") public void shouldRegisterCorrectMetrics() { streamsClientMetricsDelegatingReporter.metricChange(kafkaMetricWithThreadIdTag); assertEquals(0, mockAdminClient.addedMetrics().size()); streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne); - assertEquals(1, mockAdminClient.addedMetrics().size()); + assertEquals(1, mockAdminClient.addedMetrics().size(), + "Should register client instance metrics only"); } @Test - @DisplayName("Should remove client instance metrics") public void metricRemoval() { streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne); streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricTwo); @@ -102,6 +100,7 @@ public void metricRemoval() { assertEquals(3, mockAdminClient.addedMetrics().size()); streamsClientMetricsDelegatingReporter.metricRemoval(streamClientMetricOne); - assertEquals(2, mockAdminClient.addedMetrics().size()); + assertEquals(2, mockAdminClient.addedMetrics().size(), + "Should remove client instance metrics"); } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsThreadMetricsDelegatingReporterTest.java b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsThreadMetricsDelegatingReporterTest.java index 03dbacb82b701..faf30334e73f0 100644 --- a/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsThreadMetricsDelegatingReporterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/internals/metrics/StreamsThreadMetricsDelegatingReporterTest.java @@ 
-18,7 +18,7 @@ package org.apache.kafka.streams.internals.metrics; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Measurable; @@ -27,7 +27,6 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import java.util.Arrays; @@ -63,7 +62,7 @@ public void setUp() { final Map noThreadIdTagMap = new HashMap<>(); noThreadIdTagMap.put("client-id", "foo"); - mockConsumer = new MockConsumer<>(OffsetResetStrategy.NONE); + mockConsumer = new MockConsumer<>(AutoOffsetResetStrategy.NONE.name()); streamsThreadMetricsDelegatingReporter = new StreamsThreadMetricsDelegatingReporter(mockConsumer, threadId, stateUpdaterId); final MetricName metricNameOne = new MetricName("metric-one", "test-group-one", "foo bar baz", threadIdTagMap); @@ -84,23 +83,22 @@ public void tearDown() { @Test - @DisplayName("Init method should register metrics it receives as parameters") public void shouldInitMetrics() { final List allMetrics = Arrays.asList(kafkaMetricOneHasThreadIdTag, kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag); final List expectedMetrics = Arrays.asList(kafkaMetricOneHasThreadIdTag, kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag); streamsThreadMetricsDelegatingReporter.init(allMetrics); - assertEquals(expectedMetrics, mockConsumer.addedMetrics()); + assertEquals(expectedMetrics, mockConsumer.addedMetrics(), + "Init method should register metrics it receives as parameters"); } @Test - @DisplayName("Should register metrics with thread-id in tag map") public void shouldRegisterMetrics() { streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricOneHasThreadIdTag); - assertEquals(kafkaMetricOneHasThreadIdTag, mockConsumer.addedMetrics().get(0)); + assertEquals(kafkaMetricOneHasThreadIdTag, mockConsumer.addedMetrics().get(0), + "Should register metrics with thread-id in tag map"); } @Test - @DisplayName("Should remove metrics") public void shouldRemoveMetrics() { streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricOneHasThreadIdTag); streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricTwoHasThreadIdTag); @@ -109,13 +107,14 @@ public void shouldRemoveMetrics() { assertEquals(expected, mockConsumer.addedMetrics()); streamsThreadMetricsDelegatingReporter.metricRemoval(kafkaMetricOneHasThreadIdTag); expected = Arrays.asList(kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag); - assertEquals(expected, mockConsumer.addedMetrics()); + assertEquals(expected, mockConsumer.addedMetrics(), + "Should remove metrics"); } @Test - @DisplayName("Should not register metrics without thread-id tag") public void shouldNotRegisterMetricsWithoutThreadIdTag() { streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricWithoutThreadIdTag); - assertEquals(0, mockConsumer.addedMetrics().size()); + assertEquals(0, mockConsumer.addedMetrics().size(), + "Should not register metrics without thread-id tag"); } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/MaterializedTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/MaterializedTest.java index 57f57a61f41a4..93726825fa882 100644 --- 
a/streams/src/test/java/org/apache/kafka/streams/kstream/MaterializedTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/MaterializedTest.java @@ -100,18 +100,12 @@ public void shouldThrowIllegalArgumentExceptionIfStoreSupplierAndStoreTypeBothSe @Test public void shouldThrowTopologyExceptionIfStoreNameExceedsMaxAllowedLength() { - final StringBuffer invalidStoreNameBuffer = new StringBuffer(); final int maxNameLength = 249; - - for (int i = 0; i < maxNameLength + 1; i++) { - invalidStoreNameBuffer.append('a'); - } - - final String invalidStoreName = invalidStoreNameBuffer.toString(); + final String invalidStoreName = "a".repeat(maxNameLength + 1); final TopologyException e = assertThrows(TopologyException.class, () -> Materialized.as(invalidStoreName)); assertEquals(e.getMessage(), "Invalid topology: Name is illegal, it can't be longer than " + maxNameLength + " characters, name: " + invalidStoreName); } -} \ No newline at end of file +} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java index ca4fd756cbc1b..01e833f1b976b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java @@ -32,7 +32,6 @@ import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.test.MockApiProcessorSupplier; -import org.apache.kafka.test.NoopValueTransformer; import org.apache.kafka.test.NoopValueTransformerWithKey; import org.junit.jupiter.api.Test; @@ -51,21 +50,6 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class AbstractStreamTest { - @SuppressWarnings("deprecation") - @Test - public void testToInternalValueTransformerSupplierSuppliesNewTransformers() { - final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier = - mock(org.apache.kafka.streams.kstream.ValueTransformerSupplier.class); - when(valueTransformerSupplier.get()) - .thenReturn(new NoopValueTransformer<>()) - .thenReturn(new NoopValueTransformer<>()); - final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier = - AbstractStream.toValueTransformerWithKeySupplier(valueTransformerSupplier); - valueTransformerWithKeySupplier.get(); - valueTransformerWithKeySupplier.get(); - valueTransformerWithKeySupplier.get(); - } - @Test public void testToInternalValueTransformerWithKeySupplierSuppliesNewTransformers() { final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java index 755944227b7e1..c761d1eda7cf5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilderTest.java @@ -16,11 +16,13 @@ */ package org.apache.kafka.streams.kstream.internals; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.KeyValue; import 
org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.TopologyException; +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.GlobalKTable; import org.apache.kafka.streams.kstream.JoinWindows; @@ -64,7 +66,6 @@ import static java.time.Duration.ofMillis; import static java.util.Arrays.asList; -import static org.apache.kafka.streams.Topology.AutoOffsetReset; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -81,7 +82,7 @@ public class InternalStreamsBuilderTest { private static final String APP_ID = "app-id"; private final InternalStreamsBuilder builder = new InternalStreamsBuilder(new InternalTopologyBuilder()); - private final ConsumedInternal consumed = new ConsumedInternal<>(); + private final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(null, null)); private final String storePrefix = "prefix-"; private final MaterializedInternal> materialized = new MaterializedInternal<>(Materialized.as("test-store"), builder, storePrefix); private final Properties props = StreamsTestUtils.getStreamsConfig(); @@ -287,40 +288,78 @@ public void shouldMapStateStoresToCorrectSourceTopics() { assertEquals(Collections.singletonList(APP_ID + "-KSTREAM-MAP-0000000003-repartition"), builder.internalTopologyBuilder.sourceTopicsForStore("count")); } + @Test + public void shouldAddTopicToNoneAutoOffsetResetList() { + final String topicName = "topic-1"; + final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(AutoOffsetReset.none())); + builder.stream(Collections.singleton(topicName), consumed); + builder.buildAndOptimizeTopology(); + + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.NONE)); + } + @Test public void shouldAddTopicToEarliestAutoOffsetResetList() { final String topicName = "topic-1"; - final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(AutoOffsetReset.EARLIEST)); + final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(AutoOffsetReset.earliest())); builder.stream(Collections.singleton(topicName), consumed); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(OffsetResetStrategy.EARLIEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.EARLIEST)); } @Test public void shouldAddTopicToLatestAutoOffsetResetList() { final String topicName = "topic-1"; - final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(AutoOffsetReset.LATEST)); + final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(AutoOffsetReset.latest())); + builder.stream(Collections.singleton(topicName), consumed); + builder.buildAndOptimizeTopology(); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.LATEST)); + } + + @Test + public void shouldAddTopicToDurationAutoOffsetResetList() { + final String topicName = "topic-1"; + + final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(new AutoOffsetResetInternal(AutoOffsetReset.byDuration(Duration.ofSeconds(42L))))); builder.stream(Collections.singleton(topicName), consumed); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), 
equalTo(OffsetResetStrategy.LATEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName).type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName).duration().get().toSeconds(), equalTo(42L)); + } + + @Test + public void shouldAddTableToNoneAutoOffsetResetList() { + final String topicName = "topic-1"; + builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.none())), materialized); + builder.buildAndOptimizeTopology(); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.NONE)); } @Test public void shouldAddTableToEarliestAutoOffsetResetList() { final String topicName = "topic-1"; - builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.EARLIEST)), materialized); + builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.earliest())), materialized); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(OffsetResetStrategy.EARLIEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.EARLIEST)); } @Test public void shouldAddTableToLatestAutoOffsetResetList() { final String topicName = "topic-1"; - builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.LATEST)), materialized); + builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.latest())), materialized); + builder.buildAndOptimizeTopology(); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(AutoOffsetResetStrategy.LATEST)); + } + + @Test + public void shouldAddTableToDurationAutoOffsetResetList() { + final String topicName = "topic-1"; + builder.table(topicName, new ConsumedInternal<>(Consumed.with(AutoOffsetResetInternal.byDuration(Duration.ofSeconds(42L)))), materialized); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(OffsetResetStrategy.LATEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName).type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName).duration().get().toSeconds(), equalTo(42L)); } @Test @@ -330,7 +369,7 @@ public void shouldNotAddTableToOffsetResetLists() { builder.table(topicName, consumed, materialized); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(OffsetResetStrategy.NONE)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicName), equalTo(null)); } @Test @@ -341,7 +380,7 @@ public void shouldNotAddRegexTopicsToOffsetResetLists() { builder.stream(topicPattern, consumed); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topic), equalTo(OffsetResetStrategy.NONE)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topic), equalTo(null)); } @Test @@ -349,10 +388,10 @@ public void shouldAddRegexTopicToEarliestAutoOffsetResetList() { final Pattern topicPattern = Pattern.compile("topic-\\d+"); final String topicTwo = "topic-500000"; - builder.stream(topicPattern, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.EARLIEST))); + builder.stream(topicPattern, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.earliest()))); 
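// These offset-reset tests now use the new AutoOffsetReset factory methods (none(), earliest(),
// latest(), byDuration(Duration)) in place of the old Topology.AutoOffsetReset enum constants,
// and verify the strategy recorded by the internal topology builder as an AutoOffsetResetStrategy.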
builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo), equalTo(OffsetResetStrategy.EARLIEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo), equalTo(AutoOffsetResetStrategy.EARLIEST)); } @Test @@ -360,10 +399,22 @@ public void shouldAddRegexTopicToLatestAutoOffsetResetList() { final Pattern topicPattern = Pattern.compile("topic-\\d+"); final String topicTwo = "topic-1000000"; - builder.stream(topicPattern, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.LATEST))); + builder.stream(topicPattern, new ConsumedInternal<>(Consumed.with(AutoOffsetReset.latest()))); + builder.buildAndOptimizeTopology(); + + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo), equalTo(AutoOffsetResetStrategy.LATEST)); + } + + @Test + public void shouldAddRegexTopicToDurationAutoOffsetResetList() { + final Pattern topicPattern = Pattern.compile("topic-\\d+"); + final String topicTwo = "topic-1000000"; + + builder.stream(topicPattern, new ConsumedInternal<>(Consumed.with(AutoOffsetResetInternal.byDuration(Duration.ofSeconds(42L))))); builder.buildAndOptimizeTopology(); - assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo), equalTo(OffsetResetStrategy.LATEST)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo).type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.internalTopologyBuilder.offsetResetStrategy(topicTwo).duration().get().toSeconds(), equalTo(42L)); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java deleted file mode 100644 index 5335128aa459d..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.kstream.internals.KStreamFlatTransform.KStreamFlatTransformProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentMatchers; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) -@SuppressWarnings("deprecation") -public class KStreamFlatTransformTest { - - private Number inputKey; - private Number inputValue; - - @Mock - private org.apache.kafka.streams.kstream.Transformer>> transformer; - @Mock - private InternalProcessorContext context; - private InOrder inOrder; - - private KStreamFlatTransformProcessor processor; - - @BeforeEach - public void setUp() { - inputKey = 1; - inputValue = 10; - inOrder = inOrder(context); - processor = new KStreamFlatTransformProcessor<>(transformer); - } - - @Test - public void shouldInitialiseFlatTransformProcessor() { - processor.init(context); - - verify(transformer).init(context); - } - - @Test - public void shouldTransformInputRecordToMultipleOutputRecords() { - final Iterable> outputRecords = Arrays.asList( - KeyValue.pair(2, 20), - KeyValue.pair(3, 30), - KeyValue.pair(4, 40)); - - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(outputRecords); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - for (final KeyValue outputRecord : outputRecords) { - inOrder.verify(context).forward(new Record<>(outputRecord.key, outputRecord.value, 0L)); - } - } - - @Test - public void shouldAllowEmptyListAsResultOfTransform() { - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(Collections.emptyList()); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldAllowNullAsResultOfTransform() { - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(null); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldCloseFlatTransformProcessor() { - processor.close(); - - verify(transformer).close(); - } - - @Test - public void shouldGetFlatTransformProcessor() { - @SuppressWarnings("unchecked") - final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier = - mock(org.apache.kafka.streams.kstream.TransformerSupplier.class); - final KStreamFlatTransform processorSupplier = - new KStreamFlatTransform<>(transformerSupplier); - - when(transformerSupplier.get()).thenReturn(transformer); - - final Processor 
processor = processorSupplier.get(); - - assertInstanceOf(KStreamFlatTransformProcessor.class, processor); - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java deleted file mode 100644 index 50a636f349db3..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.kstream.internals.KStreamFlatTransformValues.KStreamFlatTransformValuesProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentMatchers; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) -public class KStreamFlatTransformValuesTest { - - private Integer inputKey; - private Integer inputValue; - - @Mock - private ValueTransformerWithKey> valueTransformer; - @Mock - private InternalProcessorContext context; - private InOrder inOrder; - - private KStreamFlatTransformValuesProcessor processor; - - @BeforeEach - public void setUp() { - inputKey = 1; - inputValue = 10; - inOrder = inOrder(context); - processor = new KStreamFlatTransformValuesProcessor<>(valueTransformer); - } - - @Test - public void shouldInitializeFlatTransformValuesProcessor() { - processor.init(context); - - verify(valueTransformer).init(ArgumentMatchers.isA(ForwardingDisabledProcessorContext.class)); - } - - @Test - public void shouldTransformInputRecordToMultipleOutputValues() { - final Iterable outputValues = Arrays.asList( - 
"Hello", - "Blue", - "Planet"); - - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(outputValues); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - for (final String outputValue : outputValues) { - inOrder.verify(context).forward(new Record<>(inputKey, outputValue, 0L)); - } - } - - @Test - public void shouldEmitNoRecordIfTransformReturnsEmptyList() { - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(Collections.emptyList()); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldEmitNoRecordIfTransformReturnsNull() { - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(null); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldCloseFlatTransformValuesProcessor() { - processor.close(); - - verify(valueTransformer).close(); - } - - @Test - public void shouldGetFlatTransformValuesProcessor() { - @SuppressWarnings("unchecked") - final ValueTransformerWithKeySupplier> valueTransformerSupplier = - mock(ValueTransformerWithKeySupplier.class); - final KStreamFlatTransformValues processorSupplier = - new KStreamFlatTransformValues<>(valueTransformerSupplier); - - when(valueTransformerSupplier.get()).thenReturn(valueTransformer); - - final Processor processor = processorSupplier.get(); - - assertInstanceOf(KStreamFlatTransformValuesProcessor.class, processor); - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java index 226fc357a6a3c..26b2ea197c3a6 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java @@ -48,15 +48,13 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.processor.FailOnInvalidTimestamp; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.TopicNameExtractor; import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor; -import org.apache.kafka.streams.processor.api.ContextualProcessor; import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; import org.apache.kafka.streams.processor.api.FixedKeyRecord; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorTopology; @@ -113,60 +111,6 @@ public class KStreamImplTest { private final Consumed stringConsumed = Consumed.with(Serdes.String(), Serdes.String()); private final MockApiProcessorSupplier processorSupplier = new MockApiProcessorSupplier<>(); private final MockApiFixedKeyProcessorSupplier fixedKeyProcessorSupplier = new MockApiFixedKeyProcessorSupplier<>(); - 
@SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier = - () -> new org.apache.kafka.streams.kstream.ValueTransformer() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public String transform(final String value) { - return value; - } - - @Override - public void close() {} - }; - private final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier = - () -> new ValueTransformerWithKey() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public String transform(final String key, final String value) { - return value; - } - - @Override - public void close() {} - }; - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.ValueTransformerSupplier> flatValueTransformerSupplier = - () -> new org.apache.kafka.streams.kstream.ValueTransformer>() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public Iterable transform(final String value) { - return Collections.singleton(value); - } - - @Override - public void close() {} - }; - private final ValueTransformerWithKeySupplier> flatValueTransformerWithKeySupplier = - () -> new ValueTransformerWithKey>() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public Iterable transform(final String key, final String value) { - return Collections.singleton(value); - } - - @Override - public void close() {} - }; private StreamsBuilder builder; private KStream testStream; @@ -1213,9 +1157,6 @@ public void shouldPreserveSerdesForOperators() { assertEquals(((AbstractStream) stream1.flatMapValues(flatMapper)).keySerde(), consumedInternal.keySerde()); assertNull(((AbstractStream) stream1.flatMapValues(flatMapper)).valueSerde()); - assertEquals(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).keySerde(), consumedInternal.keySerde()); - assertNull(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).valueSerde()); - assertNull(((AbstractStream) stream1.merge(stream1)).keySerde()); assertNull(((AbstractStream) stream1.merge(stream1)).valueSerde()); @@ -1589,7 +1530,7 @@ public void shouldNotAllowBadProcessSupplierOnProcessWithNamed() { processorSupplier.get(); final IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, - () -> testStream.process(() -> processor, Named.as("flatTransformer")) + () -> testStream.process(() -> processor, Named.as("processor")) ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } @@ -1606,496 +1547,49 @@ public void shouldNotAllowBadProcessSupplierOnProcessWithNamedAndStores() { } @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowBadTransformerSupplierOnTransformValues() { - final org.apache.kafka.streams.kstream.ValueTransformer transformer = valueTransformerSupplier.get(); + public void shouldNotAllowBadProcessSupplierOnProcessValues() { + final org.apache.kafka.streams.processor.api.FixedKeyProcessor processor = + fixedKeyProcessorSupplier.get(); final IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> testStream.transformValues(() -> transformer) + IllegalArgumentException.class, + () -> testStream.processValues(() -> processor) ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } @Test - @SuppressWarnings("deprecation") - public void 
shouldNotAllowBadTransformerSupplierOnTransformValuesWithNamed() { - final org.apache.kafka.streams.kstream.ValueTransformer transformer = valueTransformerSupplier.get(); + public void shouldNotAllowBadProcessSupplierOnProcessValuesWithStores() { + final org.apache.kafka.streams.processor.api.FixedKeyProcessor processor = + fixedKeyProcessorSupplier.get(); final IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> testStream.transformValues(() -> transformer, Named.as("transformer")) + IllegalArgumentException.class, + () -> testStream.processValues(() -> processor, "storeName") ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues((org.apache.kafka.streams.kstream.ValueTransformerSupplier) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowBadValueTransformerWithKeySupplierOnTransformValues() { - final ValueTransformerWithKey transformer = valueTransformerWithKeySupplier.get(); + public void shouldNotAllowBadProcessSupplierOnProcessValuesWithNamed() { + final org.apache.kafka.streams.processor.api.FixedKeyProcessor processor = + fixedKeyProcessorSupplier.get(); final IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> testStream.transformValues(() -> transformer) + IllegalArgumentException.class, + () -> testStream.processValues(() -> processor, Named.as("processor")) ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowBadValueTransformerWithKeySupplierOnTransformValuesWithNamed() { - final ValueTransformerWithKey transformer = valueTransformerWithKeySupplier.get(); + public void shouldNotAllowBadProcessSupplierOnProcessValuesWithNamedAndStores() { + final org.apache.kafka.streams.processor.api.FixedKeyProcessor processor = + fixedKeyProcessorSupplier.get(); final IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> testStream.transformValues(() -> transformer, Named.as("transformer")) + IllegalArgumentException.class, + () -> testStream.processValues(() -> processor, Named.as("processor"), "storeName") ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues((ValueTransformerWithKeySupplier) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - 
@Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (ValueTransformerWithKeySupplier) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier) null, - Named.as("valueTransformer"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (ValueTransformerWithKeySupplier) null, - Named.as("valueTransformerWithKey"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier) null, - Named.as("valueTransformer"), - "storeName")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - (ValueTransformerWithKeySupplier) null, - Named.as("valueTransformerWithKey"), - "storeName")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnTransformValuesWithValueTransformerSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnTransformValuesWithValueTransformerWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnTransformValuesWithValueTransformerSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void 
shouldNotAllowNullStoreNameOnTransformValuesWithValueTransformerWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnTransformValuesWithValueTransformerSupplierWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, - Named.as("valueTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnTransformValuesWithValueTransformerWithKeySupplierWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - Named.as("valueTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnTransformValuesWithValueTransformerSupplierWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, - Named.as("valueTransformer"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnTransformValuesWithValueTransformerWithKeySupplierWithName() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - Named.as("valueTransformerWithKey"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnTransformValuesWithValueTransformerSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnTransformValuesWithValueTransformerWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnTransformValuesWithValueTransformerSupplierAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.transformValues( - valueTransformerSupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnTransformValuesWithValueTransformerWithKeySupplierAndStores() { - final NullPointerException exception = assertThrows( - 
NullPointerException.class, - () -> testStream.transformValues( - valueTransformerWithKeySupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues((org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues((ValueTransformerWithKeySupplier>) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (ValueTransformerWithKeySupplier>) null, - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - Named.as("flatValueTransformer"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (ValueTransformerWithKeySupplier>) null, - Named.as("flatValueWithKeyTransformer"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - Named.as("flatValueTransformer"), - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - 
(ValueTransformerWithKeySupplier>) null, - Named.as("flatValueWitKeyTransformer"), - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - Named.as("flatValueTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueWithKeySupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - Named.as("flatValueWitKeyTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueSupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - Named.as("flatValueTransformer"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueWithKeySupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - 
Named.as("flatValueWitKeyTransformer"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueSupplierAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueWithKeySupplierAndStore() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - @Test public void shouldNotAllowNullProcessSupplierOnProcess() { final NullPointerException exception = assertThrows( @@ -2316,9 +1810,8 @@ public void shouldNotMaterializedKTableFromKStream() { } } - @SuppressWarnings("deprecation") @Test - public void shouldProcessWithOldProcessorAndState() { + public void shouldProcessWithProcessorAndState() { final Consumed consumed = Consumed.with(Serdes.String(), Serdes.String()); final StreamsBuilder builder = new StreamsBuilder(); @@ -2332,16 +1825,19 @@ public void shouldProcessWithOldProcessorAndState() { )); builder.stream(input, consumed) - .process(() -> new org.apache.kafka.streams.processor.Processor() { + .process(() -> new Processor() { private KeyValueStore sumStore; @Override - public void init(final ProcessorContext context) { + public void init(final ProcessorContext context) { this.sumStore = context.getStateStore("sum"); } @Override - public void process(final String key, final String value) { + public void process(final Record record) { + final String key = record.key(); + final String value = record.value(); + final Integer counter = sumStore.get(key); if (counter == null) { sumStore.putIfAbsent(key, value.length()); @@ -2353,10 +1849,6 @@ public void process(final String key, final String value) { } } } - - @Override - public void close() { - } }, Named.as("p"), "sum"); final String topologyDescription = builder.build().describe().toString(); @@ -2395,9 +1887,8 @@ public void close() { } } - @SuppressWarnings("deprecation") @Test - public void shouldBindStateWithOldProcessorSupplier() { + public void shouldBindStateWithProcessorSupplier() { final Consumed consumed = Consumed.with(Serdes.String(), Serdes.String()); final StreamsBuilder builder = new StreamsBuilder(); @@ -2405,20 +1896,23 @@ public void 
shouldBindStateWithOldProcessorSupplier() { final String input = "input"; builder.stream(input, consumed) - .process(new org.apache.kafka.streams.processor.ProcessorSupplier() { + .process(new ProcessorSupplier() { @Override - public org.apache.kafka.streams.processor.Processor get() { - return new org.apache.kafka.streams.processor.Processor() { + public Processor get() { + return new Processor<>() { private KeyValueStore sumStore; @Override - public void init(final ProcessorContext context) { + public void init(final ProcessorContext context) { this.sumStore = context.getStateStore("sum"); } @Override - public void process(final String key, final String value) { + public void process(final Record record) { + final String key = record.key(); + final String value = record.value(); + final Integer counter = sumStore.get(key); if (counter == null) { sumStore.putIfAbsent(key, value.length()); @@ -2430,14 +1924,9 @@ public void process(final String key, final String value) { } } } - - @Override - public void close() { - } }; } - @SuppressWarnings("unchecked") @Override public Set> stores() { final Set> stores = new HashSet<>(); @@ -2486,72 +1975,6 @@ public Set> stores() { } } - @Test - public void shouldBindStateWithOldProcessor() { - final Consumed consumed = Consumed.with(Serdes.String(), Serdes.String()); - - final StreamsBuilder builder = new StreamsBuilder(); - - final String input = "input"; - final String output = "output"; - - builder.stream(input, consumed) - .process(() -> new ContextualProcessor() { - @Override - public void process(final Record record) { - context().forward(record.withValue(record.value().length())); - } - }, Named.as("p")) - .to(output, Produced.valueSerde(Serdes.Integer())); - - final String topologyDescription = builder.build().describe().toString(); - - assertThat( - topologyDescription, - equalTo("Topologies:\n" + - " Sub-topology: 0\n" + - " Source: KSTREAM-SOURCE-0000000000 (topics: [input])\n" + - " --> p\n" + - " Processor: p (stores: [])\n" + - " --> KSTREAM-SINK-0000000001\n" + - " <-- KSTREAM-SOURCE-0000000000\n" + - " Sink: KSTREAM-SINK-0000000001 (topic: output)\n" + - " <-- p\n\n") - ); - - try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { - final TestInputTopic inputTopic = - driver.createInputTopic( - input, - new StringSerializer(), - new StringSerializer() - ); - final TestOutputTopic outputTopic = - driver.createOutputTopic( - output, - new StringDeserializer(), - new IntegerDeserializer() - ); - - inputTopic.pipeInput("A", "0", 5L); - inputTopic.pipeInput("B", "00", 100L); - inputTopic.pipeInput("C", "000", 0L); - inputTopic.pipeInput("D", "0000", 0L); - inputTopic.pipeInput("A", "00000", 10L); - inputTopic.pipeInput("A", "000000", 8L); - - final List> outputExpectRecords = new ArrayList<>(); - outputExpectRecords.add(new TestRecord<>("A", 1, Instant.ofEpochMilli(5L))); - outputExpectRecords.add(new TestRecord<>("B", 2, Instant.ofEpochMilli(100L))); - outputExpectRecords.add(new TestRecord<>("C", 3, Instant.ofEpochMilli(0L))); - outputExpectRecords.add(new TestRecord<>("D", 4, Instant.ofEpochMilli(0L))); - outputExpectRecords.add(new TestRecord<>("A", 5, Instant.ofEpochMilli(10L))); - outputExpectRecords.add(new TestRecord<>("A", 6, Instant.ofEpochMilli(8L))); - - assertEquals(outputTopic.readRecordsToList(), outputExpectRecords); - } - } - @Test public void shouldProcessValues() { final Consumed consumed = Consumed.with(Serdes.String(), Serdes.String()); diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index f388441f3561d..d9bd938264275 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -34,11 +34,11 @@ import org.apache.kafka.streams.kstream.JoinWindows; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.StreamJoined; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalTopicConfig; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; +import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; import org.apache.kafka.streams.state.DslWindowParams; import org.apache.kafka.streams.state.KeyValueStore; @@ -57,7 +57,7 @@ import org.apache.kafka.test.GenericInMemoryKeyValueStore; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.MockValueJoiner; import org.apache.kafka.test.StreamsTestUtils; @@ -452,43 +452,46 @@ public void shouldJoinWithNonTimestampedStore() { @Test public void shouldThrottleEmitNonJoinedOuterRecordsEvenWhenClockDrift() { - /** + /* * This test is testing something internal to [[KStreamKStreamJoin]], so we had to setup low-level api manually. 
*/ final KStreamImplJoin.TimeTrackerSupplier tracker = new KStreamImplJoin.TimeTrackerSupplier(); - final KStreamKStreamJoinRightSide join = new KStreamKStreamJoinRightSide<>( + final WindowStoreBuilder otherStoreBuilder = new WindowStoreBuilder<>( + new InMemoryWindowBytesStoreSupplier( "other", - new JoinWindowsInternal(JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(1000))), - (key, v1, v2) -> v1 + v2, - true, - Optional.of("outer"), - tracker); + 1000L, + 100, + false), + Serdes.String(), + Serdes.String(), + new MockTime()); + final KeyValueStoreBuilder, LeftOrRightValue> outerStoreBuilder = new KeyValueStoreBuilder<>( + new InMemoryKeyValueBytesStoreSupplier("outer"), + new TimestampedKeyAndJoinSideSerde<>(Serdes.String()), + new LeftOrRightValueSerde<>(Serdes.String(), Serdes.String()), + new MockTime() + ); + final KStreamKStreamJoinRightSide join = new KStreamKStreamJoinRightSide<>( + new JoinWindowsInternal(JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(1000))), + (key, v1, v2) -> v1 + v2, + true, + tracker, + StoreBuilderWrapper.wrapStoreBuilder(otherStoreBuilder), + Optional.of(StoreBuilderWrapper.wrapStoreBuilder(outerStoreBuilder))); + final Processor joinProcessor = join.get(); - final MockInternalNewProcessorContext procCtx = new MockInternalNewProcessorContext<>(); - final WindowStore otherStore = new WindowStoreBuilder<>( - new InMemoryWindowBytesStoreSupplier( - "other", - 1000L, - 100, - false), - Serdes.String(), - Serdes.String(), - new MockTime()).build(); + final MockInternalProcessorContext procCtx = new MockInternalProcessorContext<>(); + final WindowStore otherStore = otherStoreBuilder.build(); - final KeyValueStore, LeftOrRightValue> outerStore = Mockito.spy( - new KeyValueStoreBuilder<>( - new InMemoryKeyValueBytesStoreSupplier("outer"), - new TimestampedKeyAndJoinSideSerde<>(Serdes.String()), - new LeftOrRightValueSerde<>(Serdes.String(), Serdes.String()), - new MockTime() - ).build()); + final KeyValueStore, LeftOrRightValue> outerStore = + Mockito.spy(outerStoreBuilder.build()); final GenericInMemoryKeyValueStore rootStore = new GenericInMemoryKeyValueStore<>("root"); - otherStore.init((StateStoreContext) procCtx, rootStore); + otherStore.init(procCtx, rootStore); procCtx.addStateStore(otherStore); - outerStore.init((StateStoreContext) procCtx, rootStore); + outerStore.init(procCtx, rootStore); procCtx.addStateStore(outerStore); joinProcessor.init(procCtx); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamProcessorApiTest.java similarity index 95% rename from streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java rename to streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamProcessorApiTest.java index 99282e0c7357e..970fcff0d342c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamProcessorApiTest.java @@ -34,7 +34,6 @@ import org.apache.kafka.streams.state.Stores; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import java.util.ArrayList; @@ -47,16 +46,14 @@ import static java.util.Arrays.asList; -public class KStreamNewProcessorApiTest { +public class KStreamProcessorApiTest { @Test - @DisplayName("Should attach the state store using 
ConnectedStoreProvider") void shouldGetStateStoreWithConnectedStoreProvider() { runTest(false); } @Test - @DisplayName("Should attach the state store StreamBuilder.addStateStore") void shouldGetStateStoreWithStreamBuilder() { runTest(true); } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java index adf7b32c70836..dd3960c17ec4f 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java @@ -33,7 +33,6 @@ import org.apache.kafka.streams.kstream.Merger; import org.apache.kafka.streams.kstream.SessionWindows; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -64,6 +63,7 @@ import static java.time.Duration.ofMillis; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.apache.kafka.streams.utils.TestUtils.mockStoreFactory; import static org.apache.kafka.test.StreamsTestUtils.getMetricByName; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.is; @@ -87,7 +87,7 @@ public class KStreamSessionWindowAggregateProcessorTest { private final Merger sessionMerger = (aggKey, aggOne, aggTwo) -> aggOne + aggTwo; private final List, Change>> results = new ArrayList<>(); - private InternalMockProcessorContext, Change> context; + private InternalMockProcessorContext, Change> mockContext; private KStreamSessionWindowAggregate sessionAggregator; private Processor, Change> processor; private SessionStore sessionStore; @@ -104,7 +104,7 @@ private void setup(final EmitStrategy.StrategyType inputType, final boolean enab prop.put(StreamsConfig.InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); final StreamsConfig config = new StreamsConfig(prop); - context = new InternalMockProcessorContext, Change>( + mockContext = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), @@ -126,7 +126,7 @@ public , V extends Change> void forward(final R sessionAggregator = new KStreamSessionWindowAggregate<>( SessionWindows.ofInactivityGapWithNoGrace(ofMillis(GAP_MS)), - STORE_NAME, + mockStoreFactory(STORE_NAME), emitStrategy, initializer, aggregator, @@ -139,11 +139,11 @@ public , V extends Change> void forward(final R // Set initial timestamp for CachingSessionStore to prepare entry from as default // InternalMockProcessorContext#timestamp returns -1. 
- context.setTime(0L); - TaskMetrics.droppedRecordsSensor(threadId, context.taskId().toString(), streamsMetrics); + mockContext.setTime(0L); + TaskMetrics.droppedRecordsSensor(threadId, mockContext.taskId().toString(), streamsMetrics); initStore(enableCaching); - processor.init(context); + processor.init(mockContext); } private void initStore(final boolean enableCaching) { @@ -162,7 +162,7 @@ private void initStore(final boolean enableCaching) { sessionStore.close(); } sessionStore = storeBuilder.build(); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(mockContext, sessionStore); } @AfterEach @@ -191,13 +191,19 @@ public void shouldMergeSessions(final EmitStrategy.StrategyType inputType) { setup(inputType, true); final String sessionId = "mel"; processor.process(new Record<>(sessionId, "first", 0L)); - assertTrue(sessionStore.findSessions(sessionId, 0, 0).hasNext()); + try (final KeyValueIterator, Long> iterator = sessionStore.findSessions(sessionId, 0, 0)) { + assertTrue(iterator.hasNext()); + } // move time beyond gap processor.process(new Record<>(sessionId, "second", GAP_MS + 1)); - assertTrue(sessionStore.findSessions(sessionId, GAP_MS + 1, GAP_MS + 1).hasNext()); + try (final KeyValueIterator, Long> iterator = sessionStore.findSessions(sessionId, GAP_MS + 1, GAP_MS + 1)) { + assertTrue(iterator.hasNext()); + } // should still exist as not within gap - assertTrue(sessionStore.findSessions(sessionId, 0, 0).hasNext()); + try (final KeyValueIterator, Long> iterator = sessionStore.findSessions(sessionId, 0, 0)) { + assertTrue(iterator.hasNext()); + } // move time back processor.process(new Record<>(sessionId, "third", GAP_MS / 2)); @@ -376,7 +382,7 @@ public void shouldHandleMultipleSessionsAndMerging(final EmitStrategy.StrategyTy public void shouldGetAggregatedValuesFromValueGetter(final EmitStrategy.StrategyType inputType) { setup(inputType, true); final KTableValueGetter, Long> getter = sessionAggregator.view().get(); - getter.init(context); + getter.init(mockContext); processor.process(new Record<>("a", "1", 0L)); processor.process(new Record<>("a", "1", GAP_MS + 1)); processor.process(new Record<>("a", "2", GAP_MS + 1)); @@ -394,7 +400,7 @@ public void shouldImmediatelyForwardNewSessionWhenNonCachedStore(final EmitStrat return; initStore(false); - processor.init(context); + processor.init(mockContext); processor.process(new Record<>("a", "1", 0L)); processor.process(new Record<>("b", "1", 0L)); @@ -427,7 +433,7 @@ public void shouldImmediatelyForwardRemovedSessionsWhenMerging(final EmitStrateg return; initStore(false); - processor.init(context); + processor.init(mockContext); processor.process(new Record<>("a", "1", 0L)); processor.process(new Record<>("a", "1", 5L)); @@ -454,7 +460,7 @@ public void shouldImmediatelyForwardRemovedSessionsWhenMerging(final EmitStrateg @EnumSource(EmitStrategy.StrategyType.class) public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics(final EmitStrategy.StrategyType inputType) { setup(inputType, false); - context.setRecordContext( + mockContext.setRecordContext( new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()) ); @@ -474,7 +480,7 @@ public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics(final EmitStr assertEquals( 1.0, - getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue() + getMetricByName(mockContext.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue() ); } @@ -484,31 +490,31 @@ public void 
shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace(final EmitStrat setup(inputType, false); final Processor, Change> processor = new KStreamSessionWindowAggregate<>( SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(0L)), - STORE_NAME, + mockStoreFactory(STORE_NAME), EmitStrategy.onWindowUpdate(), initializer, aggregator, sessionMerger ).get(); - processor.init(context); + processor.init(mockContext); // dummy record to establish stream time = 0 - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 0L)); // record arrives on time, should not be skipped - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("OnTime1", "1", 0L)); // dummy record to advance stream time = 11, 10 for gap time plus 1 to place outside window - context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 11L)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) { // record is late - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("Late1", "1", 0L)); assertThat( @@ -551,39 +557,39 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace(final EmitSt setup(inputType, false); final Processor, Change> processor = new KStreamSessionWindowAggregate<>( SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1L)), - STORE_NAME, + mockStoreFactory(STORE_NAME), EmitStrategy.onWindowUpdate(), initializer, aggregator, sessionMerger ).get(); - processor.init(context); + processor.init(mockContext); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) { // dummy record to establish stream time = 0 - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 0L)); // record arrives on time, should not be skipped - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("OnTime1", "1", 0L)); // dummy record to advance stream time = 11, 10 for gap time plus 1 to place at edge of window - context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 11L)); // delayed record arrives on time, should not be skipped - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); 
processor.process(new Record<>("OnTime2", "1", 0L)); // dummy record to advance stream time = 12, 10 for gap time plus 2 to place outside window - context.setRecordContext(new ProcessorRecordContext(12, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(12, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 12L)); // delayed record arrives late - context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); + mockContext.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("Late1", "1", 0L)); assertThat( diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValuesTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValuesTest.java deleted file mode 100644 index 5cb51ae7bebb1..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamTransformValuesTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.common.serialization.IntegerSerializer; -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.streams.KeyValueTimestamp; -import org.apache.kafka.streams.StreamsBuilder; -import org.apache.kafka.streams.TestInputTopic; -import org.apache.kafka.streams.TopologyTestDriver; -import org.apache.kafka.streams.kstream.Consumed; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.test.MockProcessorSupplier; -import org.apache.kafka.test.NoOpValueTransformerWithKeySupplier; -import org.apache.kafka.test.StreamsTestUtils; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Properties; - -import static org.hamcrest.CoreMatchers.isA; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.mockito.Mockito.mock; - -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) -public class KStreamTransformValuesTest { - private final String topicName = "topic"; - private final MockProcessorSupplier supplier = new MockProcessorSupplier<>(); - private final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.Integer(), Serdes.Integer()); - private InternalProcessorContext context = mock(InternalProcessorContext.class); - - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. - @Test - public void testTransform() { - final StreamsBuilder builder = new StreamsBuilder(); - - final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier = - () -> new org.apache.kafka.streams.kstream.ValueTransformer() { - private int total = 0; - - @Override - public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { } - - @Override - public Integer transform(final Number value) { - total += value.intValue(); - return total; - } - - @Override - public void close() { } - }; - - final int[] expectedKeys = {1, 10, 100, 1000}; - - final KStream stream; - stream = builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.Integer())); - stream.transformValues(valueTransformerSupplier).process(supplier); - - try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { - for (final int expectedKey : expectedKeys) { - final TestInputTopic inputTopic = - driver.createInputTopic(topicName, new IntegerSerializer(), new IntegerSerializer()); - inputTopic.pipeInput(expectedKey, expectedKey * 10, expectedKey / 2L); - } - } - final KeyValueTimestamp[] expected = {new KeyValueTimestamp<>(1, 10, 0), - new KeyValueTimestamp<>(10, 110, 5), - new KeyValueTimestamp<>(100, 1110, 50), - new KeyValueTimestamp<>(1000, 11110, 500)}; - - assertArrayEquals(expected, supplier.theCapturedProcessor().processed().toArray()); - } - - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. 
- @Test - public void testTransformWithKey() { - final StreamsBuilder builder = new StreamsBuilder(); - - final ValueTransformerWithKeySupplier valueTransformerSupplier = - () -> new ValueTransformerWithKey() { - private int total = 0; - - @Override - public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { } - - @Override - public Integer transform(final Integer readOnlyKey, final Number value) { - total += value.intValue() + readOnlyKey; - return total; - } - - @Override - public void close() { } - }; - - final int[] expectedKeys = {1, 10, 100, 1000}; - - final KStream stream; - stream = builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.Integer())); - stream.transformValues(valueTransformerSupplier).process(supplier); - - try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { - final TestInputTopic inputTopic = - driver.createInputTopic(topicName, new IntegerSerializer(), new IntegerSerializer()); - for (final int expectedKey : expectedKeys) { - inputTopic.pipeInput(expectedKey, expectedKey * 10, expectedKey / 2L); - } - } - final KeyValueTimestamp[] expected = {new KeyValueTimestamp<>(1, 11, 0), - new KeyValueTimestamp<>(10, 121, 5), - new KeyValueTimestamp<>(100, 1221, 50), - new KeyValueTimestamp<>(1000, 12221, 500)}; - - assertArrayEquals(expected, supplier.theCapturedProcessor().processed().toArray()); - } - - @SuppressWarnings("unchecked") - @Test - public void shouldInitializeTransformerWithForwardDisabledProcessorContext() { - final NoOpValueTransformerWithKeySupplier transformer = new NoOpValueTransformerWithKeySupplier<>(); - final KStreamTransformValues transformValues = new KStreamTransformValues<>(transformer); - final Processor processor = transformValues.get(); - - processor.init(context); - - assertThat(transformer.context, isA((Class) ForwardingDisabledProcessorContext.class)); - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java index 14b77a7cc300e..4bae5b3387784 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java @@ -46,6 +46,7 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorNode; +import org.apache.kafka.streams.processor.internals.StoreFactory; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.TimestampedWindowStore; import org.apache.kafka.streams.state.WindowBytesStoreSupplier; @@ -56,7 +57,7 @@ import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockInitializer; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -76,6 +77,7 @@ import static java.util.Arrays.asList; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.apache.kafka.streams.utils.TestUtils.mockStoreFactory; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItem; import static 
org.hamcrest.CoreMatchers.hasItems; @@ -90,6 +92,7 @@ public class KStreamWindowAggregateTest { private static final String WINDOW_STORE_NAME = "dummy-store-name"; private final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.String(), Serdes.String()); private final String threadId = Thread.currentThread().getName(); + private final StoreFactory storeFactory = mockStoreFactory(WINDOW_STORE_NAME); public StrategyType type; @@ -643,10 +646,10 @@ public void shouldNotEmitFinalIfNotProgressEnough(final StrategyType inputType, try { // Always process props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); - final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final MockInternalProcessorContext, Change> context = makeContext(stateDir, windowSize); final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( windows, - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER @@ -733,10 +736,10 @@ public void shouldEmitWithInterval0(final StrategyType inputType, final boolean try { // Always process props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); - final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final MockInternalProcessorContext, Change> context = makeContext(stateDir, windowSize); final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( windows, - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER @@ -802,10 +805,10 @@ public void shouldEmitWithLargeInterval(final StrategyType inputType, final bool try { // Emit final every second props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 1000L); - final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final MockInternalProcessorContext, Change> context = makeContext(stateDir, windowSize); final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( windows, - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER @@ -903,10 +906,10 @@ public void shouldEmitFromLastEmitTime(final StrategyType inputType, final boole try { // Always process props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); - final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final MockInternalProcessorContext, Change> context = makeContext(stateDir, windowSize); final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( windows, - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER @@ -982,7 +985,7 @@ public void showThrowIfEmitFinalUsedWithUnlimitedWindow(final StrategyType input final IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> new KStreamWindowAggregate<>( UnlimitedWindows.of(), - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER) @@ -992,7 +995,7 @@ public void showThrowIfEmitFinalUsedWithUnlimitedWindow(final StrategyType input } else { new KStreamWindowAggregate<>( UnlimitedWindows.of(), - WINDOW_STORE_NAME, + storeFactory, emitStrategy, MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER @@ -1025,14 +1028,14 @@ private TimestampedWindowStore getWindowStore(final long windowS .build(); } - private 
MockInternalNewProcessorContext, Change> makeContext(final File stateDir, final long windowSize) { - final MockInternalNewProcessorContext, Change> context = new MockInternalNewProcessorContext<>( + private MockInternalProcessorContext, Change> makeContext(final File stateDir, final long windowSize) { + final MockInternalProcessorContext, Change> context = new MockInternalProcessorContext<>( props, new TaskId(0, 0), stateDir ); - context.setCurrentNode(new ProcessorNode("testNode")); + context.setCurrentNode(new ProcessorNode<>("testNode")); // Create, initialize, and register the state store. final TimestampedWindowStore store = getWindowStore(windowSize); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableForeignKeyJoinScenarioTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableForeignKeyJoinScenarioTest.java index 6e7073b5bf767..5606933733ce1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableForeignKeyJoinScenarioTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableForeignKeyJoinScenarioTest.java @@ -39,6 +39,7 @@ import org.junit.jupiter.api.Test; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; @@ -233,6 +234,137 @@ public void shouldUseExpectedTopicsWithSerde() { ))); } + @Test + public void shouldWorkWithCompositeKeyAndProducerIdInValue() { + final StreamsBuilder builder = new StreamsBuilder(); + + // Left table keyed by + final KTable leftTable = builder.table( + "left_table", + Consumed.with(Serdes.String(), Serdes.String()) + ); + + // Right table keyed by producer_id + final KTable rightTable = builder.table( + "right_table", + Consumed.with(Serdes.String(), Serdes.String()) + ); + + // Have to include producer_id in value since foreignKeyExtractor only gets value + final KTable joined = leftTable.join( + rightTable, + value -> value.split("\\|")[0], // extract producer_id from value + (leftValue, rightValue) -> "(" + leftValue + "," + rightValue + ")", + Materialized.as("store") + ); + + joined.toStream().to("output"); + + try (final TopologyTestDriver driver = createTopologyTestDriver(builder)) { + final TestInputTopic leftInput = driver.createInputTopic( + "left_table", + new StringSerializer(), + new StringSerializer() + ); + final TestInputTopic rightInput = driver.createInputTopic( + "right_table", + new StringSerializer(), + new StringSerializer() + ); + final TestOutputTopic output = driver.createOutputTopic( + "output", + new StringDeserializer(), + new StringDeserializer() + ); + + // Key format: "producerId:productId" + // Left value format: "producerId|productData" + leftInput.pipeInput("producer1:product1", "producer1|product1-data"); + leftInput.pipeInput("producer1:product2", "producer1|product2-data"); + leftInput.pipeInput("producer2:product1", "producer2|product1-data"); + + rightInput.pipeInput("producer1", "producer1-data"); + rightInput.pipeInput("producer2", "producer2-data"); + + final Map expectedOutput = new HashMap<>(); + expectedOutput.put("producer1:product1", "(producer1|product1-data,producer1-data)"); + expectedOutput.put("producer1:product2", "(producer1|product2-data,producer1-data)"); + expectedOutput.put("producer2:product1", "(producer2|product1-data,producer2-data)"); + + assertThat(output.readKeyValuesToMap(), is(expectedOutput)); + } + } + + @Test + public void 
shouldWorkWithCompositeKeyAndBiFunctionExtractor() { + final StreamsBuilder builder = new StreamsBuilder(); + + // Left table keyed by + final KTable leftTable = builder.table( + "left_table", + Consumed.with(Serdes.String(), Serdes.String()) + ); + + // Right table keyed by producer_id + final KTable rightTable = builder.table( + "right_table", + Consumed.with(Serdes.String(), Serdes.String()) + ); + + // Can extract producer_id from composite key using BiFunction + final KTable joined = leftTable.join( + rightTable, + (key, value) -> key.split(":")[0], // extract producer_id from key + (leftValue, rightValue) -> "(" + leftValue + "," + rightValue + ")", + Materialized.as("store") + ); + + joined.toStream().to("output"); + + try (final TopologyTestDriver driver = createTopologyTestDriver(builder)) { + final TestInputTopic leftInput = driver.createInputTopic( + "left_table", + new StringSerializer(), + new StringSerializer() + ); + final TestInputTopic rightInput = driver.createInputTopic( + "right_table", + new StringSerializer(), + new StringSerializer() + ); + final TestOutputTopic output = driver.createOutputTopic( + "output", + new StringDeserializer(), + new StringDeserializer() + ); + + // Now we don't need producer_id in the value + leftInput.pipeInput("producer1:product1", "product1-data"); + leftInput.pipeInput("producer1:product2", "product2-data"); + leftInput.pipeInput("producer2:product1", "product1-data"); + + rightInput.pipeInput("producer1", "producer1-data"); + rightInput.pipeInput("producer2", "producer2-data"); + + final Map expectedOutput = new HashMap<>(); + expectedOutput.put("producer1:product1", "(product1-data,producer1-data)"); + expectedOutput.put("producer1:product2", "(product2-data,producer1-data)"); + expectedOutput.put("producer2:product1", "(product1-data,producer2-data)"); + + assertThat(output.readKeyValuesToMap(), is(expectedOutput)); + } + } + + private TopologyTestDriver createTopologyTestDriver(final StreamsBuilder builder) { + final Properties config = new Properties(); + config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "test-app"); + config.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); + config.setProperty(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); + config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + return new TopologyTestDriver(builder.build(), config); + } + private void validateTopologyCanProcessData(final StreamsBuilder builder) { final Properties config = new Properties(); config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.IntegerSerde.class.getName()); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableReduceTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableReduceTest.java index de437ed66a51e..5f1c8489dabde 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableReduceTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableReduceTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.streams.kstream.internals; +import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorNode; @@ -42,7 +43,7 @@ public void 
shouldAddAndSubtract() { final Processor>, String, Change>> reduceProcessor = new KTableReduce>( - "myStore", + new MaterializedInternal<>(Materialized.as("myStore")), this::unionNotNullArgs, this::differenceNotNullArgs ).get(); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java index 17a6e38683b36..6dbb9b8d1dbc7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SessionWindowedKStreamImplTest.java @@ -227,7 +227,7 @@ public void shouldMaterializeCount(final EmitStrategy.StrategyType inputType) { try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); final SessionStore store = driver.getSessionStore("count-store"); - final List, Long>> data = StreamsTestUtils.toList(store.fetch("1", "2")); + final List, Long>> data = StreamsTestUtils.toListAndCloseIterator(store.fetch("1", "2")); if (!emitFinal) { assertThat( data, @@ -255,7 +255,7 @@ public void shouldMaterializeReduced(final EmitStrategy.StrategyType inputType) try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); final SessionStore sessionStore = driver.getSessionStore("reduced"); - final List, String>> data = StreamsTestUtils.toList(sessionStore.fetch("1", "2")); + final List, String>> data = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch("1", "2")); if (!emitFinal) { assertThat( @@ -288,7 +288,7 @@ public void shouldMaterializeAggregated(final EmitStrategy.StrategyType inputTyp try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); final SessionStore sessionStore = driver.getSessionStore("aggregated"); - final List, String>> data = StreamsTestUtils.toList(sessionStore.fetch("1", "2")); + final List, String>> data = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch("1", "2")); if (!emitFinal) { assertThat( data, diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImplTest.java index 2928c04b1c5cc..e9c52c33831e8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SlidingWindowedKStreamImplTest.java @@ -208,7 +208,7 @@ public void shouldMaterializeCount() { { final WindowStore windowStore = driver.getWindowStore("count-store"); final List, Long>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), 1L), @@ -223,7 +223,7 @@ public void shouldMaterializeCount() { final WindowStore> windowStore = driver.getTimestampedWindowStore("count-store"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( 
KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), ValueAndTimestamp.make(1L, 100L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), ValueAndTimestamp.make(2L, 150L)), @@ -248,7 +248,7 @@ public void shouldMaterializeReduced() { { final WindowStore windowStore = driver.getWindowStore("reduced"); final List, String>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), "1"), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), "1+2"), @@ -262,7 +262,7 @@ public void shouldMaterializeReduced() { final WindowStore> windowStore = driver.getTimestampedWindowStore("reduced"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), ValueAndTimestamp.make("1", 100L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), ValueAndTimestamp.make("1+2", 150L)), @@ -289,7 +289,7 @@ public void shouldMaterializeAggregated() { { final WindowStore windowStore = driver.getWindowStore("aggregated"); final List, String>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), "0+1"), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), "0+1+2"), @@ -303,7 +303,7 @@ public void shouldMaterializeAggregated() { final WindowStore> windowStore = driver.getTimestampedWindowStore("aggregated"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 100)), ValueAndTimestamp.make("0+1", 100L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(50, 150)), ValueAndTimestamp.make("0+1+2", 150L)), @@ -410,7 +410,7 @@ public void shouldDropWindowsOutsideOfRetention() { { final WindowStore windowStore = driver.getWindowStore("aggregated"); final List, String>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "1", ofEpochMilli(0), ofEpochMilli(10000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "1", ofEpochMilli(0), ofEpochMilli(10000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(900, 1000)), "0+4"), KeyValue.pair(new Windowed<>("1", new TimeWindow(1900, 2000)), "0+5")))); @@ -419,7 +419,7 @@ public void shouldDropWindowsOutsideOfRetention() { final WindowStore> windowStore = driver.getTimestampedWindowStore("aggregated"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "1", ofEpochMilli(0), ofEpochMilli(2000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "1", ofEpochMilli(0), ofEpochMilli(2000L))); assertThat(data, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("1", new 
TimeWindow(900, 1000)), ValueAndTimestamp.make("0+4", 1000L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(1900, 2000)), ValueAndTimestamp.make("0+5", 2000L))))); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SuppressScenarioTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SuppressScenarioTest.java index 4c37fa2f5cf1f..5c52187bab2e1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SuppressScenarioTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/SuppressScenarioTest.java @@ -811,7 +811,7 @@ public void shouldWorkBeforeJoinLeft() { } @Test - public void shouldWorkWithCogrouped() { + public void shouldWorkWithCogroupedTimeWindows() { final StreamsBuilder builder = new StreamsBuilder(); final KGroupedStream stream1 = builder.stream("one", Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Grouped.with(Serdes.String(), Serdes.String())); @@ -823,6 +823,32 @@ public void shouldWorkWithCogrouped() { .toStream(); } + @Test + public void shouldWorkWithCogroupedSlidingWindows() { + final StreamsBuilder builder = new StreamsBuilder(); + + final KGroupedStream stream1 = builder.stream("one", Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Grouped.with(Serdes.String(), Serdes.String())); + final KGroupedStream stream2 = builder.stream("two", Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Grouped.with(Serdes.String(), Serdes.String())); + final KStream, Object> cogrouped = stream1.cogroup((key, value, aggregate) -> aggregate + value).cogroup(stream2, (key, value, aggregate) -> aggregate + value) + .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(15))) + .aggregate(() -> "", Named.as("test"), Materialized.as("store")) + .suppress(Suppressed.untilWindowCloses(unbounded())) + .toStream(); + } + + @Test + public void shouldWorkWithCogroupedSessionWindows() { + final StreamsBuilder builder = new StreamsBuilder(); + + final KGroupedStream stream1 = builder.stream("one", Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Grouped.with(Serdes.String(), Serdes.String())); + final KGroupedStream stream2 = builder.stream("two", Consumed.with(Serdes.String(), Serdes.String())).groupByKey(Grouped.with(Serdes.String(), Serdes.String())); + final KStream, Object> cogrouped = stream1.cogroup((key, value, aggregate) -> aggregate + value).cogroup(stream2, (key, value, aggregate) -> aggregate + value) + .windowedBy(SessionWindows.ofInactivityGapAndGrace(Duration.ofMinutes(15), Duration.ofMinutes(5))) + .aggregate(() -> "", (k, v1, v2) -> "", Named.as("test"), Materialized.as("store")) + .suppress(Suppressed.untilWindowCloses(unbounded())) + .toStream(); + } + private static void verify(final List> results, final List> expectedResults) { if (results.size() != expectedResults.size()) { diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java index 3758ad7b9bc74..c526b09f9c64a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java @@ -239,7 +239,7 @@ public void shouldMaterializeCount(final StrategyType inputType, final boolean i { final WindowStore windowStore = driver.getWindowStore("count-store"); final List, Long>> data = - 
StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); if (withCache) { // with cache returns all records (expired from underneath as well) as part of @@ -266,7 +266,7 @@ public void shouldMaterializeCount(final StrategyType inputType, final boolean i final WindowStore> windowStore = driver.getTimestampedWindowStore("count-store"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); // the same values and logic described above applies here as well. if (withCache) { @@ -305,7 +305,7 @@ public void shouldMaterializeReduced(final StrategyType inputType, final boolean { final WindowStore windowStore = driver.getWindowStore("reduced"); final List, String>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); if (withCache) { // with cache returns all records (expired from underneath as well) as part of @@ -325,7 +325,7 @@ public void shouldMaterializeReduced(final StrategyType inputType, final boolean { final WindowStore> windowStore = driver.getTimestampedWindowStore("reduced"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); // same logic/data as explained above. if (withCache) { @@ -358,7 +358,7 @@ public void shouldMaterializeAggregated(final StrategyType inputType, final bool { final WindowStore windowStore = driver.getWindowStore("aggregated"); final List, String>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); if (withCache) { // with cache returns all records (expired from underneath as well) as part of @@ -379,7 +379,7 @@ public void shouldMaterializeAggregated(final StrategyType inputType, final bool { final WindowStore> windowStore = driver.getTimestampedWindowStore("aggregated"); final List, ValueAndTimestamp>> data = - StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); if (withCache) { assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), ValueAndTimestamp.make("0+1+2", 15L)), diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java index eb8f95f96f19b..25a15efdd4c9d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/WindowedStreamPartitionerTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.streams.kstream.internals; +import org.apache.kafka.clients.producer.internals.BuiltInPartitioner; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; 
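// Aside (illustrative sketch, not part of this diff): the toList -> toListAndCloseIterator renames
// above matter because iterators returned by store fetch()/range() calls hold underlying resources
// (e.g. RocksDB iterators) until closed. A minimal sketch of what such a drain-and-close helper
// presumably does is shown below; the method body is an assumption, not copied from StreamsTestUtils.
import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;

public final class IteratorDrainSketch {
    public static <K, V> List<KeyValue<K, V>> toListAndCloseIterator(final KeyValueIterator<K, V> iterator) {
        // try-with-resources guarantees close() even if draining the iterator throws
        try (KeyValueIterator<K, V> it = iterator) {
            final List<KeyValue<K, V>> results = new ArrayList<>();
            while (it.hasNext()) {
                results.add(it.next());
            }
            return results;
        }
    }
}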
import org.apache.kafka.common.PartitionInfo; @@ -58,8 +59,6 @@ public class WindowedStreamPartitionerTest { @Test public void testCopartitioning() { final Random rand = new Random(); - @SuppressWarnings("deprecation") - final org.apache.kafka.clients.producer.internals.DefaultPartitioner defaultPartitioner = new org.apache.kafka.clients.producer.internals.DefaultPartitioner(); final WindowedSerializer timeWindowedSerializer = new TimeWindowedSerializer<>(intSerializer); final WindowedStreamPartitioner streamPartitioner = new WindowedStreamPartitioner<>(timeWindowedSerializer); @@ -68,9 +67,8 @@ public void testCopartitioning() { final byte[] keyBytes = intSerializer.serialize(topicName, key); final String value = key.toString(); - final byte[] valueBytes = stringSerializer.serialize(topicName, value); - final Set expected = Collections.singleton(defaultPartitioner.partition(topicName, key, keyBytes, value, valueBytes, cluster)); + final Set expected = Set.of(BuiltInPartitioner.partitionForKey(keyBytes, cluster.partitionsForTopic(topicName).size())); for (int w = 1; w < 10; w++) { final TimeWindow window = new TimeWindow(10 * w, 20 * w); @@ -82,7 +80,5 @@ public void testCopartitioning() { assertEquals(expected, actual.get()); } } - - defaultPartitioner.close(); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplierTests.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplierTests.java index 4ce76eee0cfdc..722aada294db2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplierTests.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ForeignTableJoinProcessorSupplierTests.java @@ -21,15 +21,15 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.kstream.internals.Change; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.TimestampedKeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -59,7 +59,7 @@ public class ForeignTableJoinProcessorSupplierTests { Serdes.String() ); - private MockInternalNewProcessorContext> context = null; + private MockInternalProcessorContext> context = null; private TimestampedKeyValueStore> stateStore = null; private Processor, String, SubscriptionResponseWrapper> processor = null; private File stateDir; @@ -68,13 +68,16 @@ public class ForeignTableJoinProcessorSupplierTests { public void setUp() { stateDir = TestUtils.tempDirectory(); final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.String(), Serdes.String()); - context = new MockInternalNewProcessorContext<>(props, new TaskId(0, 0), stateDir); + context = new MockInternalProcessorContext<>(props, new TaskId(0, 0), stateDir); final StoreBuilder>> storeBuilder = storeBuilder(); - 
processor = new ForeignTableJoinProcessorSupplier(storeBuilder().name(), COMBINED_KEY_SCHEMA).get(); + processor = new ForeignTableJoinProcessorSupplier( + StoreBuilderWrapper.wrapStoreBuilder(storeBuilder()), + COMBINED_KEY_SCHEMA + ).get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); processor.init(context); } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ResponseJoinProcessorSupplierTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ResponseJoinProcessorSupplierTest.java index 49fad51c3bf52..d5052247d2ece 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ResponseJoinProcessorSupplierTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/ResponseJoinProcessorSupplierTest.java @@ -29,7 +29,7 @@ import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.Murmur3; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.junit.jupiter.api.Test; @@ -96,7 +96,7 @@ public void shouldNotForwardWhenHashDoesNotMatch() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -125,7 +125,7 @@ public void shouldIgnoreUpdateWhenLeftHasBecomeNull() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -154,7 +154,7 @@ public void shouldForwardWhenHashMatches() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -180,7 +180,7 @@ public void shouldEmitTombstoneForInnerJoinWhenRightIsNull() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -206,7 +206,7 @@ public void shouldEmitResultForLeftJoinWhenRightIsNull() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -232,7 +232,7 @@ public void shouldEmitTombstoneForLeftJoinWhenRightIsNullAndLeftIsNull() { leftJoin ); final Processor, String, String> processor = processorSupplier.get(); - final 
MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); processor.init(context); context.setRecordMetadata("topic", 0, 0); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplierTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplierTest.java index 72e4eb23e4594..954c3b687cc56 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplierTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionReceiveProcessorSupplierTest.java @@ -23,16 +23,16 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.kstream.internals.Change; import org.apache.kafka.streams.kstream.internals.foreignkeyjoin.SubscriptionWrapper.Instruction; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.TimestampedKeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -53,7 +53,7 @@ public class SubscriptionReceiveProcessorSupplierTest { private final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.String(), Serdes.String()); private File stateDir; - private MockInternalNewProcessorContext, Change>>> context; + private MockInternalProcessorContext, Change>>> context; private TimestampedKeyValueStore> stateStore = null; private static final String FK = "fk1"; @@ -71,7 +71,7 @@ public class SubscriptionReceiveProcessorSupplierTest { @BeforeEach public void before() { stateDir = TestUtils.tempDirectory(); - context = new MockInternalNewProcessorContext<>(props, new TaskId(0, 0), stateDir); + context = new MockInternalProcessorContext<>(props, new TaskId(0, 0), stateDir); } @AfterEach @@ -98,7 +98,7 @@ public void shouldDeleteKeyAndPropagateV0() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -150,7 +150,7 @@ public void shouldDeleteKeyAndPropagateV1() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, Instruction.DELETE_KEY_AND_PROPAGATE, @@ -201,7 +201,7 @@ public void shouldDeleteKeyNoPropagateV0() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) 
context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -253,7 +253,7 @@ public void shouldDeleteKeyNoPropagateV1() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -305,7 +305,7 @@ public void shouldPropagateOnlyIfFKValAvailableV0() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -357,7 +357,7 @@ public void shouldPropagateOnlyIfFKValAvailableV1() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -409,7 +409,7 @@ public void shouldPropagateNullIfNoFKValAvailableV0() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -461,7 +461,7 @@ public void shouldPropagateNullIfNoFKValAvailableV1() { Change>>> processor = supplier.get(); stateStore = storeBuilder.build(); context.addStateStore(stateStore); - stateStore.init((StateStoreContext) context, stateStore); + stateStore.init(context, stateStore); final SubscriptionWrapper oldWrapper = new SubscriptionWrapper<>( new long[]{1L, 2L}, @@ -507,7 +507,10 @@ public void shouldPropagateNullIfNoFKValAvailableV1() { private SubscriptionReceiveProcessorSupplier supplier( final StoreBuilder>> storeBuilder) { - return new SubscriptionReceiveProcessorSupplier<>(storeBuilder.name(), COMBINED_KEY_SCHEMA); + return new SubscriptionReceiveProcessorSupplier<>( + StoreBuilderWrapper.wrapStoreBuilder(storeBuilder), + COMBINED_KEY_SCHEMA + ); } private StoreBuilder>> storeBuilder() { diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionSendProcessorSupplierTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionSendProcessorSupplierTest.java index def6d9b36d301..87366bd5334a9 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionSendProcessorSupplierTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/foreignkeyjoin/SubscriptionSendProcessorSupplierTest.java @@ -24,7 +24,7 @@ import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.state.internals.Murmur3; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.junit.jupiter.api.Test; @@ -45,7 +45,7 @@ public class SubscriptionSendProcessorSupplierTest { private final Processor, String, SubscriptionWrapper> leftJoinProcessor = new SubscriptionSendProcessorSupplier( - LeftValue::getForeignKey, + 
ForeignKeyExtractor.fromFunction(LeftValue::getForeignKey), () -> "subscription-topic-fk", () -> "value-serde-topic", Serdes.String(), @@ -55,7 +55,7 @@ public class SubscriptionSendProcessorSupplierTest { private final Processor, String, SubscriptionWrapper> innerJoinProcessor = new SubscriptionSendProcessorSupplier( - LeftValue::getForeignKey, + ForeignKeyExtractor.fromFunction(LeftValue::getForeignKey), () -> "subscription-topic-fk", () -> "value-serde-topic", Serdes.String(), @@ -70,7 +70,7 @@ public class SubscriptionSendProcessorSupplierTest { // Left join tests @Test public void leftJoinShouldPropagateNewPrimaryKeyWithNonNullFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -87,7 +87,7 @@ public void leftJoinShouldPropagateNewPrimaryKeyWithNonNullFK() { @Test public void leftJoinShouldPropagateNewPrimaryKeyWithNullFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -104,7 +104,7 @@ public void leftJoinShouldPropagateNewPrimaryKeyWithNullFK() { @Test public void leftJoinShouldPropagateChangeOfFKFromNonNullToNonNullValue() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -121,7 +121,7 @@ public void leftJoinShouldPropagateChangeOfFKFromNonNullToNonNullValue() { @Test public void leftJoinShouldPropagateNewRecordOfUnchangedFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -138,7 +138,7 @@ public void leftJoinShouldPropagateNewRecordOfUnchangedFK() { @Test public void leftJoinShouldPropagateChangeOfFKFromNonNullToNullValue() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -155,7 +155,7 @@ public void leftJoinShouldPropagateChangeOfFKFromNonNullToNullValue() { @Test public void leftJoinShouldPropagateChangeFromNullFKToNonNullFKValue() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -172,7 +172,7 @@ public void leftJoinShouldPropagateChangeFromNullFKToNonNullFKValue() { @Test public void leftJoinShouldPropagateChangeFromNullFKToNullFKValue() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -189,7 +189,7 @@ public void leftJoinShouldPropagateChangeFromNullFKToNullFKValue() { @Test public void leftJoinShouldPropagateDeletionOfAPrimaryKey() { - 
final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -204,7 +204,7 @@ public void leftJoinShouldPropagateDeletionOfAPrimaryKey() { @Test public void leftJoinShouldPropagateDeletionOfAPrimaryKeyThatHadNullFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -219,7 +219,7 @@ public void leftJoinShouldPropagateDeletionOfAPrimaryKeyThatHadNullFK() { @Test public void leftJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); leftJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -231,7 +231,7 @@ public void leftJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { // Inner join tests @Test public void innerJoinShouldPropagateNewPrimaryKey() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -248,7 +248,7 @@ public void innerJoinShouldPropagateNewPrimaryKey() { @Test public void innerJoinShouldNotPropagateNewPrimaryKeyWithNullFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -265,7 +265,7 @@ public void innerJoinShouldNotPropagateNewPrimaryKeyWithNullFK() { @Test public void innerJoinShouldDeleteOldAndPropagateNewFK() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -286,7 +286,7 @@ public void innerJoinShouldDeleteOldAndPropagateNewFK() { @Test public void innerJoinShouldPropagateNothingWhenOldAndNewFKIsNull() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -303,7 +303,7 @@ public void innerJoinShouldPropagateNothingWhenOldAndNewFKIsNull() { @Test public void innerJoinShouldPropagateDeletionOfPrimaryKey() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 0, 0); @@ -318,7 +318,7 @@ public void innerJoinShouldPropagateDeletionOfPrimaryKey() { @Test public void innerJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); innerJoinProcessor.init(context); context.setRecordMetadata("topic", 
0, 0); @@ -327,6 +327,306 @@ public void innerJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { assertThat(context.forwarded(), empty()); } + // Bi-function tests: inner join, left join + private final Processor, String, SubscriptionWrapper> biFunctionLeftJoinProcessor = + new SubscriptionSendProcessorSupplier( + ForeignKeyExtractor.fromBiFunction((key, value) -> value.getForeignKey() == null ? null : key + value.getForeignKey()), + () -> "subscription-topic-fk", + () -> "value-serde-topic", + Serdes.String(), + new LeftValueSerializer(), + true + ).get(); + + private final Processor, String, SubscriptionWrapper> biFunctionInnerJoinProcessor = + new SubscriptionSendProcessorSupplier( + ForeignKeyExtractor.fromBiFunction((key, value) -> value.getForeignKey() == null ? null : key + value.getForeignKey()), + () -> "subscription-topic-fk", + () -> "value-serde-topic", + Serdes.String(), + new LeftValueSerializer(), + false + ).get(); + + // Bi-function tests: left join + @Test + public void biFunctionLeftJoinShouldPropagateNewPrimaryKeyWithNonNullFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk1); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, null), 0)); + + final String compositeKey = pk + fk1; + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateNewPrimaryKeyWithNullFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(null); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, null), 0)); + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(null, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateChangeOfFKFromNonNullToNonNullValue() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk2); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(fk1)), 0)); + + final String compositeKey = pk + fk2; + + assertThat(context.forwarded().size(), is(2)); + assertThat( + context.forwarded().get(1).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateNewRecordOfUnchangedFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk1); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, leftRecordValue), 0)); + + final String compositeKey = pk + fk1; + + 
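// Aside (illustrative sketch, not part of this diff): the biFunction* cases in this file exercise the
// new key-aware ForeignKeyExtractor.fromBiFunction(...) path. At the DSL level this appears to
// correspond to the KTable#join overload that takes a (key, value) -> foreignKey function, as
// exercised in the KTableKTableForeignKeyJoinScenarioTest changes earlier in this diff. The topic
// names, composite "customerId:orderId" key scheme, and class name below are hypothetical.
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;

public final class KeyAwareFkJoinSketch {
    static KTable<String, String> build(final StreamsBuilder builder) {
        // Orders are keyed by a composite "customerId:orderId"; customers are keyed by customerId.
        final KTable<String, String> orders =
            builder.table("orders", Consumed.with(Serdes.String(), Serdes.String()));
        final KTable<String, String> customers =
            builder.table("customers", Consumed.with(Serdes.String(), Serdes.String()));

        // The foreign key is derived from the order's composite key, so the order value
        // no longer has to carry the customer id just to make the join possible.
        return orders.join(
            customers,
            (orderKey, orderValue) -> orderKey.split(":")[0],              // key-aware extractor
            (orderValue, customerValue) -> orderValue + "," + customerValue,
            Materialized.as("orders-with-customers")
        );
    }
}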
assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateChangeOfFKFromNonNullToNullValue() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(null); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(fk1)), 0)); + + final String compositeKey = pk + fk1; + + assertThat(context.forwarded().size(), greaterThan(0)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), DELETE_KEY_AND_PROPAGATE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateChangeFromNullFKToNonNullFKValue() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk1); + + final String compositeKey = pk + fk1; + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(null)), 0)); + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateChangeFromNullFKToNullFKValue() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(null); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, leftRecordValue), 0)); + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(null, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateDeletionOfAPrimaryKey() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(null, new LeftValue(fk1)), 0)); + + final String compositeKey = pk + fk1; + + assertThat(context.forwarded().size(), greaterThan(0)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(null, DELETE_KEY_AND_PROPAGATE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateDeletionOfAPrimaryKeyThatHadNullFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(null, new LeftValue(null)), 0)); + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(null, new SubscriptionWrapper<>(null, 
PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionLeftJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionLeftJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + biFunctionLeftJoinProcessor.process(new Record<>(pk, new Change<>(null, null), 0)); + + assertThat(context.forwarded(), empty()); + } + + // Bi-function tests: inner join + @Test + public void biFunctionInnerJoinShouldPropagateNewPrimaryKey() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk1); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, null), 0)); + + final String compositeKey = pk + fk1; + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionInnerJoinShouldNotPropagateNewPrimaryKeyWithNullFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(null); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, null), 0)); + + assertThat(context.forwarded(), empty()); + + // test dropped-records sensors + assertEquals(1.0, getDroppedRecordsTotalMetric(context)); + assertNotEquals(0.0, getDroppedRecordsRateMetric(context)); + } + + @Test + public void biFunctionInnerJoinShouldDeleteOldAndPropagateNewFK() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(fk2); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(fk1)), 0)); + + final String compositeKey1 = pk + fk1; + final String compositeKey2 = pk + fk2; + + assertThat(context.forwarded().size(), is(2)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey1, new SubscriptionWrapper<>(hash(leftRecordValue), DELETE_KEY_NO_PROPAGATE, pk, 0), 0)) + ); + assertThat( + context.forwarded().get(1).record(), + is(new Record<>(compositeKey2, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionInnerJoinShouldPropagateNothingWhenOldAndNewFKIsNull() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + final LeftValue leftRecordValue = new LeftValue(null); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, leftRecordValue), 0)); + + assertThat(context.forwarded(), empty()); + + // test dropped-records sensors + assertEquals(1.0, getDroppedRecordsTotalMetric(context)); + assertNotEquals(0.0, getDroppedRecordsRateMetric(context)); + } + + @Test + public void biFunctionInnerJoinShouldPropagateDeletionOfPrimaryKey() { + final MockInternalProcessorContext> context = new 
MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(null, new LeftValue(fk1)), 0)); + + final String compositeKey = pk + fk1; + + assertThat(context.forwarded().size(), is(1)); + assertThat( + context.forwarded().get(0).record(), + is(new Record<>(compositeKey, new SubscriptionWrapper<>(null, DELETE_KEY_AND_PROPAGATE, pk, 0), 0)) + ); + } + + @Test + public void biFunctionInnerJoinShouldPropagateNothingWhenOldAndNewLeftValueIsNull() { + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + biFunctionInnerJoinProcessor.init(context); + context.setRecordMetadata("topic", 0, 0); + + biFunctionInnerJoinProcessor.process(new Record<>(pk, new Change<>(null, null), 0)); + + assertThat(context.forwarded(), empty()); + } + private static class LeftValueSerializer implements Serializer { @Override public byte[] serialize(final String topic, final LeftValue data) { diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtilTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtilTest.java index 1541dce30baa9..6ed7dea0fb3dd 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtilTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/GraphGraceSearchUtilTest.java @@ -25,11 +25,11 @@ import org.apache.kafka.streams.kstream.internals.TimeWindow; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.StoreFactory; import org.junit.jupiter.api.Test; import static java.time.Duration.ofMillis; +import static org.apache.kafka.streams.utils.TestUtils.mockStoreFactory; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.fail; @@ -49,8 +49,20 @@ public void shouldThrowOnNull() { public void shouldFailIfThereIsNoGraceAncestor() { // doesn't matter if this ancestor is stateless or stateful. The important thing it that there is // no grace period defined on any ancestor of the node - final StatefulProcessorNode gracelessAncestor = new StatefulProcessorNode<>( - "stateful", + final ProcessorGraphNode gracelessAncestor = new ProcessorGraphNode<>( + "graceless", + new ProcessorParameters<>( + () -> new Processor() { + @Override + public void process(final Record record) {} + + }, + "graceless" + ) + ); + + final ProcessorGraphNode node = new ProcessorGraphNode<>( + "stateless", new ProcessorParameters<>( () -> new Processor() { @@ -58,54 +70,52 @@ public void shouldFailIfThereIsNoGraceAncestor() { public void process(final Record record) {} }, - "dummy" - ), - (StoreFactory) null + "stateless" + ) ); - final ProcessorGraphNode node = new ProcessorGraphNode<>("stateless", null); gracelessAncestor.addChild(node); try { GraphGraceSearchUtil.findAndVerifyWindowGrace(node); fail("should have thrown."); } catch (final TopologyException e) { - assertThat(e.getMessage(), is("Invalid topology: Window close time is only defined for windowed computations. Got [stateful->stateless].")); + assertThat(e.getMessage(), is("Invalid topology: Window close time is only defined for windowed computations. 
Got [graceless->stateless].")); } } @Test public void shouldExtractGraceFromKStreamWindowAggregateNode() { final TimeWindows windows = TimeWindows.ofSizeAndGrace(ofMillis(10L), ofMillis(1234L)); - final StatefulProcessorNode node = new StatefulProcessorNode<>( + final ProcessorGraphNode node = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>( new KStreamWindowAggregate( windows, - "asdf", + mockStoreFactory("asdf"), EmitStrategy.onWindowUpdate(), null, null ), "asdf" ), - (StoreFactory) null + windows.gracePeriodMs() ); final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node); - assertThat(extracted, is(windows.gracePeriodMs())); + assertThat(extracted, is(1234L)); } @Test public void shouldExtractGraceFromKStreamSessionWindowAggregateNode() { final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L)); - final StatefulProcessorNode node = new StatefulProcessorNode<>( + final ProcessorGraphNode node = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>( new KStreamSessionWindowAggregate( windows, - "asdf", + mockStoreFactory("asdf"), EmitStrategy.onWindowUpdate(), null, null, @@ -113,25 +123,25 @@ public void shouldExtractGraceFromKStreamSessionWindowAggregateNode() { ), "asdf" ), - (StoreFactory) null + windows.gracePeriodMs() + windows.inactivityGap() ); final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node); - assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap())); + assertThat(extracted, is(1244L)); } @Test public void shouldExtractGraceFromSessionAncestorThroughStatefulParent() { final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L)); - final StatefulProcessorNode graceGrandparent = new StatefulProcessorNode<>( + final ProcessorGraphNode graceGrandparent = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>(new KStreamSessionWindowAggregate( - windows, "asdf", EmitStrategy.onWindowUpdate(), null, null, null + windows, mockStoreFactory("asdf"), EmitStrategy.onWindowUpdate(), null, null, null ), "asdf"), - (StoreFactory) null + windows.gracePeriodMs() + windows.inactivityGap() ); - final StatefulProcessorNode statefulParent = new StatefulProcessorNode<>( + final ProcessorGraphNode statefulParent = new ProcessorGraphNode<>( "stateful", new ProcessorParameters<>( () -> new Processor() { @@ -141,27 +151,37 @@ public void process(final Record record) {} }, "dummy" - ), - (StoreFactory) null + ) ); graceGrandparent.addChild(statefulParent); - final ProcessorGraphNode node = new ProcessorGraphNode<>("stateless", null); + final ProcessorGraphNode node = new ProcessorGraphNode<>( + "stateless", + new ProcessorParameters<>( + () -> new Processor() { + + @Override + public void process(final Record record) {} + + }, + "dummyChild-graceless" + ) + ); statefulParent.addChild(node); final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node); - assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap())); + assertThat(extracted, is(1244L)); } @Test public void shouldExtractGraceFromSessionAncestorThroughStatelessParent() { final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L)); - final StatefulProcessorNode graceGrandparent = new StatefulProcessorNode<>( + final ProcessorGraphNode graceGrandparent = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>( new KStreamSessionWindowAggregate( windows, - "asdf", + mockStoreFactory("asdf"), 
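The hard-coded expectations of 1244L above come from adding the session inactivity gap to the grace period. A minimal sketch of that arithmetic using only the public windowing API (independent of the internal GracePeriodGraphNode; class name is illustrative):

```java
import static java.time.Duration.ofMillis;

import org.apache.kafka.streams.kstream.SessionWindows;
import org.apache.kafka.streams.kstream.TimeWindows;

public class GraceArithmetic {
    public static void main(final String[] args) {
        final SessionWindows session = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));
        // Session windows only close once both the inactivity gap and the grace period have elapsed.
        System.out.println(session.inactivityGap() + session.gracePeriodMs()); // 1244

        final TimeWindows time = TimeWindows.ofSizeAndGrace(ofMillis(10L), ofMillis(4321L));
        // Time windows close after the grace period alone.
        System.out.println(time.gracePeriodMs()); // 4321
    }
}
```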
EmitStrategy.onWindowUpdate(), null, null, @@ -169,27 +189,50 @@ public void shouldExtractGraceFromSessionAncestorThroughStatelessParent() { ), "asdf" ), - (StoreFactory) null + windows.gracePeriodMs() + windows.inactivityGap() ); - final ProcessorGraphNode statelessParent = new ProcessorGraphNode<>("stateless", null); + final ProcessorGraphNode statelessParent = new ProcessorGraphNode<>( + "statelessParent", + new ProcessorParameters<>( + () -> new Processor() { + + @Override + public void process(final Record record) {} + + }, + "statelessParent" + ) + ); graceGrandparent.addChild(statelessParent); - final ProcessorGraphNode node = new ProcessorGraphNode<>("stateless", null); + final ProcessorGraphNode node = new ProcessorGraphNode<>( + "stateless", + new ProcessorParameters<>( + () -> new Processor() { + + @Override + public void process(final Record record) {} + + }, + "stateless" + ) + ); statelessParent.addChild(node); final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node); - assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap())); + assertThat(extracted, is(1244L)); } @Test public void shouldUseMaxIfMultiParentsDoNotAgreeOnGrace() { - final StatefulProcessorNode leftParent = new StatefulProcessorNode<>( + final SessionWindows leftWindows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L)); + final ProcessorGraphNode leftParent = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>( new KStreamSessionWindowAggregate( - SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L)), - "asdf", + leftWindows, + mockStoreFactory("asdf"), EmitStrategy.onWindowUpdate(), null, null, @@ -197,25 +240,37 @@ public void shouldUseMaxIfMultiParentsDoNotAgreeOnGrace() { ), "asdf" ), - (StoreFactory) null + leftWindows.gracePeriodMs() + leftWindows.inactivityGap() ); - final StatefulProcessorNode rightParent = new StatefulProcessorNode<>( + final TimeWindows rightWindows = TimeWindows.ofSizeAndGrace(ofMillis(10L), ofMillis(4321L)); + final ProcessorGraphNode rightParent = new GracePeriodGraphNode<>( "asdf", new ProcessorParameters<>( new KStreamWindowAggregate( - TimeWindows.ofSizeAndGrace(ofMillis(10L), ofMillis(4321L)), - "asdf", + rightWindows, + mockStoreFactory("asdf"), EmitStrategy.onWindowUpdate(), null, null ), "asdf" ), - (StoreFactory) null + rightWindows.gracePeriodMs() ); - final ProcessorGraphNode node = new ProcessorGraphNode<>("stateless", null); + final ProcessorGraphNode node = new ProcessorGraphNode<>( + "stateless", + new ProcessorParameters<>( + () -> new Processor() { + + @Override + public void process(final Record record) {} + + }, + "stateless" + ) + ); leftParent.addChild(node); rightParent.addChild(node); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNodeTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNodeTest.java deleted file mode 100644 index 36a32d01dc2fd..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableProcessorNodeTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.streams.kstream.internals.graph; - -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.Record; - -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class TableProcessorNodeTest { - private static class TestProcessor implements Processor { - - @Override - public void process(final Record record) { - } - - } - - @Test - public void shouldConvertToStringWithNullStoreBuilder() { - final TableProcessorNode node = new TableProcessorNode<>( - "name", - new ProcessorParameters<>(TestProcessor::new, "processor"), - null, - new String[]{"store1", "store2"} - ); - - final String asString = node.toString(); - final String expected = "storeFactory=null"; - assertTrue( - asString.contains(expected), - String.format( - "Expected toString to return string with \"%s\", received: %s", - expected, - asString) - ); - } -} \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNodeTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNodeTest.java index 2988e14e720e9..bf70d47683979 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNodeTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/graph/TableSourceNodeTest.java @@ -16,23 +16,29 @@ */ package org.apache.kafka.streams.kstream.internals.graph; +import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.kstream.internals.KTableSource; import org.apache.kafka.streams.kstream.internals.MaterializedInternal; import org.apache.kafka.streams.kstream.internals.graph.TableSourceNode.TableSourceNodeBuilder; +import org.apache.kafka.streams.processor.api.ProcessorWrapper; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; +import org.apache.kafka.streams.state.KeyValueStore; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) @@ -43,6 +49,12 @@ public class TableSourceNodeTest { private InternalTopologyBuilder topologyBuilder = mock(InternalTopologyBuilder.class); + @BeforeEach + public void before() { + when(topologyBuilder.wrapProcessorSupplier(any(), any())) + .thenAnswer(iom -> ProcessorWrapper.asWrapped(iom.getArgument(1))); + } + @Test public void 
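The @BeforeEach added above stubs wrapProcessorSupplier so the mocked InternalTopologyBuilder simply hands the supplied ProcessorSupplier back unwrapped. A generic sketch of that Mockito pass-through idiom; the Wrapper interface and names here are made-up stand-ins, not Kafka classes:

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class PassThroughStub {

    // Hypothetical collaborator, standing in for a "wrap before use" hook.
    interface Wrapper {
        String wrap(String processorName, String supplier);
    }

    public static void main(final String[] args) {
        final Wrapper wrapper = mock(Wrapper.class);

        // thenAnswer hands back the second argument unchanged, so the mock neither
        // wraps nor rejects anything; it just passes the value through.
        when(wrapper.wrap(any(), any()))
            .thenAnswer(invocation -> invocation.getArgument(1));

        System.out.println(wrapper.wrap("ignored", "payload")); // prints "payload"
    }
}
```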
shouldConnectStateStoreToInputTopicIfInputTopicIsUsedAsChangelog() { final boolean shouldReuseSourceTopicForChangelog = true; @@ -59,12 +71,13 @@ public void shouldConnectStateStoreToChangelogTopic() { private void buildTableSourceNode(final boolean shouldReuseSourceTopicForChangelog) { final TableSourceNodeBuilder tableSourceNodeBuilder = TableSourceNode.tableSourceNodeBuilder(); + final MaterializedInternal> + materializedInternal = new MaterializedInternal<>(Materialized.as(STORE_NAME)); final TableSourceNode tableSourceNode = tableSourceNodeBuilder .withTopic(TOPIC) - .withMaterializedInternal(new MaterializedInternal<>(Materialized.as(STORE_NAME))) .withConsumedInternal(new ConsumedInternal<>(Consumed.as("node-name"))) .withProcessorParameters( - new ProcessorParameters<>(new KTableSource<>(STORE_NAME, STORE_NAME), null)) + new ProcessorParameters<>(new KTableSource<>(materializedInternal), null)) .build(); tableSourceNode.reuseSourceTopicForChangeLog(shouldReuseSourceTopicForChangelog); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorMetricsTest.java index bfa803f89bb9b..728fdd72f8345 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorMetricsTest.java @@ -25,19 +25,20 @@ import org.apache.kafka.streams.kstream.internals.Change; import org.apache.kafka.streams.kstream.internals.KTableImpl; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorNode; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.internals.InMemoryTimeOrderedKeyValueChangeBuffer; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; import org.hamcrest.Matcher; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; @@ -144,18 +145,18 @@ public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() { final Processor, String, Change> processor = new KTableSuppressProcessorSupplier<>( (SuppressedInternal) Suppressed.untilTimeLimit(Duration.ofDays(100), maxRecords(1)), - storeName, + mockBuilderWithName(storeName), mock ).get(); streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST); - final MockInternalNewProcessorContext> context = - new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory()); + final MockInternalProcessorContext> context = + new MockInternalProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory()); final Time time = Time.SYSTEM; - context.setCurrentNode(new ProcessorNode("testNode")); + context.setCurrentNode(new ProcessorNode<>("testNode")); context.setSystemTimeMs(time.milliseconds()); - 
buffer.init((StateStoreContext) context, buffer); + buffer.init(context, buffer); processor.init(context); final long timestamp = 100L; @@ -206,4 +207,10 @@ private static void verifyMetric(final Map met assertThat(metrics.get(metricName).metricName().description(), is(metricName.description())); assertThat((T) metrics.get(metricName).metricValue(), matcher); } + + private StoreBuilder mockBuilderWithName(final String name) { + final StoreBuilder builder = Mockito.mock(StoreBuilder.class); + Mockito.when(builder.name()).thenReturn(name); + return builder; + } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorTest.java index 36d09f5bafe95..dc8dd5dc358af 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/suppress/KTableSuppressProcessorTest.java @@ -30,19 +30,20 @@ import org.apache.kafka.streams.kstream.internals.SessionWindow; import org.apache.kafka.streams.kstream.internals.TimeWindow; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorNode; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.internals.InMemoryTimeOrderedKeyValueChangeBuffer; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; @@ -76,7 +77,7 @@ public class KTableSuppressProcessorTest { private static class Harness { private final Processor, K, Change> processor; - private final MockInternalNewProcessorContext> context; + private final MockInternalProcessorContext> context; Harness(final Suppressed suppressed, @@ -92,12 +93,12 @@ private static class Harness { @SuppressWarnings("unchecked") final KTableImpl parent = mock(KTableImpl.class); final Processor, K, Change> processor = - new KTableSuppressProcessorSupplier<>((SuppressedInternal) suppressed, storeName, parent).get(); + new KTableSuppressProcessorSupplier<>((SuppressedInternal) suppressed, mockBuilderWithName(storeName), parent).get(); - final MockInternalNewProcessorContext> context = new MockInternalNewProcessorContext<>(); - context.setCurrentNode(new ProcessorNode("testNode")); + final MockInternalProcessorContext> context = new MockInternalProcessorContext<>(); + context.setCurrentNode(new ProcessorNode<>("testNode")); - buffer.init((StateStoreContext) context, buffer); + buffer.init(context, buffer); processor.init(context); this.processor = processor; @@ -109,7 +110,7 @@ private static class Harness { public void zeroTimeLimitShouldImmediatelyEmit() { final Harness harness = new Harness<>(untilTimeLimit(ZERO, unbounded()), String(), Long()); - final 
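The mockBuilderWithName helper above gives the suppress processor supplier a StoreBuilder whose only behaviour is reporting its name. A minimal sketch of the same idiom, assuming nothing beyond the public StoreBuilder interface and Mockito (the store name is illustrative):

```java
import org.apache.kafka.streams.processor.StateStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.mockito.Mockito;

public class MockStoreBuilder {

    // A StoreBuilder mock that only knows its name: enough for code paths that
    // resolve the store by name and never actually build it.
    @SuppressWarnings("unchecked")
    static <T extends StateStore> StoreBuilder<T> mockBuilderWithName(final String name) {
        final StoreBuilder<T> builder = Mockito.mock(StoreBuilder.class);
        Mockito.when(builder.name()).thenReturn(name);
        return builder;
    }

    public static void main(final String[] args) {
        System.out.println(mockBuilderWithName("suppress-buffer-store").name()); // prints the stubbed name
    }
}
```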
MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = ARBITRARY_LONG; context.setRecordMetadata("", 0, 0L); @@ -119,7 +120,7 @@ public void zeroTimeLimitShouldImmediatelyEmit() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -127,7 +128,7 @@ public void zeroTimeLimitShouldImmediatelyEmit() { public void windowedZeroTimeLimitShouldImmediatelyEmit() { final Harness, Long> harness = new Harness<>(untilTimeLimit(ZERO, unbounded()), timeWindowedSerdeFrom(String.class, 100L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = ARBITRARY_LONG; context.setRecordMetadata("", 0, 0L); @@ -137,7 +138,7 @@ public void windowedZeroTimeLimitShouldImmediatelyEmit() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -145,7 +146,7 @@ public void windowedZeroTimeLimitShouldImmediatelyEmit() { public void intermediateSuppressionShouldBufferAndEmitLater() { final Harness harness = new Harness<>(untilTimeLimit(ofMillis(1), unbounded()), String(), Long()); - final MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 0L; context.setRecordMetadata("topic", 0, 0); @@ -160,7 +161,7 @@ public void intermediateSuppressionShouldBufferAndEmitLater() { harness.processor.process(new Record<>("tick", new Change<>(null, null), 1L)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -168,7 +169,7 @@ public void intermediateSuppressionShouldBufferAndEmitLater() { public void finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration() { final Harness, Long> harness = new Harness<>(finalResults(ofMillis(1L)), timeWindowedSerdeFrom(String.class, 1L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long windowStart = 99L; final long recordTime = 99L; @@ -199,7 +200,7 @@ public void finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration() { harness.processor.process(new Record<>(new Windowed<>("dummyKey2", new TimeWindow(windowStart3, windowEnd3)), ARBITRARY_CHANGE, recordTime3)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, recordTime))); } @@ -212,7 +213,7 
@@ public void finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration() { public void finalResultsWithZeroGraceShouldStillBufferUntilTheWindowEnd() { final Harness, Long> harness = new Harness<>(finalResults(ofMillis(0L)), timeWindowedSerdeFrom(String.class, 100L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; // note the record is in the past, but the window end is in the future, so we still have to buffer, // even though the grace period is 0. @@ -230,7 +231,7 @@ public void finalResultsWithZeroGraceShouldStillBufferUntilTheWindowEnd() { harness.processor.process(new Record<>(new Windowed<>("dummyKey", new TimeWindow(windowEnd, windowEnd + 100L)), ARBITRARY_CHANGE, windowEnd)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -238,7 +239,7 @@ public void finalResultsWithZeroGraceShouldStillBufferUntilTheWindowEnd() { public void finalResultsWithZeroGraceAtWindowEndShouldImmediatelyEmit() { final Harness, Long> harness = new Harness<>(finalResults(ofMillis(0L)), timeWindowedSerdeFrom(String.class, 100L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -248,7 +249,7 @@ public void finalResultsWithZeroGraceAtWindowEndShouldImmediatelyEmit() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -260,7 +261,7 @@ public void finalResultsWithZeroGraceAtWindowEndShouldImmediatelyEmit() { public void finalResultsShouldDropTombstonesForTimeWindows() { final Harness, Long> harness = new Harness<>(finalResults(ofMillis(0L)), timeWindowedSerdeFrom(String.class, 100L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -281,7 +282,7 @@ public void finalResultsShouldDropTombstonesForTimeWindows() { public void finalResultsShouldDropTombstonesForSessionWindows() { final Harness, Long> harness = new Harness<>(finalResults(ofMillis(0L)), sessionWindowedSerdeFrom(String.class), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -301,7 +302,7 @@ public void finalResultsShouldDropTombstonesForSessionWindows() { public void suppressShouldNotDropTombstonesForTimeWindows() { final Harness, Long> harness = new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), timeWindowedSerdeFrom(String.class, 100L), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = 100L; final 
Headers headers = new RecordHeaders().add("k", "v".getBytes(StandardCharsets.UTF_8)); @@ -313,7 +314,7 @@ public void suppressShouldNotDropTombstonesForTimeWindows() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp, headers))); } @@ -326,7 +327,7 @@ public void suppressShouldNotDropTombstonesForTimeWindows() { public void suppressShouldNotDropTombstonesForSessionWindows() { final Harness, Long> harness = new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), sessionWindowedSerdeFrom(String.class), Long()); - final MockInternalNewProcessorContext, Change> context = harness.context; + final MockInternalProcessorContext, Change> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -336,7 +337,7 @@ public void suppressShouldNotDropTombstonesForSessionWindows() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -349,7 +350,7 @@ public void suppressShouldNotDropTombstonesForSessionWindows() { public void suppressShouldNotDropTombstonesForKTable() { final Harness harness = new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), String(), Long()); - final MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -359,7 +360,7 @@ public void suppressShouldNotDropTombstonesForKTable() { harness.processor.process(new Record<>(key, value, timestamp)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -367,7 +368,7 @@ public void suppressShouldNotDropTombstonesForKTable() { public void suppressShouldEmitWhenOverRecordCapacity() { final Harness harness = new Harness<>(untilTimeLimit(Duration.ofDays(100), maxRecords(1)), String(), Long()); - final MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -381,7 +382,7 @@ public void suppressShouldEmitWhenOverRecordCapacity() { harness.processor.process(new Record<>("dummyKey", value, timestamp + 1)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -389,7 +390,7 @@ public void suppressShouldEmitWhenOverRecordCapacity() { public void suppressShouldEmitWhenOverByteCapacity() { final Harness harness = new Harness<>(untilTimeLimit(Duration.ofDays(100), maxBytes(60L)), String(), Long()); - final 
MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); @@ -403,7 +404,7 @@ public void suppressShouldEmitWhenOverByteCapacity() { harness.processor.process(new Record<>("dummyKey", value, timestamp + 1)); assertThat(context.forwarded(), hasSize(1)); - final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); + final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0); assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp))); } @@ -411,12 +412,12 @@ public void suppressShouldEmitWhenOverByteCapacity() { public void suppressShouldShutDownWhenOverRecordCapacity() { final Harness harness = new Harness<>(untilTimeLimit(Duration.ofDays(100), maxRecords(1).shutDownWhenFull()), String(), Long()); - final MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); context.setTimestamp(timestamp); - context.setCurrentNode(new ProcessorNode("testNode")); + context.setCurrentNode(new ProcessorNode<>("testNode")); final String key = "hey"; final Change value = new Change<>(null, ARBITRARY_LONG); harness.processor.process(new Record<>(key, value, timestamp)); @@ -435,12 +436,12 @@ public void suppressShouldShutDownWhenOverRecordCapacity() { public void suppressShouldShutDownWhenOverByteCapacity() { final Harness harness = new Harness<>(untilTimeLimit(Duration.ofDays(100), maxBytes(60L).shutDownWhenFull()), String(), Long()); - final MockInternalNewProcessorContext> context = harness.context; + final MockInternalProcessorContext> context = harness.context; final long timestamp = 100L; context.setRecordMetadata("", 0, 0L); context.setTimestamp(timestamp); - context.setCurrentNode(new ProcessorNode("testNode")); + context.setCurrentNode(new ProcessorNode<>("testNode")); final String key = "hey"; final Change value = new Change<>(null, ARBITRARY_LONG); harness.processor.process(new Record<>(key, value, timestamp)); @@ -461,7 +462,7 @@ private static SuppressedInternal finalResults(final Dur } private static Matcher> hasSize(final int i) { - return new BaseMatcher>() { + return new BaseMatcher<>() { @Override public void describeTo(final Description description) { description.appendText("a collection of size " + i); @@ -480,6 +481,7 @@ public boolean matches(final Object item) { }; } + @SuppressWarnings("resource") private static Serde> timeWindowedSerdeFrom(final Class rawType, final long windowSize) { final Serde kSerde = Serdes.serdeFrom(rawType); return new Serdes.WrapperSerde<>( @@ -487,4 +489,10 @@ private static Serde> timeWindowedSerdeFrom(final Class rawTy new TimeWindowedDeserializer<>(kSerde.deserializer(), windowSize) ); } -} \ No newline at end of file + + private static StoreBuilder mockBuilderWithName(final String name) { + final StoreBuilder builder = Mockito.mock(StoreBuilder.class); + Mockito.when(builder.name()).thenReturn(name); + return builder; + } +} diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java index 6a4339a3ed78a..73da4b7cc050b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java +++ 
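These suppress tests assert output by inspecting the records captured by the mock context rather than running a full topology. A hedged sketch of that pattern against the public kafka-streams-test-utils MockProcessorContext (assuming its no-arg constructor); the pass-through processor is a stand-in for illustration, not the suppress processor:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.apache.kafka.streams.processor.api.MockProcessorContext;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;
import org.junit.jupiter.api.Test;

class ForwardCaptureTest {

    // Trivial stand-in processor that forwards every record unchanged.
    static class PassThrough implements Processor<String, Long, String, Long> {
        private ProcessorContext<String, Long> context;

        @Override
        public void init(final ProcessorContext<String, Long> context) {
            this.context = context;
        }

        @Override
        public void process(final Record<String, Long> record) {
            context.forward(record);
        }
    }

    @Test
    void shouldCaptureForwardedRecord() {
        final MockProcessorContext<String, Long> context = new MockProcessorContext<>();
        final PassThrough processor = new PassThrough();
        processor.init(context);

        processor.process(new Record<>("key", 42L, 100L));

        // The mock context records every forward() call instead of sending it downstream.
        assertEquals(1, context.forwarded().size());
        final Record<?, ?> forwarded = context.forwarded().get(0).record();
        assertEquals("key", forwarded.key());
        assertEquals(42L, forwarded.value());
        assertEquals(100L, forwarded.timestamp());
    }
}
```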
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java @@ -190,9 +190,23 @@ public void shouldCloseIfEosV2Enabled() { activeTaskCreator.close(); + assertThat(activeTaskCreator.streamsProducer().isClosed(), is(true)); assertThat(mockClientSupplier.producers.get(0).closed(), is(true)); } + @Test + public void shouldNotReInitializeProducerOnClose() { + properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); + mockClientSupplier.setApplicationIdForProducer("appId"); + createTasks(); + + activeTaskCreator.streamsProducer().close(); + activeTaskCreator.reInitializeProducer(); + // If streamsProducer is not closed, clientSupplier will recreate a producer, + // resulting in more than one producer being created. + assertThat(mockClientSupplier.producers.size(), is(1)); + } + // error handling @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java index 25f3f0e587f75..adc71ebc116e1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/DefaultStateUpdaterTest.java @@ -290,7 +290,11 @@ private void shouldThrowIfAddingTasksWithSameId(final Task task1, final Task tas stateUpdater.add(task2); verifyFailedTasks(IllegalStateException.class, task1); - assertFalse(stateUpdater.isRunning()); + waitForCondition( + () -> !stateUpdater.isRunning(), + VERIFICATION_TIMEOUT, + "Did not switch to non-running within the given timeout!" + ); } @Test @@ -1015,6 +1019,8 @@ public void shouldPauseActiveTaskAndTransitToUpdateStandby() throws Exception { verifyRestoredActiveTasks(); verifyUpdatingTasks(task2); verifyExceptionsAndFailedTasks(); + // shutdown ensures that the test does not end before changelog reader methods verified below are called + stateUpdater.shutdown(Duration.ofMinutes(1)); verify(changelogReader, times(1)).enforceRestoreActive(); verify(changelogReader, times(1)).transitToUpdateStandby(); } @@ -1152,6 +1158,8 @@ public void shouldIdleWhenAllTasksPaused() throws Exception { public void shouldResumeStandbyTask() throws Exception { final StandbyTask task = standbyTask(TASK_0_0, Set.of(TOPIC_PARTITION_A_0)).inState(State.RUNNING).build(); shouldResumeStatefulTask(task); + // shutdown ensures that the test does not end before changelog reader methods verified below are called + stateUpdater.shutdown(Duration.ofMinutes(1)); verify(changelogReader, times(2)).transitToUpdateStandby(); } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalProcessorContextImplTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalProcessorContextImplTest.java index f51b59f54aa5b..e4425c187b7fa 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalProcessorContextImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalProcessorContextImplTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.To; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.SessionStore; @@ -122,7 +121,7 @@ public 
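The DefaultStateUpdaterTest change above replaces an immediate assertFalse with a polled wait, and adds explicit shutdowns so the Mockito verifications cannot race the updater thread. A small sketch of the waitForCondition idiom from Kafka's TestUtils; the background thread, flag, and timeout are illustrative:

```java
import static org.apache.kafka.test.TestUtils.waitForCondition;

import java.util.concurrent.atomic.AtomicBoolean;

public class WaitForConditionSketch {

    public static void main(final String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        // Background worker flips the flag a little later, like the state updater
        // thread switching to non-running after a failed task is recorded.
        new Thread(() -> {
            try {
                Thread.sleep(200L);
            } catch (final InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            running.set(false);
        }).start();

        // Poll instead of asserting immediately so the check cannot race the other
        // thread; throws with the given message if the timeout elapses first.
        waitForCondition(
            () -> !running.get(),
            10_000L,
            "Did not switch to non-running within the given timeout!"
        );
    }
}
```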
void shouldNotAllowInitForKeyValueStore() { when(stateManager.globalStore(GLOBAL_KEY_VALUE_STORE_NAME)).thenReturn(mock(KeyValueStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_KEY_VALUE_STORE_NAME); try { - store.init((StateStoreContext) null, null); + store.init(null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } } @@ -132,7 +131,7 @@ public void shouldNotAllowInitForTimestampedKeyValueStore() { when(stateManager.globalStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME)).thenReturn(mock(TimestampedKeyValueStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME); try { - store.init((StateStoreContext) null, null); + store.init(null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } } @@ -142,7 +141,7 @@ public void shouldNotAllowInitForWindowStore() { when(stateManager.globalStore(GLOBAL_WINDOW_STORE_NAME)).thenReturn(mock(WindowStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_WINDOW_STORE_NAME); try { - store.init((StateStoreContext) null, null); + store.init(null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } } @@ -152,7 +151,7 @@ public void shouldNotAllowInitForTimestampedWindowStore() { when(stateManager.globalStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME)).thenReturn(mock(TimestampedWindowStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME); try { - store.init((StateStoreContext) null, null); + store.init(null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } } @@ -162,7 +161,7 @@ public void shouldNotAllowInitForSessionStore() { when(stateManager.globalStore(GLOBAL_SESSION_STORE_NAME)).thenReturn(mock(SessionStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_SESSION_STORE_NAME); try { - store.init((StateStoreContext) null, null); + store.init(null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java index ff461ccc90b4a..e28ef673f07b7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java @@ -19,7 +19,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.TimeoutException; @@ -161,7 +161,7 @@ public void before() { } }); stateDirectory = new StateDirectory(streamsConfig, time, true, false); - consumer = new MockConsumer<>(OffsetResetStrategy.NONE); + consumer = new MockConsumer<>(AutoOffsetResetStrategy.NONE.name()); stateManager = new GlobalStateManagerImpl( new LogContext("test"), 
time, @@ -579,7 +579,7 @@ private Map readOffsetsCheckpoint() throws IOException { @Test public void shouldNotRetryWhenEndOffsetsThrowsTimeoutExceptionAndTaskTimeoutIsZero() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized Map endOffsets(final Collection partitions) { numberOfCalls.incrementAndGet(); @@ -621,7 +621,7 @@ public synchronized Map endOffsets(final Collection(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized Map endOffsets(final Collection partitions) { time.sleep(100L); @@ -662,7 +662,7 @@ public synchronized Map endOffsets(final Collection(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized Map endOffsets(final Collection partitions) { time.sleep(100L); @@ -703,7 +703,7 @@ public synchronized Map endOffsets(final Collection(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized Map endOffsets(final Collection partitions) { time.sleep(1L); @@ -745,7 +745,7 @@ public synchronized long position(final TopicPartition partition) { @Test public void shouldNotRetryWhenPartitionsForThrowsTimeoutExceptionAndTaskTimeoutIsZero() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public List partitionsFor(final String topic) { numberOfCalls.incrementAndGet(); @@ -787,7 +787,7 @@ public List partitionsFor(final String topic) { @Test public void shouldRetryAtLeastOnceWhenPartitionsForThrowsTimeoutException() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public List partitionsFor(final String topic) { time.sleep(100L); @@ -828,7 +828,7 @@ public List partitionsFor(final String topic) { @Test public void shouldRetryWhenPartitionsForThrowsTimeoutExceptionUntilTaskTimeoutExpires() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public List partitionsFor(final String topic) { time.sleep(100L); @@ -869,7 +869,7 @@ public List partitionsFor(final String topic) { @Test public void shouldNotFailOnSlowProgressWhenPartitionForThrowsTimeoutException() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public List partitionsFor(final String topic) { time.sleep(1L); @@ -911,7 +911,7 @@ public synchronized long position(final TopicPartition partition) { @Test public void shouldNotRetryWhenPositionThrowsTimeoutExceptionAndTaskTimeoutIsZero() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized long position(final TopicPartition partition) { numberOfCalls.incrementAndGet(); @@ -953,7 +953,7 @@ public 
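These hunks replace the removed OffsetResetStrategy enum with the string name of the internal AutoOffsetResetStrategy when constructing MockConsumer. A hedged sketch of driving a MockConsumer built this way; the topic, partition, and offsets are illustrative:

```java
import java.time.Duration;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerExample {

    public static void main(final String[] args) {
        // The constructor now takes the reset strategy's name ("earliest", "latest", "none").
        final MockConsumer<byte[], byte[]> consumer =
            new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());

        final TopicPartition partition = new TopicPartition("test-topic", 0);
        consumer.assign(List.of(partition));
        consumer.updateBeginningOffsets(Map.of(partition, 0L));

        consumer.addRecord(new ConsumerRecord<>("test-topic", 0, 0L, new byte[0], new byte[0]));

        // Polls return whatever records were added to the mock.
        System.out.println(consumer.poll(Duration.ofMillis(100)).count());
    }
}
```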
synchronized long position(final TopicPartition partition) { @Test public void shouldRetryAtLeastOnceWhenPositionThrowsTimeoutException() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized long position(final TopicPartition partition) { time.sleep(100L); @@ -994,7 +994,7 @@ public synchronized long position(final TopicPartition partition) { @Test public void shouldRetryWhenPositionThrowsTimeoutExceptionUntilTaskTimeoutExpired() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized long position(final TopicPartition partition) { time.sleep(100L); @@ -1035,7 +1035,7 @@ public synchronized long position(final TopicPartition partition) { @Test public void shouldNotFailOnSlowProgressWhenPositionThrowsTimeoutException() { final AtomicInteger numberOfCalls = new AtomicInteger(0); - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized long position(final TopicPartition partition) { time.sleep(1L); @@ -1071,7 +1071,7 @@ public synchronized long position(final TopicPartition partition) { @Test public void shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore() { - consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public synchronized ConsumerRecords poll(final Duration timeout) { time.sleep(timeout.toMillis()); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStreamThreadTest.java index e6b409fed4c9e..e4f78c900d10a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStreamThreadTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStreamThreadTest.java @@ -18,7 +18,7 @@ import org.apache.kafka.clients.consumer.InvalidOffsetException; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; @@ -41,6 +41,7 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.test.MockStateRestoreListener; import org.apache.kafka.test.TestUtils; @@ -72,7 +73,7 @@ public class GlobalStreamThreadTest { private final InternalTopologyBuilder builder = new InternalTopologyBuilder(); - private final MockConsumer mockConsumer = new MockConsumer<>(OffsetResetStrategy.NONE); + private final MockConsumer mockConsumer = new MockConsumer<>(AutoOffsetResetStrategy.NONE.name()); private final MockTime time = new MockTime(); private final MockStateRestoreListener stateRestoreListener = new MockStateRestoreListener(); private GlobalStreamThread globalStreamThread; @@ -108,15 +109,17 @@ public 
void process(final Record record) { } }; + final StoreFactory storeFactory = + new KeyValueStoreMaterializer<>(materialized).withLoggingDisabled(); + final StoreBuilder storeBuilder = new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory); builder.addGlobalStore( - new KeyValueStoreMaterializer<>(materialized).withLoggingDisabled(), "sourceName", null, null, null, GLOBAL_STORE_TOPIC_NAME, "processorName", - processorSupplier, + new StoreDelegatingProcessorSupplier<>(processorSupplier, Set.of(storeBuilder)), false ); @@ -160,7 +163,7 @@ public void shouldThrowStreamsExceptionOnStartupIfThereIsAStreamsException() thr @Test public void shouldThrowStreamsExceptionOnStartupIfExceptionOccurred() throws Exception { - final MockConsumer mockConsumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer mockConsumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public List partitionsFor(final String topic) { throw new RuntimeException("KABOOM!"); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java index b0152da0be4ab..14470db2efa8d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java @@ -53,7 +53,7 @@ import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.processor.internals.InternalTopicManager.ValidationResult; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java index 057c2aa1e3032..5802518cd26a2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java @@ -16,22 +16,31 @@ */ package org.apache.kafka.streams.processor.internals; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; +import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.TopologyConfig; import org.apache.kafka.streams.TopologyDescription; import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler; import org.apache.kafka.streams.errors.LogAndFailExceptionHandler; import org.apache.kafka.streams.errors.TopologyException; +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.TopicNameExtractor; +import org.apache.kafka.streams.processor.api.FixedKeyProcessor; +import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; +import 
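addGlobalStore no longer takes a store factory directly; the tests instead pair the store builder with the processor supplier, apparently via the internal StoreDelegatingProcessorSupplier. The public-API counterpart of that pairing is returning the builder from ProcessorSupplier#stores(); a hedged sketch with an illustrative store name and serdes:

```java
import java.util.Set;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoreCarryingSupplier implements ProcessorSupplier<String, String, Void, Void> {

    private final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
        Stores.keyValueStoreBuilder(
            Stores.inMemoryKeyValueStore("my-store"),
            Serdes.String(),
            Serdes.String()
        ).withLoggingDisabled(); // global stores must not have a changelog

    // Returning the builder here lets the topology create and connect the
    // store automatically wherever this supplier is used.
    @Override
    public Set<StoreBuilder<?>> stores() {
        return Set.of(storeBuilder);
    }

    @Override
    public Processor<String, String, Void, Void> get() {
        return new Processor<String, String, Void, Void>() {
            private KeyValueStore<String, String> store;

            @Override
            public void init(final ProcessorContext<Void, Void> context) {
                store = context.getStateStore("my-store");
            }

            @Override
            public void process(final Record<String, String> record) {
                store.put(record.key(), record.value());
            }
        };
    }
}
```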
org.apache.kafka.streams.processor.api.FixedKeyRecord; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.ProcessorWrapper; +import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder.SubtopologyDescription; import org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology; import org.apache.kafka.streams.state.KeyValueStore; @@ -63,9 +72,11 @@ import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; import static org.apache.kafka.common.utils.Utils.mkProperties; +import static org.apache.kafka.streams.StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG; import static org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_0; import static org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_1; import static org.apache.kafka.streams.processor.internals.assignment.AssignmentTestUtils.SUBTOPOLOGY_2; +import static org.apache.kafka.streams.utils.TestUtils.dummyStreamsConfigMap; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -85,32 +96,48 @@ public class InternalTopologyBuilderTest { private final Serde stringSerde = Serdes.String(); private final InternalTopologyBuilder builder = new InternalTopologyBuilder(); - private final StoreFactory storeBuilder = new MockKeyValueStoreBuilder("testStore", false).asFactory(); + private final StoreBuilder storeBuilder = new MockKeyValueStoreBuilder("testStore", false); + private final StoreFactory storeFactory = new MockKeyValueStoreBuilder("testStore", false).asFactory(); @Test public void shouldAddSourceWithOffsetReset() { + final String noneTopic = "noneTopic"; final String earliestTopic = "earliestTopic"; final String latestTopic = "latestTopic"; + final String durationTopic = "durationTopic"; - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, null, null, earliestTopic); - builder.addSource(Topology.AutoOffsetReset.LATEST, "source2", null, null, null, latestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.none()), "source0", null, null, null, noneTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source1", null, null, null, earliestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source2", null, null, null, latestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.byDuration(Duration.ofSeconds(42))), "source3", null, null, null, durationTopic); builder.initializeSubscription(); - assertThat(builder.offsetResetStrategy(earliestTopic), equalTo(OffsetResetStrategy.EARLIEST)); - assertThat(builder.offsetResetStrategy(latestTopic), equalTo(OffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy(noneTopic), equalTo(AutoOffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy(earliestTopic), equalTo(AutoOffsetResetStrategy.EARLIEST)); + assertThat(builder.offsetResetStrategy(latestTopic), equalTo(AutoOffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy(durationTopic).type(), 
equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.offsetResetStrategy(durationTopic).duration().get().toSeconds(), equalTo(42L)); } @Test public void shouldAddSourcePatternWithOffsetReset() { + final String noneTopicPattern = "none.*Topic"; final String earliestTopicPattern = "earliest.*Topic"; final String latestTopicPattern = "latest.*Topic"; + final String durationTopicPattern = "duration.*Topic"; + + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.none()), "source0", null, null, null, Pattern.compile(noneTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "sourc1", null, null, null, Pattern.compile(earliestTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source2", null, null, null, Pattern.compile(latestTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.byDuration(Duration.ofSeconds(42))), "source3", null, null, null, Pattern.compile(durationTopicPattern)); - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, null, null, Pattern.compile(earliestTopicPattern)); - builder.addSource(Topology.AutoOffsetReset.LATEST, "source2", null, null, null, Pattern.compile(latestTopicPattern)); builder.initializeSubscription(); - assertThat(builder.offsetResetStrategy("earliestTestTopic"), equalTo(OffsetResetStrategy.EARLIEST)); - assertThat(builder.offsetResetStrategy("latestTestTopic"), equalTo(OffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy("noneTestTopic"), equalTo(AutoOffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy("earliestTestTopic"), equalTo(AutoOffsetResetStrategy.EARLIEST)); + assertThat(builder.offsetResetStrategy("latestTestTopic"), equalTo(AutoOffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy("durationTestTopic").type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.offsetResetStrategy("durationTestTopic").duration().get().toSeconds(), equalTo(42L)); } @Test @@ -120,7 +147,7 @@ public void shouldAddSourceWithoutOffsetReset() { assertEquals(Collections.singletonList("test-topic"), builder.fullSourceTopicNames()); - assertThat(builder.offsetResetStrategy("test-topic"), equalTo(OffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy("test-topic"), equalTo(null)); } @Test @@ -132,20 +159,20 @@ public void shouldAddPatternSourceWithoutOffsetReset() { assertThat(expectedPattern.pattern(), builder.sourceTopicPatternString(), equalTo("test-.*")); - assertThat(builder.offsetResetStrategy("test-topic"), equalTo(OffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy("test-topic"), equalTo(null)); } @Test public void shouldNotAllowOffsetResetSourceWithoutTopics() { - assertThrows(TopologyException.class, () -> builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", + assertThrows(TopologyException.class, () -> builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer())); } @Test public void shouldNotAllowOffsetResetSourceWithDuplicateSourceName() { - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-1"); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-1"); try { - builder.addSource(Topology.AutoOffsetReset.LATEST, "source", 
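The rewritten offset-reset tests cover all four policies of the new org.apache.kafka.streams.AutoOffsetReset class, including the duration-based one. A tiny sketch of just those public factory methods (the printing is only for illustration):

```java
import java.time.Duration;

import org.apache.kafka.streams.AutoOffsetReset;

public class AutoOffsetResetPolicies {

    public static void main(final String[] args) {
        // The four policies the tests above register sources with.
        final AutoOffsetReset none = AutoOffsetReset.none();
        final AutoOffsetReset earliest = AutoOffsetReset.earliest();
        final AutoOffsetReset latest = AutoOffsetReset.latest();
        // Duration-based reset: position relative to a point 42 seconds in the past.
        final AutoOffsetReset byDuration = AutoOffsetReset.byDuration(Duration.ofSeconds(42));

        System.out.println(none);
        System.out.println(earliest);
        System.out.println(latest);
        System.out.println(byDuration);
    }
}
```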
null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-2"); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-2"); fail("Should throw TopologyException for duplicate source name"); } catch (final TopologyException expected) { /* ok */ } } @@ -215,7 +242,6 @@ public void testAddGlobalStoreWithBadSupplier() { final IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> builder.addGlobalStore( - new MockKeyValueStoreBuilder("global-store", false).asFactory().withLoggingDisabled(), "globalSource", null, null, @@ -321,18 +347,20 @@ public void testPatternAndNameSourceTopics() { @Test public void testPatternSourceTopicsWithGlobalTopics() { + final StoreBuilder storeBuilder = + new MockKeyValueStoreBuilder("global-store", false) + .withLoggingDisabled(); builder.setApplicationId("X"); builder.addSource(null, "source-1", null, null, null, Pattern.compile("topic-1")); builder.addSource(null, "source-2", null, null, null, Pattern.compile("topic-2")); builder.addGlobalStore( - new MockKeyValueStoreBuilder("global-store", false).asFactory().withLoggingDisabled(), "globalSource", null, null, null, "globalTopic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(storeBuilder)), false ); builder.initializeSubscription(); @@ -346,18 +374,20 @@ public void testPatternSourceTopicsWithGlobalTopics() { @Test public void testNameSourceTopicsWithGlobalTopics() { + final StoreBuilder storeBuilder = + new MockKeyValueStoreBuilder("global-store", false) + .withLoggingDisabled(); builder.setApplicationId("X"); builder.addSource(null, "source-1", null, null, null, "topic-1"); builder.addSource(null, "source-2", null, null, null, "topic-2"); builder.addGlobalStore( - new MockKeyValueStoreBuilder("global-store", false).asFactory().withLoggingDisabled(), "globalSource", null, null, null, "globalTopic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(storeBuilder)), false ); builder.initializeSubscription(); @@ -417,14 +447,14 @@ public void testNamedTopicMatchesAlreadyProvidedPattern() { @Test public void testAddStateStoreWithNonExistingProcessor() { - assertThrows(TopologyException.class, () -> builder.addStateStore(storeBuilder, "no-such-processor")); + assertThrows(TopologyException.class, () -> builder.addStateStore(storeFactory, "no-such-processor")); } @Test public void testAddStateStoreWithSource() { builder.addSource(null, "source-1", null, null, null, "topic-1"); try { - builder.addStateStore(storeBuilder, "source-1"); + builder.addStateStore(storeFactory, "source-1"); fail("Should throw TopologyException with store cannot be added to source"); } catch (final TopologyException expected) { /* ok */ } } @@ -434,7 +464,7 @@ public void testAddStateStoreWithSink() { builder.addSource(null, "source-1", null, null, null, "topic-1"); builder.addSink("sink-1", "topic-1", null, null, null, "source-1"); try { - builder.addStateStore(storeBuilder, "sink-1"); + builder.addStateStore(storeFactory, "sink-1"); fail("Should throw TopologyException with store cannot be added to sink"); } catch (final TopologyException expected) { /* ok */ } } @@ -444,7 +474,7 @@ public void shouldNotAllowToAddStoresWithSameName() { final StoreBuilder> otherBuilder = new MockKeyValueStoreBuilder("testStore", false); - 
builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); final TopologyException exception = assertThrows( TopologyException.class, @@ -459,24 +489,23 @@ public void shouldNotAllowToAddStoresWithSameName() { @Test public void shouldNotAllowToAddStoresWithSameNameWhenFirstStoreIsGlobal() { - final StoreFactory globalBuilder = - new MockKeyValueStoreBuilder("testStore", false).asFactory().withLoggingDisabled(); + final StoreBuilder globalBuilder = + new MockKeyValueStoreBuilder("testStore", false).withLoggingDisabled(); builder.addGlobalStore( - globalBuilder, "global-store", null, null, null, "global-topic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(globalBuilder)), false ); final TopologyException exception = assertThrows( TopologyException.class, - () -> builder.addStateStore(storeBuilder) + () -> builder.addStateStore(storeFactory) ); assertThat( @@ -487,22 +516,21 @@ public void shouldNotAllowToAddStoresWithSameNameWhenFirstStoreIsGlobal() { @Test public void shouldNotAllowToAddStoresWithSameNameWhenSecondStoreIsGlobal() { - final StoreFactory globalBuilder = - new MockKeyValueStoreBuilder("testStore", false).asFactory().withLoggingDisabled(); + final StoreBuilder globalBuilder = + new MockKeyValueStoreBuilder("testStore", false).withLoggingDisabled(); - builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); final TopologyException exception = assertThrows( TopologyException.class, () -> builder.addGlobalStore( - globalBuilder, "global-store", null, null, null, "global-topic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(globalBuilder)), false ) ); @@ -515,34 +543,32 @@ public void shouldNotAllowToAddStoresWithSameNameWhenSecondStoreIsGlobal() { @Test public void shouldNotAllowToAddGlobalStoresWithSameName() { - final StoreFactory firstGlobalBuilder = - new MockKeyValueStoreBuilder("testStore", false).asFactory().withLoggingDisabled(); - final StoreFactory secondGlobalBuilder = - new MockKeyValueStoreBuilder("testStore", false).asFactory().withLoggingDisabled(); + final StoreBuilder> firstGlobalBuilder = + new MockKeyValueStoreBuilder("testStore", false).withLoggingDisabled(); + final StoreBuilder> secondGlobalBuilder = + new MockKeyValueStoreBuilder("testStore", false).withLoggingDisabled(); builder.addGlobalStore( - firstGlobalBuilder, "global-store", null, null, null, "global-topic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(firstGlobalBuilder)), false ); final TopologyException exception = assertThrows( TopologyException.class, () -> builder.addGlobalStore( - secondGlobalBuilder, "global-store-2", null, null, null, "global-topic", "global-processor-2", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(secondGlobalBuilder)), false ) ); @@ -555,35 +581,35 @@ public void shouldNotAllowToAddGlobalStoresWithSameName() { @Test public void testAddStateStore() { - builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); builder.setApplicationId("X"); builder.addSource(null, "source-1", null, null, null, "topic-1"); builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source-1"); assertEquals(0, builder.buildTopology().stateStores().size()); - 
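For orientation on the addGlobalStore churn in this file: the store no longer travels as a leading StoreFactory argument; the updated tests attach the StoreBuilder to the processor supplier instead. A condensed sketch of that pattern, reusing the MockKeyValueStoreBuilder and MockApiProcessorSupplier helpers already used here:

    final StoreBuilder globalBuilder =
        new MockKeyValueStoreBuilder("global-store", false).withLoggingDisabled();
    builder.addGlobalStore(
        "globalSource",
        null,               // timestamp extractor
        null,               // key deserializer
        null,               // value deserializer
        "globalTopic",
        "global-processor",
        // the store rides on the supplier rather than being passed as a StoreFactory argument
        new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(globalBuilder)),
        false);             // trailing boolean passed as false throughout these tests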
builder.connectProcessorAndStateStores("processor-1", storeBuilder.name()); + builder.connectProcessorAndStateStores("processor-1", storeFactory.storeName()); final List suppliers = builder.buildTopology().stateStores(); assertEquals(1, suppliers.size()); - assertEquals(storeBuilder.name(), suppliers.get(0).name()); + assertEquals(storeFactory.storeName(), suppliers.get(0).name()); } @Test public void testStateStoreNamesForSubtopology() { - builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); builder.setApplicationId("X"); builder.addSource(null, "source-1", null, null, null, "topic-1"); builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source-1"); - builder.connectProcessorAndStateStores("processor-1", storeBuilder.name()); + builder.connectProcessorAndStateStores("processor-1", storeFactory.storeName()); builder.addSource(null, "source-2", null, null, null, "topic-2"); builder.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source-2"); builder.buildTopology(); final Set stateStoreNames = builder.stateStoreNamesForSubtopology(0); - assertThat(stateStoreNames, equalTo(Set.of(storeBuilder.name()))); + assertThat(stateStoreNames, equalTo(Set.of(storeFactory.storeName()))); final Set emptyStoreNames = builder.stateStoreNamesForSubtopology(1); assertThat(emptyStoreNames, equalTo(Set.of())); @@ -597,13 +623,13 @@ public void shouldAllowAddingSameStoreBuilderMultipleTimes() { builder.setApplicationId("X"); builder.addSource(null, "source-1", null, null, null, "topic-1"); - builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source-1"); - builder.connectProcessorAndStateStores("processor-1", storeBuilder.name()); + builder.connectProcessorAndStateStores("processor-1", storeFactory.storeName()); - builder.addStateStore(storeBuilder); + builder.addStateStore(storeFactory); builder.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source-1"); - builder.connectProcessorAndStateStores("processor-2", storeBuilder.name()); + builder.connectProcessorAndStateStores("processor-2", storeFactory.storeName()); assertEquals(1, builder.buildTopology().stateStores().size()); } @@ -753,15 +779,16 @@ public void shouldAllowIncrementalBuilds() { assertNotEquals(oldNodeGroups, newNodeGroups); oldNodeGroups = newNodeGroups; + + final StoreBuilder globalBuilder = new MockKeyValueStoreBuilder("global-store", false).withLoggingDisabled(); builder.addGlobalStore( - new MockKeyValueStoreBuilder("global-store", false).asFactory().withLoggingDisabled(), "globalSource", null, null, null, "globalTopic", "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(globalBuilder)), false ); newNodeGroups = builder.nodeGroups(); @@ -869,7 +896,7 @@ private Set nodeNames(final Collection> nodes) public void shouldAssociateStateStoreNameWhenStateStoreSupplierIsInternal() { builder.addSource(null, "source", null, null, null, "topic"); builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source"); - builder.addStateStore(storeBuilder, "processor"); + builder.addStateStore(storeFactory, "processor"); final Map> stateStoreNameToSourceTopic = builder.stateStoreNameToFullSourceTopicNames(); assertEquals(1, stateStoreNameToSourceTopic.size()); assertEquals(Collections.singletonList("topic"), stateStoreNameToSourceTopic.get("testStore")); @@ -879,7 +906,7 @@ public void 
shouldAssociateStateStoreNameWhenStateStoreSupplierIsInternal() { public void shouldAssociateStateStoreNameWhenStateStoreSupplierIsExternal() { builder.addSource(null, "source", null, null, null, "topic"); builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source"); - builder.addStateStore(storeBuilder, "processor"); + builder.addStateStore(storeFactory, "processor"); final Map> stateStoreNameToSourceTopic = builder.stateStoreNameToFullSourceTopicNames(); assertEquals(1, stateStoreNameToSourceTopic.size()); assertEquals(Collections.singletonList("topic"), stateStoreNameToSourceTopic.get("testStore")); @@ -891,7 +918,7 @@ public void shouldCorrectlyMapStateStoreToInternalTopics() { builder.addInternalTopic("internal-topic", InternalTopicProperties.empty()); builder.addSource(null, "source", null, null, null, "internal-topic"); builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source"); - builder.addStateStore(storeBuilder, "processor"); + builder.addStateStore(storeFactory, "processor"); final Map> stateStoreNameToSourceTopic = builder.stateStoreNameToFullSourceTopicNames(); assertEquals(1, stateStoreNameToSourceTopic.size()); assertEquals(Collections.singletonList("appId-internal-topic"), stateStoreNameToSourceTopic.get("testStore")); @@ -965,7 +992,7 @@ public void shouldAddInternalTopicConfigForNonWindowNonVersionedStores() { builder.setApplicationId("appId"); builder.addSource(null, "source", null, null, null, "topic"); builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source"); - builder.addStateStore(storeBuilder, "processor"); + builder.addStateStore(storeFactory, "processor"); builder.buildTopology(); final Map topicGroups = builder.subtopologyToTopicsInfo(); final InternalTopologyBuilder.TopicsInfo topicsInfo = topicGroups.values().iterator().next(); @@ -1173,7 +1200,7 @@ public void shouldSortProcessorNodesCorrectly() { public void shouldConnectRegexMatchedTopicsToStateStore() { builder.addSource(null, "ingest", null, null, null, Pattern.compile("topic-\\d+")); builder.addProcessor("my-processor", new MockApiProcessorSupplier<>(), "ingest"); - builder.addStateStore(storeBuilder, "my-processor"); + builder.addStateStore(storeFactory, "my-processor"); final Set updatedTopics = new HashSet<>(); @@ -1185,7 +1212,7 @@ public void shouldConnectRegexMatchedTopicsToStateStore() { builder.setApplicationId("test-app"); final Map> stateStoreAndTopics = builder.stateStoreNameToFullSourceTopicNames(); - final List topics = stateStoreAndTopics.get(storeBuilder.name()); + final List topics = stateStoreAndTopics.get(storeFactory.storeName()); assertEquals(2, topics.size(), "Expected to contain two topics"); @@ -1198,14 +1225,13 @@ public void shouldConnectRegexMatchedTopicsToStateStore() { public void shouldNotAllowToAddGlobalStoreWithSourceNameEqualsProcessorName() { final String sameNameForSourceAndProcessor = "sameName"; assertThrows(TopologyException.class, () -> builder.addGlobalStore( - storeBuilder, sameNameForSourceAndProcessor, null, null, null, "anyTopicName", sameNameForSourceAndProcessor, - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(storeBuilder)), false )); } @@ -1341,16 +1367,17 @@ public void shouldHaveCorrectInternalTopicConfigWhenInternalTopicPropertiesAreNo public void shouldConnectGlobalStateStoreToInputTopic() { final String globalStoreName = "global-store"; final String globalTopic = "global-topic"; + final StoreBuilder storeBuilder = + new 
MockKeyValueStoreBuilder(globalStoreName, false).withLoggingDisabled(); builder.setApplicationId("X"); builder.addGlobalStore( - new MockKeyValueStoreBuilder(globalStoreName, false).asFactory().withLoggingDisabled(), "globalSource", null, null, null, globalTopic, "global-processor", - new MockApiProcessorSupplier<>(), + new StoreDelegatingProcessorSupplier<>(new MockApiProcessorSupplier<>(), Set.of(storeBuilder)), false ); builder.initializeSubscription(); @@ -1362,4 +1389,86 @@ public void shouldConnectGlobalStateStoreToInputTopic() { assertThat(builder.buildGlobalStateTopology().storeToChangelogTopic().get(globalStoreName), is(globalTopic)); } + + @Test + public void shouldWrapProcessorSupplier() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, ProcessorSkippingWrapper.class); + + final InternalTopologyBuilder builder = + new InternalTopologyBuilder(new TopologyConfig(new StreamsConfig(props))); + + final ProcessorSupplier throwingProcessorSupplier = + () -> (Processor) record -> { + throw new RuntimeException("oops, don't call process on me!"); + }; + + final ProcessorSupplier wrappedProcessorSupplier = + builder.wrapProcessorSupplier("name", throwingProcessorSupplier); + + final Processor throwingProcessor = throwingProcessorSupplier.get(); + final Processor wrappedProcessor = wrappedProcessorSupplier.get(); + + final Record input = new Record<>("key", "value", 0L); + assertThrows(RuntimeException.class, () -> throwingProcessor.process(input)); + // wrapped processor should not throw + wrappedProcessor.process(input); + } + + @Test + public void shouldWrapFixedKeyProcessorSupplier() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, ProcessorSkippingWrapper.class.getName()); + + final InternalTopologyBuilder builder = + new InternalTopologyBuilder(new TopologyConfig(new StreamsConfig(props))); + + final FixedKeyProcessorSupplier throwingProcessorSupplier = + () -> (FixedKeyProcessor) record -> { + throw new RuntimeException("oops, don't call process on me!"); + }; + + final FixedKeyProcessorSupplier wrappedProcessorSupplier = + builder.wrapFixedKeyProcessorSupplier("name", throwingProcessorSupplier); + + final FixedKeyProcessor throwingProcessor = throwingProcessorSupplier.get(); + final FixedKeyProcessor wrappedProcessor = wrappedProcessorSupplier.get(); + + // TODO: when we expose a public constructor for FixedKeyRecord we should pass in a real one here + final FixedKeyRecord input = null; + assertThrows(RuntimeException.class, () -> throwingProcessor.process(input)); + // wrapped processor should not throw + wrappedProcessor.process(input); + } + + @Test + public void shouldThrowOnInvalidProcessorWrapperClassName() { + final Map props = dummyStreamsConfigMap(); + props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, "invalid.class"); + + assertThrows( + ConfigException.class, + () -> new InternalTopologyBuilder(new TopologyConfig(new StreamsConfig(props))) + ); + } + + public static class ProcessorSkippingWrapper implements ProcessorWrapper { + + @Override + public WrappedProcessorSupplier wrapProcessorSupplier(final String processorName, + final ProcessorSupplier processorSupplier) { + return () -> (Processor) record -> { + // do nothing + }; + } + + @Override + public WrappedFixedKeyProcessorSupplier wrapFixedKeyProcessorSupplier(final String processorName, + final FixedKeyProcessorSupplier processorSupplier) { + return () -> (FixedKeyProcessor) record -> { + // do nothing + }; + } + } + } diff --git 
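The wrapper tests above are self-contained; as a complement, a hedged sketch of what a pass-through wrapper plugged in through the same config key might look like. The class name is illustrative, and the generic signatures are written out here even though the surrounding extract elides them.

    public static class PassThroughWrapper implements ProcessorWrapper {

        @Override
        public <KIn, VIn, KOut, VOut> WrappedProcessorSupplier<KIn, VIn, KOut, VOut> wrapProcessorSupplier(
            final String processorName,
            final ProcessorSupplier<KIn, VIn, KOut, VOut> processorSupplier
        ) {
            // Inspect or decorate per-processor here; this sketch simply forwards to the original supplier.
            // Note: a bare lambda/method reference does not carry over stores() from the original
            // supplier, which is fine for suppliers without connected stores, as in these tests.
            return processorSupplier::get;
        }

        @Override
        public <KIn, VIn, VOut> WrappedFixedKeyProcessorSupplier<KIn, VIn, VOut> wrapFixedKeyProcessorSupplier(
            final String processorName,
            final FixedKeyProcessorSupplier<KIn, VIn, VOut> processorSupplier
        ) {
            return processorSupplier::get;
        }
    }

    // Wired up exactly like ProcessorSkippingWrapper in the tests above:
    // props.put(PROCESSOR_WRAPPER_CLASS_CONFIG, PassThroughWrapper.class);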
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/KeyValueStoreMaterializerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/KeyValueStoreMaterializerTest.java index 0bd7ea3a73995..7228496dd3676 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/KeyValueStoreMaterializerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/KeyValueStoreMaterializerTest.java @@ -25,6 +25,7 @@ import org.apache.kafka.streams.kstream.internals.MaterializedInternal; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; +import org.apache.kafka.streams.state.DslStoreSuppliers; import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.TimestampedKeyValueStore; @@ -48,6 +49,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import static java.util.Collections.emptyMap; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -77,8 +79,14 @@ public class KeyValueStoreMaterializerTest { @BeforeEach public void setUp() { - doReturn(BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.class) - .when(streamsConfig).getClass(StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG); + doReturn(emptyMap()) + .when(streamsConfig).originals(); + doReturn(new BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers()) + .when(streamsConfig).getConfiguredInstance( + StreamsConfig.DSL_STORE_SUPPLIERS_CLASS_CONFIG, + DslStoreSuppliers.class, + emptyMap() + ); } private void mockInnerVersionedStore() { @@ -255,7 +263,7 @@ private TimestampedKeyValueStore getTimestampedStore( final MaterializedInternal> materialized) { final KeyValueStoreMaterializer materializer = new KeyValueStoreMaterializer<>(materialized); materializer.configure(streamsConfig); - return (TimestampedKeyValueStore) ((StoreFactory) materializer).build(); + return (TimestampedKeyValueStore) materializer.builder().build(); } @SuppressWarnings("unchecked") @@ -263,6 +271,6 @@ private VersionedKeyValueStore getVersionedStore( final MaterializedInternal> materialized) { final KeyValueStoreMaterializer materializer = new KeyValueStoreMaterializer<>(materialized); materializer.configure(streamsConfig); - return (VersionedKeyValueStore) ((StoreFactory) materializer).build(); + return (VersionedKeyValueStore) materializer.builder().build(); } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java index 1f4fec194840d..e29af81095bb7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java @@ -40,7 +40,7 @@ import org.apache.kafka.test.MockSourceNode; import org.apache.kafka.test.MockTimestampExtractor; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.hamcrest.Matchers; import org.junit.jupiter.api.Test; @@ -606,6 +606,88 @@ public void shouldUpdatePartitionQueuesShrinkAndExpand() { assertThat(group.nextRecord(new RecordInfo(), time.milliseconds()), nullValue()); // all available records removed } + @Test + public void 
shouldUpdateBufferSizeCorrectlyForSkippedRecords() { + final PartitionGroup group = new PartitionGroup( + logContext, + mkMap(mkEntry(partition1, queue1)), + tp -> OptionalLong.of(0L), + getValueSensor(metrics, lastLatenessValue), + enforcedProcessingSensor, + maxTaskIdleMs + ); + final List> list1 = Arrays.asList( + new ConsumerRecord<>("topic", 1, 1L, recordKey, recordValue), + new ConsumerRecord<>("topic", 1, 5L, recordKey, recordValue), + new ConsumerRecord<>( + "topic", + 1, + -1, // offset as invalid timestamp + -1, // invalid timestamp + TimestampType.CREATE_TIME, + 0, + 0, + recordKey, + recordValue, + new RecordHeaders(), + Optional.empty() + ), + new ConsumerRecord<>( + "topic", + 1, + 11, + 0, + TimestampType.CREATE_TIME, + 0, + 0, + new byte[0], // corrupted key + recordValue, + new RecordHeaders(), + Optional.empty() + ), + new ConsumerRecord<>( + "topic", + 1, + -1, // offset as invalid timestamp + -1, // invalid timestamp + TimestampType.CREATE_TIME, + 0, + 0, + recordKey, + recordValue, + new RecordHeaders(), + Optional.empty() + ), + new ConsumerRecord<>( + "topic", + 1, + 13, + 0, + TimestampType.CREATE_TIME, + 0, + 0, + recordKey, + new byte[0], // corrupted value + new RecordHeaders(), + Optional.empty() + ), + new ConsumerRecord<>("topic", 1, 20L, recordKey, recordValue) + ); + + group.addRawRecords(partition1, list1); + assertEquals(7, group.numBuffered()); + + group.nextRecord(new RecordInfo(), time.milliseconds()); + assertEquals(6, group.numBuffered()); + + // drain corrupted records + group.nextRecord(new RecordInfo(), time.milliseconds()); + assertEquals(1, group.numBuffered()); + + group.nextRecord(new RecordInfo(), time.milliseconds()); + assertEquals(0, group.numBuffered()); + } + @Test public void shouldNeverWaitIfIdlingIsDisabled() { final PartitionGroup group = new PartitionGroup( diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorContextImplTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorContextImplTest.java index 9fd6ecd513481..165fc35bad4d5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorContextImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorContextImplTest.java @@ -24,12 +24,13 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.PunctuationType; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.To; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorContext; +import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.query.Position; @@ -118,6 +119,7 @@ public class ProcessorContextImplTest { private KeyValueIterator allIter; @Mock private KeyValueIterator> timestampedAllIter; + @SuppressWarnings("rawtypes") @Mock private WindowStoreIterator windowStoreIter; @@ -941,7 +943,6 @@ private ProcessorContextImpl buildProcessorContextImpl(final StreamsConfig strea ); } - @SuppressWarnings("unchecked") 
private KeyValueStore keyValueStoreMock(final KeyValueStore keyValueStoreMock) { initStateStoreMock(keyValueStoreMock); @@ -976,7 +977,6 @@ private void mockKeyValueStoreOperation(final KeyValueStore keyVal }).when(keyValueStoreMock).delete(anyString()); } - @SuppressWarnings("unchecked") private TimestampedKeyValueStore timestampedKeyValueStoreMock(final TimestampedKeyValueStore timestampedKeyValueStoreMock) { initStateStoreMock(timestampedKeyValueStoreMock); @@ -1011,7 +1011,6 @@ private void mockTimestampedKeyValueOperation(final TimestampedKeyValueStore windowStoreMock(final WindowStore windowStore) { initStateStoreMock(windowStore); @@ -1024,7 +1023,6 @@ private WindowStore windowStoreMock(final WindowStore timestampedWindowStoreMock(final TimestampedWindowStore windowStore) { initStateStoreMock(windowStore); @@ -1037,7 +1035,6 @@ private TimestampedWindowStore timestampedWindowStoreMock(final Ti return windowStore; } - @SuppressWarnings("unchecked") private SessionStore sessionStoreMock(final SessionStore sessionStore) { initStateStoreMock(sessionStore); @@ -1081,16 +1078,17 @@ private void mockStateStoreFlush(final StateStore stateStore) { }).when(stateStore).flush(); } + @SuppressWarnings("rawtypes") private void doTest(final String name, final Consumer checker) { - @SuppressWarnings("deprecation") final org.apache.kafka.streams.processor.Processor processor = new org.apache.kafka.streams.processor.Processor() { + final Processor processor = new Processor<>() { @Override - public void init(final ProcessorContext context) { + public void init(final ProcessorContext context) { final T store = context.getStateStore(name); checker.accept(store); } @Override - public void process(final String k, final Long v) { + public void process(final Record record) { //No-op. 
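The anonymous processor above was migrated from the deprecated single-use PAPI to the current processor.api interfaces; for readers less familiar with the new shape, a minimal named processor in the same style. The class name, store name, and types are illustrative, and it relies on the processor.api imports added at the top of this file.

    private static final class StoreWritingProcessor implements Processor<String, Long, Void, Void> {
        private KeyValueStore<String, Long> store;

        @Override
        public void init(final ProcessorContext<Void, Void> context) {
            // The typed context is handed to init(); state stores are looked up by name.
            store = context.getStateStore("counts-store");
        }

        @Override
        public void process(final Record<String, Long> record) {
            // Record replaces the old (key, value) pair and also exposes timestamp and headers.
            store.put(record.key(), record.value());
        }
    }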
} @@ -1100,7 +1098,7 @@ public void close() { } }; - processor.init(context); + processor.init((ProcessorContext) context); } private void verifyStoreCannotBeInitializedOrClosed(final StateStore store) { @@ -1108,7 +1106,7 @@ private void verifyStoreCannotBeInitializedOrClosed(final StateStore store) { assertTrue(store.persistent()); assertTrue(store.isOpen()); - checkThrowsUnsupportedOperation(() -> store.init((StateStoreContext) null, null), "init()"); + checkThrowsUnsupportedOperation(() -> store.init(null, null), "init()"); checkThrowsUnsupportedOperation(store::close, "close()"); } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java index a46c54ee05beb..8c28ae6a33dcb 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java @@ -27,10 +27,10 @@ import org.apache.kafka.streams.errors.ProcessorStateException; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.errors.TaskCorruptedException; +import org.apache.kafka.streams.errors.internals.FailedProcessingException; import org.apache.kafka.streams.processor.CommitCallback; import org.apache.kafka.streams.processor.StateRestoreCallback; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata; import org.apache.kafka.streams.query.Position; @@ -38,6 +38,7 @@ import org.apache.kafka.streams.state.internals.CachedStateStore; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; import org.apache.kafka.streams.state.internals.StoreQueryUtils; +import org.apache.kafka.test.MockCachedKeyValueStore; import org.apache.kafka.test.MockKeyValueStore; import org.apache.kafka.test.MockRestoreCallback; import org.apache.kafka.test.TestUtils; @@ -57,6 +58,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -130,7 +132,7 @@ public class ProcessorStateManagerTest { @Mock private StateStoreMetadata storeMetadata; @Mock - private InternalProcessorContext context; + private InternalProcessorContext context; @BeforeEach public void setup() { @@ -312,7 +314,7 @@ public void shouldUnregisterChangelogsDuringClose() { stateMgr.registerStateStores(singletonList(store), context); verify(context).uninitialize(); - verify(store).init((StateStoreContext) context, store); + verify(store).init(context, store); stateMgr.registerStore(store, noopStateRestoreCallback, null); assertTrue(changelogReader.isPartitionRegistered(persistentStorePartition)); @@ -331,7 +333,7 @@ public void shouldRecycleStoreAndReregisterChangelog() { stateMgr.registerStateStores(singletonList(store), context); verify(context).uninitialize(); - verify(store).init((StateStoreContext) context, store); + verify(store).init(context, store); stateMgr.registerStore(store, noopStateRestoreCallback, null); assertTrue(changelogReader.isPartitionRegistered(persistentStorePartition)); @@ -354,7 +356,7 @@ public void shouldClearStoreCache() { stateMgr.registerStateStores(singletonList(store), 
context); verify(context).uninitialize(); - verify(store).init((StateStoreContext) context, store); + verify(store).init(context, store); stateMgr.registerStore(store, noopStateRestoreCallback, null); assertTrue(changelogReader.isPartitionRegistered(persistentStorePartition)); @@ -771,6 +773,64 @@ public void close() { assertEquals(exception, thrown); } + @Test + public void shouldThrowProcessorStateExceptionOnFlushIfStoreThrowsAFailedProcessingException() { + final RuntimeException exception = new RuntimeException("KABOOM!"); + final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); + final MockKeyValueStore stateStore = new MockKeyValueStore(persistentStoreName, true) { + @Override + public void flush() { + throw new FailedProcessingException("processor", exception); + } + }; + stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null); + + final ProcessorStateException thrown = assertThrows(ProcessorStateException.class, stateManager::flush); + assertEquals(exception, thrown.getCause()); + assertFalse(exception.getMessage().contains("FailedProcessingException")); + assertFalse(Arrays.stream(thrown.getStackTrace()).anyMatch( + element -> element.getClassName().contains(FailedProcessingException.class.getSimpleName()))); + } + + @Test + public void shouldThrowProcessorStateExceptionOnFlushCacheIfStoreThrowsAFailedProcessingException() { + final RuntimeException exception = new RuntimeException("KABOOM!"); + final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); + final MockCachedKeyValueStore stateStore = new MockCachedKeyValueStore(persistentStoreName, true) { + @Override + public void flushCache() { + throw new FailedProcessingException("processor", exception); + } + }; + stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null); + + final ProcessorStateException thrown = assertThrows(ProcessorStateException.class, stateManager::flushCache); + assertEquals(exception, thrown.getCause()); + assertFalse(exception.getMessage().contains("FailedProcessingException")); + assertFalse(Arrays.stream(thrown.getStackTrace()).anyMatch( + element -> element.getClassName().contains(FailedProcessingException.class.getSimpleName()))); + + } + + @Test + public void shouldThrowProcessorStateExceptionOnCloseIfStoreThrowsAFailedProcessingException() { + final RuntimeException exception = new RuntimeException("KABOOM!"); + final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); + final MockKeyValueStore stateStore = new MockKeyValueStore(persistentStoreName, true) { + @Override + public void close() { + throw new FailedProcessingException("processor", exception); + } + }; + stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null); + + final ProcessorStateException thrown = assertThrows(ProcessorStateException.class, stateManager::close); + assertEquals(exception, thrown.getCause()); + assertFalse(exception.getMessage().contains("FailedProcessingException")); + assertFalse(Arrays.stream(thrown.getStackTrace()).anyMatch( + element -> element.getClassName().contains(FailedProcessingException.class.getSimpleName()))); + } + @Test public void shouldThrowIfRestoringUnregisteredStore() { final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE); @@ -1153,7 +1213,7 @@ public Position getCheckpointedPosition() { } @Override - public void onCommit() throws IOException { + public void onCommit() { StoreQueryUtils.checkpointPosition(checkpointFile, position); } 
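The three new tests above pin down an unwrapping contract: a FailedProcessingException thrown from a store callback must not leak as the cause, only the user exception wrapped inside it should surface. A hedged illustration of that contract (not the actual ProcessorStateManager code):

    try {
        store.flush();
    } catch (final FailedProcessingException wrapped) {
        // Re-throw with the user code's original exception so the internal wrapper type
        // never shows up in the cause chain or stack trace of the ProcessorStateException.
        throw new ProcessorStateException("Failed to flush state store " + store.name(), wrapped.getCause());
    } catch (final RuntimeException fatal) {
        throw new ProcessorStateException("Failed to flush state store " + store.name(), fatal);
    }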
} @@ -1194,5 +1254,5 @@ private static class ConverterStore extends MockKeyValueStore implements Timesta } } - interface CachingStore extends CachedStateStore, StateStore { } + interface CachingStore extends CachedStateStore, StateStore { } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java index 57a7dd3682099..a578e5b25f23e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorTopologyTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.IntegerDeserializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.serialization.StringDeserializer; @@ -109,8 +110,8 @@ public void setup() { // Create a new directory in which we'll put all of the state for this test, enabling running tests in parallel ... final File localState = TestUtils.tempDirectory(); props.setProperty(StreamsConfig.STATE_DIR_CONFIG, localState.getAbsolutePath()); - props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); - props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); + props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); props.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, CustomTimestampExtractor.class.getName()); } @@ -125,7 +126,7 @@ public void cleanup() { private List> prefixScanResults(final KeyValueStore store, final String prefix) { final List> results = new ArrayList<>(); - try (final KeyValueIterator prefixScan = store.prefixScan(prefix, Serdes.String().serializer())) { + try (final KeyValueIterator prefixScan = store.prefixScan(prefix, new StringSerializer())) { while (prefixScan.hasNext()) { final KeyValue next = prefixScan.next(); results.add(next); @@ -281,7 +282,7 @@ public void testDrivingSimpleTopology() { driver = new TopologyTestDriver(createSimpleTopology(partition), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER, Instant.ofEpochMilli(0L), Duration.ZERO); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); assertNextOutputRecord(outputTopic1.readRecord(), "key1", "value1"); @@ -305,7 +306,7 @@ public void testDrivingSimpleTopologyWithDroppingPartitioner() { driver = new TopologyTestDriver(createSimpleTopologyWithDroppingPartitioner(), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER, Instant.ofEpochMilli(0L), Duration.ZERO); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + 
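On the setup() change near the top of this file's hunks: the two ways of spelling the default serde config are equivalent; the new form just avoids instantiating a serde only to read its class name. For reference:

    // Both properties resolve to org.apache.kafka.common.serialization.Serdes$StringSerde;
    // the second form skips creating a throwaway Serde instance.
    props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName());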
driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); assertTrue(outputTopic1.isEmpty()); @@ -317,7 +318,7 @@ public void testDrivingStatefulTopology() { driver = new TopologyTestDriver(createStatefulTopology(storeName), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -337,39 +338,7 @@ public void testDrivingConnectedStateStoreTopology() { driver = new TopologyTestDriver(createConnectedStateStoreTopology("connectedStore"), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore("connectedStore"); - assertEquals("value4", store.get("key1")); - assertEquals("value2", store.get("key2")); - assertEquals("value3", store.get("key3")); - assertNull(store.get("key4")); - } - - @Deprecated // testing old PAPI - @Test - public void testDrivingConnectedStateStoreInDifferentProcessorsTopologyWithOldAPI() { - final String storeName = "connectedStore"; - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(storeName), Serdes.String(), Serdes.String()); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addSource("source2", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_2) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(storeName), Collections.singleton(storeBuilder)), "source1") - .addProcessor("processor2", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(storeName), Collections.singleton(storeBuilder)), "source2") - .addSink("counts", OUTPUT_TOPIC_1, "processor1", "processor2"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -400,7 +369,7 @@ public void testDrivingConnectedStateStoreInDifferentProcessorsTopology() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -430,7 +399,7 @@ public void 
testPrefixScanInMemoryStoreNoCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -465,7 +434,7 @@ public void testPrefixScanInMemoryStoreWithCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -500,7 +469,7 @@ public void testPrefixScanInMemoryStoreWithCachingWithLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -535,7 +504,7 @@ public void testPrefixScanPersistentStoreNoCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -570,7 +539,7 @@ public void testPrefixScanPersistentStoreWithCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -605,7 +574,7 @@ public void testPrefixScanPersistentStoreWithCachingWithLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -640,7 +609,7 @@ public void testPrefixScanPersistentTimestampedStoreNoCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); 
inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -675,7 +644,7 @@ public void testPrefixScanPersistentTimestampedStoreWithCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -710,7 +679,7 @@ public void testPrefixScanPersistentTimestampedStoreWithCachingWithLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -745,7 +714,7 @@ public void testPrefixScanLruMapNoCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -780,7 +749,7 @@ public void testPrefixScanLruMapWithCachingNoLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -815,439 +784,7 @@ public void testPrefixScanLruMapWithCachingWithLogging() { final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanInMemoryStoreNoCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingDisabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, 
INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanInMemoryStoreWithCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanInMemoryStoreWithCachingWithLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingEnabled(Collections.emptyMap()); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, 
STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentStoreNoCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingDisabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentStoreWithCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final 
KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentStoreWithCachingWithLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingEnabled(Collections.emptyMap()); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentTimestampedStoreNoCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentTimestampedKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingDisabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", 
results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentTimestampedStoreWithCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentTimestampedKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanPersistentTimestampedStoreWithCachingWithLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.persistentTimestampedKeyValueStore(DEFAULT_STORE_NAME), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingEnabled(Collections.emptyMap()); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanLruMapNoCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.lruMap(DEFAULT_STORE_NAME, 100), Serdes.String(), 
Serdes.String()) - .withCachingDisabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanLruMapWithCachingNoLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.lruMap(DEFAULT_STORE_NAME, 100), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingDisabled(); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); - - inputTopic.pipeInput("key1", "value1"); - inputTopic.pipeInput("key2", "value2"); - inputTopic.pipeInput("key3", "value3"); - inputTopic.pipeInput("key1", "value4"); - assertTrue(outputTopic1.isEmpty()); - - final KeyValueStore store = driver.getKeyValueStore(DEFAULT_STORE_NAME); - final List> results = prefixScanResults(store, DEFAULT_PREFIX); - - assertEquals("key1", results.get(0).key); - assertEquals("value4", results.get(0).value); - assertEquals("key2", results.get(1).key); - assertEquals("value2", results.get(1).value); - assertEquals("key3", results.get(2).key); - assertEquals("value3", results.get(2).value); - - } - - @Deprecated // testing old PAPI - @Test - public void testPrefixScanLruMapWithCachingWithLoggingOldProcessor() { - final StoreBuilder> storeBuilder = - Stores.keyValueStoreBuilder(Stores.lruMap(DEFAULT_STORE_NAME, 100), Serdes.String(), Serdes.String()) - .withCachingEnabled() - .withLoggingEnabled(Collections.emptyMap()); - topology - .addSource("source1", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor1", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(DEFAULT_STORE_NAME), Collections.singleton(storeBuilder)), "source1") - .addSink("counts", OUTPUT_TOPIC_1, "processor1"); - - driver = new 
TopologyTestDriver(topology, props); - - final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); - final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.Integer().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new IntegerDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); inputTopic.pipeInput("key2", "value2"); @@ -1273,9 +810,9 @@ public void testDrivingSimpleMultiSourceTopology() { driver = new TopologyTestDriver(createSimpleMultiSourceTopology(partition), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER, Instant.ofEpochMilli(0L), Duration.ZERO); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); final TestOutputTopic outputTopic2 = - driver.createOutputTopic(OUTPUT_TOPIC_2, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_2, new StringDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1"); assertNextOutputRecord(outputTopic1.readRecord(), "key1", "value1"); @@ -1295,7 +832,7 @@ public void testDrivingForwardToSourceTopology() { inputTopic.pipeInput("key2", "value2"); inputTopic.pipeInput("key3", "value3"); final TestOutputTopic outputTopic2 = - driver.createOutputTopic(OUTPUT_TOPIC_2, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_2, new StringDeserializer(), new StringDeserializer()); assertNextOutputRecord(outputTopic2.readRecord(), "key1", "value1"); assertNextOutputRecord(outputTopic2.readRecord(), "key2", "value2"); assertNextOutputRecord(outputTopic2.readRecord(), "key3", "value3"); @@ -1383,7 +920,7 @@ public void shouldConsiderTimeStamps() { inputTopic.pipeInput("key2", "value2", 20L); inputTopic.pipeInput("key3", "value3", 30L); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); assertNextOutputRecord(outputTopic1.readRecord(), "key1", "value1", 10L); assertNextOutputRecord(outputTopic1.readRecord(), "key2", "value2", 20L); assertNextOutputRecord(outputTopic1.readRecord(), "key3", "value3", 30L); @@ -1398,7 +935,7 @@ public void shouldConsiderModifiedTimeStamps() { inputTopic.pipeInput("key2", "value2", 20L); inputTopic.pipeInput("key3", "value3", 30L); final TestOutputTopic outputTopic1 = - driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); assertNextOutputRecord(outputTopic1.readRecord(), "key1", "value1", 20L); assertNextOutputRecord(outputTopic1.readRecord(), "key2", "value2", 30L); assertNextOutputRecord(outputTopic1.readRecord(), "key3", "value3", 40L); @@ -1410,9 +947,9 @@ public void shouldConsiderModifiedTimeStampsForMultipleProcessors() { driver = new TopologyTestDriver(createMultiProcessorTimestampTopology(partition), props); final TestInputTopic inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER); final TestOutputTopic outputTopic1 = - 
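All of these prefix-scan variants read the store back through a prefixScanResults(store, prefix) helper whose definition is outside this hunk; the following is only a hypothetical sketch of what such a helper typically looks like (name and signature assumed).

    // Hypothetical sketch of a prefixScanResults-style helper: drain
    // KeyValueStore#prefixScan into a list and always close the iterator.
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.kafka.common.serialization.StringSerializer;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.state.KeyValueIterator;
    import org.apache.kafka.streams.state.KeyValueStore;

    final class PrefixScanSketch {
        private PrefixScanSketch() { }

        static List<KeyValue<String, String>> prefixScanResults(final KeyValueStore<String, String> store,
                                                                final String prefix) {
            final List<KeyValue<String, String>> results = new ArrayList<>();
            // prefixScan returns all entries whose key starts with the prefix, in ascending key order
            try (final KeyValueIterator<String, String> iterator = store.prefixScan(prefix, new StringSerializer())) {
                while (iterator.hasNext()) {
                    results.add(iterator.next());
                }
            }
            return results;
        }
    }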
driver.createOutputTopic(OUTPUT_TOPIC_1, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_1, new StringDeserializer(), new StringDeserializer()); final TestOutputTopic outputTopic2 = - driver.createOutputTopic(OUTPUT_TOPIC_2, Serdes.String().deserializer(), Serdes.String().deserializer()); + driver.createOutputTopic(OUTPUT_TOPIC_2, new StringDeserializer(), new StringDeserializer()); inputTopic.pipeInput("key1", "value1", 10L); assertNextOutputRecord(outputTopic1.readRecord(), "key1", "value1", 10L); @@ -1568,21 +1105,19 @@ private Topology createSimpleTopologyWithDroppingPartitioner() { .addSink("sink", OUTPUT_TOPIC_1, new DroppingPartitioner(), "processor"); } - @Deprecated // testing old PAPI private Topology createStatefulTopology(final String storeName) { return topology .addSource("source", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor", define(new OldAPIStatefulProcessor(storeName)), "source") + .addProcessor("processor", () -> new StatefulProcessor(storeName), "source") .addStateStore(Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(storeName), Serdes.String(), Serdes.String()), "processor") .addSink("counts", OUTPUT_TOPIC_1, "processor"); } - @Deprecated // testing old PAPI private Topology createConnectedStateStoreTopology(final String storeName) { final StoreBuilder> storeBuilder = Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(storeName), Serdes.String(), Serdes.String()); return topology .addSource("source", STRING_DESERIALIZER, STRING_DESERIALIZER, INPUT_TOPIC_1) - .addProcessor("processor", defineWithStoresOldAPI(() -> new OldAPIStatefulProcessor(storeName), Collections.singleton(storeBuilder)), "source") + .addProcessor("processor", defineWithStores(() -> new StatefulProcessor(storeName), Collections.singleton(storeBuilder)), "source") .addSink("counts", OUTPUT_TOPIC_1, "processor"); } @@ -1729,30 +1264,6 @@ public void process(final Record record) { } } - /** - * A processor that stores each key-value pair in an in-memory key-value store registered with the context. - */ - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. - protected static class OldAPIStatefulProcessor extends org.apache.kafka.streams.processor.AbstractProcessor { - private KeyValueStore store; - private final String storeName; - - OldAPIStatefulProcessor(final String storeName) { - this.storeName = storeName; - } - - @Override - public void init(final org.apache.kafka.streams.processor.ProcessorContext context) { - super.init(context); - store = context.getStateStore(storeName); - } - - @Override - public void process(final String key, final String value) { - store.put(key, value); - } - } - /** * A processor that stores each key-value pair in an in-memory key-value store registered with the context. */ @@ -1775,30 +1286,9 @@ public void process(final Record record) { } } - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. - private org.apache.kafka.streams.processor.ProcessorSupplier define(final org.apache.kafka.streams.processor.Processor processor) { - return () -> processor; - } - - @SuppressWarnings("deprecation") // Old PAPI. Needs to be migrated. 
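The deleted OldAPIStatefulProcessor and old-style ProcessorSupplier helpers are superseded by the org.apache.kafka.streams.processor.api types used by the remaining StatefulProcessor and defineWithStores code. A rough sketch of that replacement pattern (class name illustrative, not taken from the diff):

    // Sketch: a stateful processor on the current Processor API, writing every
    // record into a connected String/String key-value store looked up by name.
    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;
    import org.apache.kafka.streams.state.KeyValueStore;

    class SketchStatefulProcessor implements Processor<String, String, Void, Void> {
        private final String storeName;
        private KeyValueStore<String, String> store;

        SketchStatefulProcessor(final String storeName) {
            this.storeName = storeName;
        }

        @Override
        public void init(final ProcessorContext<Void, Void> context) {
            // the store is retrieved from the context by name, instead of via the old typed ProcessorContext
            store = context.getStateStore(storeName);
        }

        @Override
        public void process(final Record<String, String> record) {
            store.put(record.key(), record.value());
        }
    }

The retained defineWithStores helper attaches the StoreBuilder through ProcessorSupplier#stores(), which is why a separate old-API supplier is no longer needed.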
- private org.apache.kafka.streams.processor.ProcessorSupplier defineWithStoresOldAPI(final Supplier> supplier, - final Set> stores) { - return new org.apache.kafka.streams.processor.ProcessorSupplier() { - @Override - public org.apache.kafka.streams.processor.Processor get() { - return supplier.get(); - } - - @Override - public Set> stores() { - return stores; - } - }; - } - private ProcessorSupplier defineWithStores(final Supplier> supplier, final Set> stores) { - return new ProcessorSupplier() { + return new ProcessorSupplier<>() { @Override public Processor get() { return supplier.get(); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java index f7ec5784890c0..b01b87ed85f82 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java @@ -33,6 +33,7 @@ import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.TransactionAbortedException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; @@ -59,7 +60,8 @@ import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.test.InternalMockProcessorContext; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.filter.ThresholdFilter; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -135,7 +137,7 @@ public class RecordCollectorTest { (topic, key, value, numPartitions) -> Optional.of(Collections.singleton(Integer.parseInt(key) % numPartitions)); private final MockProducer mockProducer - = new MockProducer<>(cluster, true, new ByteArraySerializer(), new ByteArraySerializer()); + = new MockProducer<>(cluster, true, new org.apache.kafka.clients.producer.RoundRobinPartitioner(), new ByteArraySerializer(), new ByteArraySerializer()); private StreamsProducer streamsProducer; private ProcessorTopology topology; @@ -546,10 +548,10 @@ public Optional> partitions(final String topic, final String key, f final Map offsets = collector.offsets(); - // with mock producer without specific partition, we would use default producer partitioner with murmur hash - assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0))); + // with mock producer without specific partition, we would use roundrobin producer partitioner + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0))); assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1))); - assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2))); + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 2))); assertEquals(9, mockProducer.history().size()); } @@ -600,10 +602,10 @@ public void shouldUseDefaultPartitionerAsPartitionReturnsEmptyOptional() { final Map offsets = collector.offsets(); - // with mock producer without specific partition, we would use default producer partitioner with murmur hash - assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0))); + // with mock producer without specific partition, we would use roundrobin producer 
partitioner + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0))); assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1))); - assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2))); + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 2))); assertEquals(9, mockProducer.history().size()); } @@ -651,10 +653,10 @@ public void shouldUseDefaultPartitionerAsStreamPartitionerIsNull() { final Map offsets = collector.offsets(); - // with mock producer without specific partition, we would use default producer partitioner with murmur hash - assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0))); + // with mock producer without specific partition, we would use roundrobin producer partitioner + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0))); assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1))); - assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2))); + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 2))); assertEquals(9, mockProducer.history().size()); } @@ -674,10 +676,10 @@ public void shouldSendWithNoPartition() { final Map offsets = collector.offsets(); - // with mock producer without specific partition, we would use default producer partitioner with murmur hash - assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0))); + // with mock producer without specific partition, we would use roundrobin producer partitioner + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0))); assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1))); - assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2))); + assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 2))); assertEquals(9, mockProducer.history().size()); } @@ -1283,7 +1285,7 @@ public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContin try (final LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) { - logCaptureAppender.setThreshold(Level.INFO); + logCaptureAppender.addFilter(ThresholdFilter.createFilter(Level.INFO, null, null)); collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, sinkNodeName, context, streamPartitioner); collector.flush(); @@ -1430,7 +1432,7 @@ public void shouldNotAbortTxnOnEOSCloseDirtyIfNothingSent() { logContext, taskId, new StreamsProducer( - new MockProducer<>(cluster, true, byteArraySerializer, byteArraySerializer) { + new MockProducer<>(cluster, true, null, byteArraySerializer, byteArraySerializer) { @Override public void abortTransaction() { functionCalled.set(true); @@ -1455,7 +1457,7 @@ public void shouldThrowIfTopicIsUnknownOnSendWithPartitioner() { logContext, taskId, new StreamsProducer( - new MockProducer<>(cluster, true, byteArraySerializer, byteArraySerializer) { + new MockProducer<>(cluster, true, null, byteArraySerializer, byteArraySerializer) { @Override public List partitionsFor(final String topic) { return Collections.emptyList(); @@ -1805,6 +1807,31 @@ public void shouldNotCallProductionExceptionHandlerOnClassCastException() { } } + @Test + public void shouldSwallowTransactionAbortedExceptionAndNotCallProductionExceptionHandler() { + final MockProducer mockProducer = new MockProducer<>( + cluster, + false, + new org.apache.kafka.clients.producer.RoundRobinPartitioner(), + new ByteArraySerializer(), + new ByteArraySerializer() + ); + streamsProducer = new StreamsProducer( + mockProducer, + EXACTLY_ONCE_V2, + Time.SYSTEM, + logContext + ); + + 
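The new test relies on MockProducer's error-injection behaviour when autoComplete is false; a minimal, self-contained sketch of that mechanism (cluster, topic, and record contents are assumptions for illustration):

    // Sketch: with autoComplete = false, each send() stays pending until
    // completeNext() or errorNext(exception) resolves it.
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;

    import org.apache.kafka.clients.producer.MockProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RoundRobinPartitioner;
    import org.apache.kafka.common.Cluster;
    import org.apache.kafka.common.Node;
    import org.apache.kafka.common.PartitionInfo;
    import org.apache.kafka.common.errors.TransactionAbortedException;
    import org.apache.kafka.common.serialization.ByteArraySerializer;

    public class MockProducerErrorInjectionSketch {
        public static void main(final String[] args) {
            final Node node = new Node(0, "localhost", 9092);
            final Cluster cluster = new Cluster(
                "sketch-cluster",
                Collections.singletonList(node),
                Collections.singletonList(new PartitionInfo("topic", 0, node, new Node[]{node}, new Node[]{node})),
                Collections.emptySet(),
                Collections.emptySet()
            );

            final MockProducer<byte[], byte[]> producer = new MockProducer<>(
                cluster,
                false,                           // autoComplete off, so errors can be injected per record
                new RoundRobinPartitioner(),
                new ByteArraySerializer(),
                new ByteArraySerializer()
            );

            producer.send(new ProducerRecord<>("topic", "value".getBytes(StandardCharsets.UTF_8)));
            producer.errorNext(new TransactionAbortedException()); // fails the oldest pending send
        }
    }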
final RecordCollector collector = newRecordCollector(new ProductionExceptionHandlerMock()); + collector.initialize(); + + collector.send(topic, "key", "val", null, 0, null, stringSerializer, stringSerializer, sinkNodeName, context); + mockProducer.errorNext(new TransactionAbortedException()); // error out the send() call + + collector.flush(); // need to call flush() to check for internal exceptions + } + @Test public void shouldNotSendIfSendOfOtherTaskFailedInCallback() { final TaskId taskId1 = new TaskId(0, 0); @@ -1884,7 +1911,7 @@ public byte[] serialize(final String topic, final Headers headers, final String private StreamsProducer getExceptionalStreamsProducerOnSend(final Exception exception) { return new StreamsProducer( - new MockProducer<>(cluster, true, byteArraySerializer, byteArraySerializer) { + new MockProducer<>(cluster, true, null, byteArraySerializer, byteArraySerializer) { @Override public synchronized Future send(final ProducerRecord record, final Callback callback) { callback.onCompletion(null, exception); @@ -1899,7 +1926,7 @@ public synchronized Future send(final ProducerRecord(cluster, true, byteArraySerializer, byteArraySerializer) { + new MockProducer<>(cluster, true, null, byteArraySerializer, byteArraySerializer) { @Override public synchronized List partitionsFor(final String topic) { throw exception; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateConsumerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateConsumerTest.java index 11271c8471afe..afe01ff433c69 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateConsumerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateConsumerTest.java @@ -18,7 +18,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.LogContext; @@ -39,7 +39,7 @@ public class StateConsumerTest { private final TopicPartition topicOne = new TopicPartition("topic-one", 1); private final TopicPartition topicTwo = new TopicPartition("topic-two", 1); - private final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + private final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); private final Map partitionOffsets = new HashMap<>(); private final LogContext logContext = new LogContext("test "); private GlobalStreamThread.StateConsumer stateConsumer; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java index 0e85e392e3188..320494c534850 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java @@ -27,7 +27,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.KafkaException; import 
org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; @@ -45,7 +45,7 @@ import org.apache.kafka.test.MockStateRestoreListener; import org.apache.kafka.test.StreamsTestUtils; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; @@ -140,7 +140,7 @@ public void onRestoreEnd(final TopicPartition tp, final String store, final long } }; - private final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + private final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); private final MockAdminClient adminClient = new MockAdminClient(); private final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback, standbyListener); @@ -389,7 +389,7 @@ public void shouldTriggerRestoreListenerWithOffsetZeroIfPositionThrowsTimeoutExc adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L)); - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public long position(final TopicPartition partition) { throw new TimeoutException("KABOOM!"); @@ -674,7 +674,7 @@ public void shouldRequestPositionAndHandleTimeoutException() { when(activeStateManager.taskId()).thenReturn(taskId); final AtomicBoolean clearException = new AtomicBoolean(false); - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public long position(final TopicPartition partition) { if (clearException.get()) { @@ -720,7 +720,7 @@ public void shouldThrowIfPositionFail() { when(activeStateManager.taskId()).thenReturn(taskId); when(storeMetadata.offset()).thenReturn(10L); - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public long position(final TopicPartition partition) { throw kaboom; @@ -770,7 +770,7 @@ public ListOffsetsResult listOffsets(final Map topic }; adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L)); - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public Map committed(final Set partitions) { throw new AssertionError("Should not trigger this function"); @@ -928,7 +928,7 @@ public synchronized ListConsumerGroupOffsetsResult listConsumerGroupOffsets(fina @Test public void shouldThrowIfUnsubscribeFail() { - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public void unsubscribe() { throw kaboom; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java index b36f5e41e47db..bcf24ee7df888 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java @@ -21,7 +21,7 @@ import 
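The recurring MockConsumer change in these files swaps the removed OffsetResetStrategy enum for the string name of AutoOffsetResetStrategy. A minimal sketch of the updated construction and usage (topic and partition names assumed):

    // Sketch: MockConsumer now takes the reset policy as a String, derived here
    // from AutoOffsetResetStrategy instead of the old OffsetResetStrategy enum.
    import java.time.Duration;
    import java.util.Collections;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.MockConsumer;
    import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy;
    import org.apache.kafka.common.TopicPartition;

    public class MockConsumerResetSketch {
        public static void main(final String[] args) {
            final TopicPartition tp = new TopicPartition("topic", 0);
            final MockConsumer<byte[], byte[]> consumer =
                new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());

            consumer.assign(Collections.singleton(tp));
            consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L)); // needed so EARLIEST reset can resolve a position
            consumer.addRecord(new ConsumerRecord<>("topic", 0, 0L, new byte[0], new byte[0]));
            System.out.println(consumer.poll(Duration.ZERO).count()); // 1
        }
    }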
org.apache.kafka.clients.consumer.InvalidOffsetException; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; @@ -157,8 +157,8 @@ public class StreamTaskTest { private final LogContext logContext = new LogContext("[test] "); private final String topic1 = "topic1"; private final String topic2 = "topic2"; - private final TopicPartition partition1 = new TopicPartition(topic1, 1); - private final TopicPartition partition2 = new TopicPartition(topic2, 1); + private final TopicPartition partition1 = new TopicPartition(topic1, 0); + private final TopicPartition partition2 = new TopicPartition(topic2, 0); private final Set partitions = new HashSet<>(List.of(partition1, partition2)); private final Serializer intSerializer = new IntegerSerializer(); private final Deserializer intDeserializer = new IntegerDeserializer(); @@ -189,7 +189,7 @@ public void process(final Record record) { private final MockKeyValueStore stateStore = new MockKeyValueStore(storeName, false); private final TopicPartition changelogPartition = new TopicPartition("store-changelog", 1); - private final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + private final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); private final byte[] recordValue = intSerializer.serialize(null, 10); private final byte[] recordKey = intSerializer.serialize(null, 1); private final String threadId = Thread.currentThread().getName(); @@ -440,7 +440,7 @@ public void shouldAutoOffsetResetIfNoCommittedOffsetFound() { task.addPartitionsForOffsetReset(Collections.singleton(partition1)); final AtomicReference shouldNotSeek = new AtomicReference<>(); - try (final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST) { + try (final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public void seek(final TopicPartition partition, final long offset) { final AssertionError error = shouldNotSeek.get(); @@ -1082,6 +1082,70 @@ public void shouldPauseAndResumeBasedOnBufferedRecords() { assertEquals(0, consumer.paused().size()); } + @Test + public void shouldResumePartitionWhenSkippingOverRecordsWithInvalidTs() { + when(stateManager.taskId()).thenReturn(taskId); + when(stateManager.taskType()).thenReturn(TaskType.ACTIVE); + task = createStatelessTask(createConfig( + StreamsConfig.AT_LEAST_ONCE, + "-1", + LogAndContinueExceptionHandler.class, + LogAndFailProcessingExceptionHandler.class, + LogAndSkipOnInvalidTimestamp.class + )); + task.initializeIfNeeded(); + task.completeRestoration(noOpResetter -> { }); + + task.addRecords(partition1, asList( + getConsumerRecordWithOffsetAsTimestamp(partition1, 10), + getConsumerRecordWithOffsetAsTimestamp(partition1, 20), + getConsumerRecordWithInvalidTimestamp(30), + getConsumerRecordWithInvalidTimestamp(40), + getConsumerRecordWithInvalidTimestamp(50) + )); + assertTrue(consumer.paused().contains(partition1)); + + assertTrue(task.process(0L)); + + task.resumePollingForPartitionsWithAvailableSpace(); + assertTrue(consumer.paused().contains(partition1)); + + assertTrue(task.process(0L)); + + task.resumePollingForPartitionsWithAvailableSpace(); + assertEquals(0, 
consumer.paused().size()); + + assertTrue(task.process(0L)); // drain head record (ie, last invalid record) + assertFalse(task.process(0L)); + assertFalse(task.hasRecordsQueued()); + + + // repeat test for deserialization error + task.resumePollingForPartitionsWithAvailableSpace(); + task.addRecords(partition1, asList( + getConsumerRecordWithOffsetAsTimestamp(partition1, 110), + getConsumerRecordWithOffsetAsTimestamp(partition1, 120), + getCorruptedConsumerRecordWithOffsetAsTimestamp(130), + getCorruptedConsumerRecordWithOffsetAsTimestamp(140), + getCorruptedConsumerRecordWithOffsetAsTimestamp(150) + )); + assertTrue(consumer.paused().contains(partition1)); + + assertTrue(task.process(0L)); + + task.resumePollingForPartitionsWithAvailableSpace(); + assertTrue(consumer.paused().contains(partition1)); + + assertTrue(task.process(0L)); + + task.resumePollingForPartitionsWithAvailableSpace(); + assertEquals(0, consumer.paused().size()); + + assertTrue(task.process(0L)); // drain head record (ie, last corrupted record) + assertFalse(task.process(0L)); + assertFalse(task.hasRecordsQueued()); + } + @Test public void shouldPunctuateOnceStreamTimeAfterGap() { when(stateManager.taskId()).thenReturn(taskId); @@ -1488,7 +1552,7 @@ public void shouldBeProcessableIfAllPartitionsBuffered() { task.initializeIfNeeded(); task.completeRestoration(noOpResetter -> { }); - assertThat("task is not idling", !task.timeCurrentIdlingStarted().isPresent()); + assertThat("task is not idling", task.timeCurrentIdlingStarted().isEmpty()); assertFalse(task.process(0L)); @@ -1500,7 +1564,7 @@ public void shouldBeProcessableIfAllPartitionsBuffered() { task.addRecords(partition2, singleton(getConsumerRecordWithOffsetAsTimestamp(partition2, 0))); assertTrue(task.process(0L)); - assertThat("task is not idling", !task.timeCurrentIdlingStarted().isPresent()); + assertThat("task is not idling", task.timeCurrentIdlingStarted().isEmpty()); } @Test @@ -1516,7 +1580,7 @@ public void shouldBeRecordIdlingTimeIfSuspended() { task.resume(); - assertThat("task is not idling", !task.timeCurrentIdlingStarted().isPresent()); + assertThat("task is not idling", task.timeCurrentIdlingStarted().isEmpty()); } @Test @@ -1965,7 +2029,7 @@ public void shouldMaybeReturnOffsetsForRepartitionTopicsForPurging(final boolean public void shouldThrowStreamsExceptionWhenFetchCommittedFailed() { when(stateManager.taskId()).thenReturn(taskId); when(stateManager.taskType()).thenReturn(TaskType.ACTIVE); - final Consumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final Consumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public Map committed(final Set partitions) { throw new KafkaException("KABOOM!"); @@ -3029,7 +3093,7 @@ private StreamTask createDisconnectedTask(final StreamsConfig config) { singletonList(stateStore), emptyMap()); - final MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { + final MockConsumer consumer = new MockConsumer(AutoOffsetResetStrategy.EARLIEST.name()) { @Override public Map committed(final Set partitions) { throw new TimeoutException("KABOOM!"); @@ -3314,7 +3378,7 @@ private ConsumerRecord getConsumerRecordWithOffsetAsTimestamp(fi private ConsumerRecord getConsumerRecordWithOffsetAsTimestamp(final Integer key, final long offset) { return new ConsumerRecord<>( topic1, - 1, + 0, offset, offset, // use the offset as the timestamp TimestampType.CREATE_TIME, @@ -3330,7 +3394,7 @@ private ConsumerRecord getConsumerRecordWithOffsetAsTimestamp(fi private 
ConsumerRecord getConsumerRecordWithInvalidTimestamp(final long offset) { return new ConsumerRecord<>( topic1, - 1, + 0, offset, -1L, // invalid (negative) timestamp TimestampType.CREATE_TIME, @@ -3347,24 +3411,24 @@ private ConsumerRecord getConsumerRecordWithOffsetAsTimestampWit final long offset, final int leaderEpoch) { return new ConsumerRecord<>( - topicPartition.topic(), - topicPartition.partition(), - offset, - offset, // use the offset as the timestamp - TimestampType.CREATE_TIME, - 0, - 0, - recordKey, - recordValue, - new RecordHeaders(), - Optional.of(leaderEpoch) + topicPartition.topic(), + topicPartition.partition(), + offset, + offset, // use the offset as the timestamp + TimestampType.CREATE_TIME, + 0, + 0, + recordKey, + recordValue, + new RecordHeaders(), + Optional.of(leaderEpoch) ); } private ConsumerRecord getCorruptedConsumerRecordWithOffsetAsTimestamp(final long offset) { return new ConsumerRecord<>( topic1, - 1, + 0, offset, offset, // use the offset as the timestamp TimestampType.CREATE_TIME, diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java index 42b90bf3ec61a..e48d9275b3ab2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java @@ -24,7 +24,8 @@ import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.InvalidOffsetException; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.MockRebalanceListener; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; @@ -61,6 +62,7 @@ import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.errors.TaskCorruptedException; import org.apache.kafka.streams.errors.TaskMigratedException; +import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder; @@ -141,6 +143,7 @@ import static org.apache.kafka.streams.processor.internals.StateManagerUtil.CHECKPOINT_FILE_NAME; import static org.apache.kafka.test.StreamsTestUtils.TaskBuilder.statelessTask; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.apache.kafka.test.TestUtils.waitForCondition; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.startsWith; @@ -185,7 +188,7 @@ public class StreamThreadTest { private final MockTime mockTime = new MockTime(); private final String stateDir = TestUtils.tempDirectory().getPath(); private final MockClientSupplier clientSupplier = new MockClientSupplier(); - private final ConsumedInternal consumed = new ConsumedInternal<>(); + private final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(null, null)); private final ChangelogReader changelogReader = new MockChangelogReader(); private StateDirectory stateDirectory = null; private final InternalTopologyBuilder internalTopologyBuilder = 
new InternalTopologyBuilder(); @@ -262,7 +265,8 @@ private Properties configProps(final boolean enableEoS, final boolean stateUpdat mkEntry(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName()), mkEntry(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName()), mkEntry(InternalConfig.STATE_UPDATER_ENABLED, Boolean.toString(stateUpdaterEnabled)), - mkEntry(InternalConfig.PROCESSING_THREADS_ENABLED, Boolean.toString(processingThreadsEnabled)) + mkEntry(InternalConfig.PROCESSING_THREADS_ENABLED, Boolean.toString(processingThreadsEnabled)), + mkEntry(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "1") )); } @@ -1390,8 +1394,8 @@ public void shouldNotReturnDataAfterTaskMigrated(final boolean stateUpdaterEnabl final InternalTopologyBuilder internalTopologyBuilder = mock(InternalTopologyBuilder.class); when(internalTopologyBuilder.fullSourceTopicNames()).thenReturn(Collections.singletonList(topic1)); - final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.LATEST); - final MockConsumer restoreConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); + final MockConsumer restoreConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); consumer.subscribe(Collections.singletonList(topic1), new MockRebalanceListener()); consumer.rebalance(Collections.singletonList(t1p1)); @@ -2084,7 +2088,7 @@ private void setupThread(final String storeName1, .count(Materialized.as(storeName1)); final MaterializedInternal> materialized = new MaterializedInternal<>(Materialized.as(storeName2), internalStreamsBuilder, ""); - internalStreamsBuilder.table(topic2, new ConsumedInternal<>(), materialized); + internalStreamsBuilder.table(topic2, new ConsumedInternal<>(Consumed.with(null, null)), materialized); internalStreamsBuilder.buildAndOptimizeTopology(); restoreConsumer.updatePartitions(changelogName1, @@ -2578,7 +2582,7 @@ public void shouldThrowTaskMigratedExceptionHandlingTaskLost(final boolean state final Set assignedPartitions = Collections.singleton(t1p1); final TaskManager taskManager = mock(TaskManager.class); - final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.LATEST); + final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); consumer.assign(assignedPartitions); consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L)); consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L)); @@ -2608,7 +2612,7 @@ public void shouldThrowTaskMigratedExceptionHandlingRevocation(final boolean sta final Set assignedPartitions = Collections.singleton(t1p1); final TaskManager taskManager = mock(TaskManager.class); - final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.LATEST); + final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); consumer.assign(assignedPartitions); consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L)); consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L)); @@ -2979,7 +2983,11 @@ public void shouldNotCommitNonRunningNonRestoringTasks(final boolean stateUpdate @ParameterizedTest @MethodSource("data") - public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps(final boolean stateUpdaterEnabled, final boolean processingThreadsEnabled) { + public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps( + final boolean stateUpdaterEnabled, + final boolean processingThreadsEnabled + ) throws 
Exception { + internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1); final Properties properties = configProps(false, stateUpdaterEnabled, processingThreadsEnabled); @@ -3013,12 +3021,20 @@ public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps(final boolean s addRecord(mockConsumer, ++offset); runOnce(processingThreadsEnabled); + if (processingThreadsEnabled) { + waitForCommit(mockConsumer, offset + 1); + } + addRecord(mockConsumer, ++offset); addRecord(mockConsumer, ++offset); addRecord(mockConsumer, ++offset); addRecord(mockConsumer, ++offset); runOnce(processingThreadsEnabled); + if (processingThreadsEnabled) { + waitForCommit(mockConsumer, offset + 1); + } + addRecord(mockConsumer, ++offset, 1L); addRecord(mockConsumer, ++offset, 1L); runOnce(processingThreadsEnabled); @@ -3059,6 +3075,18 @@ public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps(final boolean s } } + private void waitForCommit(final MockConsumer mockConsumer, final long expectedOffset) throws Exception { + waitForCondition(() -> { + mockTime.sleep(10L); + runOnce(true); + final Map committed = mockConsumer.committed(Collections.singleton(t1p1)); + return !committed.isEmpty() && committed.get(t1p1).offset() == expectedOffset; + }, + "Never committed offset " + expectedOffset + ); + + } + @ParameterizedTest @MethodSource("data") public void shouldTransmitTaskManagerMetrics(final boolean stateUpdaterEnabled, final boolean processingThreadsEnabled) { diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java index 6cf72e91db3e7..592326fae87e8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java @@ -44,6 +44,7 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.TopologyWrapper; +import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Grouped; import org.apache.kafka.streams.kstream.JoinWindows; import org.apache.kafka.streams.kstream.KStream; @@ -2702,8 +2703,8 @@ public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount( final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder); - final KStream inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>()); - final KTable inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store"))); + final KStream inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>(Consumed.with(null, null))); + final KTable inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(Consumed.with(null, null)), new MaterializedInternal<>(Materialized.as("store"))); inputTopic .groupBy( (k, v) -> k, diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsProducerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsProducerTest.java index 3fbf3b912da8f..6c6362ee8bcbc 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsProducerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsProducerTest.java @@ -110,9 +110,9 @@ public class 
StreamsProducerTest { ); private final MockProducer nonEosMockProducer - = new MockProducer<>(cluster, true, new ByteArraySerializer(), new ByteArraySerializer()); + = new MockProducer<>(cluster, true, new org.apache.kafka.clients.producer.RoundRobinPartitioner(), new ByteArraySerializer(), new ByteArraySerializer()); private final MockProducer eosMockProducer - = new MockProducer<>(cluster, true, new ByteArraySerializer(), new ByteArraySerializer()); + = new MockProducer<>(cluster, true, new org.apache.kafka.clients.producer.RoundRobinPartitioner(), new ByteArraySerializer(), new ByteArraySerializer()); private StreamsProducer nonEosStreamsProducer; private StreamsProducer eosStreamsProducer; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java index b55c9a12fcfd3..6d812e0119e1d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java @@ -55,7 +55,7 @@ import org.apache.kafka.streams.processor.internals.testutil.DummyStreamsConfig; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import org.hamcrest.Matchers; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskSuite.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskSuite.java index 172321fab11e8..06a994a5aa8d3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskSuite.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskSuite.java @@ -31,13 +31,13 @@ */ @Suite @SelectClasses({ - StreamTaskTest.class, - StandbyTaskTest.class, - GlobalStateTaskTest.class, - TaskManagerTest.class, - TaskMetricsTest.class, - LegacyStickyTaskAssignorTest.class, - StreamsPartitionAssignorTest.class, + StreamTaskTest.class, + StandbyTaskTest.class, + GlobalStateTaskTest.class, + TaskManagerTest.class, + TaskMetricsTest.class, + LegacyStickyTaskAssignorTest.class, + StreamsPartitionAssignorTest.class, }) public class TaskSuite { } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetricsTest.java index e613a3074aada..d436c9901f40f 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/metrics/ThreadMetricsTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.metrics.Gauge; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.Sensor.RecordingLevel; +import org.apache.kafka.streams.processor.internals.StreamThread; import org.apache.kafka.streams.processor.internals.StreamThreadTotalBlockedTime; import org.junit.jupiter.api.Test; @@ -413,6 +414,39 @@ public void shouldAddThreadStartTimeMetric() { ); } + @Test + public void shouldAddThreadStateTelemetryMetric() { + final Gauge threadStateProvider = (streamsMetrics, startTime) -> StreamThread.State.RUNNING.ordinal(); + ThreadMetrics.addThreadStateTelemetryMetric( + THREAD_ID, + streamsMetrics, + threadStateProvider + ); + 
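The two new ThreadMetrics tests register a Gauge-backed thread-state metric. The underlying pattern with the client metrics library looks roughly like the following sketch (group name, description, and the hard-coded state value are assumptions):

    // Sketch: a Gauge<Integer> exposing a thread-state value, registered as a metric.
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Gauge;
    import org.apache.kafka.common.metrics.Metrics;

    public class ThreadStateGaugeSketch {
        public static void main(final String[] args) {
            try (final Metrics metrics = new Metrics()) {
                final MetricName name = metrics.metricName(
                    "thread-state", "stream-thread-metrics", "The current state of the thread");
                // value(config, nowMs) is re-evaluated every time the metric is read
                final Gauge<Integer> threadState = (config, nowMs) -> 4; // some state ordinal, hard-coded for the sketch
                metrics.addMetric(name, threadState);
                System.out.println(metrics.metric(name).metricValue());
            }
        }
    }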
verify(streamsMetrics).addThreadLevelMutableMetric( + "thread-state", + "The current state of the thread", + THREAD_ID, + threadStateProvider + ); + } + + @Test + public void shouldAddThreadStateJmxMetric() { + final Gauge threadStateProvider = (streamsMetrics, startTime) -> StreamThread.State.RUNNING; + ThreadMetrics.addThreadStateMetric( + THREAD_ID, + streamsMetrics, + threadStateProvider + ); + verify(streamsMetrics).addThreadLevelMutableMetric( + "state", + "The current state of the thread", + THREAD_ID, + threadStateProvider + ); + } + + @Test public void shouldAddTotalBlockedTimeMetric() { // Given: diff --git a/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java index 4aa4c1e182058..f263e43bfa351 100644 --- a/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.streams.query.internals.SucceededQueryResult; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import static org.hamcrest.MatcherAssert.assertThat; @@ -40,26 +39,24 @@ public void setUp() { } @Test - @DisplayName("Zero query results shouldn't error") void getOnlyPartitionResultNoResultsTest() { stringStateQueryResult.addResult(0, noResultsFound); final QueryResult result = stringStateQueryResult.getOnlyPartitionResult(); - assertThat(result, nullValue()); + assertThat("Zero query results shouldn't error", result, nullValue()); } @Test - @DisplayName("Valid query results still works") void getOnlyPartitionResultWithSingleResultTest() { stringStateQueryResult.addResult(0, validResult); final QueryResult result = stringStateQueryResult.getOnlyPartitionResult(); - assertThat(result.getResult(), is("Foo")); + assertThat("Valid query results still works", result.getResult(), is("Foo")); } @Test - @DisplayName("More than one query result throws IllegalArgumentException ") void getOnlyPartitionResultMultipleResults() { stringStateQueryResult.addResult(0, validResult); stringStateQueryResult.addResult(1, validResult); - assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult()); + assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult(), + "More than one query result throws IllegalArgumentException"); } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java b/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java index 0de1e1e606aef..78c6dedcbf45c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/KeyValueStoreTestDriver.java @@ -27,12 +27,13 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.DefaultProductionExceptionHandler; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.StateRestoreCallback; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.StreamPartitioner; import org.apache.kafka.streams.processor.TaskId; +import org.apache.kafka.streams.processor.api.ProcessorContext; +import 
org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorTopology; @@ -62,7 +63,7 @@ import static org.mockito.Mockito.when; /** - * A component that provides a {@link #context() ProcessingContext} that can be supplied to a {@link KeyValueStore} so that + * A component that provides a {@link #context() StateStoreContext} that can be supplied to a {@link KeyValueStore} so that * all entries written to the Kafka topic by the store during {@link KeyValueStore#flush()} are captured for testing purposes. * This class simplifies testing of various {@link KeyValueStore} instances, especially those that use * {@link MeteredKeyValueStore} to monitor and write its entries to the Kafka topic. @@ -110,7 +111,7 @@ * *

                 * Restoring a store
                * This component can be used to test whether a {@link KeyValueStore} implementation properly - * {@link ProcessorContext#register(StateStore, StateRestoreCallback) registers itself} with the {@link ProcessorContext}, so that + * {@link StateStoreContext#register(StateStore, StateRestoreCallback) registers itself} with the {@link StateStoreContext}, so that * the persisted contents of a store are properly restored from the flushed entries when the store instance is started. *
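The javadoc now points at StateStoreContext rather than ProcessorContext for store registration. In a store implementation, the registration step the driver exercises looks roughly like this sketch (store internals are illustrative):

    // Sketch: a state store registers itself and a restore callback with the
    // StateStoreContext during init, so changelog records can be replayed into it.
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.processor.StateStore;
    import org.apache.kafka.streams.processor.StateStoreContext;

    public class SketchStore implements StateStore {
        private final String name;
        private final Map<Bytes, byte[]> inner = new HashMap<>();
        private boolean open;

        public SketchStore(final String name) {
            this.name = name;
        }

        @Override
        public void init(final StateStoreContext stateStoreContext, final StateStore root) {
            // restore callback: each changelog record is written straight back into the store
            stateStoreContext.register(root, (key, value) -> inner.put(Bytes.wrap(key), value));
            open = true;
        }

        @Override
        public String name() { return name; }

        @Override
        public void flush() { }

        @Override
        public void close() { open = false; }

        @Override
        public boolean persistent() { return false; }

        @Override
        public boolean isOpen() { return open; }
    }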

                * To do this, create an instance of this driver component, {@link #addEntryToRestoreLog(Object, Object) add entries} that will be @@ -149,7 +150,7 @@ public class KeyValueStoreTestDriver { /** * Create a driver object that will have a {@link #context()} that records messages - * {@link ProcessorContext#forward(Object, Object) forwarded} by the store and that provides default serializers and + * {@link ProcessorContext#forward(Record) forwarded} by the store and that provides default serializers and * deserializers for the given built-in key and value types (e.g., {@code String.class}, {@code Integer.class}, * {@code Long.class}, and {@code byte[].class}). This can be used when store is created to rely upon the * ProcessorContext's default key and value serializers and deserializers. @@ -167,14 +168,14 @@ public static KeyValueStoreTestDriver create(final Class keyClas /** * Create a driver object that will have a {@link #context()} that records messages - * {@link ProcessorContext#forward(Object, Object) forwarded} by the store and that provides the specified serializers and + * {@link ProcessorContext#forward(Record) forwarded} by the store and that provides the specified serializers and * deserializers. This can be used when store is created to rely upon the ProcessorContext's default key and value serializers * and deserializers. * - * @param keySerializer the key serializer for the {@link ProcessorContext}; may not be null - * @param keyDeserializer the key deserializer for the {@link ProcessorContext}; may not be null - * @param valueSerializer the value serializer for the {@link ProcessorContext}; may not be null - * @param valueDeserializer the value deserializer for the {@link ProcessorContext}; may not be null + * @param keySerializer the key serializer for the {@link StateStoreContext}; may not be null + * @param keyDeserializer the key deserializer for the {@link StateStoreContext}; may not be null + * @param valueSerializer the value serializer for the {@link StateStoreContext}; may not be null + * @param valueDeserializer the value deserializer for the {@link StateStoreContext}; may not be null * @return the test driver; never null */ public static KeyValueStoreTestDriver create(final Serializer keySerializer, @@ -195,6 +196,7 @@ public static KeyValueStoreTestDriver create(final Serializer ke private final InternalMockProcessorContext context; private final StateSerdes stateSerdes; + @SuppressWarnings("resource") private KeyValueStoreTestDriver(final StateSerdes serdes) { props = new Properties(); props.put(StreamsConfig.APPLICATION_ID_CONFIG, "application-id"); @@ -213,7 +215,7 @@ private KeyValueStoreTestDriver(final StateSerdes serdes) { logContext, new TaskId(0, 0), new StreamsProducer( - new MockProducer<>(null, true, null, null), + new MockProducer<>(null, true, null, null, null), AT_LEAST_ONCE, Time.SYSTEM, logContext @@ -264,7 +266,7 @@ public void send(final String topic, stateDir.mkdirs(); stateSerdes = serdes; - context = new InternalMockProcessorContext(stateDir, serdes.keySerde(), serdes.valueSerde(), recordCollector, null) { + context = new InternalMockProcessorContext<>(stateDir, serdes.keySerde(), serdes.valueSerde(), recordCollector, null) { final ThreadCache cache = new ThreadCache(new LogContext("testCache "), 1024 * 1024L, metrics()); @Override @@ -298,7 +300,7 @@ private void recordFlushed(final K key, final V value) { /** * Get the entries that are restored to a KeyValueStore when it is constructed with this driver's {@link #context() - * 
ProcessorContext}. + * StateStoreContext}. * * @return the restore entries; never null but possibly a null iterator */ @@ -345,7 +347,7 @@ public void addEntryToRestoreLog(final K key, final V value) { * {@link #flushedEntryRemoved(Object)} methods. *
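Likewise, the forwarding javadoc now references ProcessorContext#forward(Record): the current API forwards a whole Record (possibly transformed) rather than separate key and value arguments. A short illustrative sketch (processor body assumed):

    // Sketch: forwarding with the processor.api types passes a Record object.
    import java.util.Locale;

    import org.apache.kafka.streams.processor.api.ContextualProcessor;
    import org.apache.kafka.streams.processor.api.Record;

    public class UpperCaseForwardingProcessor extends ContextualProcessor<String, String, String, String> {
        @Override
        public void process(final Record<String, String> record) {
            // withValue keeps key, timestamp, and headers, swapping only the value
            context().forward(record.withValue(record.value().toUpperCase(Locale.ROOT)));
        }
    }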

                * If the {@link KeyValueStore}'s are to be restored upon its startup, be sure to {@link #addEntryToRestoreLog(Object, Object) - * add the restore entries} before creating the store with the {@link ProcessorContext} returned by this method. + * add the restore entries} before creating the store with the {@link StateStoreContext} returned by this method. * * @return the processing context; never null * @see #addEntryToRestoreLog(Object, Object) @@ -378,7 +380,7 @@ public int checkForRestoredEntries(final KeyValueStore store) { /** * Utility method to compute the number of entries within the store. * - * @param store the key value store using this {@link #context()}. + * @param store the key value store using this {@link #context() StateStoreContext}. * @return the number of entries */ public int sizeOf(final KeyValueStore store) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/NoOpWindowStore.java b/streams/src/test/java/org/apache/kafka/streams/state/NoOpWindowStore.java index e7b8f1da82dc3..5de4e65bd5615 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/NoOpWindowStore.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/NoOpWindowStore.java @@ -56,7 +56,7 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) {} + public void init(final StateStoreContext stateStoreContext, final StateStore root) {} @Override public void flush() { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java index f7578112da8f8..0f8ce890b1952 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.serialization.LongSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogContext; @@ -37,11 +38,9 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; import org.apache.kafka.streams.kstream.internals.TimeWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; @@ -96,8 +95,8 @@ public abstract class AbstractDualSchemaRocksDBSegmentedBytesStoreTest { private static final String METRICS_SCOPE = "metrics-scope"; - private final long windowSizeForTimeWindow = 500; - private InternalMockProcessorContext context; + private long windowSizeForTimeWindow = 500; + private InternalMockProcessorContext context; private AbstractDualSchemaRocksDBSegmentedBytesStore bytesStore; private 
File stateDir; private final Window[] windows = new Window[4]; @@ -159,7 +158,7 @@ public void before() { new MockRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())) ); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); } @AfterEach @@ -204,7 +203,7 @@ AbstractDualSchemaRocksDBSegmentedBytesStore getBytesStore() { default: throw new IllegalStateException("Unknown SchemaType: " + schemaType()); } - }; + } AbstractSegments newSegments() { return new KeyValueSegments(storeName, METRICS_SCOPE, retention, segmentInterval); @@ -244,54 +243,43 @@ public void shouldPutAndFetch() { bytesStore.put(serializeKey(new Windowed<>(keyB, windows[2])), serializeValue(100)); bytesStore.put(serializeKey(new Windowed<>(keyC, windows[3])), serializeValue(200)); - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) { - // For all tests, actualFrom is computed using observedStreamTime - retention + 1. - // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 - // all records expired as actual from is 59001 and to is 1000 - final List, Long>> expected = Collections.emptyList(); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { - - // all records expired as actual from is 59001 and to is 1000 - final List, Long>> expected = Collections.emptyList(); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.fetch( - null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { - - // all records expired as actual from is 59001 and to is 1000 - final List, Long>> expected = Collections.emptyList(); - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start())) { - - // key B is expired as actual from is 59001 - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) - ); + // For all tests, actualFrom is computed using observedStreamTime - retention + 1. 
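The rewritten assertions above (and in the backward-fetch test that follows) all hinge on the expiry rule spelled out in the comments: a fetch only sees records at or after observedStreamTime - retention + 1. A minimal, self-contained sketch of that arithmetic, with variable names chosen for illustration rather than taken from the store implementation:

```java
// Illustrative sketch of the expiry lower bound referenced in the test comments.
// The names below are hypothetical; the segmented store computes an equivalent bound internally.
public final class ActualFromSketch {

    static long actualFrom(final long observedStreamTime, final long retention, final long from) {
        // Anything older than (observedStreamTime - retention + 1) is treated as expired,
        // so the effective lower bound of a fetch is the larger of the two values.
        return Math.max(from, observedStreamTime - retention + 1);
    }

    public static void main(final String[] args) {
        // Matches the numbers in the comments: 60000 - 1000 + 1 = 59001.
        final long lowerBound = actualFrom(60_000L, 1_000L, 0L);
        System.out.println(lowerBound);           // 59001
        // A fetch over [0, 1000] therefore covers an empty effective range.
        System.out.println(lowerBound > 1_000L);  // true -> no results
    }
}
```

With the test's numbers (stream time 60000, retention 1000) the lower bound is 59001, which is why every fetch bounded above by 1000 expects Collections.emptyList(), while fetches reaching windows[3] still return keyC.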
+ // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 + // all records expired as actual from is 59001 and to is 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) + ); - assertEquals(expected, toList(values)); - } + // all records expired as actual from is 59001 and to is 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator(bytesStore.fetch( + Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start()) + ) + ); - try (final KeyValueIterator values = bytesStore.fetch( - null, null, 0, windows[3].start())) { + // all records expired as actual from is 59001 and to is 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator( + bytesStore.fetch(null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start()) + ) + ); - // keys A and B expired as actual from is 59001 - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) - ); + // key B is expired as actual from is 59001 + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)), + toListAndCloseIterator( + bytesStore.fetch(Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start()) + ) + ); - assertEquals(expected, toList(values)); - } + // keys A and B expired as actual from is 59001 + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)), + toListAndCloseIterator(bytesStore.fetch(null, null, 0, windows[3].start())) + ); } @Test @@ -305,55 +293,46 @@ public void shouldPutAndBackwardFetch() { bytesStore.put(serializeKey(new Windowed<>(keyB, windows[2])), serializeValue(100)); bytesStore.put(serializeKey(new Windowed<>(keyC, windows[3])), serializeValue(200)); - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) { - - // For all tests, actualFrom is computed using observedStreamTime - retention + 1. - // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 - // all records expired as actual from is 59001 and to = 1000 - final List, Long>> expected = Collections.emptyList(); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { - - // all records expired as actual from is 59001 and to = 1000 - final List, Long>> expected = Collections.emptyList(); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { - - // all records expired as actual from is 59001 and to = 1000 - final List, Long>> expected = Collections.emptyList(); - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start())) { - - // only 1 record left as actual from is 59001 and to = 60,000 - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) - ); + // For all tests, actualFrom is computed using observedStreamTime - retention + 1. 
+ // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 + // all records expired as actual from is 59001 and to = 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) + ); - assertEquals(expected, toList(values)); - } + // all records expired as actual from is 59001 and to = 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator(bytesStore.backwardFetch( + Bytes.wrap(keyA.getBytes()), + Bytes.wrap(keyB.getBytes()), + 0, + windows[2].start() + )) + ); - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, null, 0, windows[3].start())) { + // all records expired as actual from is 59001 and to = 1000 + assertEquals( + Collections.emptyList(), + toListAndCloseIterator( + bytesStore.backwardFetch(null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start()) + ) + ); - // only 1 record left as actual from is 59001 and to = 60,000 - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) - ); + // only 1 record left as actual from is 59001 and to = 60,000 + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)), + toListAndCloseIterator( + bytesStore.backwardFetch(Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start()) + ) + ); - assertEquals(expected, toList(values)); - } + // only 1 record left as actual from is 59001 and to = 60,000 + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)), + toListAndCloseIterator(bytesStore.backwardFetch(null, null, 0, windows[3].start())) + ); } @Test @@ -376,52 +355,41 @@ public void shouldPutAndFetchEdgeSingleKey() { bytesStore.put(serializedKeyBEnd, serializeValue(150)); // Can fetch start/end edge for single key - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch( + Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime)) + ); // Can fetch start/end edge for single key - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyB.getBytes()), startEdgeTime, endEdgeTime)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyB.getBytes()), startEdgeTime, endEdgeTime)) + ); // Can fetch from 0 to max for single key - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) + ); // Can fetch from 0 to max for single key - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = asList( + assertEquals( + asList( 
KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) + ); } @Test @@ -443,9 +411,7 @@ public void shouldPutAndFetchEdgeKeyRange() { bytesStore.put(serializedKeyBStart, serializeValue(100)); bytesStore.put(serializedKeyBEnd, serializeValue(150)); // Can fetch from start/end for key range - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), startEdgeTime, endEdgeTime)) { - + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), @@ -457,13 +423,20 @@ public void shouldPutAndFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); - assertEquals(expected, toList(values)); + + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch( + Bytes.wrap(keyA.getBytes()), + Bytes.wrap(keyB.getBytes()), + startEdgeTime, + endEdgeTime + )) + ); } // Can fetch from 0 to max for key range - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0L, Long.MAX_VALUE)) { - + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), @@ -475,33 +448,34 @@ public void shouldPutAndFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); - assertEquals(expected, toList(values)); - } - - // KeyB should be ignored and KeyA should be included even in storage - try (final KeyValueIterator values = bytesStore.fetch( - null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L)) { - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch( + Bytes.wrap(keyA.getBytes()), + Bytes.wrap(keyB.getBytes()), + 0L, + Long.MAX_VALUE + )) ); - - assertEquals(expected, toList(values)); } - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) - ); - - assertEquals(expected, toList(values)); - } + // KeyB should be ignored and KeyA should be included even in storage + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L)), + toListAndCloseIterator( + bytesStore.fetch(null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L) + ) + ); - try (final KeyValueIterator values = bytesStore.fetch( - null, null, 0, Long.MAX_VALUE)) { + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L)), + toListAndCloseIterator( + bytesStore.fetch(Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime) + ) + ); + { final List, Long>> expected = getIndexSchema() == null ? 
asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), @@ -513,12 +487,14 @@ public void shouldPutAndFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); - assertEquals(expected, toList(values)); - } - try (final KeyValueIterator values = bytesStore.fetch( - null, null, startEdgeTime, endEdgeTime)) { + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch(null, null, 0, Long.MAX_VALUE)) + ); + } + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L), @@ -531,7 +507,10 @@ public void shouldPutAndFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) ); - assertEquals(expected, toList(values)); + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch(null, null, startEdgeTime, endEdgeTime)) + ); } } @@ -555,52 +534,40 @@ public void shouldPutAndBackwardFetchEdgeSingleKey() { bytesStore.put(serializedKeyBEnd, serializeValue(150)); // Can fetch start/end edge for single key - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime)) + ); // Can fetch start/end edge for single key - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyB.getBytes()), startEdgeTime, endEdgeTime)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyB.getBytes()), startEdgeTime, endEdgeTime)) + ); // Can fetch from 0 to max for single key - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) + ); // Can fetch from 0 to max for single key - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyB, startEdgeWindow), 100L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) + ); } @Test @@ -623,9 +590,7 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { bytesStore.put(serializedKeyBEnd, serializeValue(150)); // Can fetch from start/end for key range - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 
startEdgeTime, endEdgeTime)) { - + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), @@ -637,13 +602,20 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); - assertEquals(expected, toList(values)); + + assertEquals( + expected, + toListAndCloseIterator(bytesStore.backwardFetch( + Bytes.wrap(keyA.getBytes()), + Bytes.wrap(keyB.getBytes()), + startEdgeTime, + endEdgeTime + )) + ); } // Can fetch from 0 to max for key range - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0L, Long.MAX_VALUE)) { - + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), @@ -655,33 +627,32 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); - assertEquals(expected, toList(values)); - } - // KeyB should be ignored and KeyA should be included even in storage - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) + assertEquals( + expected, + toListAndCloseIterator(bytesStore.backwardFetch( + Bytes.wrap(keyA.getBytes()), + Bytes.wrap(keyB.getBytes()), + 0L, + Long.MAX_VALUE + )) ); - - assertEquals(expected, toList(values)); } - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L) - ); - - assertEquals(expected, toList(values)); - } + // KeyB should be ignored and KeyA should be included even in storage + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L)), + toListAndCloseIterator( + bytesStore.backwardFetch(null, Bytes.wrap(keyA.getBytes()), startEdgeTime, endEdgeTime - 1L)) + ); - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, null, 0, Long.MAX_VALUE)) { + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L)), + toListAndCloseIterator( + bytesStore.backwardFetch(Bytes.wrap(keyB.getBytes()), null, startEdgeTime + 1, endEdgeTime)) + ); + { final List, Long>> expected = getIndexSchema() == null ? asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), @@ -693,12 +664,14 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); - assertEquals(expected, toList(values)); - } - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, null, startEdgeTime, endEdgeTime)) { + assertEquals( + expected, + toListAndCloseIterator(bytesStore.backwardFetch(null, null, 0, Long.MAX_VALUE)) + ); + } + { final List, Long>> expected = getIndexSchema() == null ? 
asList( KeyValue.pair(new Windowed<>(keyB, endEdgeWindow), 150L), KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), @@ -710,7 +683,11 @@ public void shouldPutAndBackwardFetchEdgeKeyRange() { KeyValue.pair(new Windowed<>(keyA, endEdgeWindow), 50L), KeyValue.pair(new Windowed<>(keyA, startEdgeWindow), 10L) ); - assertEquals(expected, toList(values)); + + assertEquals( + expected, + toListAndCloseIterator(bytesStore.backwardFetch(null, null, startEdgeTime, endEdgeTime)) + ); } } @@ -724,6 +701,7 @@ public void shouldPutAndFetchWithPrefixKey() { final String keyB = "aa"; final String keyC = "aaa"; + windowSizeForTimeWindow = 1L; final Window maxWindow = new TimeWindow(Long.MAX_VALUE - 1, Long.MAX_VALUE); final Bytes serializedKeyA = serializeKey(new Windowed<>(keyA, maxWindow), false, Integer.MAX_VALUE); final Bytes serializedKeyB = serializeKey(new Windowed<>(keyB, maxWindow), false, Integer.MAX_VALUE); @@ -737,63 +715,49 @@ public void shouldPutAndFetchWithPrefixKey() { bytesStore.put(serializedKeyB, serializeValue(50)); bytesStore.put(serializedKeyC, serializeValue(100)); - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L)), + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L), KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } - - // KeyC should be ignored and KeyA should be included even in storage, KeyC is before KeyB - // and KeyA is after KeyB - try (final KeyValueIterator values = bytesStore.fetch( - null, Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { + ), + toListAndCloseIterator( + bytesStore.fetch(Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE) + ) + ); - final List, Long>> expected = asList( + // KeyC should be ignored and KeyA should be included even in storage, KeyC is before KeyB and KeyA is after KeyB + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L), KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch(null, Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) + ); // KeyC should be included even in storage KeyC is before KeyB - try (final KeyValueIterator values = bytesStore.fetch( - Bytes.wrap(keyB.getBytes()), null, 0, Long.MAX_VALUE)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyC, maxWindow), 100L), KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.fetch( - null, null, 0, Long.MAX_VALUE)) { + ), + toListAndCloseIterator( + bytesStore.fetch(Bytes.wrap(keyB.getBytes()), null, 0, Long.MAX_VALUE) + ) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyC, maxWindow), 100L), KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L), 
KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.fetch(null, null, 0, Long.MAX_VALUE)) + ); } @Test @@ -807,6 +771,7 @@ public void shouldPutAndBackwardFetchWithPrefix() { final String keyB = "aa"; final String keyC = "aaa"; + windowSizeForTimeWindow = 1L; final Window maxWindow = new TimeWindow(Long.MAX_VALUE - 1, Long.MAX_VALUE); final Bytes serializedKeyA = serializeKey(new Windowed<>(keyA, maxWindow), false, Integer.MAX_VALUE); final Bytes serializedKeyB = serializeKey(new Windowed<>(keyB, maxWindow), false, Integer.MAX_VALUE); @@ -820,62 +785,48 @@ public void shouldPutAndBackwardFetchWithPrefix() { bytesStore.put(serializedKeyB, serializeValue(50)); bytesStore.put(serializedKeyC, serializeValue(100)); - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L)), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyA.getBytes()), 0, Long.MAX_VALUE)) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L), KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) { + ), + toListAndCloseIterator( + bytesStore.backwardFetch(Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE) + ) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L), KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - Bytes.wrap(keyB.getBytes()), null, 0, Long.MAX_VALUE)) { + ), + toListAndCloseIterator(bytesStore.backwardFetch(null, Bytes.wrap(keyB.getBytes()), 0, Long.MAX_VALUE)) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L), KeyValue.pair(new Windowed<>(keyC, maxWindow), 100L) - ); - - assertEquals(expected, toList(values)); - } - - try (final KeyValueIterator values = bytesStore.backwardFetch( - null, null, 0, Long.MAX_VALUE)) { + ), + toListAndCloseIterator(bytesStore.backwardFetch(Bytes.wrap(keyB.getBytes()), null, 0, Long.MAX_VALUE)) + ); - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, maxWindow), 10L), KeyValue.pair(new Windowed<>(keyB, maxWindow), 50L), KeyValue.pair(new Windowed<>(keyC, maxWindow), 100L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(bytesStore.backwardFetch(null, null, 0, Long.MAX_VALUE)) + ); } + @SuppressWarnings("resource") @Test public void shouldFetchSessionForSingleKey() { // Only for TimeFirstSessionKeySchema schema @@ -949,74 +900,59 @@ public void shouldFetchSessionForTimeRange() { // Fetch point - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) 
bytesStore).fetchSessions(100L, 100L)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L) - ); - - assertEquals(expected, toList(values)); - } + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L)), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 100L)) + ); // Fetch partial boundary - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 200L)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L), KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 200L)) + ); // Fetch partial - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 201L)) { - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L), KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 201L)) + ); // Fetch partial try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(101L, 199L)) { - assertTrue(toList(values).isEmpty()); + assertTrue(toListAndCloseIterator(values).isEmpty()); } // Fetch all boundary - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 300L)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L), KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L), KeyValue.pair(new Windowed<>(keyC, sessionWindows[2]), 200L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(100L, 300L)) + ); // Fetch all - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 301L)) { - - final List, Long>> expected = asList( + assertEquals( + asList( KeyValue.pair(new Windowed<>(keyA, sessionWindows[0]), 10L), KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L), KeyValue.pair(new Windowed<>(keyC, sessionWindows[2]), 200L) - ); - - assertEquals(expected, toList(values)); - } + ), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(99L, 301L)) + ); // Fetch all - try (final KeyValueIterator values = ((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(101L, 299L)) { - - final List, Long>> expected = Collections.singletonList( - KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L) - ); - - assertEquals(expected, toList(values)); - } + assertEquals( + Collections.singletonList(KeyValue.pair(new Windowed<>(keyB, sessionWindows[1]), 100L)), + toListAndCloseIterator(((RocksDBTimeOrderedSessionSegmentedBytesStore) bytesStore).fetchSessions(101L, 299L)) + ); } @Test @@ -1042,32 +978,31 @@ public void shouldSkipAndRemoveDanglingIndex() { final Bytes serializedKey2 = serializeKey(new Windowed<>(keyB, windows[2])); 
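The pattern driving most of this refactor is visible in the hunks above: instead of wrapping each fetch in its own try-with-resources block and calling toList inside it, the tests now pass the iterator straight into a helper that drains it and closes it in one step. A standalone sketch of that helper shape, using a plain Iterator/Closeable stand-in instead of Kafka's KeyValueIterator (so every name below is illustrative):

```java
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Stand-in for KeyValueIterator: an Iterator that must be closed after use.
interface CloseableIterator<T> extends Iterator<T>, Closeable {
    @Override
    void close(); // no checked exception, mirroring KeyValueIterator
}

final class IteratorUtil {
    private IteratorUtil() { }

    // Drains the iterator into a list and guarantees close(), even if next() throws.
    // Java 9+ allows try-with-resources on an existing effectively-final variable,
    // which is the shape used by toListAndCloseIterator later in this diff.
    static <T> List<T> toListAndCloseIterator(final CloseableIterator<T> iterator) {
        try (iterator) {
            final List<T> result = new ArrayList<>();
            while (iterator.hasNext()) {
                result.add(iterator.next());
            }
            return result;
        }
    }
}
```

Folding consumption and close() into one call keeps each assertion to a single expression and ensures that a failed assertion can no longer leak an open RocksDB iterator.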
bytesStore.put(serializedKey2, serializeValue(20L)); - try (final KeyValueIterator results = bytesStore.fetch( - Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 1, 2000)) { - - final List, Long>> expected; - - // actual from: observedStreamTime - retention + 1 - if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { - // For windowkeyschema, actual from is 1 - // observed stream time = 1000. Retention Period = 1000. - // actual from = (1000 - 1000 + 1) - // and search happens in the range 1-2000 - expected = asList( - KeyValue.pair(new Windowed<>(keyA, windows[0]), 10L), - KeyValue.pair(new Windowed<>(keyB, windows[2]), 20L) - ); - } else { - // For session key schema, actual from is 501 - // observed stream time = 1500. Retention Period = 1000. - // actual from = (1500 - 1000 + 1) - // and search happens in the range 501-2000 - expected = Collections.singletonList(KeyValue.pair(new Windowed<>(keyB, windows[2]), 20L)); - } + final List, Long>> expected; - assertEquals(expected, toList(results)); + // actual from: observedStreamTime - retention + 1 + if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { + // For windowkeyschema, actual from is 1 + // observed stream time = 1000. Retention Period = 1000. + // actual from = (1000 - 1000 + 1) + // and search happens in the range 1-2000 + expected = asList( + KeyValue.pair(new Windowed<>(keyA, windows[0]), 10L), + KeyValue.pair(new Windowed<>(keyB, windows[2]), 20L) + ); + } else { + // For session key schema, actual from is 501 + // observed stream time = 1500. Retention Period = 1000. + // actual from = (1500 - 1000 + 1) + // and search happens in the range 501-2000 + expected = Collections.singletonList(KeyValue.pair(new Windowed<>(keyB, windows[2]), 20L)); } + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 1, 2000)) + ); + // Dangling index should be deleted. value = bytesStore.getIndex(serializedKey1); assertThat(value, is(nullValue())); @@ -1082,28 +1017,28 @@ public void shouldFindValuesWithinRange() { bytesStore.put(serializeKey(new Windowed<>(key, windows[2])), serializeValue(100)); // actual from: observedStreamTime - retention + 1 // retention = 1000 - try (final KeyValueIterator results = bytesStore.fetch(Bytes.wrap(key.getBytes()), 1, 999)) { - - final List, Long>> expected; + final List, Long>> expected; - // actual from: observedStreamTime - retention + 1 - if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { - // For windowkeyschema, actual from is 1 - // observed stream time = 1000. actual from = (1000 - 1000 + 1) - // and search happens in the range 1-2000 - expected = asList( - KeyValue.pair(new Windowed<>(key, windows[0]), 10L), - KeyValue.pair(new Windowed<>(key, windows[1]), 50L) - ); - } else { - // For session key schema, actual from is 501 - // observed stream time = 1500. actual from = (1500 - 1000 + 1) - // and search happens in the range 501-2000 deeming first record as expired. - expected = Collections.singletonList(KeyValue.pair(new Windowed<>(key, windows[1]), 50L)); - } - - assertEquals(expected, toList(results)); + // actual from: observedStreamTime - retention + 1 + if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { + // For windowkeyschema, actual from is 1 + // observed stream time = 1000. 
actual from = (1000 - 1000 + 1) + // and search happens in the range 1-2000 + expected = asList( + KeyValue.pair(new Windowed<>(key, windows[0]), 10L), + KeyValue.pair(new Windowed<>(key, windows[1]), 50L) + ); + } else { + // For session key schema, actual from is 501 + // observed stream time = 1500. actual from = (1500 - 1000 + 1) + // and search happens in the range 501-2000 deeming first record as expired. + expected = Collections.singletonList(KeyValue.pair(new Windowed<>(key, windows[1]), 50L)); } + + assertEquals( + expected, + toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 1, 999)) + ); } @Test @@ -1138,7 +1073,7 @@ public void shouldRollSegments() { bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(1000)); assertEquals(Set.of(segments.segmentName(0), segments.segmentName(1)), segmentDirs()); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0, 1500)); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0, 1500)); // For all tests, actualFrom is computed using observedStreamTime - retention + 1. // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 @@ -1148,7 +1083,7 @@ public void shouldRollSegments() { results ); - final List, Long>> results1 = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 59000, 60000)); + final List, Long>> results1 = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 59000, 60000)); // only non expired record as actual from is 59001 assertEquals( @@ -1180,7 +1115,7 @@ public void shouldGetAllSegments() { segmentDirs() ); - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); // actualFrom is computed using observedStreamTime - retention + 1. // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 // only one record returned as actual from is 59001 @@ -1215,7 +1150,7 @@ public void shouldGetAllBackwards() { // For all tests, actualFrom is computed using observedStreamTime - retention + 1. // so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 // key A expired as actual from is 59,001 - final List, Long>> results = toList(bytesStore.backwardAll()); + final List, Long>> results = toListAndCloseIterator(bytesStore.backwardAll()); assertEquals( Collections.singletonList( KeyValue.pair(new Windowed<>(keyB, windows[3]), 100L) @@ -1244,7 +1179,7 @@ public void shouldFetchAllSegments() { segmentDirs() ); - final List, Long>> results = toList(bytesStore.fetchAll(0L, 60_000L)); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetchAll(0L, 60_000L)); // For all tests, actualFrom is computed using observedStreamTime - retention + 1. 
// so actualFrom = 60000(observedStreamTime) - 1000(retention) + 1 = 59001 // only 1 record fetched as actual from is 59001 @@ -1279,8 +1214,8 @@ public void shouldLoadSegmentsWithOldStyleDateFormattedName() { bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); + bytesStore.init(context, bytesStore); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); assertThat( results, equalTo( @@ -1311,8 +1246,8 @@ public void shouldLoadSegmentsWithOldStyleColonFormattedName() { bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); + bytesStore.init(context, bytesStore); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); assertThat( results, equalTo( @@ -1332,7 +1267,7 @@ public void shouldBeAbleToWriteToReInitializedStore() { // need to create a segment so we can attempt to write to it again. bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50)); bytesStore.close(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); bytesStore.put(serializeKey(new Windowed<>(key, windows[1])), serializeValue(100)); } @@ -1354,17 +1289,17 @@ public void shouldCreateWriteBatches() { @Test public void shouldRestoreToByteStoreForActiveTask() { - shouldRestoreToByteStore(TaskType.ACTIVE); + shouldRestoreToByteStore(); } @Test public void shouldRestoreToByteStoreForStandbyTask() { context.transitionToStandby(null); - shouldRestoreToByteStore(TaskType.STANDBY); + shouldRestoreToByteStore(); } - private void shouldRestoreToByteStore(final TaskType taskType) { - bytesStore.init((StateStoreContext) context, bytesStore); + private void shouldRestoreToByteStore() { + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); final String key = "a"; @@ -1380,13 +1315,13 @@ private void shouldRestoreToByteStore(final TaskType taskType) { expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 100L)); // after restoration, only 1 record should be returned as actual from is 59001 and the prior record is expired. - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); } @Test public void shouldMatchPositionAfterPut() { - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); final String keyA = "a"; final String keyB = "b"; @@ -1422,7 +1357,7 @@ public void shouldRestoreRecordsAndConsistencyVectorSingleTopic() { Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. 
assertEquals(0, bytesStore.getSegments().size()); @@ -1435,7 +1370,7 @@ public void shouldRestoreRecordsAndConsistencyVectorSingleTopic() { expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L)); // after restoration, only non expired segments should be returned which is one as actual from is 59001 - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); assertThat(bytesStore.getPosition(), Matchers.notNullValue()); assertThat(bytesStore.getPosition().getPartitionPositions(""), Matchers.notNullValue()); @@ -1458,7 +1393,7 @@ public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() { Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); @@ -1472,7 +1407,7 @@ public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() { final List, Long>> expected = new ArrayList<>(); expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L)); - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); assertThat(bytesStore.getPosition(), Matchers.notNullValue()); assertThat(bytesStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue()); @@ -1497,7 +1432,7 @@ public void shouldHandleTombstoneRecords() { Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); @@ -1516,7 +1451,7 @@ public void shouldHandleTombstoneRecords() { expected.add(new KeyValue<>(new Windowed<>(key, windows[0]), 50L)); } - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); assertThat(bytesStore.getPosition(), Matchers.notNullValue()); assertThat(bytesStore.getPosition().getPartitionPositions("A"), hasEntry(0, 2L)); @@ -1538,7 +1473,7 @@ public void shouldNotThrowWhenRestoringOnMissingHeaders() { Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); bytesStore.restoreAllInternal(getChangelogRecordsWithoutHeaders()); assertThat(bytesStore.getPosition(), is(Position.emptyPosition())); } @@ -1649,13 +1584,13 @@ private List> getChangelogRecordsWithoutHeaders() public void shouldMeasureExpiredRecords() { final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final AbstractDualSchemaRocksDBSegmentedBytesStore bytesStore = getBytesStore(); - final InternalMockProcessorContext context = new InternalMockProcessorContext( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), new StreamsConfig(streamsConfig) ); final Time time = Time.SYSTEM; context.setSystemTimeMs(time.milliseconds()); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // write a record to advance stream time, with a high enough timestamp // that the subsequent record in windows[0] will already be expired. 
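The comment above describes the setup for shouldMeasureExpiredRecords: one put with a large timestamp pushes observed stream time far ahead, so a later put into windows[0] falls outside retention, is dropped, and shows up in the expired-records metric instead. A rough sketch of that put-side bookkeeping; the class, fields, and threshold here are invented for illustration and only approximate the store's real logic:

```java
// Hypothetical sketch of put-side expiry accounting; all names and the exact
// threshold are invented and do not mirror the real segmented store.
final class ExpiryAccountingSketch {
    private long observedStreamTime = Long.MIN_VALUE;
    private long expiredRecordCount = 0;
    private final long retention;

    ExpiryAccountingSketch(final long retention) {
        this.retention = retention;
    }

    boolean put(final long windowEndTimestamp) {
        // Every put can only move observed stream time forward.
        observedStreamTime = Math.max(observedStreamTime, windowEndTimestamp);
        final long expiryThreshold = observedStreamTime - retention;
        if (windowEndTimestamp <= expiryThreshold) {
            // Too old to retain: drop the record and bump the metric the test asserts on.
            expiredRecordCount++;
            return false;
        }
        return true; // would be written to the active segment
    }

    long expiredRecordCount() {
        return expiredRecordCount;
    }
}
```

With retention 1000 and stream time already advanced to 60000, a put whose window ends at 500 lands below the threshold and only increments the counter, which is the behavior the metric assertion checks.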
@@ -1736,36 +1671,40 @@ private Bytes serializeKeyForIndex(final Windowed key) { } } + @SuppressWarnings("resource") private byte[] serializeValue(final long value) { - return Serdes.Long().serializer().serialize("", value); + return new LongSerializer().serialize("", value); } - private List, Long>> toList(final KeyValueIterator iterator) { - final List, Long>> results = new ArrayList<>(); - final StateSerdes stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class); - while (iterator.hasNext()) { - final KeyValue next = iterator.next(); - if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { - final KeyValue, Long> deserialized = KeyValue.pair( - TimeFirstWindowKeySchema.fromStoreKey( - next.key.get(), - windowSizeForTimeWindow, - stateSerdes.keyDeserializer(), - stateSerdes.topic() - ), - stateSerdes.valueDeserializer().deserialize("dummy", next.value) - ); - results.add(deserialized); - } else if (getBaseSchema() instanceof TimeFirstSessionKeySchema) { - final KeyValue, Long> deserialized = KeyValue.pair( - TimeFirstSessionKeySchema.from(next.key.get(), stateSerdes.keyDeserializer(), "dummy"), - stateSerdes.valueDeserializer().deserialize("dummy", next.value) - ); - results.add(deserialized); - } else { - throw new IllegalStateException("Unrecognized serde schema"); + @SuppressWarnings("resource") + private List, Long>> toListAndCloseIterator(final KeyValueIterator iterator) { + try (iterator) { + final List, Long>> results = new ArrayList<>(); + final StateSerdes stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class); + while (iterator.hasNext()) { + final KeyValue next = iterator.next(); + if (getBaseSchema() instanceof TimeFirstWindowKeySchema) { + final KeyValue, Long> deserialized = KeyValue.pair( + TimeFirstWindowKeySchema.fromStoreKey( + next.key.get(), + windowSizeForTimeWindow, + stateSerdes.keyDeserializer(), + stateSerdes.topic() + ), + stateSerdes.valueDeserializer().deserialize("dummy", next.value) + ); + results.add(deserialized); + } else if (getBaseSchema() instanceof TimeFirstSessionKeySchema) { + final KeyValue, Long> deserialized = KeyValue.pair( + TimeFirstSessionKeySchema.from(next.key.get(), stateSerdes.keyDeserializer(), "dummy"), + stateSerdes.valueDeserializer().deserialize("dummy", next.value) + ); + results.add(deserialized); + } else { + throw new IllegalStateException("Unrecognized serde schema"); + } } + return results; } - return results; } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java index 1e8c96f6f5299..1175add263fec 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java @@ -38,11 +38,11 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsEqual.equalTo; @@ -53,18 +53,17 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -@SuppressWarnings("unchecked") public abstract class 
AbstractKeyValueStoreTest { protected abstract KeyValueStore createKeyValueStore(final StateStoreContext context); - protected InternalMockProcessorContext context; + protected InternalMockProcessorContext context; protected KeyValueStore store; protected KeyValueStoreTestDriver driver; @BeforeEach public void before() { driver = KeyValueStoreTestDriver.create(Integer.class, String.class); - context = (InternalMockProcessorContext) driver.context(); + context = (InternalMockProcessorContext) driver.context(); context.setTime(10); store = createKeyValueStore(context); } @@ -75,16 +74,17 @@ public void after() { driver.clear(); } - private static Map getContents(final KeyValueIterator iter) { - final HashMap result = new HashMap<>(); - while (iter.hasNext()) { - final KeyValue entry = iter.next(); - result.put(entry.key, entry.value); + private static Map getContentsAndCloseIterator(final KeyValueIterator iter) { + try (iter) { + final HashMap result = new HashMap<>(); + while (iter.hasNext()) { + final KeyValue entry = iter.next(); + result.put(entry.key, entry.value); + } + return result; } - return result; } - @SuppressWarnings("unchecked") @Test public void shouldNotIncludeDeletedFromRangeResult() { store.close(); @@ -113,7 +113,7 @@ public byte[] serialize(final String topic, final String data) { // should not include deleted records in iterator final Map expectedContents = Collections.singletonMap(2, "two"); - assertEquals(expectedContents, getContents(store.all())); + assertEquals(expectedContents, getContentsAndCloseIterator(store.all())); } @Test @@ -142,7 +142,7 @@ public byte[] serialize(final String topic, final String data) { // should not include deleted records in iterator final Map expectedContents = Collections.singletonMap(2, "two"); - assertEquals(expectedContents, getContents(store.all())); + assertEquals(expectedContents, getContentsAndCloseIterator(store.all())); } @Test @@ -184,13 +184,13 @@ public void testPutGetRange() { expectedContents.put(4, "four"); // Check range iteration ... - assertEquals(expectedContents, getContents(store.range(2, 4))); - assertEquals(expectedContents, getContents(store.range(2, 6))); + assertEquals(expectedContents, getContentsAndCloseIterator(store.range(2, 4))); + assertEquals(expectedContents, getContentsAndCloseIterator(store.range(2, 6))); // Check all iteration ... expectedContents.put(0, "zero"); expectedContents.put(1, "one"); - assertEquals(expectedContents, getContents(store.all())); + assertEquals(expectedContents, getContentsAndCloseIterator(store.all())); } @Test @@ -232,13 +232,13 @@ public void testPutGetReverseRange() { expectedContents.put(4, "four"); // Check range iteration ... - assertEquals(expectedContents, getContents(store.reverseRange(2, 4))); - assertEquals(expectedContents, getContents(store.reverseRange(2, 6))); + assertEquals(expectedContents, getContentsAndCloseIterator(store.reverseRange(2, 4))); + assertEquals(expectedContents, getContentsAndCloseIterator(store.reverseRange(2, 6))); // Check all iteration ... 
expectedContents.put(0, "zero"); expectedContents.put(1, "one"); - assertEquals(expectedContents, getContents(store.reverseAll())); + assertEquals(expectedContents, getContentsAndCloseIterator(store.reverseAll())); } @Test @@ -498,14 +498,10 @@ public void shouldPutAll() { store.putAll(entries); - final List> allReturned = new ArrayList<>(); + final List> allReturned = toListAndCloseIterator(store.all()); final List> expectedReturned = Arrays.asList(KeyValue.pair(1, "one"), KeyValue.pair(2, "two")); - final Iterator> iterator = store.all(); - while (iterator.hasNext()) { - allReturned.add(iterator.next()); - } assertThat(allReturned, equalTo(expectedReturned)); } @@ -517,14 +513,10 @@ public void shouldPutReverseAll() { store.putAll(entries); - final List> allReturned = new ArrayList<>(); + final List> allReturned = toListAndCloseIterator(store.reverseAll()); final List> expectedReturned = Arrays.asList(KeyValue.pair(2, "two"), KeyValue.pair(1, "one")); - final Iterator> iterator = store.reverseAll(); - while (iterator.hasNext()) { - allReturned.add(iterator.next()); - } assertThat(allReturned, equalTo(expectedReturned)); } @@ -545,10 +537,10 @@ public void shouldReturnSameResultsForGetAndRangeWithEqualKeys() { store.putAll(entries); - final Iterator> iterator = store.range(2, 2); - - assertEquals(iterator.next().value, store.get(2)); - assertFalse(iterator.hasNext()); + try (final KeyValueIterator iterator = store.range(2, 2)) { + assertEquals(iterator.next().value, store.get(2)); + assertFalse(iterator.hasNext()); + } } @Test @@ -560,10 +552,10 @@ public void shouldReturnSameResultsForGetAndReverseRangeWithEqualKeys() { store.putAll(entries); - final Iterator> iterator = store.reverseRange(2, 2); - - assertEquals(iterator.next().value, store.get(2)); - assertFalse(iterator.hasNext()); + try (final KeyValueIterator iterator = store.reverseRange(2, 2)) { + assertEquals(iterator.next().value, store.get(2)); + assertFalse(iterator.hasNext()); + } } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java index c659fa08417d3..7a78716530a18 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.serialization.LongSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogContext; @@ -36,7 +37,6 @@ import org.apache.kafka.streams.kstream.Window; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -88,7 +88,7 @@ public abstract class AbstractRocksDBSegmentedBytesStoreTest { private final long windowSizeForTimeWindow = 500; - private InternalMockProcessorContext context; + private 
InternalMockProcessorContext context; private AbstractRocksDBSegmentedBytesStore bytesStore; private File stateDir; private final Window[] windows = new Window[4]; @@ -140,7 +140,7 @@ public void before(final SegmentedBytesStore.KeySchema schema) { new MockRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())) ); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); } @AfterEach @@ -168,21 +168,21 @@ public void shouldPutAndFetch(final SegmentedBytesStore.KeySchema schema) { Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) { // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.fetch( Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.fetch( null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) { // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.fetch( @@ -193,7 +193,7 @@ public void shouldPutAndFetch(final SegmentedBytesStore.KeySchema schema) { KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) ); - assertEquals(expected, toList(values)); + assertEquals(expected, toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.fetch( @@ -204,7 +204,7 @@ public void shouldPutAndFetch(final SegmentedBytesStore.KeySchema schema) { KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) ); - assertEquals(expected, toList(values)); + assertEquals(expected, toListAndCloseIterator(values)); } } @@ -225,7 +225,7 @@ public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.backwardFetch( @@ -233,7 +233,7 @@ public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.backwardFetch( @@ -241,7 +241,7 @@ public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema // All Records expired as observed stream time = 60000 implying 
actual-from = 59001 (60000 - 1000 + 1) // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000 - assertEquals(Collections.emptyList(), toList(values)); + assertEquals(Collections.emptyList(), toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.backwardFetch( @@ -252,7 +252,7 @@ public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) ); - assertEquals(expected, toList(values)); + assertEquals(expected, toListAndCloseIterator(values)); } try (final KeyValueIterator values = bytesStore.backwardFetch( @@ -263,7 +263,7 @@ public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L) ); - assertEquals(expected, toList(values)); + assertEquals(expected, toListAndCloseIterator(values)); } } @@ -289,7 +289,7 @@ public void shouldFindValuesWithinRange(final SegmentedBytesStore.KeySchema sche expected.add(KeyValue.pair(new Windowed<>(key, windows[1]), 50L)); } - assertEquals(expected, toList(results)); + assertEquals(expected, toListAndCloseIterator(results)); } } @@ -322,7 +322,7 @@ public void shouldRollSegments(final SegmentedBytesStore.KeySchema schema) { bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(1000)); assertEquals(Set.of(segments.segmentName(0), segments.segmentName(1)), segmentDirs()); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0, 1500)); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0, 1500)); /* * All records expired as observed stream time = 60,000 which sets actual-from to 59001(60,000 - 1000 + 1). to = 1500. */ @@ -355,7 +355,7 @@ public void shouldGetAllSegments(final SegmentedBytesStore.KeySchema schema) { /* * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = Long.MAX. */ - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals( Collections.singletonList( KeyValue.pair(new Windowed<>(key, windows[3]), 100L) @@ -388,7 +388,7 @@ public void shouldFetchAllSegments(final SegmentedBytesStore.KeySchema schema) { /* * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = 60,000. 
*/ - final List, Long>> results = toList(bytesStore.fetchAll(0L, 60_000L)); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetchAll(0L, 60_000L)); assertEquals( Collections.singletonList( KeyValue.pair(new Windowed<>(key, windows[3]), 100L) @@ -422,8 +422,8 @@ public void shouldLoadSegmentsWithOldStyleDateFormattedName(final SegmentedBytes bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); + bytesStore.init(context, bytesStore); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); assertThat( results, equalTo( @@ -456,8 +456,8 @@ public void shouldLoadSegmentsWithOldStyleColonFormattedName(final SegmentedByte bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); - final List, Long>> results = toList(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); + bytesStore.init(context, bytesStore); + final List, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L)); assertThat( results, equalTo( @@ -479,7 +479,7 @@ public void shouldBeAbleToWriteToReInitializedStore(final SegmentedBytesStore.Ke // need to create a segment so we can attempt to write to it again. bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50)); bytesStore.close(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); bytesStore.put(serializeKey(new Windowed<>(key, windows[1])), serializeValue(100)); } @@ -515,7 +515,7 @@ public void shouldRestoreToByteStoreForStandbyTask(final SegmentedBytesStore.Key } private void shouldRestoreToByteStore() { - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); final String key = "a"; @@ -533,7 +533,7 @@ private void shouldRestoreToByteStore() { final List, Long>> expected = new ArrayList<>(); expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 100L)); - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); } @@ -541,7 +541,7 @@ private void shouldRestoreToByteStore() { @MethodSource("getKeySchemas") public void shouldMatchPositionAfterPut(final SegmentedBytesStore.KeySchema schema) { before(schema); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); final String keyA = "a"; final String keyB = "b"; @@ -579,7 +579,7 @@ public void shouldRestoreRecordsAndConsistencyVectorSingleTopic(final SegmentedB Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. 
assertEquals(0, bytesStore.getSegments().size()); @@ -594,7 +594,7 @@ public void shouldRestoreRecordsAndConsistencyVectorSingleTopic(final SegmentedB final List, Long>> expected = new ArrayList<>(); expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L)); - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); assertThat(bytesStore.getPosition(), Matchers.notNullValue()); assertThat(bytesStore.getPosition().getPartitionPositions(""), Matchers.notNullValue()); @@ -619,7 +619,7 @@ public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics(final Segment Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); @@ -634,7 +634,7 @@ public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics(final Segment final List, Long>> expected = new ArrayList<>(); expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L)); - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); assertEquals(expected, results); assertThat(bytesStore.getPosition(), Matchers.notNullValue()); assertThat(bytesStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue()); @@ -661,7 +661,7 @@ public void shouldHandleTombstoneRecords(final SegmentedBytesStore.KeySchema sch Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // 0 segments initially. assertEquals(0, bytesStore.getSegments().size()); @@ -675,7 +675,7 @@ public void shouldHandleTombstoneRecords(final SegmentedBytesStore.KeySchema sch * SessionKeySchema, it's 1500. Which changes the actual-from while fetching. In case of SessionKeySchema, the * fetch happens from 501 to end while for WindowKeySchema it's from 1 to end. */ - final List, Long>> results = toList(bytesStore.all()); + final List, Long>> results = toListAndCloseIterator(bytesStore.all()); if (schema instanceof SessionKeySchema) { assertEquals(Collections.emptyList(), results); } else { @@ -705,7 +705,7 @@ public void shouldNotThrowWhenRestoringOnMissingHeaders(final SegmentedBytesStor Time.SYSTEM ); bytesStore = getBytesStore(); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); bytesStore.restoreAllInternal(getChangelogRecordsWithoutHeaders()); assertThat(bytesStore.getPosition(), is(Position.emptyPosition())); } @@ -816,13 +816,13 @@ public void shouldMeasureExpiredRecords(final SegmentedBytesStore.KeySchema sche before(schema); final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final AbstractRocksDBSegmentedBytesStore bytesStore = getBytesStore(); - final InternalMockProcessorContext context = new InternalMockProcessorContext( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), new StreamsConfig(streamsConfig) ); final Time time = Time.SYSTEM; context.setSystemTimeMs(time.milliseconds()); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); // write a record to advance stream time, with a high enough timestamp // that the subsequent record in windows[0] will already be expired. 
@@ -878,36 +878,40 @@ private Bytes serializeKey(final Windowed key) { } } + @SuppressWarnings("resource") private byte[] serializeValue(final long value) { - return Serdes.Long().serializer().serialize("", value); + return new LongSerializer().serialize("", value); } - private List, Long>> toList(final KeyValueIterator iterator) { - final List, Long>> results = new ArrayList<>(); - final StateSerdes stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class); - while (iterator.hasNext()) { - final KeyValue next = iterator.next(); - if (schema instanceof WindowKeySchema) { - final KeyValue, Long> deserialized = KeyValue.pair( - WindowKeySchema.fromStoreKey( - next.key.get(), - windowSizeForTimeWindow, - stateSerdes.keyDeserializer(), - stateSerdes.topic() - ), - stateSerdes.valueDeserializer().deserialize("dummy", next.value) - ); - results.add(deserialized); - } else if (schema instanceof SessionKeySchema) { - final KeyValue, Long> deserialized = KeyValue.pair( - SessionKeySchema.from(next.key.get(), stateSerdes.keyDeserializer(), "dummy"), - stateSerdes.valueDeserializer().deserialize("dummy", next.value) - ); - results.add(deserialized); - } else { - throw new IllegalStateException("Unrecognized serde schema"); + @SuppressWarnings("resource") + private List, Long>> toListAndCloseIterator(final KeyValueIterator iterator) { + try (iterator) { + final List, Long>> results = new ArrayList<>(); + final StateSerdes stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class); + while (iterator.hasNext()) { + final KeyValue next = iterator.next(); + if (schema instanceof WindowKeySchema) { + final KeyValue, Long> deserialized = KeyValue.pair( + WindowKeySchema.fromStoreKey( + next.key.get(), + windowSizeForTimeWindow, + stateSerdes.keyDeserializer(), + stateSerdes.topic() + ), + stateSerdes.valueDeserializer().deserialize("dummy", next.value) + ); + results.add(deserialized); + } else if (schema instanceof SessionKeySchema) { + final KeyValue, Long> deserialized = KeyValue.pair( + SessionKeySchema.from(next.key.get(), stateSerdes.keyDeserializer(), "dummy"), + stateSerdes.valueDeserializer().deserialize("dummy", next.value) + ); + results.add(deserialized); + } else { + throw new IllegalStateException("Unrecognized serde schema"); + } } + return results; } - return results; } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java index 700694fc6dafd..7760d32e43119 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.Stores; @@ -45,7 +44,7 @@ import static java.util.Objects.requireNonNull; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; -import static org.apache.kafka.test.StreamsTestUtils.valuesToSet; +import static org.apache.kafka.test.StreamsTestUtils.valuesToSetAndCloseIterator; import static 
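The serializeValue change just above (Serdes.Long().serializer() replaced by new LongSerializer(), plus the added @SuppressWarnings("resource")) appears aimed at the same resource hygiene: the Serdes factory hands back a Closeable Serde wrapper that the old one-liner never closed, while the serializer can be constructed directly. A sketch of the before/after, under that assumption:

// Sketch of the serializeValue change; the motivation stated here is an assumption.
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serdes;

final class SerializeValueSketch {
    // before: the intermediate Serde<Long> wrapper is created and never closed
    static byte[] viaSerdeFactory(final long value) {
        return Serdes.Long().serializer().serialize("", value);
    }

    // after: no wrapper object, just the serializer itself
    static byte[] viaDirectSerializer(final long value) {
        return new LongSerializer().serialize("", value);
    }
}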
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -177,7 +176,7 @@ public void testRolling() { // expired record assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); @@ -193,7 +192,7 @@ public void testRolling() { if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); @@ -201,32 +200,32 @@ public void testRolling() { } else { assertEquals( new HashSet<>(Collections.singletonList("one")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); } assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); @@ -243,13 +242,13 @@ public void testRolling() { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); @@ -266,7 +265,7 @@ public void testRolling() { assertEquals( // expired record new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); @@ -274,32 +273,32 @@ public void testRolling() { assertEquals( // expired record new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); } assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( new 
HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("six")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); @@ -316,50 +315,50 @@ public void testRolling() { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( // expired record new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("six")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("seven")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); @@ -376,25 +375,25 @@ public void testRolling() { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - 
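testRolling now collects fetch results through valuesToSetAndCloseIterator instead of valuesToSet, so each fetch's iterator is closed as part of building the value set. A simplified sketch of what a helper in that spirit does (the real one lives in StreamsTestUtils and may differ in signature):

// Simplified sketch of a valuesToSetAndCloseIterator-style helper.
import java.util.HashSet;
import java.util.Set;

import org.apache.kafka.streams.state.WindowStoreIterator;

final class ValuesToSetSketch {
    static <V> Set<V> valuesToSetAndClose(final WindowStoreIterator<V> iterator) {
        try (iterator) {
            final Set<V> values = new HashSet<>();
            while (iterator.hasNext()) {
                values.add(iterator.next().value);  // keep only the values, drop the timestamps
            }
            return values;
        }
    }
}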
WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); @@ -402,7 +401,7 @@ public void testRolling() { assertEquals( // expired record new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); @@ -410,7 +409,7 @@ public void testRolling() { assertEquals( // expired record new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); @@ -418,25 +417,25 @@ public void testRolling() { } assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("six")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("seven")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("eight")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 8 + WINDOW_SIZE)))); @@ -458,7 +457,7 @@ public void testSegmentMaintenance() { windowStore.close(); windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, true, Serdes.Integer(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); context.setTime(0L); windowStore.put(0, "v", 0); @@ -480,14 +479,14 @@ public void testSegmentMaintenance() { segmentDirs(baseDir) ); - WindowStoreIterator iter; int fetchedCount; - iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(SEGMENT_INTERVAL * 4)); - fetchedCount = 0; - while (iter.hasNext()) { - iter.next(); - fetchedCount++; + try (final WindowStoreIterator iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(SEGMENT_INTERVAL * 4))) { + fetchedCount = 0; + while (iter.hasNext()) { + iter.next(); + fetchedCount++; + } } assertEquals(4, fetchedCount); @@ -498,11 +497,12 @@ public void testSegmentMaintenance() { windowStore.put(0, "v", SEGMENT_INTERVAL * 3); - iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(SEGMENT_INTERVAL * 4)); - fetchedCount = 0; - while (iter.hasNext()) { - iter.next(); - fetchedCount++; + try (final WindowStoreIterator iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(SEGMENT_INTERVAL * 4))) { + fetchedCount = 0; + while (iter.hasNext()) { + iter.next(); + fetchedCount++; + } } // 1 extra record is expired in the case of RocksDBWindowStore as // actualFrom = observedStreamTime - retentionPeriod + 1. 
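testSegmentMaintenance previously left its WindowStoreIterator open while counting records; the updated hunks open it in try-with-resources. A self-contained sketch of that counting pattern, with the test's windowStore and SEGMENT_INTERVAL fields generalized into parameters:

// Sketch of the counting pattern now used in testSegmentMaintenance.
import static java.time.Instant.ofEpochMilli;

import org.apache.kafka.streams.state.WindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

final class FetchCountSketch {
    static int countFetched(final WindowStore<Integer, String> store, final long toMs) {
        int fetched = 0;
        // try-with-resources guarantees the iterator is closed even if the loop
        // or an assertion that follows throws
        try (final WindowStoreIterator<String> iter =
                 store.fetch(0, ofEpochMilli(0L), ofEpochMilli(toMs))) {
            while (iter.hasNext()) {
                iter.next();
                fetched++;
            }
        }
        return fetched;
    }
}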
The +1 @@ -520,11 +520,12 @@ public void testSegmentMaintenance() { windowStore.put(0, "v", SEGMENT_INTERVAL * 5); - iter = windowStore.fetch(0, ofEpochMilli(SEGMENT_INTERVAL * 4), ofEpochMilli(SEGMENT_INTERVAL * 10)); - fetchedCount = 0; - while (iter.hasNext()) { - iter.next(); - fetchedCount++; + try (final WindowStoreIterator iter = windowStore.fetch(0, ofEpochMilli(SEGMENT_INTERVAL * 4), ofEpochMilli(SEGMENT_INTERVAL * 10))) { + fetchedCount = 0; + while (iter.hasNext()) { + iter.next(); + fetchedCount++; + } } // the latest record has a timestamp > 60k. So, the +1 in actualFrom calculation in @@ -552,7 +553,7 @@ public void testInitialLoading() { windowStore.close(); windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); // put something in the store to advance its stream time and expire the old segments windowStore.put(1, "v", 6L * SEGMENT_INTERVAL); @@ -568,7 +569,7 @@ public void testInitialLoading() { assertEquals(expected, actual); - try (final WindowStoreIterator iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(1000000L))) { + try (final WindowStoreIterator iter = windowStore.fetch(0, ofEpochMilli(0L), ofEpochMilli(1000000L))) { while (iter.hasNext()) { iter.next(); } @@ -583,7 +584,6 @@ public void testInitialLoading() { ); } - @SuppressWarnings("unchecked") @Test public void testRestore() throws Exception { final long startTime = SEGMENT_INTERVAL * 2; @@ -610,62 +610,62 @@ public void testRestore() throws Exception { false, Serdes.Integer(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); // For all tests, for WindowStore actualFrom is computed using observedStreamTime - retention + 1. 
// while for TimeOrderedWindowStores, actualFrom = observedStreamTime - retention assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 8 + WINDOW_SIZE)))); @@ -679,25 +679,25 @@ public void testRestore() throws Exception { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); @@ -713,14 +713,14 @@ public void testRestore() throws Exception { if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + 
valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); } else { assertEquals( new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); @@ -728,25 +728,25 @@ public void testRestore() throws Exception { } assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("six")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("seven")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("eight")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 8 + WINDOW_SIZE)))); @@ -762,11 +762,12 @@ public void testRestore() throws Exception { ); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldMatchPositionAfterPut() { final MeteredWindowStore meteredSessionStore = (MeteredWindowStore) windowStore; final ChangeLoggingWindowBytesStore changeLoggingSessionBytesStore = (ChangeLoggingWindowBytesStore) meteredSessionStore.wrapped(); - final WrappedStateStore rocksDBWindowStore = (WrappedStateStore) changeLoggingSessionBytesStore.wrapped(); + final WrappedStateStore rocksDBWindowStore = (WrappedStateStore) changeLoggingSessionBytesStore.wrapped(); context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders())); windowStore.put(0, "0", SEGMENT_INTERVAL); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java index 161f790c14278..744f3ebc86473 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java @@ -21,8 +21,10 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; @@ -31,7 +33,6 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import 
org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.query.Position; @@ -93,7 +94,7 @@ enum StoreType { private MockRecordCollector recordCollector; - InternalMockProcessorContext context; + InternalMockProcessorContext context; SessionStore buildSessionStore(final long retentionPeriod, final Serde keySerde, @@ -159,7 +160,7 @@ public void setUp() { new MockStreamsMetrics(new Metrics()))); context.setTime(1L); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); } @AfterEach @@ -240,6 +241,7 @@ public void shouldFetchAllSessionsWithSameRecordKey() { } } + @SuppressWarnings("resource") @Test public void shouldFindSessionsForTimeRange() { sessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 5L); @@ -552,7 +554,7 @@ public void shouldBackwardFindSessionsToMerge() { public void shouldFetchExactKeys() { sessionStore.close(); sessionStore = buildSessionStore(0x7a00000000000000L, Serdes.String(), Serdes.Long()); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); sessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L); sessionStore.put(new Windowed<>("aa", new SessionWindow(0, 10)), 2L); @@ -608,7 +610,7 @@ public void shouldFetchExactKeys() { public void shouldBackwardFetchExactKeys() { sessionStore.close(); sessionStore = buildSessionStore(0x7a00000000000000L, Serdes.String(), Serdes.Long()); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); sessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L); sessionStore.put(new Windowed<>("aa", new SessionWindow(0, 10)), 2L); @@ -665,7 +667,7 @@ public void shouldFetchAndIterateOverExactBinaryKeys() { final SessionStore sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.Bytes(), Serdes.String()); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); final Bytes key1 = Bytes.wrap(new byte[] {0}); final Bytes key2 = Bytes.wrap(new byte[] {0, 0}); @@ -704,7 +706,7 @@ public void shouldBackwardFetchAndIterateOverExactBinaryKeys() { final SessionStore sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.Bytes(), Serdes.String()); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); final Bytes key1 = Bytes.wrap(new byte[] {0}); final Bytes key2 = Bytes.wrap(new byte[] {0, 0}); @@ -747,10 +749,11 @@ public void testIteratorPeek() { sessionStore.put(new Windowed<>("aa", new SessionWindow(10, 20)), 4L); try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", 0L, 20)) { - - assertEquals(iterator.peekNextKey(), new Windowed<>("a", new SessionWindow(0L, 0L))); - assertEquals(iterator.peekNextKey(), iterator.next().key); - assertEquals(iterator.peekNextKey(), iterator.next().key); + assertEquals(new Windowed<>("a", new SessionWindow(0L, 0L)), iterator.peekNextKey()); + final Windowed k1 = iterator.peekNextKey(); + assertEquals(iterator.next().key, k1); + final Windowed k2 = iterator.peekNextKey(); + assertEquals(iterator.next().key, k2); assertFalse(iterator.hasNext()); } } @@ -763,15 +766,15 @@ public void testIteratorPeekBackward() { sessionStore.put(new Windowed<>("aa", new SessionWindow(10, 20)), 4L); try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", 0L, 20)) { - - 
assertEquals(iterator.peekNextKey(), new Windowed<>("a", new SessionWindow(10L, 20L))); - assertEquals(iterator.peekNextKey(), iterator.next().key); - assertEquals(iterator.peekNextKey(), iterator.next().key); + assertEquals(new Windowed<>("a", new SessionWindow(10L, 20L)), iterator.peekNextKey()); + final Windowed k1 = iterator.peekNextKey(); + assertEquals(iterator.next().key, k1); + final Windowed k2 = iterator.peekNextKey(); + assertEquals(iterator.next().key, k2); assertFalse(iterator.hasNext()); } } - @SuppressWarnings("unchecked") @Test public void shouldRestore() { final List, Long>> expected = Arrays.asList( @@ -842,7 +845,7 @@ public void shouldReturnSameResultsForSingleKeyFindSessionsAndEqualKeyRangeFindS public void shouldMeasureExpiredRecords() { final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final SessionStore sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long()); - final InternalMockProcessorContext context = new InternalMockProcessorContext( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector @@ -850,7 +853,7 @@ public void shouldMeasureExpiredRecords() { final Time time = Time.SYSTEM; context.setTime(1L); context.setSystemTimeMs(time.milliseconds()); - sessionStore.init((StateStoreContext) context, sessionStore); + sessionStore.init(context, sessionStore); // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired // Note that rocksdb will only expire segments at a time (where segment interval = 60,000 for this retention period) @@ -894,11 +897,13 @@ public void shouldNotThrowExceptionRemovingNonexistentKey() { sessionStore.remove(new Windowed<>("a", new SessionWindow(0, 1))); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFindSessionsNullKey() { assertThrows(NullPointerException.class, () -> sessionStore.findSessions(null, 1L, 2L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFetchNullKey() { assertThrows(NullPointerException.class, () -> sessionStore.fetch(null)); @@ -914,12 +919,11 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { assertThrows(NullPointerException.class, () -> sessionStore.put(null, 1L)); } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final String keyFrom = Serdes.String().deserializer() - .deserialize("", Serdes.Integer().serializer().serialize("", -1)); - final String keyTo = Serdes.String().deserializer() - .deserialize("", Serdes.Integer().serializer().serialize("", 1)); + final String keyFrom = new StringDeserializer().deserialize("", new IntegerSerializer().serialize("", -1)); + final String keyTo = new StringDeserializer().deserialize("", new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(); final KeyValueIterator, Long> iterator = sessionStore.findSessions(keyFrom, keyTo, 0L, 10L)) { @@ -968,16 +972,12 @@ public void shouldRemoveExpired() { @Test public void shouldMatchPositionAfterPut() { - final MeteredSessionStore meteredSessionStore = (MeteredSessionStore) sessionStore; - final ChangeLoggingSessionBytesStore changeLoggingSessionBytesStore = (ChangeLoggingSessionBytesStore) meteredSessionStore.wrapped(); - final SessionStore wrapped = (SessionStore) changeLoggingSessionBytesStore.wrapped(); - 
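shouldNotThrowInvalidRangeExceptionWithNegativeFromKey still builds its "negative" String key by serializing an int and reinterpreting the bytes, but now instantiates IntegerSerializer and StringDeserializer directly instead of going through the Serdes factories, presumably for the same unclosed-Serde reason noted earlier. A sketch of that key-construction trick:

// Sketch of the negative-key construction; the rationale is an assumption.
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;

final class NegativeKeySketch {
    static String intBytesAsString(final int value) {
        final byte[] bytes = new IntegerSerializer().serialize("", value);
        return new StringDeserializer().deserialize("", bytes);
    }
}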
context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders())); - sessionStore.put(new Windowed("a", new SessionWindow(0, 0)), 1L); + sessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L); context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders())); - sessionStore.put(new Windowed("aa", new SessionWindow(0, 10)), 2L); + sessionStore.put(new Windowed<>("aa", new SessionWindow(0, 10)), 2L); context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders())); - sessionStore.put(new Windowed("a", new SessionWindow(10, 20)), 3L); + sessionStore.put(new Windowed<>("a", new SessionWindow(10, 20)), 3L); final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L))))); final Position actual = sessionStore.getPosition(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java index 732527c2c3a5f..8d2e7e61abd0c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java @@ -30,7 +30,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.state.KeyValueIterator; @@ -60,9 +59,9 @@ import static java.util.Arrays.asList; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.toSet; -import static org.apache.kafka.test.StreamsTestUtils.valuesToSet; +import static org.apache.kafka.test.StreamsTestUtils.valuesToSetAndCloseIterator; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.MatcherAssert.assertThat; @@ -89,7 +88,7 @@ public abstract class AbstractWindowBytesStoreTest { final KeyValue, String> five = windowedPair(5, "five", defaultStartTime + 5); WindowStore windowStore; - InternalMockProcessorContext context; + InternalMockProcessorContext context; MockRecordCollector recordCollector; final File baseDir = TestUtils.tempDirectory("test"); @@ -117,7 +116,7 @@ protected void setup() { new MockStreamsMetrics(new Metrics()))); context.setTime(1L); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); } @AfterEach @@ -131,12 +130,12 @@ public void testRangeAndSinglePointFetch() { assertEquals( new HashSet<>(Collections.singletonList("zero")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, - ofEpochMilli(defaultStartTime + 0 - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0 + WINDOW_SIZE)))); + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); - putSecondBatch(windowStore, defaultStartTime, context); + putSecondBatch(windowStore, defaultStartTime); assertEquals("two+1", windowStore.fetch(2, defaultStartTime + 3L)); 
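In the session-store shouldMatchPositionAfterPut above, the now-unused store unwrapping is removed and the Position check is kept: three puts recorded against topic "" / partition 0 with offsets 1, 2, 3 should leave the store's position at offset 3. A sketch of how that expected Position is assembled with Utils.mkMap/mkEntry:

// Sketch of the expected Position built in shouldMatchPositionAfterPut.
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;

import org.apache.kafka.streams.query.Position;

final class PositionSketch {
    static Position expectedAfterThreePuts() {
        // topic -> (partition -> latest offset seen)
        return Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L)))));
    }
}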
assertEquals("two+2", windowStore.fetch(2, defaultStartTime + 4L)); @@ -147,91 +146,91 @@ public void testRangeAndSinglePointFetch() { assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4", "two+5")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+1", "two+2", "two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 6L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 6L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+2", "two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 7L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 7L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 8L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 8L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 9L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 9L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+5", "two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 10L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 10L + WINDOW_SIZE)))); assertEquals( new 
HashSet<>(Collections.singletonList("two+6")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 11L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 11L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 12L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 12L + WINDOW_SIZE)))); @@ -264,14 +263,14 @@ public void shouldGetAll() { assertEquals( asList(zero, one, two, three, four, five), - toList(windowStore.all()) + toListAndCloseIterator(windowStore.all()) ); } @Test public void shouldGetAllNonDeletedRecords() { // Add some records - windowStore.put(0, "zero", defaultStartTime + 0); + windowStore.put(0, "zero", defaultStartTime); windowStore.put(1, "one", defaultStartTime + 1); windowStore.put(2, "two", defaultStartTime + 2); windowStore.put(3, "three", defaultStartTime + 3); @@ -284,7 +283,7 @@ public void shouldGetAllNonDeletedRecords() { // Only non-deleted records should appear in the all() iterator assertEquals( asList(zero, two, four), - toList(windowStore.all()) + toListAndCloseIterator(windowStore.all()) ); } @@ -292,7 +291,7 @@ public void shouldGetAllNonDeletedRecords() { public void shouldGetAllReturnTimestampOrderedRecords() { // Add some records in different order windowStore.put(4, "four", defaultStartTime + 4); - windowStore.put(0, "zero", defaultStartTime + 0); + windowStore.put(0, "zero", defaultStartTime); windowStore.put(2, "two", defaultStartTime + 2); windowStore.put(3, "three", defaultStartTime + 3); windowStore.put(1, "one", defaultStartTime + 1); @@ -302,13 +301,13 @@ public void shouldGetAllReturnTimestampOrderedRecords() { assertEquals( asList(zero, one, two, three, four), - toList(windowStore.all()) + toListAndCloseIterator(windowStore.all()) ); } @Test public void shouldEarlyClosedIteratorStillGetAllRecords() { - windowStore.put(0, "zero", defaultStartTime + 0); + windowStore.put(0, "zero", defaultStartTime); windowStore.put(1, "one", defaultStartTime + 1); final KeyValueIterator, String> it = windowStore.all(); @@ -318,7 +317,7 @@ public void shouldEarlyClosedIteratorStillGetAllRecords() { // A new all() iterator after a previous all() iterator was closed should return all elements. 
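shouldEarlyClosedIteratorStillGetAllRecords (updated above only to drop the redundant "+ 0" on defaultStartTime) asserts that closing one all() iterator early does not affect a freshly opened one. A sketch of the behavior being exercised, assuming the store already holds records:

// Sketch of the early-close behavior asserted by the test.
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.WindowStore;

final class EarlyCloseSketch {
    static int countAfterEarlyClose(final WindowStore<Integer, String> store) {
        final KeyValueIterator<Windowed<Integer>, String> first = store.all();
        first.next();   // consume a single record ...
        first.close();  // ... then close early

        int count = 0;
        try (final KeyValueIterator<Windowed<Integer>, String> second = store.all()) {
            while (second.hasNext()) {
                second.next();
                count++;    // a new iterator still sees every record
            }
        }
        return count;
    }
}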
assertEquals( asList(zero, one), - toList(windowStore.all()) + toListAndCloseIterator(windowStore.all()) ); } @@ -328,7 +327,7 @@ public void shouldGetBackwardAll() { assertEquals( asList(five, four, three, two, one, zero), - toList(windowStore.backwardAll()) + toListAndCloseIterator(windowStore.backwardAll()) ); } @@ -338,15 +337,15 @@ public void shouldFetchAllInTimeRange() { assertEquals( asList(one, two, three, four), - toList(windowStore.fetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 4))) + toListAndCloseIterator(windowStore.fetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 4))) ); assertEquals( asList(zero, one, two, three), - toList(windowStore.fetchAll(ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + 3))) + toListAndCloseIterator(windowStore.fetchAll(ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + 3))) ); assertEquals( asList(one, two, three, four, five), - toList(windowStore.fetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 5))) + toListAndCloseIterator(windowStore.fetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 5))) ); } @@ -356,15 +355,15 @@ public void shouldBackwardFetchAllInTimeRange() { assertEquals( asList(four, three, two, one), - toList(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 4))) + toListAndCloseIterator(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 4))) ); assertEquals( asList(three, two, one, zero), - toList(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + 3))) + toListAndCloseIterator(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + 3))) ); assertEquals( asList(five, four, three, two, one), - toList(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 5))) + toListAndCloseIterator(windowStore.backwardFetchAll(ofEpochMilli(defaultStartTime + 1), ofEpochMilli(defaultStartTime + 5))) ); } @@ -374,55 +373,55 @@ public void testFetchRange() { assertEquals( asList(zero, one), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 0, 1, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( Collections.singletonList(one), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 1, 1, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(one, two, three), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 1, 3, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(zero, one, two, three), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 0, 5, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(zero, one, two, three, four, five), - 
toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 0, 5, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE + 5L))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( asList(two, three, four, five), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 0, 5, ofEpochMilli(defaultStartTime + 2L), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE + 5L))) + ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( Collections.emptyList(), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 4, 5, ofEpochMilli(defaultStartTime + 2L), @@ -430,7 +429,7 @@ public void testFetchRange() { ); assertEquals( Collections.emptyList(), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 0, 3, ofEpochMilli(defaultStartTime + 3L), @@ -438,26 +437,26 @@ public void testFetchRange() { ); assertEquals( asList(zero, one, two), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( null, 2, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 2L))) ); assertEquals( asList(two, three, four, five), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( 2, null, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( asList(zero, one, two, three, four, five), - toList(windowStore.fetch( + toListAndCloseIterator(windowStore.fetch( null, null, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); } @@ -469,55 +468,55 @@ public void testBackwardFetchRange() { assertEquals( asList(one, zero), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 0, 1, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( Collections.singletonList(one), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 1, 1, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(three, two, one), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 1, 3, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(three, two, one, zero), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 0, 5, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE))) + ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE))) ); assertEquals( asList(five, four, three, two, one, zero), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 0, 5, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE + 5L))) + 
ofEpochMilli(defaultStartTime - WINDOW_SIZE), + ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( asList(five, four, three, two), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 0, 5, ofEpochMilli(defaultStartTime + 2L), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE + 5L))) + ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( Collections.emptyList(), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 4, 5, ofEpochMilli(defaultStartTime + 2L), @@ -525,7 +524,7 @@ public void testBackwardFetchRange() { ); assertEquals( Collections.emptyList(), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 0, 3, ofEpochMilli(defaultStartTime + 3L), @@ -533,26 +532,26 @@ public void testBackwardFetchRange() { ); assertEquals( asList(two, one, zero), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( null, 2, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 2L))) ); assertEquals( asList(five, four, three, two), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( 2, null, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); assertEquals( asList(five, four, three, two, one, zero), - toList(windowStore.backwardFetch( + toListAndCloseIterator(windowStore.backwardFetch( null, null, - ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), + ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE + 5L))) ); } @@ -563,70 +562,70 @@ public void testPutAndFetchBefore() { assertEquals( new HashSet<>(Collections.singletonList("zero")), - valuesToSet(windowStore.fetch(0, ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 0L)))); + valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime)))); assertEquals( new HashSet<>(Collections.singletonList("one")), - valuesToSet(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); + valuesToSetAndCloseIterator(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); assertEquals( new HashSet<>(Collections.singletonList("three")), - valuesToSet(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); + valuesToSetAndCloseIterator(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); assertEquals( new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); + valuesToSetAndCloseIterator(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); assertEquals( new HashSet<>(Collections.singletonList("five")), - 
valuesToSet(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); + valuesToSetAndCloseIterator(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); - putSecondBatch(windowStore, defaultStartTime, context); + putSecondBatch(windowStore, defaultStartTime); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 1L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 1L)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 0L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 0L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); assertEquals( new HashSet<>(asList("two", "two+1")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); assertEquals( new HashSet<>(asList("two+1", "two+2", "two+3", "two+4")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 6L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 6L)))); assertEquals( new HashSet<>(asList("two+2", "two+3", "two+4", "two+5")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 7L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 7L)))); assertEquals( new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 8L)))); + 
valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 8L)))); assertEquals( new HashSet<>(asList("two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 9L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 9L)))); assertEquals( new HashSet<>(asList("two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 10L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 10L)))); assertEquals( new HashSet<>(Collections.singletonList("two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 11L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 11L)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 12L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 12L)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 13L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 13L)))); + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 13L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 13L)))); // Flush the store and verify all current entries were properly flushed ... 
windowStore.flush(); @@ -652,90 +651,90 @@ public void testPutAndFetchAfter() { assertEquals( new HashSet<>(Collections.singletonList("zero")), - valuesToSet(windowStore.fetch(0, ofEpochMilli(defaultStartTime + 0L), - ofEpochMilli(defaultStartTime + 0L + WINDOW_SIZE)))); + valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime), + ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("one")), - valuesToSet(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L), + valuesToSetAndCloseIterator(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L), + valuesToSetAndCloseIterator(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("four")), - valuesToSet(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L), + valuesToSetAndCloseIterator(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("five")), - valuesToSet(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L), + valuesToSetAndCloseIterator(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); - putSecondBatch(windowStore, defaultStartTime, context); + putSecondBatch(windowStore, defaultStartTime); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 2L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 2L), ofEpochMilli(defaultStartTime - 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("two")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L), ofEpochMilli(defaultStartTime - 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1")), - valuesToSet(windowStore + valuesToSetAndCloseIterator(windowStore .fetch(2, ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two", "two+1", "two+2", "two+3")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+1", "two+2", "two+3", "two+4")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+2", "two+3", 
"two+4", "two+5")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+4", "two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L), ofEpochMilli(defaultStartTime + 6L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("two+5", "two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L), ofEpochMilli(defaultStartTime + 7L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.singletonList("two+6")), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L), ofEpochMilli(defaultStartTime + 8L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L), ofEpochMilli(defaultStartTime + 9L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L), ofEpochMilli(defaultStartTime + 10L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L), ofEpochMilli(defaultStartTime + 11L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L), + valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L), ofEpochMilli(defaultStartTime + 12L + WINDOW_SIZE)))); // Flush the store and verify all current entries were properly flushed ... 
@@ -763,13 +762,13 @@ public void testPutAndFetchAfter() { public void testPutSameKeyTimestamp() { windowStore.close(); windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, true, Serdes.Integer(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); windowStore.put(0, "zero", defaultStartTime); assertEquals( new HashSet<>(Collections.singletonList("zero")), - valuesToSet(windowStore.fetch(0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), + valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); windowStore.put(0, "zero", defaultStartTime); @@ -778,31 +777,31 @@ public void testPutSameKeyTimestamp() { assertEquals( new HashSet<>(asList("zero", "zero", "zero+", "zero++")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("zero", "zero", "zero+", "zero++")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("zero", "zero", "zero+", "zero++")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( new HashSet<>(asList("zero", "zero", "zero+", "zero++")), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( new HashSet<>(Collections.emptyList()), - valuesToSet(windowStore.fetch( + valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); @@ -845,7 +844,7 @@ public void shouldFetchAndIterateOverExactKeys() { Serdes.String(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); windowStore.put("a", "0001", 0); windowStore.put("aa", "0002", 0); @@ -855,7 +854,7 @@ public void shouldFetchAndIterateOverExactKeys() { final Set expected = new HashSet<>(asList("0001", "0003", "0005")); assertThat( - valuesToSet(windowStore.fetch("a", ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), + valuesToSetAndCloseIterator(windowStore.fetch("a", ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expected) ); @@ -887,12 +886,14 @@ public void testDeleteAndUpdate() { windowStore.put(1, "one", currentTime); windowStore.put(1, "one v2", currentTime); - WindowStoreIterator iterator = windowStore.fetch(1, 0, currentTime); - assertEquals(new KeyValue<>(currentTime, "one v2"), iterator.next()); + try (final WindowStoreIterator iterator = windowStore.fetch(1, 0, currentTime)) { + assertEquals(new KeyValue<>(currentTime, "one v2"), iterator.next()); + } windowStore.put(1, null, currentTime); - iterator = windowStore.fetch(1, 0, currentTime); - assertFalse(iterator.hasNext()); + try (final WindowStoreIterator iterator = windowStore.fetch(1, 0, currentTime)) { + assertFalse(iterator.hasNext()); + } } @Test @@ -905,6 +906,7 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { assertThrows(NullPointerException.class, () -> windowStore.put(null, "anyValue", 
0L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnGetNullKey() { assertThrows(NullPointerException.class, () -> windowStore.fetch(null, ofEpochMilli(1L), ofEpochMilli(2L))); @@ -917,7 +919,7 @@ public void shouldFetchAndIterateOverExactBinaryKeys() { true, Serdes.Bytes(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); final Bytes key1 = Bytes.wrap(new byte[] {0}); final Bytes key2 = Bytes.wrap(new byte[] {0, 0}); @@ -934,17 +936,17 @@ public void shouldFetchAndIterateOverExactBinaryKeys() { final Set expectedKey1 = new HashSet<>(asList("1", "4", "7")); assertThat( - valuesToSet(windowStore.fetch(key1, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), + valuesToSetAndCloseIterator(windowStore.fetch(key1, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey1) ); final Set expectedKey2 = new HashSet<>(asList("2", "5", "8")); assertThat( - valuesToSet(windowStore.fetch(key2, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), + valuesToSetAndCloseIterator(windowStore.fetch(key2, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey2) ); final Set expectedKey3 = new HashSet<>(asList("3", "6", "9")); assertThat( - valuesToSet(windowStore.fetch(key3, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), + valuesToSetAndCloseIterator(windowStore.fetch(key3, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey3) ); @@ -990,7 +992,7 @@ public void shouldMeasureExpiredRecords() { final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final WindowStore windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String()); - final InternalMockProcessorContext context = new InternalMockProcessorContext( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector @@ -998,7 +1000,7 @@ public void shouldMeasureExpiredRecords() { final Time time = Time.SYSTEM; context.setSystemTimeMs(time.milliseconds()); context.setTime(1L); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired windowStore.put(1, "initial record", 2 * RETENTION_PERIOD); @@ -1108,7 +1110,7 @@ public void shouldNotThrowConcurrentModificationException() { public void testFetchDuplicates() { windowStore.close(); windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, true, Serdes.Integer(), Serdes.String()); - windowStore.init((StateStoreContext) context, windowStore); + windowStore.init(context, windowStore); long currentTime = 0; windowStore.put(1, "one", currentTime); @@ -1135,7 +1137,7 @@ public void testFetchDuplicates() { private void putFirstBatch(final WindowStore store, @SuppressWarnings("SameParameterValue") final long startTime, - final InternalMockProcessorContext context) { + final InternalMockProcessorContext context) { context.setRecordContext(createRecordContext(startTime)); store.put(0, "zero", startTime); store.put(1, "one", startTime + 1L); @@ -1146,8 +1148,7 @@ private void putFirstBatch(final WindowStore store, } private void putSecondBatch(final WindowStore store, - @SuppressWarnings("SameParameterValue") final long startTime, - final InternalMockProcessorContext context) { + @SuppressWarnings("SameParameterValue") final long startTime) { store.put(2, 
"two+1", startTime + 3L); store.put(2, "two+2", startTime + 4L); store.put(2, "two+3", startTime + 5L); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemoryKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemoryKeyValueStoreTest.java index 5b3c6a71ee05a..e2e37ad1e4ade 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemoryKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemoryKeyValueStoreTest.java @@ -73,7 +73,7 @@ public class CachingInMemoryKeyValueStoreTest extends AbstractKeyValueStoreTest private static final String TOPIC = "topic"; private static final String CACHE_NAMESPACE = "0_0-store-name"; private final int maxCacheSizeBytes = 150; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private CachingKeyValueStore store; private KeyValueStore underlyingStore; private ThreadCache cache; @@ -89,7 +89,7 @@ public void setUp() { cache = new ThreadCache(new LogContext("testCache "), maxCacheSizeBytes, new MockStreamsMetrics(new Metrics())); context = new InternalMockProcessorContext<>(null, null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - store.init((StateStoreContext) context, null); + store.init(context, null); } @SuppressWarnings("unchecked") @@ -111,8 +111,8 @@ public void shouldDelegateInit() { final KeyValueStore inner = mock(InMemoryKeyValueStore.class); final CachingKeyValueStore outer = new CachingKeyValueStore(inner, false); when(inner.name()).thenReturn("store"); - outer.init((StateStoreContext) context, outer); - verify(inner).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner).init(context, outer); } @Test @@ -182,7 +182,7 @@ private void setUpCloseTests() { cache = mock(ThreadCache.class); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - store.init((StateStoreContext) context, store); + store.init(context, store); } @Test @@ -460,10 +460,18 @@ public void shouldDeleteItemsFromCache() { store.put(bytesKey("a"), bytesValue("a")); store.delete(bytesKey("a")); assertNull(store.get(bytesKey("a"))); - assertFalse(store.range(bytesKey("a"), bytesKey("b")).hasNext()); - assertFalse(store.reverseRange(bytesKey("a"), bytesKey("b")).hasNext()); - assertFalse(store.all().hasNext()); - assertFalse(store.reverseAll().hasNext()); + try (final KeyValueIterator iterator = store.range(bytesKey("a"), bytesKey("b"))) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.reverseRange(bytesKey("a"), bytesKey("b"))) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.all()) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.reverseAll()) { + assertFalse(iterator.hasNext()); + } } @Test @@ -472,10 +480,18 @@ public void shouldNotShowItemsDeletedFromCacheButFlushedToStoreBeforeDelete() { store.flush(); store.delete(bytesKey("a")); assertNull(store.get(bytesKey("a"))); - assertFalse(store.range(bytesKey("a"), bytesKey("b")).hasNext()); - assertFalse(store.reverseRange(bytesKey("a"), bytesKey("b")).hasNext()); - assertFalse(store.all().hasNext()); - assertFalse(store.reverseAll().hasNext()); + try (final KeyValueIterator iterator = 
store.range(bytesKey("a"), bytesKey("b"))) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.reverseRange(bytesKey("a"), bytesKey("b"))) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.all()) { + assertFalse(iterator.hasNext()); + } + try (final KeyValueIterator iterator = store.reverseAll()) { + assertFalse(iterator.hasNext()); + } } @Test @@ -502,6 +518,7 @@ public void shouldThrowIfTryingToWriteToClosedCachingStore() { }); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToDoRangeQueryOnClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> { @@ -510,6 +527,7 @@ public void shouldThrowIfTryingToDoRangeQueryOnClosedCachingStore() { }); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToDoReverseRangeQueryOnClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> { @@ -518,6 +536,7 @@ public void shouldThrowIfTryingToDoReverseRangeQueryOnClosedCachingStore() { }); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToDoAllQueryOnClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> { @@ -526,6 +545,7 @@ public void shouldThrowIfTryingToDoAllQueryOnClosedCachingStore() { }); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToDoReverseAllQueryOnClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java index fcf63e97863be..af4dbf3a446c1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java @@ -19,7 +19,7 @@ import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; @@ -31,7 +31,6 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.Change; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -60,7 +59,7 @@ import static java.util.Arrays.asList; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; import static org.hamcrest.CoreMatchers.hasItem; @@ -78,7 +77,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -@SuppressWarnings("PointlessArithmeticExpression") 
@ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class CachingInMemorySessionStoreTest { @@ -94,7 +92,7 @@ public class CachingInMemorySessionStoreTest { private final Bytes keyB = Bytes.wrap("b".getBytes()); private SessionStore underlyingStore; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private CachingSessionStore cachingStore; private ThreadCache cache; @@ -105,7 +103,7 @@ public void before() { cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics())); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @AfterEach @@ -118,8 +116,8 @@ public void shouldDelegateInit() { final SessionStore inner = mock(InMemorySessionStore.class); final CachingSessionStore outer = new CachingSessionStore(inner, SEGMENT_INTERVAL); when(inner.name()).thenReturn("store"); - outer.init((StateStoreContext) context, outer); - verify(inner).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner).init(context, outer); } @Test @@ -299,9 +297,9 @@ private void setUpCloseTests() { when(underlyingStore.name()).thenReturn("store-name"); cachingStore = new CachingSessionStore(underlyingStore, SEGMENT_INTERVAL); cache = mock(ThreadCache.class); - final InternalMockProcessorContext context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); + final InternalMockProcessorContext context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @Test @@ -401,7 +399,7 @@ public void shouldFetchAllSessionsWithSameRecordKey() { // add one that shouldn't appear in the results cachingStore.put(new Windowed<>(keyAA, new SessionWindow(0, 0)), "5".getBytes()); - final List, byte[]>> results = toList(cachingStore.fetch(keyA)); + final List, byte[]>> results = toListAndCloseIterator(cachingStore.fetch(keyA)); verifyKeyValueList(expected, results); } @@ -420,7 +418,7 @@ public void shouldBackwardFetchAllSessionsWithSameRecordKey() { // add one that shouldn't appear in the results cachingStore.put(new Windowed<>(keyAA, new SessionWindow(0, 0)), "5".getBytes()); - final List, byte[]>> results = toList(cachingStore.backwardFetch(keyA)); + final List, byte[]>> results = toListAndCloseIterator(cachingStore.backwardFetch(keyA)); Collections.reverse(results); verifyKeyValueList(expected, results); } @@ -439,7 +437,7 @@ public void shouldFlushItemsToStoreOnEviction() { @Test public void shouldQueryItemsInCacheAndStore() { final List, byte[]>> added = addSessionsUntilOverflow("a"); - final List, byte[]>> actual = toList(cachingStore.findSessions( + final List, byte[]>> actual = toListAndCloseIterator(cachingStore.findSessions( Bytes.wrap("a".getBytes(StandardCharsets.UTF_8)), 0, added.size() * 10L)); @@ -465,8 +463,8 @@ public void shouldRemove() { @Test public void shouldFetchCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = 
new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed a4 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 3, SEGMENT_INTERVAL * 3)); final Windowed a5 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 4, SEGMENT_INTERVAL * 4)); @@ -492,8 +490,8 @@ public void shouldFetchCorrectlyAcrossSegments() { @Test public void shouldBackwardFetchCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed a4 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 3, SEGMENT_INTERVAL * 3)); final Windowed a5 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 4, SEGMENT_INTERVAL * 4)); @@ -519,9 +517,9 @@ public void shouldBackwardFetchCorrectlyAcrossSegments() { @Test public void shouldFetchRangeCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed aa3 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); cachingStore.put(a1, "1".getBytes()); @@ -542,9 +540,9 @@ public void shouldFetchRangeCorrectlyAcrossSegments() { @Test public void shouldBackwardFetchRangeCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed aa3 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); cachingStore.put(a1, "1".getBytes()); @@ -738,12 +736,14 @@ public void shouldClearNamespaceCacheOnClose() { assertEquals(0, cache.size()); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFetchFromClosedCachingStore() { cachingStore.close(); 
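// Why the @SuppressWarnings("resource") annotations on these closed-store and null-key tests:
// fetch()/findSessions() return a Closeable iterator in the normal case, but inside
// assertThrows() the call is expected to fail (store already closed, or null key) before any
// iterator is created, so there is nothing to close. The annotation is presumably there only
// to quiet the static resource-leak check; it does not change runtime behaviour. For example,
// as in the surrounding hunks:
//
//     @SuppressWarnings("resource")
//     @Test
//     public void shouldThrowNullPointerExceptionOnFetchNullKey() {
//         assertThrows(NullPointerException.class, () -> cachingStore.fetch(null));
//     }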
assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(keyA)); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFindMergeSessionFromClosedCachingStore() { cachingStore.close(); @@ -762,11 +762,13 @@ public void shouldThrowIfTryingToPutIntoClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> cachingStore.put(new Windowed<>(keyA, new SessionWindow(0, 0)), "1".getBytes())); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFindSessionsNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.findSessions(null, 1L, 2L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFetchNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.fetch(null)); @@ -782,10 +784,11 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.put(null, "1".getBytes())); } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L)) { @@ -804,10 +807,11 @@ public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() } } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L)) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java index b267c62244003..0db31dfe03925 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java @@ -19,7 +19,7 @@ import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; @@ -31,7 +31,6 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.Change; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import 
org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -60,7 +59,7 @@ import static java.util.Arrays.asList; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; import static org.hamcrest.CoreMatchers.hasItem; @@ -94,7 +93,7 @@ public class CachingPersistentSessionStoreTest { private SessionStore underlyingStore; private CachingSessionStore cachingStore; private ThreadCache cache; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; @BeforeEach public void before() { @@ -111,7 +110,7 @@ public void before() { this.context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @AfterEach @@ -306,10 +305,10 @@ private void setUpCloseTests() { when(underlyingStore.name()).thenReturn("store-name"); cachingStore = new CachingSessionStore(underlyingStore, SEGMENT_INTERVAL); cache = mock(ThreadCache.class); - final InternalMockProcessorContext context = + final InternalMockProcessorContext context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @Test @@ -409,7 +408,7 @@ public void shouldFetchAllSessionsWithSameRecordKey() { // add one that shouldn't appear in the results cachingStore.put(new Windowed<>(keyAA, new SessionWindow(0, 0)), "5".getBytes()); - final List, byte[]>> results = toList(cachingStore.fetch(keyA)); + final List, byte[]>> results = toListAndCloseIterator(cachingStore.fetch(keyA)); verifyKeyValueList(expected, results); } @@ -428,7 +427,7 @@ public void shouldBackwardFetchAllSessionsWithSameRecordKey() { // add one that shouldn't appear in the results cachingStore.put(new Windowed<>(keyAA, new SessionWindow(0, 0)), "5".getBytes()); - final List, byte[]>> results = toList(cachingStore.backwardFetch(keyA)); + final List, byte[]>> results = toListAndCloseIterator(cachingStore.backwardFetch(keyA)); Collections.reverse(results); verifyKeyValueList(expected, results); } @@ -448,7 +447,7 @@ public void shouldFlushItemsToStoreOnEviction() { @Test public void shouldQueryItemsInCacheAndStore() { final List, byte[]>> added = addSessionsUntilOverflow("a"); - final List, byte[]>> actual = toList(cachingStore.findSessions( + final List, byte[]>> actual = toListAndCloseIterator(cachingStore.findSessions( Bytes.wrap("a".getBytes(StandardCharsets.UTF_8)), 0, added.size() * 10L @@ -476,8 +475,8 @@ public void shouldRemove() { @Test public void shouldFetchCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new 
Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed a4 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 3, SEGMENT_INTERVAL * 3)); final Windowed a5 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 4, SEGMENT_INTERVAL * 4)); @@ -503,8 +502,8 @@ public void shouldFetchCorrectlyAcrossSegments() { @Test public void shouldBackwardFetchCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed a4 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 3, SEGMENT_INTERVAL * 3)); final Windowed a5 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 4, SEGMENT_INTERVAL * 4)); @@ -530,9 +529,9 @@ public void shouldBackwardFetchCorrectlyAcrossSegments() { @Test public void shouldFetchRangeCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed aa3 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); cachingStore.put(a1, "1".getBytes()); @@ -553,9 +552,9 @@ public void shouldFetchRangeCorrectlyAcrossSegments() { @Test public void shouldBackwardFetchRangeCorrectlyAcrossSegments() { - final Windowed a1 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 0, SEGMENT_INTERVAL * 0)); - final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 1, SEGMENT_INTERVAL * 1)); + final Windowed a1 = new Windowed<>(keyA, new SessionWindow(0, 0)); + final Windowed aa1 = new Windowed<>(keyAA, new SessionWindow(0, 0)); + final Windowed a2 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL, SEGMENT_INTERVAL)); final Windowed a3 = new Windowed<>(keyA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); final Windowed aa3 = new Windowed<>(keyAA, new SessionWindow(SEGMENT_INTERVAL * 2, SEGMENT_INTERVAL * 2)); cachingStore.put(a1, "1".getBytes()); @@ -765,12 +764,14 @@ public void shouldClearNamespaceCacheOnClose() { assertEquals(0, cache.size()); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFetchFromClosedCachingStore() { cachingStore.close(); 
assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(keyA)); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFindMergeSessionFromClosedCachingStore() { cachingStore.close(); @@ -789,11 +790,13 @@ public void shouldThrowIfTryingToPutIntoClosedCachingStore() { assertThrows(InvalidStateStoreException.class, () -> cachingStore.put(new Windowed<>(keyA, new SessionWindow(0, 0)), "1".getBytes())); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFindSessionsNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.findSessions(null, 1L, 2L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFetchNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.fetch(null)); @@ -809,10 +812,11 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.put(null, "1".getBytes())); } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); final KeyValueIterator, byte[]> iterator = @@ -832,10 +836,11 @@ public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() } } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L)) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java index 5735e2f7f4e42..4548da5bd1134 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java @@ -19,8 +19,10 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; @@ -34,7 +36,6 @@ import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import org.apache.kafka.streams.kstream.Windowed; import 
org.apache.kafka.streams.kstream.internals.TimeWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; @@ -73,7 +74,7 @@ import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; import static org.apache.kafka.streams.state.internals.ThreadCacheTest.memoryCacheEntrySize; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.verifyAllWindowedKeyValues; import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; @@ -125,7 +126,7 @@ public void setUp() { cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics())); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @AfterEach @@ -139,8 +140,8 @@ public void shouldDelegateInit() { final WindowStore inner = mock(WindowStore.class); final CachingWindowStore outer = new CachingWindowStore(inner, WINDOW_SIZE, SEGMENT_INTERVAL); when(inner.name()).thenReturn("store"); - outer.init((StateStoreContext) context, outer); - verify(inner).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner).init(context, outer); } @Test @@ -210,8 +211,8 @@ public void process(final Record record) { final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), streamsConfiguration, initialWallClockTime); final TestInputTopic inputTopic = driver.createInputTopic(TOPIC, - Serdes.String().serializer(), - Serdes.String().serializer(), + new StringSerializer(), + new StringSerializer(), initialWallClockTime, Duration.ZERO); @@ -312,8 +313,9 @@ private static Bytes bytesKey(final String key) { return Bytes.wrap(key.getBytes()); } + @SuppressWarnings("resource") private String stringFrom(final byte[] from) { - return Serdes.String().deserializer().deserialize("", from); + return new StringDeserializer().deserialize("", from); } @Test @@ -812,12 +814,14 @@ public void shouldClearNamespaceCacheOnClose() { assertEquals(0, cache.size()); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFetchFromClosedCachingStore() { cachingStore.close(); assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(10))); } + @SuppressWarnings("resource") @Test public void shouldThrowIfTryingToFetchRangeFromClosedCachingStore() { cachingStore.close(); @@ -844,7 +848,7 @@ public void shouldFetchAndIterateOverExactKeys() { KeyValue.pair(SEGMENT_INTERVAL, bytesValue("0005")) ); final List> actual = - toList(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -862,7 +866,7 @@ public void shouldBackwardFetchAndIterateOverExactKeys() { KeyValue.pair(0L, bytesValue("0001")) ); final List> actual = - 
toList(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -880,14 +884,14 @@ public void shouldFetchAndIterateOverKeyRange() { windowedPair("a", "0003", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0002", 0), windowedPair("aa", "0004", 1)), - toList(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( @@ -898,7 +902,7 @@ public void shouldFetchAndIterateOverKeyRange() { windowedPair("aa", "0004", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -916,14 +920,14 @@ public void shouldFetchAndIterateOverKeyBackwardRange() { windowedPair("a", "0003", 1), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0004", 1), windowedPair("aa", "0002", 0)), - toList(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( @@ -934,7 +938,7 @@ public void shouldFetchAndIterateOverKeyBackwardRange() { windowedPair("a", "0003", 1), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -984,15 +988,17 @@ public void shouldNotThrowNullPointerExceptionOnPutNullValue() { cachingStore.put(bytesKey("a"), null, 0L); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerExceptionOnFetchNullKey() { assertThrows(NullPointerException.class, () -> cachingStore.fetch(null, ofEpochMilli(1L), ofEpochMilli(2L))); } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { @@ -1009,10 +1015,11 @@ public void 
shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { } } + @SuppressWarnings("resource") @Test public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class); final KeyValueIterator, byte[]> iterator = @@ -1070,7 +1077,7 @@ private void setUpCloseTests() { cache = mock(ThreadCache.class); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } private static KeyValue, byte[]> windowedPair(final String key, final String value, final long timestamp) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStoreTest.java index 34259a60e77f2..154517d3b94f4 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingKeyValueBytesStoreTest.java @@ -29,7 +29,6 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -75,7 +74,7 @@ public class ChangeLoggingKeyValueBytesStoreTest { private final MockRecordCollector collector = new MockRecordCollector(); private final InMemoryKeyValueStore inner = new InMemoryKeyValueStore("kv"); private final ChangeLoggingKeyValueBytesStore store = new ChangeLoggingKeyValueBytesStore(inner); - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private final StreamsConfig streamsConfig = streamsConfigMock(); private final Bytes hi = Bytes.wrap("hi".getBytes()); private final Bytes hello = Bytes.wrap("hello".getBytes()); @@ -90,7 +89,7 @@ public class ChangeLoggingKeyValueBytesStoreTest { public void before() { context = mockContext(); context.setTime(0); - store.init((StateStoreContext) context, store); + store.init(context, store); } private InternalMockProcessorContext mockContext() { @@ -116,8 +115,8 @@ public void shouldDelegateInit() { final InternalMockProcessorContext context = mockContext(); final KeyValueStore innerMock = mock(InMemoryKeyValueStore.class); final StateStore outer = new ChangeLoggingKeyValueBytesStore(innerMock); - outer.init((StateStoreContext) context, outer); - verify(innerMock).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(innerMock).init(context, outer); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStoreTest.java 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStoreTest.java index 9d2058bd158cb..d3243ef2fc669 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingSessionBytesStoreTest.java @@ -19,9 +19,9 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorContextImpl; import org.apache.kafka.streams.query.Position; +import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; import org.junit.jupiter.api.AfterEach; @@ -58,12 +58,12 @@ public class ChangeLoggingSessionBytesStoreTest { @BeforeEach public void setUp() { store = new ChangeLoggingSessionBytesStore(inner); - store.init((StateStoreContext) context, store); + store.init(context, store); } @AfterEach public void tearDown() { - verify(inner).init((StateStoreContext) context, store); + verify(inner).init(context, store); } @Test @@ -105,60 +105,68 @@ public void shouldLogRemoves() { verify(context, times(2)).logChange(store.name(), binaryKey, null, 0L, Position.emptyPosition()); } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetching() { - store.fetch(bytesKey); - - verify(inner).fetch(bytesKey); + try (final KeyValueIterator, byte[]> unused = store.fetch(bytesKey)) { + verify(inner).fetch(bytesKey); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFetching() { - store.backwardFetch(bytesKey); - - verify(inner).backwardFetch(bytesKey); + try (final KeyValueIterator, byte[]> unused = store.backwardFetch(bytesKey)) { + verify(inner).backwardFetch(bytesKey); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetchingRange() { - store.fetch(bytesKey, bytesKey); - - verify(inner).fetch(bytesKey, bytesKey); + try (final KeyValueIterator, byte[]> unused = store.fetch(bytesKey, bytesKey)) { + verify(inner).fetch(bytesKey, bytesKey); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFetchingRange() { - store.backwardFetch(bytesKey, bytesKey); - - verify(inner).backwardFetch(bytesKey, bytesKey); + try (final KeyValueIterator, byte[]> unused = store.backwardFetch(bytesKey, bytesKey)) { + verify(inner).backwardFetch(bytesKey, bytesKey); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFindingSessions() { - store.findSessions(bytesKey, 0, 1); - - verify(inner).findSessions(bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.findSessions(bytesKey, 0, 1)) { + verify(inner).findSessions(bytesKey, 0, 1); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFindingSessions() { - store.backwardFindSessions(bytesKey, 0, 1); - - verify(inner).backwardFindSessions(bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.backwardFindSessions(bytesKey, 0, 1)) { + verify(inner).backwardFindSessions(bytesKey, 0, 1); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFindingSessionRange() { - 
store.findSessions(bytesKey, bytesKey, 0, 1); - - verify(inner).findSessions(bytesKey, bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.findSessions(bytesKey, bytesKey, 0, 1)) { + verify(inner).findSessions(bytesKey, bytesKey, 0, 1); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFindingSessionRange() { - store.backwardFindSessions(bytesKey, bytesKey, 0, 1); - - verify(inner).backwardFindSessions(bytesKey, bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.backwardFindSessions(bytesKey, bytesKey, 0, 1)) { + verify(inner).backwardFindSessions(bytesKey, bytesKey, 0, 1); + } } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStoreTest.java index 1cf338ad9547d..668ef7d21e6c9 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedKeyValueBytesStoreTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; @@ -47,7 +46,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -@SuppressWarnings("rawtypes") @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class ChangeLoggingTimestampedKeyValueBytesStoreTest { @@ -66,12 +64,12 @@ public class ChangeLoggingTimestampedKeyValueBytesStoreTest { @BeforeEach public void before() { - final InternalMockProcessorContext context = mockContext(); + final InternalMockProcessorContext context = mockContext(); context.setTime(0); - store.init((StateStoreContext) context, store); + store.init(context, store); } - private InternalMockProcessorContext mockContext() { + private InternalMockProcessorContext mockContext() { return new InternalMockProcessorContext<>( TestUtils.tempDirectory(), Serdes.String(), @@ -88,12 +86,12 @@ public void after() { @Test public void shouldDelegateInit() { - final InternalMockProcessorContext context = mockContext(); + final InternalMockProcessorContext context = mockContext(); final KeyValueStore inner = mock(InMemoryKeyValueStore.class); final StateStore outer = new ChangeLoggingTimestampedKeyValueBytesStore(inner); - outer.init((StateStoreContext) context, outer); - verify(inner).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner).init(context, outer); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStoreTest.java index 0c8882bb36f21..03701bdcb0086 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStoreTest.java @@ -18,10 +18,12 @@ package 
org.apache.kafka.streams.state.internals; import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.processor.StateStoreContext; +import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.processor.internals.ProcessorContextImpl; import org.apache.kafka.streams.query.Position; +import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.WindowStore; +import org.apache.kafka.streams.state.WindowStoreIterator; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -58,12 +60,12 @@ public class ChangeLoggingTimestampedWindowBytesStoreTest { @BeforeEach public void setUp() { store = new ChangeLoggingTimestampedWindowBytesStore(inner, false); - store.init((StateStoreContext) context, store); + store.init(context, store); } @AfterEach public void tearDown() { - verify(inner).init((StateStoreContext) context, store); + verify(inner).init(context, store); } @Test @@ -72,7 +74,6 @@ public void shouldDelegateInit() { } @Test - @SuppressWarnings("deprecation") public void shouldLogPuts() { final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0); when(inner.getPosition()).thenReturn(Position.emptyPosition()); @@ -94,25 +95,26 @@ public void shouldLogPutsWithPosition() { verify(context).logChange(store.name(), key, value, 42, POSITION); } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetching() { - store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10)); - - verify(inner).fetch(bytesKey, 0, 10); + try (final WindowStoreIterator unused = store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10))) { + verify(inner).fetch(bytesKey, 0, 10); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetchingRange() { - store.fetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1)); - - verify(inner).fetch(bytesKey, bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.fetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1))) { + verify(inner).fetch(bytesKey, bytesKey, 0, 1); + } } @Test - @SuppressWarnings("deprecation") public void shouldRetainDuplicatesWhenSet() { store = new ChangeLoggingTimestampedWindowBytesStore(inner, true); - store.init((StateStoreContext) context, store); + store.init(context, store); final Bytes key1 = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 1); final Bytes key2 = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 2); when(inner.getPosition()).thenReturn(Position.emptyPosition()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStoreTest.java index aab24ad45c744..23550bf6acb24 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingVersionedKeyValueBytesStoreTest.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.VersionedBytesStore; @@ -50,7 +49,6 @@ import static org.mockito.Mockito.mock; import 
static org.mockito.Mockito.verify; -@SuppressWarnings("rawtypes") @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class ChangeLoggingVersionedKeyValueBytesStoreTest { @@ -61,7 +59,7 @@ public class ChangeLoggingVersionedKeyValueBytesStoreTest { private static final long HISTORY_RETENTION = 1000L; private final MockRecordCollector collector = new MockRecordCollector(); - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private VersionedBytesStore inner; private ChangeLoggingVersionedKeyValueBytesStore store; @@ -72,10 +70,10 @@ public void before() { context = mockContext(); context.setTime(0); - store.init((StateStoreContext) context, store); + store.init(context, store); } - private InternalMockProcessorContext mockContext() { + private InternalMockProcessorContext mockContext() { return new InternalMockProcessorContext<>( TestUtils.tempDirectory(), Serdes.String(), @@ -103,9 +101,9 @@ public void shouldDelegateInit() { final VersionedBytesStore mockInner = mock(VersionedBytesStore.class); store = new ChangeLoggingVersionedKeyValueBytesStore(mockInner); - store.init((StateStoreContext) context, store); + store.init(context, store); - verify(mockInner).init((StateStoreContext) context, store); + verify(mockInner).init(context, store); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStoreTest.java index 3cbacf77fecae..2607e56ad9ff1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingWindowBytesStoreTest.java @@ -18,10 +18,12 @@ package org.apache.kafka.streams.state.internals; import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.processor.StateStoreContext; +import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.processor.internals.ProcessorContextImpl; import org.apache.kafka.streams.query.Position; +import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.WindowStore; +import org.apache.kafka.streams.state.WindowStoreIterator; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -57,12 +59,12 @@ public class ChangeLoggingWindowBytesStoreTest { @BeforeEach public void setUp() { store = new ChangeLoggingWindowBytesStore(inner, false, WindowKeySchema::toStoreKeyBinary); - store.init((StateStoreContext) context, store); + store.init(context, store); } @AfterEach public void tearDown() { - verify(inner).init((StateStoreContext) context, store); + verify(inner).init(context, store); } @Test @@ -92,38 +94,42 @@ public void shouldLogPutsWithPosition() { verify(context).logChange(store.name(), key, value, 0L, POSITION); } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetching() { - store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10)); - - verify(inner).fetch(bytesKey, 0, 10); + try (final WindowStoreIterator unused = store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10))) { + verify(inner).fetch(bytesKey, 0, 10); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFetching() { - store.backwardFetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10)); - - 
verify(inner).backwardFetch(bytesKey, 0, 10); + try (final WindowStoreIterator unused = store.backwardFetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10))) { + verify(inner).backwardFetch(bytesKey, 0, 10); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenFetchingRange() { - store.fetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1)); - - verify(inner).fetch(bytesKey, bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.fetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1))) { + verify(inner).fetch(bytesKey, bytesKey, 0, 1); + } } + @SuppressWarnings({"resource", "unused"}) @Test public void shouldDelegateToUnderlyingStoreWhenBackwardFetchingRange() { - store.backwardFetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1)); - - verify(inner).backwardFetch(bytesKey, bytesKey, 0, 1); + try (final KeyValueIterator, byte[]> unused = store.backwardFetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1))) { + verify(inner).backwardFetch(bytesKey, bytesKey, 0, 1); + } } @Test public void shouldRetainDuplicatesWhenSet() { store = new ChangeLoggingWindowBytesStore(inner, true, WindowKeySchema::toStoreKeyBinary); - store.init((StateStoreContext) context, store); + store.init(context, store); when(inner.getPosition()).thenReturn(Position.emptyPosition()); final Bytes key1 = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 1); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyKeyValueStoreTest.java index 3af926a3aa434..f3644f4353707 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyKeyValueStoreTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StoreQueryParameters; import org.apache.kafka.streams.errors.InvalidStateStoreException; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; @@ -43,7 +42,7 @@ import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -80,7 +79,7 @@ private KeyValueStore newStoreInstance() { Serdes.String()) .build(); - @SuppressWarnings("rawtypes") final InternalMockProcessorContext context = + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( new StateSerdes<>( ProcessorStateManager.storeChangelogTopic("appId", storeName, null), @@ -91,7 +90,7 @@ private KeyValueStore newStoreInstance() { ); context.setTime(1L); - store.init((StateStoreContext) context, store); + store.init(context, store); return store; } @@ -141,6 +140,7 @@ public void shouldThrowNullPointerExceptionOnPrefixScanNullPrefix() { assertThrows(NullPointerException.class, () -> theStore.prefixScan(null, new StringSerializer())); } + @SuppressWarnings("resource") @Test public void 
shouldThrowNullPointerExceptionOnPrefixScanNullPrefixKeySerializer() { assertThrows(NullPointerException.class, () -> theStore.prefixScan("aa", null)); @@ -276,7 +276,7 @@ public void shouldSupportRange() { stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("c", "c"); - final List> results = toList(theStore.range("a", "b")); + final List> results = toListAndCloseIterator(theStore.range("a", "b")); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertEquals(2, results.size()); @@ -288,7 +288,7 @@ public void shouldSupportReverseRange() { stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("c", "c"); - final List> results = toList(theStore.reverseRange("a", "b")); + final List> results = toListAndCloseIterator(theStore.reverseRange("a", "b")); assertArrayEquals( asList( new KeyValue<>("b", "b"), @@ -303,7 +303,7 @@ public void shouldReturnKeysWithGivenPrefixExcludingNextKeyLargestKey() { stubOneUnderlying.put("abcd", "b"); stubOneUnderlying.put("abce", "c"); - final List> results = toList(theStore.prefixScan("abcd", new StringSerializer())); + final List> results = toListAndCloseIterator(theStore.prefixScan("abcd", new StringSerializer())); assertTrue(results.contains(new KeyValue<>("abcd", "b"))); assertEquals(1, results.size()); } @@ -314,7 +314,7 @@ public void shouldSupportPrefixScan() { stubOneUnderlying.put("aa", "b"); stubOneUnderlying.put("b", "c"); - final List> results = toList(theStore.prefixScan("a", new StringSerializer())); + final List> results = toListAndCloseIterator(theStore.prefixScan("a", new StringSerializer())); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("aa", "b"))); assertEquals(2, results.size()); @@ -333,7 +333,7 @@ public void shouldSupportRangeAcrossMultipleKVStores() { cache.put("d", "d"); cache.put("x", "x"); - final List> results = toList(theStore.range("a", "e")); + final List> results = toListAndCloseIterator(theStore.range("a", "e")); assertArrayEquals( asList( new KeyValue<>("a", "a"), @@ -357,7 +357,7 @@ public void shouldSupportPrefixScanAcrossMultipleKVStores() { cache.put("ab", "d"); cache.put("x", "x"); - final List> results = toList(theStore.prefixScan("a", new StringSerializer())); + final List> results = toListAndCloseIterator(theStore.prefixScan("a", new StringSerializer())); assertArrayEquals( asList( new KeyValue<>("a", "a"), @@ -380,7 +380,7 @@ public void shouldSupportReverseRangeAcrossMultipleKVStores() { cache.put("d", "d"); cache.put("x", "x"); - final List> results = toList(theStore.reverseRange("a", "e")); + final List> results = toListAndCloseIterator(theStore.reverseRange("a", "e")); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertTrue(results.contains(new KeyValue<>("c", "c"))); @@ -401,7 +401,7 @@ public void shouldSupportAllAcrossMultipleStores() { cache.put("d", "d"); cache.put("x", "x"); - final List> results = toList(theStore.all()); + final List> results = toListAndCloseIterator(theStore.all()); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertTrue(results.contains(new KeyValue<>("c", "c"))); @@ -424,7 +424,7 @@ public void shouldSupportReverseAllAcrossMultipleStores() { cache.put("d", "d"); cache.put("x", "x"); - final List> results = toList(theStore.reverseAll()); + final List> results = toListAndCloseIterator(theStore.reverseAll()); assertTrue(results.contains(new 
KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertTrue(results.contains(new KeyValue<>("c", "c"))); @@ -444,26 +444,31 @@ public void shouldThrowInvalidStoreExceptionOnApproximateNumEntriesDuringRebalan assertThrows(InvalidStateStoreException.class, () -> rebalancing().approximateNumEntries()); } + @SuppressWarnings("resource") @Test public void shouldThrowInvalidStoreExceptionOnRangeDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().range("anything", "something")); } + @SuppressWarnings("resource") @Test public void shouldThrowInvalidStoreExceptionOnReverseRangeDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().reverseRange("anything", "something")); } + @SuppressWarnings("resource") @Test public void shouldThrowInvalidStoreExceptionOnPrefixScanDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().prefixScan("anything", new StringSerializer())); } + @SuppressWarnings("resource") @Test public void shouldThrowInvalidStoreExceptionOnAllDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().all()); } + @SuppressWarnings("resource") @Test public void shouldThrowInvalidStoreExceptionOnReverseAllDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().reverseAll()); @@ -487,7 +492,7 @@ public void shouldGetApproximateEntriesAcrossAllStores() { @Test public void shouldReturnLongMaxValueOnOverflow() { - stubProviderTwo.addStore(storeName, new NoOpReadOnlyStore() { + stubProviderTwo.addStore(storeName, new NoOpReadOnlyStore<>() { @Override public long approximateNumEntries() { return Long.MAX_VALUE; @@ -500,13 +505,13 @@ public long approximateNumEntries() { @Test public void shouldReturnLongMaxValueOnUnderflow() { - stubProviderTwo.addStore(storeName, new NoOpReadOnlyStore() { + stubProviderTwo.addStore(storeName, new NoOpReadOnlyStore<>() { @Override public long approximateNumEntries() { return Long.MAX_VALUE; } }); - stubProviderTwo.addStore("my-storeA", new NoOpReadOnlyStore() { + stubProviderTwo.addStore("my-storeA", new NoOpReadOnlyStore<>() { @Override public long approximateNumEntries() { return Long.MAX_VALUE; diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlySessionStoreTest.java index 6e4e42ae7ecec..59d6ad1e17525 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlySessionStoreTest.java @@ -36,7 +36,7 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsEqual.equalTo; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -69,7 +69,7 @@ public void shouldFetchResultsFromUnderlyingSessionStore() { underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(10, 10)), 2L); - final List, Long>> results = toList(sessionStore.fetch("a")); + final List, Long>> results = toListAndCloseIterator(sessionStore.fetch("a")); assertEquals(Arrays.asList(KeyValue.pair(new 
Windowed<>("a", new SessionWindow(0, 0)), 1L), KeyValue.pair(new Windowed<>("a", new SessionWindow(10, 10)), 2L)), results); @@ -93,8 +93,8 @@ public void shouldFindValueForKeyWhenMultiStores() { underlyingSessionStore.put(keyOne, 0L); secondUnderlying.put(keyTwo, 10L); - final List, Long>> keyOneResults = toList(sessionStore.fetch("key-one")); - final List, Long>> keyTwoResults = toList(sessionStore.fetch("key-two")); + final List, Long>> keyOneResults = toListAndCloseIterator(sessionStore.fetch("key-one")); + final List, Long>> keyTwoResults = toListAndCloseIterator(sessionStore.fetch("key-two")); assertEquals(singletonList(KeyValue.pair(keyOne, 0L)), keyOneResults); assertEquals(singletonList(KeyValue.pair(keyTwo, 10L)), keyTwoResults); @@ -146,7 +146,7 @@ public void shouldFetchKeyRangeAcrossStores() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L); secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L); - final List, Long>> results = StreamsTestUtils.toList(sessionStore.fetch("a", "b")); + final List, Long>> results = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch("a", "b")); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L), KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L)))); @@ -159,7 +159,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyFrom() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L); secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L); - final List, Long>> results = StreamsTestUtils.toList(sessionStore.fetch(null, "b")); + final List, Long>> results = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch(null, "b")); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L), KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L)))); @@ -172,7 +172,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyTo() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L); secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L); - final List, Long>> results = StreamsTestUtils.toList(sessionStore.fetch("a", null)); + final List, Long>> results = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch("a", null)); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L), KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L)))); @@ -185,7 +185,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyFromKeyTo() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L); secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L); - final List, Long>> results = StreamsTestUtils.toList(sessionStore.fetch(null, null)); + final List, Long>> results = StreamsTestUtils.toListAndCloseIterator(sessionStore.fetch(null, null)); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L), KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L)))); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyWindowStoreTest.java 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyWindowStoreTest.java index 18e775033f08f..2d22e6e15a745 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CompositeReadOnlyWindowStoreTest.java @@ -90,7 +90,7 @@ public void shouldFetchValuesFromWindowStore() { assertEquals( asList(new KeyValue<>(0L, "my-value"), new KeyValue<>(10L, "my-later-value")), - StreamsTestUtils.toList(windowStore.fetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) ); } @@ -102,7 +102,7 @@ public void shouldBackwardFetchValuesFromWindowStore() { assertEquals( asList(new KeyValue<>(10L, "my-later-value"), new KeyValue<>(0L, "my-value")), - StreamsTestUtils.toList(windowStore.backwardFetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) ); } @@ -132,9 +132,9 @@ public void shouldFindValueForKeyWhenMultiStores() { secondUnderlying.put("key-two", "value-two", 10L); final List> keyOneResults = - StreamsTestUtils.toList(windowStore.fetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L))); final List> keyTwoResults = - StreamsTestUtils.toList(windowStore.fetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L))); assertEquals(Collections.singletonList(KeyValue.pair(0L, "value-one")), keyOneResults); assertEquals(Collections.singletonList(KeyValue.pair(10L, "value-two")), keyTwoResults); @@ -150,9 +150,9 @@ public void shouldFindValueForKeyWhenMultiStoresBackwards() { secondUnderlying.put("key-two", "value-two", 10L); final List> keyOneResults = - StreamsTestUtils.toList(windowStore.backwardFetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L))); final List> keyTwoResults = - StreamsTestUtils.toList(windowStore.backwardFetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L))); assertEquals(Collections.singletonList(KeyValue.pair(0L, "value-one")), keyOneResults); assertEquals(Collections.singletonList(KeyValue.pair(10L, "value-two")), keyTwoResults); @@ -164,7 +164,7 @@ public void shouldNotGetValuesFromOtherStores() { underlyingWindowStore.put("some-key", "my-value", 1L); final List> results = - StreamsTestUtils.toList(windowStore.fetch("some-key", ofEpochMilli(0L), ofEpochMilli(2L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("some-key", ofEpochMilli(0L), ofEpochMilli(2L))); assertEquals(Collections.singletonList(new KeyValue<>(1L, "my-value")), results); } @@ -174,7 +174,7 @@ public void shouldNotGetValuesBackwardFromOtherStores() { underlyingWindowStore.put("some-key", "my-value", 1L); final List> results = - StreamsTestUtils.toList(windowStore.backwardFetch("some-key", ofEpochMilli(0L), ofEpochMilli(2L))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("some-key", ofEpochMilli(0L), ofEpochMilli(2L))); assertEquals(Collections.singletonList(new KeyValue<>(1L, 
"my-value")), results); } @@ -348,7 +348,7 @@ public void shouldFetchKeyRangeAcrossStores() { underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.fetch("a", "b", ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("a", "b", ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -362,7 +362,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyTo() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.fetch("b", null, ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch("b", null, ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"), KeyValue.pair(new Windowed<>("c", new TimeWindow(10, 10 + WINDOW_SIZE)), "c")))); @@ -376,7 +376,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyFrom() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.fetch(null, "b", ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch(null, "b", ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -390,7 +390,7 @@ public void shouldFetchKeyRangeAcrossStoresWithNullKeyFromKeyTo() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.fetch(null, null, ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetch(null, null, ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"), @@ -405,7 +405,7 @@ public void shouldBackwardFetchKeyRangeAcrossStoresWithNullKeyTo() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.backwardFetch("b", null, ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("b", null, ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("c", new TimeWindow(10, 10 + WINDOW_SIZE)), "c"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -419,7 +419,7 @@ public void shouldBackwardFetchKeyRangeAcrossStoresWithNullKeyFrom() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.backwardFetch(null, "b", ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch(null, "b", ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new 
TimeWindow(10, 10 + WINDOW_SIZE)), "b") @@ -434,7 +434,7 @@ public void shouldBackwardFetchKeyRangeAcrossStoresWithNullKeyFromKeyTo() { secondUnderlying.put("b", "b", 10L); secondUnderlying.put("c", "c", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.backwardFetch(null, null, ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch(null, null, ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("c", new TimeWindow(10, 10 + WINDOW_SIZE)), "c"), @@ -448,7 +448,7 @@ public void shouldBackwardFetchKeyRangeAcrossStores() { underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.backwardFetch("a", "b", ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetch("a", "b", ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -473,7 +473,7 @@ public void shouldGetAllAcrossStores() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); - final List, String>> results = StreamsTestUtils.toList(windowStore.all()); + final List, String>> results = StreamsTestUtils.toListAndCloseIterator(windowStore.all()); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -486,7 +486,7 @@ public void shouldGetBackwardAllAcrossStores() { stubProviderTwo.addStore(storeName, secondUnderlying); underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); - final List, String>> results = StreamsTestUtils.toList(windowStore.backwardAll()); + final List, String>> results = StreamsTestUtils.toListAndCloseIterator(windowStore.backwardAll()); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -500,7 +500,7 @@ public void shouldFetchAllAcrossStores() { underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.fetchAll(ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.fetchAll(ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); @@ -514,7 +514,7 @@ public void shouldBackwardFetchAllAcrossStores() { underlyingWindowStore.put("a", "a", 0L); secondUnderlying.put("b", "b", 10L); final List, String>> results = - StreamsTestUtils.toList(windowStore.backwardFetchAll(ofEpochMilli(0), ofEpochMilli(10))); + StreamsTestUtils.toListAndCloseIterator(windowStore.backwardFetchAll(ofEpochMilli(0), ofEpochMilli(10))); assertThat(results, equalTo(Arrays.asList( KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"), KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b")))); diff --git 
a/streams/src/test/java/org/apache/kafka/streams/state/internals/FilteredCacheIteratorTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/FilteredCacheIteratorTest.java index dd0d8bbf4a1c7..9c8fd2ce5f336 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/FilteredCacheIteratorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/FilteredCacheIteratorTest.java @@ -30,7 +30,7 @@ import java.util.List; import static java.util.Arrays.asList; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -91,7 +91,7 @@ public boolean hasNext(final KeyValueIterator iterator) { @Test public void shouldAllowEntryMatchingHasNextCondition() { - final List> keyValues = toList(allIterator); + final List> keyValues = toListAndCloseIterator(allIterator); assertThat(keyValues, equalTo(entries)); } @@ -122,7 +122,7 @@ public void shouldNotHaveNextIfHasNextConditionNotMet() { @Test public void shouldFilterEntriesNotMatchingHasNextCondition() { - final List> keyValues = toList(firstEntryIterator); + final List> keyValues = toListAndCloseIterator(firstEntryIterator); assertThat(keyValues, equalTo(Collections.singletonList(firstEntry))); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/GlobalStateStoreProviderTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/GlobalStateStoreProviderTest.java index 7b714e781d2af..8c28a9eabeca3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/GlobalStateStoreProviderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/GlobalStateStoreProviderTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.InvalidStateStoreException; import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.ProcessorContextImpl; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; @@ -118,7 +117,7 @@ public void before() { when(mockContext.taskId()).thenReturn(new TaskId(0, 0)); when(mockContext.appConfigs()).thenReturn(CONFIGS); for (final StateStore store : stores.values()) { - store.init((StateStoreContext) mockContext, null); + store.init(mockContext, null); } } @@ -128,7 +127,7 @@ public void shouldReturnSingleItemListIfStoreExists() { new GlobalStateStoreProvider(Collections.singletonMap("global", new NoOpReadOnlyStore<>())); final List> stores = provider.stores("global", QueryableStoreTypes.keyValueStore()); - assertEquals(stores.size(), 1); + assertEquals(1, stores.size()); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryKeyValueStoreTest.java index 3e70fe493ba49..eb57bee38bf04 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryKeyValueStoreTest.java @@ -21,6 +21,7 @@ import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.Serializer; 
import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.serialization.UUIDSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.processor.StateStoreContext; @@ -96,7 +97,6 @@ InMemoryKeyValueStore getInMemoryStore() { return new InMemoryKeyValueStore("in-memory-store-test"); } - @SuppressWarnings("unchecked") @Test public void shouldRemoveKeysWithNullValues() { store.close(); @@ -190,10 +190,11 @@ public void shouldReturnKeysWithGivenPrefixExcludingNextKeyLargestKey() { } } + @SuppressWarnings("resource") @Test public void shouldReturnUUIDsWithStringPrefix() { final List> entries = new ArrayList<>(); - final Serializer uuidSerializer = Serdes.UUID().serializer(); + final UUIDSerializer uuidSerializer = new UUIDSerializer(); final UUID uuid1 = UUID.randomUUID(); final UUID uuid2 = UUID.randomUUID(); final String prefix = uuid1.toString().substring(0, 4); @@ -249,6 +250,7 @@ public void shouldReturnNoKeys() { assertThat(numberOfKeysReturned, is(0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerIfPrefixKeySerializerIsNull() { assertThrows(NullPointerException.class, () -> byteStore.prefixScan("bb", null)); @@ -256,7 +258,7 @@ public void shouldThrowNullPointerIfPrefixKeySerializerIsNull() { @Test public void shouldMatchPositionAfterPut() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders())); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); @@ -272,14 +274,15 @@ public void shouldMatchPositionAfterPut() { @Test public void iteratorHasNextOnEmptyStoreShouldReturnFalse() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); - final KeyValueIterator iter = inMemoryKeyValueStore.all(); - assertFalse(iter.hasNext()); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); + try (final KeyValueIterator iter = inMemoryKeyValueStore.all()) { + assertFalse(iter.hasNext()); + } } @Test public void iteratorHasNextOnDeletedEntryShouldReturnFalse() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -290,7 +293,7 @@ public void iteratorHasNextOnDeletedEntryShouldReturnFalse() { @Test public void iteratorHasNextShouldNotAdvanceIterator() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -300,7 +303,7 @@ public void iteratorHasNextShouldNotAdvanceIterator() { @Test public void iteratorHasNextShouldReturnTrueIfElementsRemaining() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); inMemoryKeyValueStore.put(bytesKey("key2"), bytesValue("value2")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -311,7 +314,7 @@ public void iteratorHasNextShouldReturnTrueIfElementsRemaining() { @Test public void iteratorNextShouldReturnNextElement() { 
- inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -322,7 +325,7 @@ public void iteratorNextShouldReturnNextElement() { @Test public void iteratorNextAfterHasNextShouldReturnNextElement() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -334,14 +337,14 @@ public void iteratorNextAfterHasNextShouldReturnNextElement() { @Test public void iteratorNextOnEmptyStoreShouldThrowException() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); final KeyValueIterator iter = inMemoryKeyValueStore.all(); assertThrows(NoSuchElementException.class, iter::next); } @Test public void iteratorNextShouldThrowExceptionIfRemainingElementsDeleted() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); inMemoryKeyValueStore.put(bytesKey("key2"), bytesValue("value2")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -356,7 +359,7 @@ public void iteratorNextShouldThrowExceptionIfRemainingElementsDeleted() { @Test public void iteratorNextShouldSkipDeletedElements() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); inMemoryKeyValueStore.put(bytesKey("key2"), bytesValue("value2")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -369,7 +372,7 @@ public void iteratorNextShouldSkipDeletedElements() { @Test public void iteratorNextShouldIterateOverAllElements() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); inMemoryKeyValueStore.put(bytesKey("key2"), bytesValue("value2")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -387,14 +390,14 @@ public void iteratorNextShouldIterateOverAllElements() { @Test public void iteratorPeekNextKeyOnEmptyStoreShouldThrowException() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); final KeyValueIterator iter = inMemoryKeyValueStore.all(); assertThrows(NoSuchElementException.class, iter::peekNextKey); } @Test public void iteratorPeekNextKeyOnDeletedEntryShouldThrowException() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -405,7 +408,7 @@ public void iteratorPeekNextKeyOnDeletedEntryShouldThrowException() { @Test public void iteratorPeekNextKeyShouldNotAdvanceIterator() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); 
inMemoryKeyValueStore.put(bytesKey("key"), bytesValue("value")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -415,7 +418,7 @@ public void iteratorPeekNextKeyShouldNotAdvanceIterator() { @Test public void iteratorPeekNextKeyShouldSkipDeletedElements() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); inMemoryKeyValueStore.put(bytesKey("key1"), bytesValue("value1")); inMemoryKeyValueStore.put(bytesKey("key2"), bytesValue("value2")); final KeyValueIterator iter = inMemoryKeyValueStore.all(); @@ -426,7 +429,7 @@ public void iteratorPeekNextKeyShouldSkipDeletedElements() { @Test public void iteratorShouldThrowIllegalStateExceptionIfAlreadyClosed() { - inMemoryKeyValueStore.init((StateStoreContext) context, inMemoryKeyValueStore); + inMemoryKeyValueStore.init(context, inMemoryKeyValueStore); final KeyValueIterator iter = inMemoryKeyValueStore.all(); iter.close(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryLRUCacheStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryLRUCacheStoreTest.java index 97ae0235e2c8e..eb23f80db729e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryLRUCacheStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemoryLRUCacheStoreTest.java @@ -133,7 +133,6 @@ public void testEvict() { assertEquals(3, driver.numFlushedEntryRemoved()); } - @SuppressWarnings("unchecked") @Test public void testRestoreEvict() { store.close(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java index fd93122d09b2c..1775d76e101f1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java @@ -52,7 +52,11 @@ public void shouldNotExpireFromOpenIterator() { assertFalse(iterator.hasNext()); iterator.close(); - assertFalse(sessionStore.findSessions("a", "b", 0L, 20L).hasNext()); + + try (final KeyValueIterator, Long> it = + sessionStore.findSessions("a", "b", 0L, 20L)) { + assertFalse(it.hasNext()); + } } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentTest.java index 119bda69c9f16..e71704f32af27 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentTest.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.query.Position; @@ -44,8 +43,6 @@ import static org.hamcrest.Matchers.not; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) @@ -68,11 +65,7 @@ public 
void shouldDeleteStateDirectoryOnDestroy() throws Exception { final String directoryPath = TestUtils.tempDirectory().getAbsolutePath(); final File directory = new File(directoryPath); - final ProcessorContext mockContext = mock(ProcessorContext.class); - when(mockContext.appConfigs()).thenReturn(mkMap(mkEntry(METRICS_RECORDING_LEVEL_CONFIG, "INFO"))); - when(mockContext.stateDir()).thenReturn(directory); - - segment.openDB(mockContext.appConfigs(), mockContext.stateDir()); + segment.openDB(mkMap(mkEntry(METRICS_RECORDING_LEVEL_CONFIG, "INFO")), directory); assertTrue(new File(directoryPath, "window").exists()); assertTrue(new File(directoryPath + File.separator + "window", "segment").exists()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapperTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapperTest.java index cb6e15be9fe5b..f7018a7fae348 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapperTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueStoreWrapperTest.java @@ -60,15 +60,15 @@ public class KeyValueStoreWrapperTest { @Mock private VersionedKeyValueStore versionedStore; @Mock - private ProcessorContext context; + private ProcessorContext context; @Mock - private Query query; + private Query query; @Mock private PositionBound positionBound; @Mock private QueryConfig queryConfig; @Mock - private QueryResult result; + private QueryResult result; @Mock private Position position; @@ -295,20 +295,20 @@ public void shouldReturnIsOpenForVersionedStore() { assertThat(wrapper.isOpen(), equalTo(false)); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldQueryTimestampedStore() { givenWrapperWithTimestampedStore(); - when(timestampedStore.query(query, positionBound, queryConfig)).thenReturn(result); + when(timestampedStore.query(query, positionBound, queryConfig)).thenReturn((QueryResult) result); assertThat(wrapper.query(query, positionBound, queryConfig), equalTo(result)); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldQueryVersionedStore() { givenWrapperWithVersionedStore(); - when(versionedStore.query(query, positionBound, queryConfig)).thenReturn(result); + when(versionedStore.query(query, positionBound, queryConfig)).thenReturn((QueryResult) result); assertThat(wrapper.query(query, positionBound, queryConfig), equalTo(result)); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ListValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ListValueStoreTest.java index d0a5c487aa2e7..81d89791facc8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ListValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ListValueStoreTest.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.errors.InvalidStateStoreException; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; @@ -40,12 +39,12 @@ import java.util.Collections; import static java.util.Arrays.asList; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static 
org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class ListValueStoreTest { - private enum StoreType { InMemory, RocksDB } + public enum StoreType { InMemory, RocksDB } private KeyValueStore listStore; @@ -66,7 +65,7 @@ public void setup(final StoreType storeType) { new MockStreamsMetrics(new Metrics()))); context.setTime(1L); - listStore.init((StateStoreContext) context, listStore); + listStore.init(context, listStore); } @AfterEach @@ -103,7 +102,7 @@ public void shouldGetAll(final StoreType storeType) { assertEquals( asList(zero, zeroAgain, one, two), - toList(listStore.all()) + toListAndCloseIterator(listStore.all()) ); } @@ -130,7 +129,7 @@ public void shouldGetAllNonDeletedRecords(final StoreType storeType) { assertEquals( asList(zero, two, four), - toList(listStore.all()) + toListAndCloseIterator(listStore.all()) ); } @@ -158,7 +157,7 @@ public void shouldGetAllReturnTimestampOrderedRecords(final StoreType storeType) assertEquals( asList(zero, one, two1, two2, three, four), - toList(listStore.all()) + toListAndCloseIterator(listStore.all()) ); } @@ -185,7 +184,7 @@ public void shouldAllowDeleteWhileIterateRecords(final StoreType storeType) { it.close(); // A new all() iterator after a previous all() iterator was closed should not return deleted records. - assertEquals(Collections.singletonList(one), toList(listStore.all())); + assertEquals(Collections.singletonList(one), toListAndCloseIterator(listStore.all())); } @ParameterizedTest diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java index a538c7f5ec649..bc1a49dd44fb1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/LogicalKeyValueSegmentTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.InvalidStateStoreException; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder; import org.apache.kafka.test.InternalMockProcessorContext; @@ -65,7 +64,7 @@ public class LogicalKeyValueSegmentTest { @BeforeEach public void setUp() { physicalStore = new RocksDBStore(STORE_NAME, DB_FILE_DIR, new RocksDBMetricsRecorder(METRICS_SCOPE, STORE_NAME), false); - physicalStore.init((StateStoreContext) new InternalMockProcessorContext<>( + physicalStore.init(new InternalMockProcessorContext<>( TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MergedSortedCacheKeyValueBytesStoreIteratorTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MergedSortedCacheKeyValueBytesStoreIteratorTest.java index a678908b0432c..1a63f835639d9 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MergedSortedCacheKeyValueBytesStoreIteratorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MergedSortedCacheKeyValueBytesStoreIteratorTest.java @@ -148,19 +148,51 @@ public void shouldNotHaveNextIfOnlyCacheItemsAndAllDeleted() { 
assertFalse(createIterator().hasNext()); } + @Test + public void shouldIterateCacheOnly() { + final byte[][] bytes = {{0}, {1}, {2}}; + for (final byte[] aByte : bytes) { + cache.put(namespace, Bytes.wrap(aByte), new LRUCacheEntry(aByte)); + } + + try (final MergedSortedCacheKeyValueBytesStoreIterator iterator = createIterator()) { + assertArrayEquals(bytes[0], iterator.next().key.get()); + assertArrayEquals(bytes[1], iterator.next().key.get()); + assertArrayEquals(bytes[2], iterator.next().key.get()); + assertFalse(iterator.hasNext()); + } + } + + @Test + public void shouldIterateStoreOnly() { + final byte[][] bytes = {{0}, {1}, {2}}; + for (final byte[] aByte : bytes) { + store.put(Bytes.wrap(aByte), aByte); + } + + try (final MergedSortedCacheKeyValueBytesStoreIterator iterator = createIterator()) { + assertArrayEquals(bytes[0], iterator.next().key.get()); + assertArrayEquals(bytes[1], iterator.next().key.get()); + assertArrayEquals(bytes[2], iterator.next().key.get()); + assertFalse(iterator.hasNext()); + } + } + @Test public void shouldSkipAllDeletedFromCache() { final byte[][] bytes = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}}; for (final byte[] aByte : bytes) { final Bytes aBytes = Bytes.wrap(aByte); store.put(aBytes, aByte); - cache.put(namespace, aBytes, new LRUCacheEntry(aByte)); } + + cache.put(namespace, Bytes.wrap(new byte[]{-1}), new LRUCacheEntry(null)); cache.put(namespace, Bytes.wrap(bytes[1]), new LRUCacheEntry(null)); cache.put(namespace, Bytes.wrap(bytes[2]), new LRUCacheEntry(null)); cache.put(namespace, Bytes.wrap(bytes[3]), new LRUCacheEntry(null)); cache.put(namespace, Bytes.wrap(bytes[8]), new LRUCacheEntry(null)); cache.put(namespace, Bytes.wrap(bytes[11]), new LRUCacheEntry(null)); + cache.put(namespace, Bytes.wrap(new byte[]{14}), new LRUCacheEntry(null)); try (final MergedSortedCacheKeyValueBytesStoreIterator iterator = createIterator()) { assertArrayEquals(bytes[0], iterator.next().key.get()); @@ -174,6 +206,13 @@ public void shouldSkipAllDeletedFromCache() { } } + @Test + public void shouldNotHaveNextIfBothIteratorsInitializedEmpty() { + try (final MergedSortedCacheKeyValueBytesStoreIterator iterator = createIterator()) { + assertFalse(iterator.hasNext()); + } + } + @Test public void shouldPeekNextKey() { final KeyValueStore kv = new InMemoryKeyValueStore("one"); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStoreTest.java index 2509702dcdf1f..4a8c891355dd1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredKeyValueStoreTest.java @@ -31,7 +31,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; @@ -94,7 +93,7 @@ public class MeteredKeyValueStoreTest { @Mock private KeyValueStore inner; @Mock - private InternalProcessorContext context; + private InternalProcessorContext context; private MeteredKeyValueStore metered; private final Metrics metrics = new Metrics(); @@ -132,7 +131,7 @@ private void setUp() { } private void init() { - 
metered.init((StateStoreContext) context, metered); + metered.init(context, metered); } @Test @@ -145,8 +144,8 @@ public void shouldDelegateInit() { Serdes.String(), Serdes.String() ); - doNothing().when(inner).init((StateStoreContext) context, outer); - outer.init((StateStoreContext) context, outer); + doNothing().when(inner).init(context, outer); + outer.init(context, outer); } @Test @@ -184,7 +183,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) keySerde, valueSerde ); - metered.init((StateStoreContext) context, metered); + metered.init(context, metered); metered.get(KEY); metered.put(KEY, VALUE); @@ -213,7 +212,7 @@ public void testMetrics() { @Test public void shouldRecordRestoreLatencyOnInit() { setUp(); - doNothing().when(inner).init((StateStoreContext) context, metered); + doNothing().when(inner).init(context, metered); init(); @@ -415,6 +414,7 @@ public void shouldThrowNullPointerOnPutAllIfAnyKeyIsNull() { assertThrows(NullPointerException.class, () -> metered.putAll(Collections.singletonList(KeyValue.pair(null, VALUE)))); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnPrefixScanIfPrefixIsNull() { setUpWithoutContext(); @@ -422,24 +422,28 @@ public void shouldThrowNullPointerOnPrefixScanIfPrefixIsNull() { assertThrows(NullPointerException.class, () -> metered.prefixScan(null, stringSerializer)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnRangeIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> metered.range(null, "to")); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnRangeIfToIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> metered.range("from", null)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnReverseRangeIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> metered.reverseRange(null, "to")); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnReverseRangeIfToIsNull() { setUpWithoutContext(); @@ -462,6 +466,7 @@ public void shouldGetRecordsWithPrefixKey() { assertTrue((Double) metric.metricValue() > 0); } + @SuppressWarnings("unused") @Test public void shouldTrackOpenIteratorsMetric() { setUp(); @@ -474,13 +479,14 @@ public void shouldTrackOpenIteratorsMetric() { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); - try (final KeyValueIterator iterator = metered.prefixScan(KEY, stringSerializer)) { + try (final KeyValueIterator unused = metered.prefixScan(KEY, stringSerializer)) { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(1L)); } assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); } + @SuppressWarnings("unused") @Test public void shouldTimeIteratorDuration() { setUp(); @@ -495,7 +501,7 @@ public void shouldTimeIteratorDuration() { assertThat((Double) iteratorDurationAvgMetric.metricValue(), equalTo(Double.NaN)); assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN)); - try (final KeyValueIterator iterator = metered.all()) { + try (final KeyValueIterator unused = metered.all()) { // nothing to do, just close immediately mockTime.sleep(2); } @@ -512,6 +518,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1))); } + @SuppressWarnings("unused") @Test public void shouldTrackOldestOpenIteratorTimestamp() { setUp(); @@ -526,7 
+533,7 @@ public void shouldTrackOldestOpenIteratorTimestamp() { KeyValueIterator second = null; final long secondTimestamp; try { - try (final KeyValueIterator first = metered.all()) { + try (final KeyValueIterator unused = metered.all()) { final long oldestTimestamp = mockTime.milliseconds(); assertThat((Long) oldestIteratorTimestampMetric.metricValue(), equalTo(oldestTimestamp)); mockTime.sleep(100); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredSessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredSessionStoreTest.java index 08a8b1ceaf4cc..ee1b686dadeb3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredSessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredSessionStoreTest.java @@ -34,7 +34,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; @@ -102,7 +101,7 @@ public class MeteredSessionStoreTest { @Mock private SessionStore innerStore; @Mock - private InternalProcessorContext context; + private InternalProcessorContext context; private Map tags; @@ -134,7 +133,7 @@ public void setUp() { } private void init() { - store.init((StateStoreContext) context, store); + store.init(context, store); } @Test @@ -147,8 +146,8 @@ public void shouldDelegateInit() { Serdes.String(), new MockTime() ); - doNothing().when(innerStore).init((StateStoreContext) context, outer); - outer.init((StateStoreContext) context, outer); + doNothing().when(innerStore).init(context, outer); + outer.init(context, outer); } @Test @@ -187,7 +186,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) valueSerde, new MockTime() ); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetchSession(KEY, START_TIMESTAMP, END_TIMESTAMP); store.put(WINDOWED_KEY, VALUE); @@ -514,6 +513,7 @@ public void shouldThrowNullPointerOnRemoveIfWindowIsNull() { assertThrows(NullPointerException.class, () -> store.remove(new Windowed<>(KEY, null))); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFetchIfKeyIsNull() { setUpWithoutContext(); @@ -526,66 +526,77 @@ public void shouldThrowNullPointerOnFetchSessionIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.fetchSession(null, 0, Long.MAX_VALUE)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFetchRangeIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.fetch(null, "to")); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFetchRangeIfToIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.fetch("from", null)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFetchIfKeyIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFetch(null)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFetchIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFetch(null, "to")); } + 
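// Hedged sketch (illustrative only, not the metered stores' code): the
// "oldest-iterator-open-since-ms" assertions above can be modelled as a sorted multiset of
// open timestamps — the gauge reports the smallest one, and closing the oldest iterator
// makes the next-oldest timestamp visible.
import java.util.TreeMap;

final class OldestOpenIteratorSketch {
    private final TreeMap<Long, Integer> openSinceMs = new TreeMap<>();

    synchronized void onOpen(final long nowMs) {
        openSinceMs.merge(nowMs, 1, Integer::sum);
    }

    synchronized void onClose(final long openedAtMs) {
        openSinceMs.computeIfPresent(openedAtMs, (ts, count) -> count == 1 ? null : count - 1);
    }

    // Open timestamp of the oldest still-open iterator, or null when no iterator is open.
    synchronized Long oldestOpenSinceMs() {
        return openSinceMs.isEmpty() ? null : openSinceMs.firstKey();
    }
}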
@SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFetchIfToIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFetch("from", null)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFindSessionsIfKeyIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.findSessions(null, 0, 0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFindSessionsRangeIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.findSessions(null, "a", 0, 0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFindSessionsRangeIfToIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.findSessions("a", null, 0, 0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFindSessionsIfKeyIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFindSessions(null, 0, 0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFindSessionsRangeIfFromIsNull() { setUpWithoutContext(); assertThrows(NullPointerException.class, () -> store.backwardFindSessions(null, "a", 0, 0)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFindSessionsRangeIfToIsNull() { setUpWithoutContext(); @@ -640,6 +651,7 @@ public void shouldRemoveMetricsEvenIfWrappedStoreThrowsOnClose() { assertThat(storeMetrics(), empty()); } + @SuppressWarnings("unused") @Test public void shouldTrackOpenIteratorsMetric() { setUp(); @@ -651,13 +663,14 @@ public void shouldTrackOpenIteratorsMetric() { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); - try (final KeyValueIterator, String> iterator = store.backwardFetch(KEY)) { + try (final KeyValueIterator, String> unused = store.backwardFetch(KEY)) { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(1L)); } assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); } + @SuppressWarnings("unused") @Test public void shouldTimeIteratorDuration() { setUp(); @@ -672,7 +685,7 @@ public void shouldTimeIteratorDuration() { assertThat((Double) iteratorDurationAvgMetric.metricValue(), equalTo(Double.NaN)); assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN)); - try (final KeyValueIterator, String> iterator = store.backwardFetch(KEY)) { + try (final KeyValueIterator, String> unused = store.backwardFetch(KEY)) { // nothing to do, just close immediately mockTime.sleep(2); } @@ -689,6 +702,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1))); } + @SuppressWarnings("unused") @Test public void shouldTrackOldestOpenIteratorTimestamp() { setUp(); @@ -703,7 +717,7 @@ public void shouldTrackOldestOpenIteratorTimestamp() { KeyValueIterator, String> second = null; final long secondTimestamp; try { - try (final KeyValueIterator, String> first = store.backwardFetch(KEY)) { + try (final KeyValueIterator, String> unused = store.backwardFetch(KEY)) { final long oldestTimestamp = mockTime.milliseconds(); assertThat((Long) oldestIteratorTimestampMetric.metricValue(), equalTo(oldestTimestamp)); mockTime.sleep(100); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedKeyValueStoreTest.java 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedKeyValueStoreTest.java index bb33ef5553202..fa42cb0728335 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedKeyValueStoreTest.java @@ -32,7 +32,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.StreamsException; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; @@ -95,7 +94,7 @@ public class MeteredTimestampedKeyValueStoreTest { @Mock private KeyValueStore inner; @Mock - private InternalProcessorContext context; + private InternalProcessorContext context; private MockTime mockTime; private static final Map CONFIGS = mkMap(mkEntry(StreamsConfig.InternalConfig.TOPIC_PREFIX_ALTERNATIVE, APPLICATION_ID)); @@ -143,7 +142,7 @@ private void setUpWithExpectSerdes() { } private void init() { - metered.init((StateStoreContext) context, metered); + metered.init(context, metered); } @Test @@ -156,8 +155,8 @@ public void shouldDelegateInit() { Serdes.String(), new ValueAndTimestampSerde<>(Serdes.String()) ); - doNothing().when(inner).init((StateStoreContext) context, outer); - outer.init((StateStoreContext) context, outer); + doNothing().when(inner).init(context, outer); + outer.init(context, outer); } @Test @@ -195,7 +194,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) keySerde, valueSerde ); - metered.init((StateStoreContext) context, metered); + metered.init(context, metered); metered.get(KEY); metered.put(KEY, VALUE_AND_TIMESTAMP); @@ -240,8 +239,8 @@ public void shouldGetWithBinary() { init(); final RawAndDeserializedValue valueWithBinary = metered.getWithBinary(KEY); - assertEquals(valueWithBinary.value, VALUE_AND_TIMESTAMP); - assertArrayEquals(valueWithBinary.serializedValue, VALUE_AND_TIMESTAMP_BYTES); + assertEquals(VALUE_AND_TIMESTAMP, valueWithBinary.value); + assertArrayEquals(VALUE_AND_TIMESTAMP_BYTES, valueWithBinary.serializedValue); } @Test @@ -408,7 +407,7 @@ public void shouldNotThrowExceptionIfSerdesCorrectlySetFromProcessorContext() { null, null ); - store.init((StateStoreContext) context, inner); + store.init(context, inner); try { store.put("key", ValueAndTimestamp.make(42L, 60000)); @@ -434,7 +433,7 @@ public void shouldNotThrowExceptionIfSerdesCorrectlySetFromConstructorParameters Serdes.String(), new ValueAndTimestampSerde<>(Serdes.Long()) ); - store.init((StateStoreContext) context, inner); + store.init(context, inner); try { store.put("key", ValueAndTimestamp.make(42L, 60000)); @@ -446,6 +445,7 @@ public void shouldNotThrowExceptionIfSerdesCorrectlySetFromConstructorParameters } } + @SuppressWarnings("unused") @Test public void shouldTrackOpenIteratorsMetric() { setUp(); @@ -457,13 +457,14 @@ public void shouldTrackOpenIteratorsMetric() { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); - try (final KeyValueIterator> iterator = metered.all()) { + try (final KeyValueIterator> unused = metered.all()) { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(1L)); } assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); } + @SuppressWarnings("unused") @Test public void 
shouldTimeIteratorDuration() { setUp(); @@ -478,7 +479,7 @@ public void shouldTimeIteratorDuration() { assertThat((Double) iteratorDurationAvgMetric.metricValue(), equalTo(Double.NaN)); assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN)); - try (final KeyValueIterator> iterator = metered.all()) { + try (final KeyValueIterator> unused = metered.all()) { // nothing to do, just close immediately mockTime.sleep(2); } @@ -495,6 +496,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1))); } + @SuppressWarnings("unused") @Test public void shouldTrackOldestOpenIteratorTimestamp() { setUp(); @@ -509,7 +511,7 @@ public void shouldTrackOldestOpenIteratorTimestamp() { KeyValueIterator> second = null; final long secondTimestamp; try { - try (final KeyValueIterator> first = metered.all()) { + try (final KeyValueIterator> unused = metered.all()) { final long oldestTimestamp = mockTime.milliseconds(); assertThat((Long) oldestIteratorTimestampMetric.metricValue(), equalTo(oldestTimestamp)); mockTime.sleep(100); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedWindowStoreTest.java index 2826054f3d885..a3fe59c6e8ba6 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredTimestampedWindowStoreTest.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.StreamsException; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; @@ -69,7 +68,7 @@ public class MeteredTimestampedWindowStoreTest { private static final byte[] VALUE_AND_TIMESTAMP_BYTES = "\0\0\0\0\0\0\0avalue".getBytes(); private static final int WINDOW_SIZE_MS = 10; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private final TaskId taskId = new TaskId(0, 0, "My-Topology"); @Mock private WindowStore innerStoreMock; @@ -145,9 +144,9 @@ public void shouldDelegateInit() { ); when(inner.name()).thenReturn("store"); - outer.init((StateStoreContext) context, outer); + outer.init(context, outer); - verify(inner).init((StateStoreContext) context, outer); + verify(inner).init(context, outer); } @Test @@ -192,7 +191,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) valueSerde ); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetch(KEY, TIMESTAMP); store.put(KEY, VALUE_AND_TIMESTAMP, TIMESTAMP); @@ -203,7 +202,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) @Test public void shouldCloseUnderlyingStore() { setUp(); - store.init((StateStoreContext) context, store); + store.init(context, store); store.close(); verify(innerStoreMock).close(); @@ -214,7 +213,7 @@ public void shouldNotExceptionIfFetchReturnsNull() { setUp(); when(innerStoreMock.fetch(Bytes.wrap("a".getBytes()), 0)).thenReturn(null); - store.init((StateStoreContext) context, store); + store.init(context, store); 
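// Hedged sketch of the pattern behind the repeated removal of the "(StateStoreContext)"
// casts and of the StateStoreContext import in these tests: if the processor-context type
// used by the test is itself a StateStoreContext (an assumption inferred from the diff,
// not stated in it), the cast at every init(...) call site becomes redundant.
interface StateStoreContextSketch { }

interface InternalProcessorContextSketch extends StateStoreContextSketch { }

interface StateStoreSketch {
    void init(StateStoreContextSketch context, StateStoreSketch root);
}

final class InitCallSiteSketch {
    static void initStore(final StateStoreSketch store, final InternalProcessorContextSketch context) {
        // Before: store.init((StateStoreContextSketch) context, store);
        // After: the subtype relationship makes the explicit cast unnecessary.
        store.init(context, store);
    }
}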
assertNull(store.fetch("a", 0)); } @@ -230,7 +229,7 @@ public void shouldNotThrowExceptionIfSerdesCorrectlySetFromProcessorContext() { null, null ); - store.init((StateStoreContext) context, innerStoreMock); + store.init(context, innerStoreMock); try { store.put("key", ValueAndTimestamp.make(42L, 60000), 60000L); @@ -254,7 +253,7 @@ public void shouldNotThrowExceptionIfSerdesCorrectlySetFromConstructorParameters Serdes.String(), new ValueAndTimestampSerde<>(Serdes.Long()) ); - store.init((StateStoreContext) context, innerStoreMock); + store.init(context, innerStoreMock); try { store.put("key", ValueAndTimestamp.make(42L, 60000), 60000L); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStoreTest.java index 5511a39fe2c3d..f0a7f23c09c78 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredVersionedKeyValueStoreTest.java @@ -27,7 +27,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; @@ -103,7 +102,7 @@ public class MeteredVersionedKeyValueStoreTest { private final Metrics metrics = new Metrics(); private final Time mockTime = new MockTime(); private final String threadId = Thread.currentThread().getName(); - private final InternalProcessorContext context = mock(InternalProcessorContext.class); + private final InternalProcessorContext context = mock(InternalProcessorContext.class); private Map tags; private MeteredVersionedKeyValueStore store; @@ -123,7 +122,7 @@ public void setUp() { ); store = newMeteredStore(inner); - store.init((StateStoreContext) context, store); + store.init(context, store); } private MeteredVersionedKeyValueStore newMeteredStore(final VersionedBytesStore inner) { @@ -139,7 +138,7 @@ private MeteredVersionedKeyValueStore newMeteredStore(final Vers @Test public void shouldDelegateInit() { // init is already called in setUp() - verify(inner).init((StateStoreContext) context, store); + verify(inner).init(context, store); } @Test @@ -176,7 +175,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String change keySerde, valueSerde ); - store.init((StateStoreContext) context, store); + store.init(context, store); store.put(KEY, VALUE, TIMESTAMP); @@ -308,14 +307,14 @@ public void shouldThrowOnMultiVersionedKeyQueryInvalidTimeRange() { } - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldDelegateAndAddExecutionInfoOnCustomQuery() { - final Query query = mock(Query.class); + final Query query = mock(Query.class); final PositionBound positionBound = mock(PositionBound.class); final QueryConfig queryConfig = mock(QueryConfig.class); - final QueryResult result = mock(QueryResult.class); - when(inner.query(query, positionBound, queryConfig)).thenReturn(result); + final QueryResult result = mock(QueryResult.class); + when(inner.query(query, positionBound, queryConfig)).thenReturn((QueryResult) result); when(queryConfig.isCollectExecutionInfo()).thenReturn(true); 
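// Hedged sketch (illustrative, not the metered stores' implementation): how the
// iterator-duration-avg / iterator-duration-max values asserted in these tests could be
// derived from the durations of closed iterators, kept in nanoseconds so that 2 ms and
// 3 ms of mock time map to 2e6 and 3e6 ns as the assertions expect.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

final class IteratorDurationSketch {
    private final List<Long> closedDurationsNanos = new ArrayList<>();

    void recordClose(final long openedAtMs, final long closedAtMs) {
        closedDurationsNanos.add(TimeUnit.MILLISECONDS.toNanos(closedAtMs - openedAtMs));
    }

    double avgNanos() {
        return closedDurationsNanos.isEmpty()
            ? Double.NaN
            : closedDurationsNanos.stream().mapToLong(Long::longValue).average().getAsDouble();
    }

    double maxNanos() {
        return closedDurationsNanos.isEmpty()
            ? Double.NaN
            : closedDurationsNanos.stream().mapToLong(Long::longValue).max().getAsLong();
    }
}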
assertThat(store.query(query, positionBound, queryConfig), is(result)); @@ -359,6 +358,7 @@ public void shouldDelegateGetPosition() { assertThat(store.getPosition(), is(position)); } + @SuppressWarnings("unused") @Test public void shouldTrackOpenIteratorsMetric() { final MultiVersionedKeyQuery query = MultiVersionedKeyQuery.withKey(KEY); @@ -374,13 +374,14 @@ public void shouldTrackOpenIteratorsMetric() { final QueryResult> result = store.query(query, bound, config); - try (final VersionedRecordIterator iterator = result.getResult()) { + try (final VersionedRecordIterator unused = result.getResult()) { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(1L)); } assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); } + @SuppressWarnings("unused") @Test public void shouldTimeIteratorDuration() { final MultiVersionedKeyQuery query = MultiVersionedKeyQuery.withKey(KEY); @@ -398,7 +399,7 @@ public void shouldTimeIteratorDuration() { assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN)); final QueryResult> first = store.query(query, bound, config); - try (final VersionedRecordIterator iterator = first.getResult()) { + try (final VersionedRecordIterator unused = first.getResult()) { // nothing to do, just close immediately mockTime.sleep(2); } @@ -407,7 +408,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(2.0 * TimeUnit.MILLISECONDS.toNanos(1))); final QueryResult> second = store.query(query, bound, config); - try (final VersionedRecordIterator iterator = second.getResult()) { + try (final VersionedRecordIterator unused = second.getResult()) { // nothing to do, just close immediately mockTime.sleep(3); } @@ -416,6 +417,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1))); } + @SuppressWarnings("unused") @Test public void shouldTrackOldestOpenIteratorTimestamp() { final MultiVersionedKeyQuery query = MultiVersionedKeyQuery.withKey(KEY); @@ -433,7 +435,7 @@ public void shouldTrackOldestOpenIteratorTimestamp() { VersionedRecordIterator secondIterator = null; final long secondTime; try { - try (final VersionedRecordIterator iterator = first.getResult()) { + try (final VersionedRecordIterator unused = first.getResult()) { final long oldestTimestamp = mockTime.milliseconds(); assertThat((Long) oldestIteratorTimestampMetric.metricValue(), equalTo(oldestTimestamp)); mockTime.sleep(100); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredWindowStoreTest.java index b4aeccd0c8393..ba557104ebdef 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/MeteredWindowStoreTest.java @@ -34,7 +34,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.ProcessorStateManager; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.state.KeyValueIterator; @@ -95,7 +94,7 @@ public class MeteredWindowStoreTest { private static final long TIMESTAMP = 42L; private final String threadId = 
Thread.currentThread().getName(); - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; @SuppressWarnings("unchecked") private final WindowStore innerStoreMock = mock(WindowStore.class); private final MockTime mockTime = new MockTime(); @@ -146,8 +145,8 @@ public void shouldDelegateInit() { new SerdeThatDoesntHandleNull() ); when(innerStoreMock.name()).thenReturn("store"); - doNothing().when(innerStoreMock).init((StateStoreContext) context, outer); - outer.init((StateStoreContext) context, outer); + doNothing().when(innerStoreMock).init(context, outer); + outer.init(context, outer); } @Test @@ -185,7 +184,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) keySerde, valueSerde ); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetch(KEY, TIMESTAMP); store.put(KEY, VALUE, TIMESTAMP); @@ -193,7 +192,7 @@ private void doShouldPassChangelogTopicNameToStateStoreSerde(final String topic) @Test public void testMetrics() { - store.init((StateStoreContext) context, store); + store.init(context, store); final JmxReporter reporter = new JmxReporter(); final MetricsContext metricsContext = new KafkaMetricsContext("kafka.streams"); reporter.contextChange(metricsContext); @@ -212,8 +211,8 @@ public void testMetrics() { @Test public void shouldRecordRestoreLatencyOnInit() { - doNothing().when(innerStoreMock).init((StateStoreContext) context, store); - store.init((StateStoreContext) context, store); + doNothing().when(innerStoreMock).init(context, store); + store.init(context, store); // it suffices to verify one restore metric since all restore metrics are recorded by the same sensor // and the sensor is tested elsewhere @@ -226,7 +225,7 @@ public void shouldPutToInnerStoreAndRecordPutMetrics() { final byte[] bytes = "a".getBytes(); doNothing().when(innerStoreMock).put(eq(Bytes.wrap(bytes)), any(), eq(context.timestamp())); - store.init((StateStoreContext) context, store); + store.init(context, store); store.put("a", "a", context.timestamp()); // it suffices to verify one put metric since all put metrics are recorded by the same sensor @@ -240,7 +239,7 @@ public void shouldFetchFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.fetch(Bytes.wrap("a".getBytes()), 1, 1)) .thenReturn(KeyValueIterators.emptyWindowStoreIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetch("a", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; // it suffices to verify one fetch metric since all fetch metrics are recorded by the same sensor @@ -254,7 +253,7 @@ public void shouldReturnNoRecordWhenFetchedKeyHasExpired() { when(innerStoreMock.fetch(Bytes.wrap("a".getBytes()), 1, 1 + RETENTION_PERIOD)) .thenReturn(KeyValueIterators.emptyWindowStoreIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetch("a", ofEpochMilli(1), ofEpochMilli(1).plus(RETENTION_PERIOD, ChronoUnit.MILLIS)).close(); // recorded on close; } @@ -269,7 +268,7 @@ public void shouldFetchRangeFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.fetch(null, null, 1, 1)) .thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetch("a", "b", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; store.fetch(null, "b", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; store.fetch("a", null, ofEpochMilli(1), 
ofEpochMilli(1)).close(); // recorded on close; @@ -286,7 +285,7 @@ public void shouldBackwardFetchFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.backwardFetch(Bytes.wrap("a".getBytes()), Bytes.wrap("b".getBytes()), 1, 1)) .thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.backwardFetch("a", "b", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; // it suffices to verify one fetch metric since all fetch metrics are recorded by the same sensor @@ -306,7 +305,7 @@ public void shouldBackwardFetchRangeFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.backwardFetch(null, null, 1, 1)) .thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.backwardFetch("a", "b", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; store.backwardFetch(null, "b", ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; store.backwardFetch("a", null, ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; @@ -322,7 +321,7 @@ public void shouldBackwardFetchRangeFromInnerStoreAndRecordFetchMetrics() { public void shouldFetchAllFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.fetchAll(1, 1)).thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.fetchAll(ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; // it suffices to verify one fetch metric since all fetch metrics are recorded by the same sensor @@ -335,7 +334,7 @@ public void shouldFetchAllFromInnerStoreAndRecordFetchMetrics() { public void shouldBackwardFetchAllFromInnerStoreAndRecordFetchMetrics() { when(innerStoreMock.backwardFetchAll(1, 1)).thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); store.backwardFetchAll(ofEpochMilli(1), ofEpochMilli(1)).close(); // recorded on close; // it suffices to verify one fetch metric since all fetch metrics are recorded by the same sensor @@ -348,7 +347,7 @@ public void shouldBackwardFetchAllFromInnerStoreAndRecordFetchMetrics() { public void shouldRecordFlushLatency() { doNothing().when(innerStoreMock).flush(); - store.init((StateStoreContext) context, store); + store.init(context, store); store.flush(); // it suffices to verify one flush metric since all flush metrics are recorded by the same sensor @@ -361,7 +360,7 @@ public void shouldRecordFlushLatency() { public void shouldNotThrowNullPointerExceptionIfFetchReturnsNull() { when(innerStoreMock.fetch(Bytes.wrap("a".getBytes()), 0)).thenReturn(null); - store.init((StateStoreContext) context, store); + store.init(context, store); assertNull(store.fetch("a", 0)); } @@ -394,7 +393,7 @@ public void shouldNotSetFlushListenerOnWrappedNoneCachingStore() { @Test public void shouldCloseUnderlyingStore() { doNothing().when(innerStoreMock).close(); - store.init((StateStoreContext) context, store); + store.init(context, store); store.close(); } @@ -402,7 +401,7 @@ public void shouldCloseUnderlyingStore() { @Test public void shouldRemoveMetricsOnClose() { doNothing().when(innerStoreMock).close(); - store.init((StateStoreContext) context, store); + store.init(context, store); assertThat(storeMetrics(), not(empty())); store.close(); @@ -412,7 +411,7 @@ public void shouldRemoveMetricsOnClose() { @Test public void shouldRemoveMetricsEvenIfWrappedStoreThrowsOnClose() { doThrow(new 
RuntimeException("Oops!")).when(innerStoreMock).close(); - store.init((StateStoreContext) context, store); + store.init(context, store); // There's always a "count" metric registered assertThat(storeMetrics(), not(empty())); @@ -425,37 +424,41 @@ public void shouldThrowNullPointerOnPutIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.put(null, "a", 1L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnFetchIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.fetch(null, 0L, 1L)); } + @SuppressWarnings("resource") @Test public void shouldThrowNullPointerOnBackwardFetchIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.backwardFetch(null, 0L, 1L)); } + @SuppressWarnings("unused") @Test public void shouldTrackOpenIteratorsMetric() { when(innerStoreMock.all()).thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); final KafkaMetric openIteratorsMetric = metric("num-open-iterators"); assertThat(openIteratorsMetric, not(nullValue())); assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); - try (final KeyValueIterator, String> iterator = store.all()) { + try (final KeyValueIterator, String> unused = store.all()) { assertThat((Long) openIteratorsMetric.metricValue(), equalTo(1L)); } assertThat((Long) openIteratorsMetric.metricValue(), equalTo(0L)); } + @SuppressWarnings("unused") @Test public void shouldTimeIteratorDuration() { when(innerStoreMock.all()).thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); final KafkaMetric iteratorDurationAvgMetric = metric("iterator-duration-avg"); final KafkaMetric iteratorDurationMaxMetric = metric("iterator-duration-max"); @@ -465,7 +468,7 @@ public void shouldTimeIteratorDuration() { assertThat((Double) iteratorDurationAvgMetric.metricValue(), equalTo(Double.NaN)); assertThat((Double) iteratorDurationMaxMetric.metricValue(), equalTo(Double.NaN)); - try (final KeyValueIterator, String> iterator = store.all()) { + try (final KeyValueIterator, String> unused = store.all()) { // nothing to do, just close immediately mockTime.sleep(2); } @@ -473,7 +476,7 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationAvgMetric.metricValue(), equalTo(2.0 * TimeUnit.MILLISECONDS.toNanos(1))); assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(2.0 * TimeUnit.MILLISECONDS.toNanos(1))); - try (final KeyValueIterator, String> iterator = store.all()) { + try (final KeyValueIterator, String> unused = store.all()) { // nothing to do, just close immediately mockTime.sleep(3); } @@ -482,10 +485,11 @@ public void shouldTimeIteratorDuration() { assertThat((double) iteratorDurationMaxMetric.metricValue(), equalTo(3.0 * TimeUnit.MILLISECONDS.toNanos(1))); } + @SuppressWarnings("unused") @Test public void shouldTrackOldestOpenIteratorTimestamp() { when(innerStoreMock.all()).thenReturn(KeyValueIterators.emptyIterator()); - store.init((StateStoreContext) context, store); + store.init(context, store); final KafkaMetric oldestIteratorTimestampMetric = metric("oldest-iterator-open-since-ms"); assertThat(oldestIteratorTimestampMetric, not(nullValue())); @@ -495,7 +499,7 @@ public void shouldTrackOldestOpenIteratorTimestamp() { KeyValueIterator, String> second = null; final long secondTimestamp; try { - try (final KeyValueIterator, String> first = store.all()) { + try (final KeyValueIterator, String> unused = 
store.all()) { final long oldestTimestamp = mockTime.milliseconds(); assertThat((Long) oldestIteratorTimestampMetric.metricValue(), equalTo(oldestTimestamp)); mockTime.sleep(100); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/ReadOnlyWindowStoreStub.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/ReadOnlyWindowStoreStub.java index 67f27f4a1dd87..c574950ac9002 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/ReadOnlyWindowStoreStub.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/ReadOnlyWindowStoreStub.java @@ -378,7 +378,7 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) {} + public void init(final StateStoreContext stateStoreContext, final StateStore root) {} @Override public void flush() { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java index 5ddcf5bef551e..08248b020544e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java @@ -28,7 +28,6 @@ import org.rocksdb.AbstractCompactionFilter.Context; import org.rocksdb.AbstractCompactionFilterFactory; import org.rocksdb.AbstractWalFilter; -import org.rocksdb.AccessHint; import org.rocksdb.BuiltinComparator; import org.rocksdb.ColumnFamilyOptions; import org.rocksdb.CompactionPriority; @@ -112,6 +111,8 @@ public class RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest { add("setMaxBackgroundCompactions"); add("maxBackgroundFlushes"); add("setMaxBackgroundFlushes"); + add("tablePropertiesCollectorFactory"); + add("setTablePropertiesCollectorFactory"); addAll(walRelatedMethods); } }; @@ -176,9 +177,6 @@ private Object[] getDBOptionsParameters(final Class[] parameterTypes) throws case "java.util.Collection": parameters[i] = new ArrayList<>(); break; - case "org.rocksdb.AccessHint": - parameters[i] = AccessHint.NONE; - break; case "org.rocksdb.Cache": parameters[i] = new LRUCache(1L); break; diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBStoreTest.java index 2c98c3427d0d3..8a02289890e79 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBStoreTest.java @@ -33,6 +33,7 @@ import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.serialization.UUIDSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -132,7 +133,7 @@ public class RocksDBStoreTest extends AbstractKeyValueStoreTest { @Mock private RocksDBMetricsRecorder metricsRecorder; - InternalMockProcessorContext context; + InternalMockProcessorContext context; RocksDBStore rocksDBStore; @BeforeEach @@ -178,14 +179,14 @@ private RocksDBStore getRocksDBStoreWithCustomManagedIterators() { 
return new RocksDBStore(DB_NAME, DB_FILE_DIR, metricsRecorder, false); } - private InternalMockProcessorContext getProcessorContext(final Properties streamsProps) { - return new InternalMockProcessorContext( + private InternalMockProcessorContext getProcessorContext(final Properties streamsProps) { + return new InternalMockProcessorContext<>( TestUtils.tempDirectory(), new StreamsConfig(streamsProps) ); } - private InternalMockProcessorContext getProcessorContext( + private InternalMockProcessorContext getProcessorContext( final RecordingLevel recordingLevel, final Class rocksDBConfigSetterClass) { @@ -195,7 +196,7 @@ private InternalMockProcessorContext getProcessorContext( return getProcessorContext(streamsProps); } - private InternalMockProcessorContext getProcessorContext(final RecordingLevel recordingLevel) { + private InternalMockProcessorContext getProcessorContext(final RecordingLevel recordingLevel) { final Properties streamsProps = StreamsTestUtils.getStreamsConfig(); streamsProps.setProperty(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, recordingLevel.name()); return getProcessorContext(streamsProps); @@ -235,7 +236,7 @@ public void shouldRemoveValueProvidersFromInjectedMetricsRecorderOnClose() { } public static class RocksDBConfigSetterWithUserProvidedStatistics implements RocksDBConfigSetter { - public RocksDBConfigSetterWithUserProvidedStatistics(){} + public RocksDBConfigSetterWithUserProvidedStatistics() {} public void setConfig(final String storeName, final Options options, final Map configs) { lastStatistics = new Statistics(); @@ -306,7 +307,7 @@ public void shouldCloseStatisticsWhenUserProvidesNoStatistics() throws Exception public static class RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig implements RocksDBConfigSetter { - public RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig(){} + public RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig() {} public void setConfig(final String storeName, final Options options, final Map configs) { options.setTableFormatConfig(new BlockBasedTableConfig()); @@ -335,7 +336,7 @@ public void shouldThrowWhenUserProvidesNewBlockBasedTableFormatConfig() { } public static class RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig implements RocksDBConfigSetter { - public RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig(){} + public RocksDBConfigSetterWithUserProvidedNewPlainTableFormatConfig() {} public void setConfig(final String storeName, final Options options, final Map configs) { options.setTableFormatConfig(new PlainTableConfig()); @@ -361,7 +362,7 @@ public void shouldNotSetCacheInValueProvidersWhenUserProvidesPlainTableFormatCon @Test public void shouldNotThrowExceptionOnRestoreWhenThereIsPreExistingRocksDbFiles() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.put(new Bytes("existingKey".getBytes(UTF_8)), "existingValue".getBytes(UTF_8)); rocksDBStore.flush(); @@ -388,14 +389,14 @@ public void shouldCallRocksDbConfigSetter() { props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, MockRocksDbConfigSetter.class); final Object param = new Object(); props.put("abc.def", param); - final InternalMockProcessorContext context = new InternalMockProcessorContext( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( dir, Serdes.String(), Serdes.String(), new StreamsConfig(props) ); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + 
rocksDBStore.init(context, rocksDBStore); assertTrue(MockRocksDbConfigSetter.called); assertThat(MockRocksDbConfigSetter.configMap.get("abc.def"), equalTo(param)); @@ -404,7 +405,7 @@ public void shouldCallRocksDbConfigSetter() { @Test public void shouldThrowProcessorStateExceptionOnOpeningReadOnlyDir() { final File tmpDir = TestUtils.tempDirectory(); - final InternalMockProcessorContext tmpContext = new InternalMockProcessorContext(tmpDir, new StreamsConfig(StreamsTestUtils.getStreamsConfig())); + final InternalMockProcessorContext tmpContext = new InternalMockProcessorContext<>(tmpDir, new StreamsConfig(StreamsTestUtils.getStreamsConfig())); assertTrue(tmpDir.setReadOnly()); @@ -424,7 +425,7 @@ public void shouldPutAll() { new Bytes(stringSerializer.serialize(null, "3")), stringSerializer.serialize(null, "c"))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.putAll(entries); rocksDBStore.flush(); @@ -447,7 +448,7 @@ public void shouldPutAll() { @Test public void shouldMatchPositionAfterPut() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders())); rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "one")), stringSerializer.serialize(null, "A")); @@ -483,7 +484,7 @@ public void shouldReturnKeysWithGivenPrefix() { new Bytes(stringSerializer.serialize(null, "prefix_1")), stringSerializer.serialize(null, "f"))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.putAll(entries); rocksDBStore.flush(); @@ -518,7 +519,7 @@ public void shouldReturnKeysWithGivenPrefixExcludingNextKeyLargestKey() { new Bytes(stringSerializer.serialize(null, "abce")), stringSerializer.serialize(null, "f"))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.putAll(entries); rocksDBStore.flush(); @@ -537,7 +538,7 @@ public void shouldReturnKeysWithGivenPrefixExcludingNextKeyLargestKey() { @Test public void shouldAllowCustomManagedIterators() { rocksDBStore = getRocksDBStoreWithCustomManagedIterators(); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final Set> openIterators = new HashSet<>(); final KeyValueIterator prefixScanIterator = rocksDBStore.prefixScan("abcd", stringSerializer, openIterators); @@ -566,10 +567,11 @@ public void shouldAllowCustomManagedIterators() { assertThat(openIterators.size(), is(0)); } + @SuppressWarnings("resource") @Test public void shouldRequireOpenIteratorsWhenUsingCustomManagedIterators() { rocksDBStore = getRocksDBStoreWithCustomManagedIterators(); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThrows(IllegalStateException.class, () -> rocksDBStore.prefixScan("abcd", stringSerializer)); @@ -581,10 +583,11 @@ public void shouldRequireOpenIteratorsWhenUsingCustomManagedIterators() { assertThrows(IllegalStateException.class, () -> rocksDBStore.reverseAll()); } + @SuppressWarnings("resource") @Test public void shouldNotAllowOpenIteratorsWhenUsingAutoManagedIterators() { rocksDBStore = getRocksDBStore(); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final Set> openIterators = new HashSet<>(); assertThrows(IllegalStateException.class, @@ -597,10 
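// Hedged sketch: prefix scans like the ones verified above (where key "abce" is excluded
// from a scan for prefix "abcd") are commonly implemented as a range scan from the prefix
// (inclusive) up to the next possible byte string (exclusive), comparing bytes as unsigned
// values. This helper is illustrative only and is not the store's own bound computation.
import java.util.Arrays;

final class PrefixRangeSketch {
    // Smallest byte string (under unsigned lexicographic order) that is greater than every
    // key starting with 'prefix', or null when no finite upper bound exists (all bytes 0xFF).
    static byte[] upperBound(final byte[] prefix) {
        final byte[] bound = prefix.clone();
        for (int i = bound.length - 1; i >= 0; i--) {
            if (bound[i] != (byte) 0xFF) {
                bound[i]++;
                return Arrays.copyOf(bound, i + 1);
            }
        }
        return null;
    }
}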
+600,11 @@ public void shouldNotAllowOpenIteratorsWhenUsingAutoManagedIterators() { assertThrows(IllegalStateException.class, () -> rocksDBStore.reverseAll(openIterators)); } + @SuppressWarnings("resource") @Test public void shouldReturnUUIDsWithStringPrefix() { final List> entries = new ArrayList<>(); - final Serializer uuidSerializer = Serdes.UUID().serializer(); + final UUIDSerializer uuidSerializer = new UUIDSerializer(); final UUID uuid1 = UUID.randomUUID(); final UUID uuid2 = UUID.randomUUID(); final String prefix = uuid1.toString().substring(0, 4); @@ -613,7 +617,7 @@ public void shouldReturnUUIDsWithStringPrefix() { new Bytes(uuidSerializer.serialize(null, uuid2)), stringSerializer.serialize(null, "b"))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.putAll(entries); rocksDBStore.flush(); @@ -648,7 +652,7 @@ public void shouldReturnNoKeys() { entries.add(new KeyValue<>( new Bytes(stringSerializer.serialize(null, "c")), stringSerializer.serialize(null, "e"))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.putAll(entries); rocksDBStore.flush(); @@ -667,7 +671,7 @@ public void shouldReturnNoKeys() { public void shouldRestoreAll() { final List> entries = getKeyValueEntries(); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restore(rocksDBStore.name(), entries); assertEquals( @@ -689,7 +693,7 @@ public void shouldRestoreAll() { @Test public void shouldPutOnlyIfAbsentValue() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final Bytes keyBytes = new Bytes(stringSerializer.serialize(null, "one")); final byte[] valueBytes = stringSerializer.serialize(null, "A"); final byte[] valueBytesUpdate = stringSerializer.serialize(null, "B"); @@ -706,7 +710,7 @@ public void shouldHandleDeletesOnRestoreAll() { final List> entries = getKeyValueEntries(); entries.add(new KeyValue<>("1".getBytes(UTF_8), null)); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restore(rocksDBStore.name(), entries); try (final KeyValueIterator iterator = rocksDBStore.all()) { @@ -731,7 +735,7 @@ public void shouldHandleDeletesAndPutBackOnRestoreAll() { // this will restore key "1" as WriteBatch applies updates in order entries.add(new KeyValue<>("1".getBytes(UTF_8), "restored".getBytes(UTF_8))); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restore(rocksDBStore.name(), entries); try (final KeyValueIterator iterator = rocksDBStore.all()) { @@ -765,7 +769,7 @@ public void shouldHandleDeletesAndPutBackOnRestoreAll() { public void shouldRestoreThenDeleteOnRestoreAll() { final List> entries = getKeyValueEntries(); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restore(rocksDBStore.name(), entries); @@ -806,7 +810,7 @@ public void shouldRestoreThenDeleteOnRestoreAll() { @Test public void shouldThrowNullPointerExceptionOnNullPut() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThrows( NullPointerException.class, () -> rocksDBStore.put(null, stringSerializer.serialize(null, "someVal"))); @@ -814,7 +818,7 @@ public void shouldThrowNullPointerExceptionOnNullPut() { @Test public void 
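// Hedged model in plain Java (not RocksDB's WriteBatch itself) of the restore semantics the
// tests above rely on: changelog records are applied in order, a null value is a tombstone
// that deletes the key, and a later put for the same key "puts it back", so the final state
// reflects the last update per key.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class RestoreReplaySketch {
    static Map<String, String> replay(final List<Map.Entry<String, String>> changelog) {
        final Map<String, String> state = new LinkedHashMap<>();
        for (final Map.Entry<String, String> record : changelog) {
            if (record.getValue() == null) {
                state.remove(record.getKey());                  // tombstone deletes the key
            } else {
                state.put(record.getKey(), record.getValue());  // later puts win, restoring deleted keys
            }
        }
        return state;
    }
}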
shouldThrowNullPointerExceptionOnNullPutAll() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThrows( NullPointerException.class, () -> rocksDBStore.put(null, stringSerializer.serialize(null, "someVal"))); @@ -822,7 +826,7 @@ public void shouldThrowNullPointerExceptionOnNullPutAll() { @Test public void shouldThrowNullPointerExceptionOnNullGet() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThrows( NullPointerException.class, () -> rocksDBStore.get(null)); @@ -830,7 +834,7 @@ public void shouldThrowNullPointerExceptionOnNullGet() { @Test public void shouldThrowNullPointerExceptionOnDelete() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThrows( NullPointerException.class, () -> rocksDBStore.delete(null)); @@ -838,7 +842,7 @@ public void shouldThrowNullPointerExceptionOnDelete() { @Test public void shouldReturnValueOnRange() { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final KeyValue kv0 = new KeyValue<>("0", "zero"); final KeyValue kv1 = new KeyValue<>("1", "one"); @@ -859,7 +863,7 @@ public void shouldReturnValueOnRange() { @Test public void shouldThrowProcessorStateExceptionOnPutDeletedDir() throws IOException { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); Utils.delete(dir); rocksDBStore.put( new Bytes(stringSerializer.serialize(null, "anyKey")), @@ -872,13 +876,13 @@ public void shouldHandleToggleOfEnablingBloomFilters() { final Properties props = StreamsTestUtils.getStreamsConfig(); props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, TestingBloomFilterRocksDBConfigSetter.class); dir = TestUtils.tempDirectory(); - context = new InternalMockProcessorContext(dir, + context = new InternalMockProcessorContext<>(dir, Serdes.String(), Serdes.String(), new StreamsConfig(props)); enableBloomFilters = false; - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final List expectedValues = new ArrayList<>(); expectedValues.add("a"); @@ -903,7 +907,7 @@ public void shouldHandleToggleOfEnablingBloomFilters() { // reopen with Bloom Filters enabled // should open fine without errors enableBloomFilters = true; - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); for (final KeyValue keyValue : keyValues) { final byte[] valBytes = rocksDBStore.get(new Bytes(keyValue.key)); @@ -930,7 +934,7 @@ public void shouldVerifyThatMetricsRecordedFromStatisticsGetMeasurementsFromRock final MonotonicProcessorRecordContext processorRecordContext = new MonotonicProcessorRecordContext("test", 0); when(context.recordMetadata()).thenReturn(Optional.of(processorRecordContext)); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final byte[] key = "hello".getBytes(); final byte[] value = "world".getBytes(); rocksDBStore.put(Bytes.wrap(key), value); @@ -963,7 +967,7 @@ public void shouldVerifyThatMetricsRecordedFromPropertiesGetMeasurementsFromRock final MonotonicProcessorRecordContext processorRecordContext = new MonotonicProcessorRecordContext("test", 0); when(context.recordMetadata()).thenReturn(Optional.of(processorRecordContext)); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + 
rocksDBStore.init(context, rocksDBStore); final byte[] key = "hello".getBytes(); final byte[] value = "world".getBytes(); rocksDBStore.put(Bytes.wrap(key), value); @@ -993,7 +997,7 @@ public void shouldVerifyThatPropertyBasedMetricsUseValidPropertyName() { when(context.appConfigs()).thenReturn(new StreamsConfig(props).originals()); when(context.stateDir()).thenReturn(dir); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); final List propertyNames = Arrays.asList( "num-entries-active-mem-table", @@ -1068,10 +1072,10 @@ public void shouldCloseOpenRangeIteratorsWhenStoreClosedAndThrowInvalidStateStor store.close(); - assertThrows(InvalidStateStoreException.class, () -> iteratorOne.hasNext()); - assertThrows(InvalidStateStoreException.class, () -> iteratorOne.next()); - assertThrows(InvalidStateStoreException.class, () -> iteratorTwo.hasNext()); - assertThrows(InvalidStateStoreException.class, () -> iteratorTwo.next()); + assertThrows(InvalidStateStoreException.class, iteratorOne::hasNext); + assertThrows(InvalidStateStoreException.class, iteratorOne::next); + assertThrows(InvalidStateStoreException.class, iteratorTwo::hasNext); + assertThrows(InvalidStateStoreException.class, iteratorTwo::next); } } @@ -1088,7 +1092,7 @@ public void shouldRestoreRecordsAndConsistencyVectorSingleTopic() { Serdes.String(), new StreamsConfig(props) ); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restoreWithHeaders(rocksDBStore.name(), entries); assertEquals( @@ -1125,7 +1129,7 @@ public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() { Serdes.String(), new StreamsConfig(props) ); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restoreWithHeaders(rocksDBStore.name(), entries); assertEquals( @@ -1164,7 +1168,7 @@ public void shouldHandleTombstoneRecords() { Serdes.String(), new StreamsConfig(props) ); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restoreWithHeaders(rocksDBStore.name(), entries); assertNull(stringDeserializer.deserialize( @@ -1188,7 +1192,7 @@ public void shouldNotThrowWhenRestoringOnMissingHeaders() { Serdes.String(), new StreamsConfig(props) ); - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); context.restore(rocksDBStore.name(), entries); assertThat(rocksDBStore.getPosition(), is(Position.emptyPosition())); } @@ -1281,8 +1285,7 @@ private List> getKeyValueEntries() { private List> getDeserializedList(final KeyValueIterator iter) { final List> bytes = Utils.toList(iter); - final List> result = bytes.stream().map(kv -> new KeyValue(kv.key.toString(), stringDeserializer.deserialize(null, kv.value))).collect(Collectors.toList()); - return result; + return bytes.stream().map(kv -> new KeyValue<>(kv.key.toString(), stringDeserializer.deserialize(null, kv.value))).collect(Collectors.toList()); } private Statistics getStatistics(final RocksDBStore rocksDBStore) throws Exception { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java index 8fbde2f78e172..69e7d31e31b4b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java +++ 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBufferTest.java @@ -19,16 +19,16 @@ import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.SerdeGetter; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -61,20 +61,18 @@ public class RocksDBTimeOrderedKeyValueBufferTest { @BeforeEach public void setUp() { - when(serdeGetter.keySerde()).thenReturn(new Serdes.StringSerde()); - when(serdeGetter.valueSerde()).thenReturn(new Serdes.StringSerde()); final Metrics metrics = new Metrics(); offset = 0; streamsMetrics = new StreamsMetricsImpl(metrics, "test-client", "processId", new MockTime()); - context = new MockInternalNewProcessorContext<>(StreamsTestUtils.getStreamsConfig(), new TaskId(0, 0), TestUtils.tempDirectory()); + context = new MockInternalProcessorContext<>(StreamsTestUtils.getStreamsConfig(), new TaskId(0, 0), TestUtils.tempDirectory()); } - private void createBuffer(final Duration grace) { + private void createBuffer(final Duration grace, final Serde serde) { final RocksDBTimeOrderedKeyValueBytesStore store = new RocksDBTimeOrderedKeyValueBytesStoreSupplier("testing").get(); - buffer = new RocksDBTimeOrderedKeyValueBuffer<>(store, grace, "testing", false); + buffer = new RocksDBTimeOrderedKeyValueBuffer<>(store, serde, serde, grace, "testing", false); buffer.setSerdesIfNull(serdeGetter); - buffer.init((StateStoreContext) context, store); + buffer.init(context, store); } private boolean pipeRecord(final String key, final String value, final long time) { @@ -83,16 +81,19 @@ private boolean pipeRecord(final String key, final String value, final long time return buffer.put(time, record, context.recordContext()); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldReturnIfRecordWasAdded() { - createBuffer(Duration.ofMillis(1)); + when(serdeGetter.keySerde()).thenReturn((Serde) new Serdes.StringSerde()); + when(serdeGetter.valueSerde()).thenReturn((Serde) new Serdes.StringSerde()); + createBuffer(Duration.ofMillis(1), null); assertThat(pipeRecord("K", "V", 2L), equalTo(true)); assertThat(pipeRecord("K", "V", 0L), equalTo(false)); } @Test public void shouldPutInBufferAndUpdateFields() { - createBuffer(Duration.ofMinutes(1)); + createBuffer(Duration.ofMinutes(1), Serdes.String()); assertNumSizeAndTimestamp(buffer, 0, Long.MAX_VALUE, 0); pipeRecord("1", "0", 0L); assertNumSizeAndTimestamp(buffer, 1, 0, 42); @@ -102,7 +103,7 @@ public void shouldPutInBufferAndUpdateFields() { @Test public void shouldAddAndEvictRecord() { - createBuffer(Duration.ZERO); + createBuffer(Duration.ZERO, Serdes.String()); final AtomicInteger count = new AtomicInteger(0); pipeRecord("1", "0", 0L); 
assertNumSizeAndTimestamp(buffer, 1, 0, 42); @@ -111,9 +112,12 @@ public void shouldAddAndEvictRecord() { assertThat(count.get(), equalTo(1)); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldAddAndEvictRecordTwice() { - createBuffer(Duration.ZERO); + when(serdeGetter.keySerde()).thenReturn((Serde) new Serdes.StringSerde()); + when(serdeGetter.valueSerde()).thenReturn((Serde) new Serdes.StringSerde()); + createBuffer(Duration.ZERO, null); final AtomicInteger count = new AtomicInteger(0); pipeRecord("1", "0", 0L); assertNumSizeAndTimestamp(buffer, 1, 0, 42); @@ -129,7 +133,7 @@ public void shouldAddAndEvictRecordTwice() { @Test public void shouldAddAndEvictRecordTwiceWithNonZeroGrace() { - createBuffer(Duration.ofMillis(1)); + createBuffer(Duration.ofMillis(1), Serdes.String()); final AtomicInteger count = new AtomicInteger(0); pipeRecord("1", "0", 0L); buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement()); @@ -141,9 +145,12 @@ public void shouldAddAndEvictRecordTwiceWithNonZeroGrace() { assertThat(count.get(), equalTo(1)); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldAddRecordsTwiceAndEvictRecordsOnce() { - createBuffer(Duration.ZERO); + when(serdeGetter.keySerde()).thenReturn((Serde) new Serdes.StringSerde()); + when(serdeGetter.valueSerde()).thenReturn((Serde) new Serdes.StringSerde()); + createBuffer(Duration.ZERO, null); final AtomicInteger count = new AtomicInteger(0); pipeRecord("1", "0", 0L); buffer.evictWhile(() -> buffer.numRecords() > 1, r -> count.getAndIncrement()); @@ -153,9 +160,12 @@ public void shouldAddRecordsTwiceAndEvictRecordsOnce() { assertThat(count.get(), equalTo(2)); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldDropLateRecords() { - createBuffer(Duration.ZERO); + when(serdeGetter.keySerde()).thenReturn((Serde) new Serdes.StringSerde()); + when(serdeGetter.valueSerde()).thenReturn((Serde) new Serdes.StringSerde()); + createBuffer(Duration.ZERO, null); pipeRecord("1", "0", 1L); assertNumSizeAndTimestamp(buffer, 1, 1, 42); pipeRecord("2", "0", 0L); @@ -164,7 +174,7 @@ public void shouldDropLateRecords() { @Test public void shouldDropLateRecordsWithNonZeroGrace() { - createBuffer(Duration.ofMillis(1)); + createBuffer(Duration.ofMillis(1), Serdes.String()); pipeRecord("1", "0", 2L); assertNumSizeAndTimestamp(buffer, 1, 2, 42); pipeRecord("2", "0", 1L); @@ -173,9 +183,12 @@ public void shouldDropLateRecordsWithNonZeroGrace() { assertNumSizeAndTimestamp(buffer, 2, 1, 84); } + @SuppressWarnings({"rawtypes", "unchecked"}) @Test public void shouldHandleCollidingKeys() { - createBuffer(Duration.ofMillis(1)); + when(serdeGetter.keySerde()).thenReturn((Serde) new Serdes.StringSerde()); + when(serdeGetter.valueSerde()).thenReturn((Serde) new Serdes.StringSerde()); + createBuffer(Duration.ofMillis(1), null); final AtomicInteger count = new AtomicInteger(0); pipeRecord("2", "0", 0L); buffer.evictWhile(() -> buffer.numRecords() > 0, r -> count.getAndIncrement()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBytesStoreTest.java index 08271715a8da8..1b465da5111e1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedKeyValueBytesStoreTest.java @@ -23,7 +23,6 @@ 
import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.test.InternalMockProcessorContext; @@ -45,7 +44,7 @@ class RocksDBTimeOrderedKeyValueBytesStoreTest { - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private RocksDBTimeOrderedKeyValueBytesStore bytesStore; private File stateDir; final String storeName = "bytes-store"; @@ -65,7 +64,7 @@ public void before() { new MockRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())) ); - bytesStore.init((StateStoreContext) context, bytesStore); + bytesStore.init(context, bytesStore); } @AfterEach @@ -94,6 +93,7 @@ public void shouldCreateEmptyWriteBatches() { assertEquals(0, writeBatchMap.size()); } + @SuppressWarnings("resource") private byte[] serializeValue(final Long value) { final Serde valueSerde = new Serdes.LongSerde(); final byte[] valueBytes = valueSerde.serializer().serialize(topic, value); @@ -101,6 +101,7 @@ private byte[] serializeValue(final Long value) { return buffered.serialize(0).array(); } + @SuppressWarnings("resource") private Bytes serializeKey(final String key, final int seqnum, final long timestamp) { final Serde keySerde = new Serdes.StringSerde(); return Bytes.wrap( diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java index 27e34fd1b266c..c3bb67cd5240a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.state.KeyValueIterator; import org.hamcrest.core.IsNull; @@ -55,7 +54,7 @@ RocksDBStore getRocksDBStore() { @Test public void shouldOpenNewStoreInRegularMode() { try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); } @@ -68,13 +67,13 @@ public void shouldOpenNewStoreInRegularMode() { @Test public void shouldOpenExistingStoreInRegularMode() throws Exception { // prepare store - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes()); rocksDBStore.close(); // re-open store try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); } finally { @@ -127,7 +126,7 @@ public void 
shouldMigrateDataFromDefaultToTimestampColumnFamily() throws Excepti prepareOldStore(); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode")); } @@ -425,7 +424,7 @@ private void verifyOldAndNewColumnFamily() throws Exception { // check that still in upgrade mode try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode")); } finally { @@ -459,7 +458,7 @@ private void verifyOldAndNewColumnFamily() throws Exception { // check that still in regular mode try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + rocksDBStore.init(context, rocksDBStore); assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); } @@ -468,7 +467,7 @@ private void verifyOldAndNewColumnFamily() throws Exception { private void prepareOldStore() { final RocksDBStore keyValueStore = new RocksDBStore(DB_NAME, METRICS_SCOPE); try { - keyValueStore.init((StateStoreContext) context, keyValueStore); + keyValueStore.init(context, keyValueStore); keyValueStore.put(new Bytes("key1".getBytes()), "1".getBytes()); keyValueStore.put(new Bytes("key2".getBytes()), "22".getBytes()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreTest.java index 00cbdd198b6f6..19fbd9f486c07 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBVersionedStoreTest.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.query.ResultOrder; import org.apache.kafka.streams.state.VersionedRecord; import org.apache.kafka.streams.state.VersionedRecordIterator; @@ -69,7 +68,7 @@ public class RocksDBVersionedStoreTest { private static final String DROPPED_RECORDS_METRIC = "dropped-records-total"; private static final String TASK_LEVEL_GROUP = "stream-task-metrics"; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private Map expectedMetricsTags; private RocksDBVersionedStore store; @@ -90,7 +89,7 @@ public void before() { ); store = new RocksDBVersionedStore(STORE_NAME, METRICS_SCOPE, HISTORY_RETENTION, SEGMENT_INTERVAL); - store.init((StateStoreContext) context, store); + store.init(context, store); } @AfterEach @@ -801,7 +800,7 @@ public void shouldAllowZeroHistoryRetention() { // recreate store with zero history retention store.close(); store = new RocksDBVersionedStore(STORE_NAME, METRICS_SCOPE, 0L, SEGMENT_INTERVAL); - store.init((StateStoreContext) context, store); + store.init(context, store); // put and get putToStore("k", "v", BASE_TIMESTAMP, PUT_RETURN_CODE_VALID_TO_UNDEFINED); diff 
--git a/streams/src/test/java/org/apache/kafka/streams/state/internals/SegmentIteratorTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/SegmentIteratorTest.java index 0e68c7f8fcd7b..99cd84a228480 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/SegmentIteratorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/SegmentIteratorTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecorder; @@ -55,10 +54,9 @@ public class SegmentIteratorTest { private SegmentIterator iterator = null; - @SuppressWarnings("rawtypes") @BeforeEach public void before() { - final InternalMockProcessorContext context = new InternalMockProcessorContext<>( + final InternalMockProcessorContext context = new InternalMockProcessorContext<>( TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), @@ -67,8 +65,8 @@ public void before() { new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))); - segmentOne.init((StateStoreContext) context, segmentOne); - segmentTwo.init((StateStoreContext) context, segmentTwo); + segmentOne.init(context, segmentOne); + segmentTwo.init(context, segmentTwo); segmentOne.put(Bytes.wrap("a".getBytes()), "1".getBytes()); segmentOne.put(Bytes.wrap("b".getBytes()), "2".getBytes()); segmentTwo.put(Bytes.wrap("c".getBytes()), "3".getBytes()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializerTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializerTest.java index 2891825ba5207..41f8758b0c691 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/StoreSerdeInitializerTest.java @@ -21,10 +21,8 @@ import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.kstream.internals.WrappingNullableUtils; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.state.StateSerdes; -import org.apache.kafka.test.MockInternalNewProcessorContext; +import org.apache.kafka.test.MockInternalProcessorContext; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -56,13 +54,13 @@ public void shouldPrepareStoreSerdeForProcessorContext() { final Serde keySerde = new Serdes.StringSerde(); final Serde valueSerde = new Serdes.StringSerde(); - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareKeySerde(any(), any())).thenReturn(keySerde); utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())).thenReturn(valueSerde); final StateSerdes result = StoreSerdeInitializer.prepareStoreSerde( - (ProcessorContext) context, "myStore", "topic", keySerde, valueSerde, WrappingNullableUtils::prepareValueSerde); + context, "myStore", "topic", keySerde, valueSerde, 
WrappingNullableUtils::prepareValueSerde); assertThat(result.keySerde(), equalTo(keySerde)); assertThat(result.valueSerde(), equalTo(valueSerde)); @@ -71,13 +69,13 @@ public void shouldPrepareStoreSerdeForProcessorContext() { @Test public void shouldThrowStreamsExceptionOnUndefinedKeySerdeForProcessorContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareKeySerde(any(), any())) .thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((ProcessorContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize key serdes for store myStore")); @@ -86,13 +84,13 @@ public void shouldThrowStreamsExceptionOnUndefinedKeySerdeForProcessorContext() @Test public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForProcessorContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())) .thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((ProcessorContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore")); @@ -101,13 +99,13 @@ public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForProcessorContext( @Test public void shouldThrowStreamsExceptionOnUndefinedKeySerdeForStateStoreContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareKeySerde(any(), any())) .thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((StateStoreContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize key serdes for store myStore")); @@ -116,13 +114,13 @@ public void shouldThrowStreamsExceptionOnUndefinedKeySerdeForStateStoreContext() @Test public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForStateStoreContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())) .thenThrow(new ConfigException("Please set 
StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((StateStoreContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore")); @@ -131,12 +129,12 @@ public void shouldThrowStreamsExceptionOnUndefinedValueSerdeForStateStoreContext @Test public void shouldThrowStreamsExceptionWithExplicitErrorMessageForProcessorContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareKeySerde(any(), any())).thenThrow(new StreamsException("")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((ProcessorContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize key serdes for store myStore")); @@ -144,12 +142,12 @@ public void shouldThrowStreamsExceptionWithExplicitErrorMessageForProcessorConte @Test public void shouldThrowStreamsExceptionWithExplicitErrorMessageForStateStoreContext() { - final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(); utilsMock.when(() -> WrappingNullableUtils.prepareValueSerde(any(), any())).thenThrow(new StreamsException("")); final Throwable exception = assertThrows(StreamsException.class, - () -> StoreSerdeInitializer.prepareStoreSerde((StateStoreContext) context, "myStore", "topic", + () -> StoreSerdeInitializer.prepareStoreSerde(context, "myStore", "topic", new Serdes.StringSerde(), new Serdes.StringSerde(), WrappingNullableUtils::prepareValueSerde)); assertThat(exception.getMessage(), equalTo("Failed to initialize value serdes for store myStore")); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/StreamThreadStateStoreProviderTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/StreamThreadStateStoreProviderTest.java index d1cdf53c3e4f1..212a65eacddf9 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/StreamThreadStateStoreProviderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/StreamThreadStateStoreProviderTest.java @@ -21,7 +21,7 @@ import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.common.PartitionInfo; @@ -165,8 +165,8 @@ public void before() { properties.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.getPath()); final StreamsConfig streamsConfig = new StreamsConfig(properties); - final MockConsumer mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); - final 
MockConsumer mockRestoreConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + final MockConsumer mockConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); + final MockConsumer mockRestoreConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); final MockProducer mockProducer = new MockProducer<>(); final MockAdminClient mockAdminClient = MockAdminClient.create().build(); configureClients(mockRestoreConsumer, mockAdminClient, "applicationId-kv-store-changelog"); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java index fbe6f1b73e90b..f2f1d513704b5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java @@ -19,8 +19,10 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; @@ -34,7 +36,6 @@ import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.TimeWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.api.Record; @@ -72,7 +73,7 @@ import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; import static org.apache.kafka.streams.state.internals.ThreadCacheTest.memoryCacheEntrySize; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.verifyAllWindowedKeyValues; import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; @@ -102,7 +103,7 @@ public class TimeOrderedCachingPersistentWindowStoreTest { private static final String CACHE_NAMESPACE = "0_0-store-name"; private ThreadCache cache; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private TimeFirstWindowKeySchema baseKeySchema; private WindowStore underlyingStore; private TimeOrderedCachingWindowStore cachingStore; @@ -121,7 +122,7 @@ private void setUp(final boolean hasIndex) { cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics())); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @AfterEach @@ 
-137,8 +138,8 @@ public void shouldDelegateInit(final boolean hasIndex) { when(inner.hasIndex()).thenReturn(hasIndex); final TimeOrderedCachingWindowStore outer = new TimeOrderedCachingWindowStore(inner, WINDOW_SIZE, SEGMENT_INTERVAL); - outer.init((StateStoreContext) context, outer); - verify(inner, times(1)).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner, times(1)).init(context, outer); } @ParameterizedTest @@ -217,8 +218,8 @@ public void process(final Record record) { final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); - streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); + streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000L); @@ -226,8 +227,8 @@ public void process(final Record record) { final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), streamsConfiguration, initialWallClockTime); final TestInputTopic inputTopic = driver.createInputTopic(TOPIC, - Serdes.String().serializer(), - Serdes.String().serializer(), + new StringSerializer(), + new StringSerializer(), initialWallClockTime, Duration.ZERO); @@ -335,8 +336,9 @@ private static Bytes bytesKey(final String key) { return Bytes.wrap(key.getBytes()); } + @SuppressWarnings("resource") private String stringFrom(final byte[] from) { - return Serdes.String().deserializer().deserialize("", from); + return new StringDeserializer().deserialize("", from); } @ParameterizedTest @@ -887,6 +889,7 @@ public void shouldClearNamespaceCacheOnClose(final boolean hasIndex) { assertEquals(0, cache.size()); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowIfTryingToFetchFromClosedCachingStore(final boolean hasIndex) { @@ -895,6 +898,7 @@ public void shouldThrowIfTryingToFetchFromClosedCachingStore(final boolean hasIn assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(10))); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowIfTryingToFetchRangeFromClosedCachingStore(final boolean hasIndex) { @@ -945,7 +949,7 @@ public void shouldSkipNonExistBaseKeyInCache(final boolean hasIndex) { windowedPair("a", "0001", 1), windowedPair("aa", "0002", 0) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -954,7 +958,7 @@ public void shouldSkipNonExistBaseKeyInCache(final boolean hasIndex) { windowedPair("aa", "0002", 0), windowedPair("a", "0001", 1) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -976,7 +980,7 @@ public void shouldFetchAndIterateOverExactKeys(final boolean hasIndex) { 
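Note on the serde-related replacements in the hunks above: swapping Serdes.String().getClass().getName() for Serdes.StringSerde.class.getName(), and Serdes.String().serializer()/deserializer() for new StringSerializer()/new StringDeserializer(), produces the same configuration values and bytes while avoiding a throwaway Serde wrapper; a plausible reading is that Serde being Closeable makes the discarded instance trip resource checks, with the remaining direct serializer instances covered by @SuppressWarnings("resource"). A small sketch of the equivalence, under those assumptions:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class SerdeConfigSketch {
        public static void main(final String[] args) {
            // Both expressions resolve to the same fully qualified class name.
            final String viaInstance = Serdes.String().getClass().getName(); // creates a Closeable Serde just to ask its class
            final String viaLiteral = Serdes.StringSerde.class.getName();    // no instance created
            System.out.println(viaInstance.equals(viaLiteral));              // expected: true

            // Serializing through a plain StringSerializer avoids the Serde wrapper entirely.
            final byte[] bytes = new StringSerializer().serialize("any-topic", "value");
            System.out.println(bytes.length);
        }
    }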
KeyValue.pair(SEGMENT_INTERVAL, bytesValue("0005")) ); final List> actual = - toList(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -996,7 +1000,7 @@ public void shouldBackwardFetchAndIterateOverExactKeys(final boolean hasIndex) { KeyValue.pair(0L, bytesValue("0001")) ); final List> actual = - toList(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -1016,14 +1020,14 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0002", 0), windowedPair("aa", "0004", 1)), - toList(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); if (hasIndex) { @@ -1035,7 +1039,7 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("aa", "0004", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -1047,7 +1051,7 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("aa", "0004", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -1069,14 +1073,14 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0004", 1), windowedPair("aa", "0002", 0)), - toList(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); if (!hasIndex) { @@ -1089,7 +1093,7 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("aa", "0002", 0), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -1102,7 +1106,7 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", 
"0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -1162,6 +1166,7 @@ public void shouldNotThrowNullPointerExceptionOnPutNullValue(final boolean hasIn cachingStore.put(bytesKey("a"), null, 0L); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowNullPointerExceptionOnFetchNullKey(final boolean hasIndex) { @@ -1169,12 +1174,13 @@ public void shouldThrowNullPointerExceptionOnFetchNullKey(final boolean hasIndex assertThrows(NullPointerException.class, () -> cachingStore.fetch(null, ofEpochMilli(1L), ofEpochMilli(2L))); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey(final boolean hasIndex) { setUp(hasIndex); - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { @@ -1191,12 +1197,13 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey(final boolean } } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey(final boolean hasIndex) { setUp(hasIndex); - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); final KeyValueIterator, byte[]> iterator = @@ -1252,7 +1259,7 @@ private void setUpCloseTests() { cache = mock(ThreadCache.class); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } private static KeyValue, byte[]> windowedPair(final String key, final String value, final long timestamp) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java index 1d81dffdd7813..56c19c985299a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java @@ -29,7 +29,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.internals.Change; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import 
org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; @@ -96,21 +95,21 @@ private void setup(final String testName, final Function bufferSuppli this.bufferSupplier = bufferSupplier; } - private static MockInternalProcessorContext makeContext() { + private static MockInternalProcessorContext makeContext() { final Properties properties = new Properties(); properties.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID); properties.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ""); final TaskId taskId = new TaskId(0, 0); - final MockInternalProcessorContext context = new MockInternalProcessorContext(properties, taskId, TestUtils.tempDirectory()); + final MockInternalProcessorContext context = new MockInternalProcessorContext<>(properties, taskId, TestUtils.tempDirectory()); context.setRecordCollector(new MockRecordCollector()); return context; } - private static void cleanup(final MockInternalProcessorContext context, final TimeOrderedKeyValueBuffer> buffer) { + private static void cleanup(final MockInternalProcessorContext context, final TimeOrderedKeyValueBuffer> buffer) { try { buffer.close(); Utils.delete(context.stateDir()); @@ -124,8 +123,8 @@ private static void cleanup(final MockInternalProcessorContext context, final Ti public void shouldInit(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); cleanup(context, buffer); } @@ -134,8 +133,8 @@ public void shouldInit(final String testName, final Function bufferSu public void shouldAcceptData(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "2p93nf"); cleanup(context, buffer); } @@ -145,8 +144,8 @@ public void shouldAcceptData(final String testName, final Function bu public void shouldRejectNullValues(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); try { buffer.put(0, new Record<>("asdf", null, 0L), getContext(0)); fail("expected an exception"); @@ -161,8 +160,8 @@ public void shouldRejectNullValues(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "qwer"); assertThat(buffer.numRecords(), is(1)); buffer.evictWhile(() -> true, kv -> { }); @@ -175,8 +174,8 @@ public void shouldRemoveData(final String testName, final 
Function bu public void shouldRespectEvictionPredicate(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "eyt"); putRecord(buffer, context, 1L, 0L, "zxcv", "rtg"); assertThat(buffer.numRecords(), is(2)); @@ -194,8 +193,8 @@ public void shouldRespectEvictionPredicate(final String testName, final Function public void shouldTrackCount(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "oin"); assertThat(buffer.numRecords(), is(1)); putRecord(buffer, context, 1L, 0L, "asdf", "wekjn"); @@ -210,8 +209,8 @@ public void shouldTrackCount(final String testName, final Function bu public void shouldTrackSize(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "23roni"); assertThat(buffer.bufferSize(), is(43L)); putRecord(buffer, context, 1L, 0L, "asdf", "3l"); @@ -226,8 +225,8 @@ public void shouldTrackSize(final String testName, final Function buf public void shouldTrackMinTimestamp(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 1L, 0L, "asdf", "2093j"); assertThat(buffer.minTimestamp(), is(1L)); putRecord(buffer, context, 0L, 0L, "zxcv", "3gon4i"); @@ -240,8 +239,8 @@ public void shouldTrackMinTimestamp(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 1L, 0L, "zxcv", "o23i4"); assertThat(buffer.numRecords(), is(1)); @@ -288,8 +287,8 @@ public void shouldEvictOldestAndUpdateSizeAndCountAndMinTimestamp(final String t public void shouldReturnUndefinedOnPriorValueForNotBufferedKey(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); 
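Note on the repeated buffer.init((StateStoreContext) context, buffer) → buffer.init(context, buffer) changes above (and the matching ones in the other store tests in this patch): the explicit cast appears to have been needed only to disambiguate between two init overloads, one taking the older ProcessorContext and one taking StateStoreContext, because the mock contexts implement both interfaces; with a single init(StateStoreContext, StateStore) overload left, the cast is redundant. A minimal, self-contained sketch of that ambiguity, using hypothetical stand-in types rather than the real Streams interfaces:

    // Hypothetical stand-ins; only the StateStoreContext name mirrors a real Streams type.
    interface LegacyProcessorContext { }
    interface StateStoreContext { }

    // The mock contexts in the tests implement both interfaces.
    class DualContextMock implements LegacyProcessorContext, StateStoreContext { }

    class InitOverloadSketch {
        void init(final LegacyProcessorContext context, final Object root) { }
        void init(final StateStoreContext context, final Object root) { }

        void demo() {
            final DualContextMock context = new DualContextMock();
            // init(context, this);                  // would not compile: both overloads apply
            init((StateStoreContext) context, this); // the cast selects one overload
            // Once the legacy overload is removed, the un-cast call compiles cleanly.
        }
    }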
assertThat(buffer.priorValueForBuffered("ASDF"), is(Maybe.undefined())); } @@ -299,8 +298,8 @@ public void shouldReturnUndefinedOnPriorValueForNotBufferedKey(final String test public void shouldReturnPriorValueForBufferedKey(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final ProcessorRecordContext recordContext = getContext(0L); context.setRecordContext(recordContext); @@ -315,8 +314,8 @@ public void shouldReturnPriorValueForBufferedKey(final String testName, final Fu public void shouldFlush(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 2L, 0L, "asdf", "2093j"); putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i"); putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef"); @@ -388,8 +387,8 @@ public void shouldFlush(final String testName, final Function bufferS public void shouldRestoreOldUnversionedFormat(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -509,8 +508,8 @@ public void shouldRestoreOldUnversionedFormat(final String testName, final Funct public void shouldRestoreV1Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -633,8 +632,8 @@ public void shouldRestoreV1Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -759,8 +758,8 @@ public void shouldRestoreV3FormatWithV2Header(final String testName, final Funct // V2 header, so we need to be sure to handle this case as well. // Note the data is the same as the V3 test. 
final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -882,8 +881,8 @@ public void shouldRestoreV3FormatWithV2Header(final String testName, final Funct public void shouldRestoreV3Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -1005,8 +1004,8 @@ public void shouldRestoreV3Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -1039,7 +1038,7 @@ public void shouldNotRestoreUnrecognizedVersionRecord(final String testName, fin } private static void putRecord(final TimeOrderedKeyValueBuffer> buffer, - final MockInternalProcessorContext context, + final MockInternalProcessorContext context, final long streamTime, final long recordTimestamp, final String key, @@ -1049,11 +1048,12 @@ private static void putRecord(final TimeOrderedKeyValueBuffer(key, new Change<>(value, null), 0L), recordContext); } + @SuppressWarnings("resource") private static BufferValue getBufferValue(final String value, final long timestamp) { return new BufferValue( null, null, - Serdes.String().serializer().serialize(null, value), + new StringSerializer().serialize(null, value), getContext(timestamp) ); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java index 856649d950394..a82ca8e73006d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java @@ -19,8 +19,10 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; @@ -34,7 +36,6 @@ import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import 
org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.TimeWindow; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; @@ -71,7 +72,7 @@ import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; import static org.apache.kafka.streams.state.internals.ThreadCacheTest.memoryCacheEntrySize; -import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.toListAndCloseIterator; import static org.apache.kafka.test.StreamsTestUtils.verifyAllWindowedKeyValues; import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; @@ -100,7 +101,7 @@ public class TimeOrderedWindowStoreTest { private static final String TOPIC = "topic"; private static final String CACHE_NAMESPACE = "0_0-store-name"; - private InternalMockProcessorContext context; + private InternalMockProcessorContext context; private RocksDBTimeOrderedWindowSegmentedBytesStore bytesStore; private RocksDBTimeOrderedWindowStore underlyingStore; private TimeOrderedCachingWindowStore cachingStore; @@ -120,7 +121,7 @@ public void setUp(final boolean hasIndex) { cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics())); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } @AfterEach @@ -139,8 +140,8 @@ public void shouldDelegateInit(final boolean hasIndex) { reset(inner); when(inner.name()).thenReturn("store"); - outer.init((StateStoreContext) context, outer); - verify(inner).init((StateStoreContext) context, outer); + outer.init(context, outer); + verify(inner).init(context, outer); } @ParameterizedTest @@ -223,8 +224,8 @@ public void process(final Record record) { final Properties streamsConfiguration = new Properties(); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); - streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); + streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000L); @@ -232,8 +233,8 @@ public void process(final Record record) { final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), streamsConfiguration, initialWallClockTime); final TestInputTopic inputTopic = driver.createInputTopic(TOPIC, - Serdes.String().serializer(), - Serdes.String().serializer(), + new StringSerializer(), + new StringSerializer(), initialWallClockTime, Duration.ZERO); @@ -341,8 +342,9 @@ private static Bytes bytesKey(final String key) { return Bytes.wrap(key.getBytes()); } + 
@SuppressWarnings("resource") private String stringFrom(final byte[] from) { - return Serdes.String().deserializer().deserialize("", from); + return new StringDeserializer().deserialize("", from); } @ParameterizedTest @@ -893,6 +895,7 @@ public void shouldClearNamespaceCacheOnClose(final boolean hasIndex) { assertEquals(0, cache.size()); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowIfTryingToFetchFromClosedCachingStore(final boolean hasIndex) { @@ -901,6 +904,7 @@ public void shouldThrowIfTryingToFetchFromClosedCachingStore(final boolean hasIn assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(10))); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowIfTryingToFetchRangeFromClosedCachingStore(final boolean hasIndex) { @@ -951,7 +955,7 @@ public void shouldSkipNonExistBaseKeyInCache(final boolean hasIndex) { windowedPair("a", "0001", 1), windowedPair("aa", "0002", 0) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -960,7 +964,7 @@ public void shouldSkipNonExistBaseKeyInCache(final boolean hasIndex) { windowedPair("aa", "0002", 0), windowedPair("a", "0001", 1) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -982,7 +986,7 @@ public void shouldFetchAndIterateOverExactKeys(final boolean hasIndex) { KeyValue.pair(SEGMENT_INTERVAL, bytesValue("0005")) ); final List> actual = - toList(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -1002,7 +1006,7 @@ public void shouldBackwardFetchAndIterateOverExactKeys(final boolean hasIndex) { KeyValue.pair(0L, bytesValue("0001")) ); final List> actual = - toList(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); verifyKeyValueList(expected, actual); } @@ -1022,14 +1026,14 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0002", 0), windowedPair("aa", "0004", 1)), - toList(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); if (hasIndex) { @@ -1041,7 +1045,7 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("aa", "0004", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), 
ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -1053,7 +1057,7 @@ public void shouldFetchAndIterateOverKeyRange(final boolean hasIndex) { windowedPair("aa", "0004", 1), windowedPair("a", "0005", SEGMENT_INTERVAL) ), - toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -1075,14 +1079,14 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); verifyKeyValueList( asList( windowedPair("aa", "0004", 1), windowedPair("aa", "0002", 0)), - toList(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); if (!hasIndex) { @@ -1095,7 +1099,7 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("aa", "0002", 0), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } else { @@ -1108,7 +1112,7 @@ public void shouldFetchAndIterateOverKeyBackwardRange(final boolean hasIndex) { windowedPair("a", "0003", 1), windowedPair("a", "0001", 0) ), - toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), + toListAndCloseIterator(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))) ); } @@ -1168,6 +1172,7 @@ public void shouldNotThrowNullPointerExceptionOnPutNullValue(final boolean hasIn cachingStore.put(bytesKey("a"), null, 0L); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldThrowNullPointerExceptionOnFetchNullKey(final boolean hasIndex) { @@ -1175,12 +1180,13 @@ public void shouldThrowNullPointerExceptionOnFetchNullKey(final boolean hasIndex assertThrows(NullPointerException.class, () -> cachingStore.fetch(null, ofEpochMilli(1L), ofEpochMilli(2L))); } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey(final boolean hasIndex) { setUp(hasIndex); - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { @@ -1197,12 +1203,13 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey(final boolean } } + @SuppressWarnings("resource") @ParameterizedTest @ValueSource(booleans = {true, false}) public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey(final boolean hasIndex) { 
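Note on the toList → toListAndCloseIterator replacements running through these window-store tests: the renamed helper signals that the KeyValueIterator returned by fetch()/backwardFetch() is Closeable and backed by native RocksDB resources, so draining it without closing it leaks. A hedged sketch of what such a helper can look like (the real StreamsTestUtils method may differ in signature and generics):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.state.KeyValueIterator;

    final class IteratorDrainSketch {
        // Drain the iterator into a list, then close it so the underlying RocksDB iterator is released.
        static <K, V> List<KeyValue<K, V>> toListAndCloseIterator(final KeyValueIterator<K, V> iterator) {
            try (KeyValueIterator<K, V> it = iterator) {
                final List<KeyValue<K, V>> result = new ArrayList<>();
                while (it.hasNext()) {
                    result.add(it.next());
                }
                return result;
            }
        }
    }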
setUp(hasIndex); - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + final Bytes keyFrom = Bytes.wrap(new IntegerSerializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(new IntegerSerializer().serialize("", 1)); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); final KeyValueIterator, byte[]> iterator = @@ -1263,7 +1270,7 @@ private void setUpCloseTests() { cache = mock(ThreadCache.class); context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders())); - cachingStore.init((StateStoreContext) context, cachingStore); + cachingStore.init(context, cachingStore); } private static KeyValue, byte[]> windowedPair(final String key, final String value, final long timestamp) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentTest.java index 82a76ba13a605..633d14c1e63b6 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentTest.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.query.Position; @@ -44,8 +43,6 @@ import static org.hamcrest.Matchers.not; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.STRICT_STUBS) @@ -68,11 +65,7 @@ public void shouldDeleteStateDirectoryOnDestroy() throws Exception { final String directoryPath = TestUtils.tempDirectory().getAbsolutePath(); final File directory = new File(directoryPath); - final ProcessorContext mockContext = mock(ProcessorContext.class); - when(mockContext.appConfigs()).thenReturn(mkMap(mkEntry(METRICS_RECORDING_LEVEL_CONFIG, "INFO"))); - when(mockContext.stateDir()).thenReturn(directory); - - segment.openDB(mockContext.appConfigs(), mockContext.stateDir()); + segment.openDB(mkMap(mkEntry(METRICS_RECORDING_LEVEL_CONFIG, "INFO")), directory); assertTrue(new File(directoryPath, "window").exists()); assertTrue(new File(directoryPath + File.separator + "window", "segment").exists()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowKeySchemaTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowKeySchemaTest.java index 8f73e94274f37..b170c750d7afa 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowKeySchemaTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/WindowKeySchemaTest.java @@ -126,7 +126,7 @@ interface TriFunction { private final Window window = new TimeWindow(startTime, endTime); private final Windowed windowedKey = new Windowed<>(key, window); private KeySchema keySchema; - private final Serde> keySerde = new 
WindowedSerdes.TimeWindowedSerde<>(serde, Long.MAX_VALUE); + private final Serde> keySerde = new WindowedSerdes.TimeWindowedSerde<>(serde, endTime - startTime); private final StateSerdes stateSerdes = new StateSerdes<>("dummy", serde, Serdes.ByteArray()); public SchemaType schemaType; @@ -401,7 +401,7 @@ public void shouldSerializeDeserialize(final SchemaType type) { final byte[] bytes = keySerde.serializer().serialize(topic, windowedKey); final Windowed result = keySerde.deserializer().deserialize(topic, bytes); // TODO: fix this part as last bits of KAFKA-4468 - assertEquals(new Windowed<>(key, new TimeWindow(startTime, Long.MAX_VALUE)), result); + assertEquals(new Windowed<>(key, new TimeWindow(startTime, endTime)), result); } @EnumSource(SchemaType.class) diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java index 8d189144504d9..819aaa200ead8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java @@ -32,6 +32,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.Cache; import java.io.File; import java.io.IOException; @@ -57,8 +58,8 @@ public class RocksDBBlockCacheMetricsTest { public static Stream stores() { final File stateDir = TestUtils.tempDirectory("state"); return Stream.of( - Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)), - Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)) + Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext<>(new Properties(), TASK_ID, stateDir)), + Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext<>(new Properties(), TASK_ID, stateDir)) ); } @@ -79,8 +80,11 @@ static void withStore(final RocksDBStore store, final StateStoreContext context, @ParameterizedTest @MethodSource("stores") public void shouldRecordCorrectBlockCacheCapacity(final RocksDBStore store, final StateStoreContext ctx) { - withStore(store, ctx, () -> - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.CAPACITY_OF_BLOCK_CACHE, BigInteger.valueOf(50 * 1024 * 1024L))); + withStore( + store, + ctx, + () -> assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.CAPACITY_OF_BLOCK_CACHE, BigInteger.valueOf(50 * 1024 * 1024L)) + ); } @ParameterizedTest @@ -88,8 +92,10 @@ public void shouldRecordCorrectBlockCacheCapacity(final RocksDBStore store, fina public void shouldRecordCorrectBlockCacheUsage(final RocksDBStore store, final StateStoreContext ctx) { withStore(store, ctx, () -> { final BlockBasedTableConfigWithAccessibleCache tableFormatConfig = (BlockBasedTableConfigWithAccessibleCache) store.getOptions().tableFormatConfig(); - final long usage = tableFormatConfig.blockCache().getUsage(); - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + try (final Cache blockCache = tableFormatConfig.blockCache()) { + final long usage = blockCache.getUsage(); + assertMetric(ctx, STATE_STORE_LEVEL_GROUP, 
RocksDBMetrics.USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + } }); } @@ -98,11 +104,14 @@ public void shouldRecordCorrectBlockCacheUsage(final RocksDBStore store, final S public void shouldRecordCorrectBlockCachePinnedUsage(final RocksDBStore store, final StateStoreContext ctx) { withStore(store, ctx, () -> { final BlockBasedTableConfigWithAccessibleCache tableFormatConfig = (BlockBasedTableConfigWithAccessibleCache) store.getOptions().tableFormatConfig(); - final long usage = tableFormatConfig.blockCache().getPinnedUsage(); - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.PINNED_USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + try (final Cache blockCache = tableFormatConfig.blockCache()) { + final long usage = blockCache.getPinnedUsage(); + assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.PINNED_USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + } }); } + @SuppressWarnings("resource") public void assertMetric(final StateStoreContext context, final String group, final String metricName, final T expected) { final StreamsMetricsImpl metrics = ProcessorContextUtils.metricsImpl(context); final MetricName name = metrics.metricsRegistry().metricName( diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorderTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorderTest.java index 7136ee66b2e2a..a0c068b59ee5d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecorderTest.java @@ -173,6 +173,22 @@ public void shouldThrowIfMetricRecorderIsReInitialisedWithDifferentTask() { ); } + @Test + public void shouldThrowIfMetricRecorderIsInitialisedWithNullMetrics() { + assertThrows( + NullPointerException.class, + () -> recorder.init(null, TASK_ID1) + ); + } + + @Test + public void shouldThrowIfMetricRecorderIsInitialisedWithNullTaskId() { + assertThrows( + NullPointerException.class, + () -> recorder.init(streamsMetrics, null) + ); + } + @Test public void shouldThrowIfMetricRecorderIsReInitialisedWithDifferentStreamsMetrics() { assertThrows( @@ -458,10 +474,8 @@ public void shouldRecordStatisticsBasedMetrics() { final double expectedCompactionTimeMaxSensor = 24.0; when(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).thenReturn(5L); - when(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_CLOSES)).thenReturn(3L); when(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_OPENS)).thenReturn(7L); - when(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_CLOSES)).thenReturn(4L); - final double expectedNumberOfOpenFilesSensor = (5 + 7) - (3 + 4); + final double expectedNumberOfOpenFilesSensor = -1; when(statisticsToAdd1.getAndResetTickerCount(TickerType.NO_FILE_ERRORS)).thenReturn(34L); when(statisticsToAdd2.getAndResetTickerCount(TickerType.NO_FILE_ERRORS)).thenReturn(11L); @@ -469,8 +483,8 @@ public void shouldRecordStatisticsBasedMetrics() { recorder.record(now); - verify(statisticsToAdd1, times(17)).getAndResetTickerCount(isA(TickerType.class)); - verify(statisticsToAdd2, times(17)).getAndResetTickerCount(isA(TickerType.class)); + verify(statisticsToAdd1, times(15)).getAndResetTickerCount(isA(TickerType.class)); + verify(statisticsToAdd2, times(15)).getAndResetTickerCount(isA(TickerType.class)); verify(statisticsToAdd1, times(2)).getHistogramData(isA(HistogramType.class)); 
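The RocksDBBlockCacheMetricsTest hunks above wrap the value returned by tableFormatConfig.blockCache() in try-with-resources: the returned org.rocksdb.Cache is an AutoCloseable native handle, so the tests now read the statistic and release the handle in one scope instead of leaving the reference unreleased. A minimal sketch of that pattern; the import path of the Kafka-internal table config and the helper name are assumptions, only the close-after-read idiom is taken from the patch:

import org.apache.kafka.streams.state.internals.BlockBasedTableConfigWithAccessibleCache; // import path assumed
import org.rocksdb.Cache;

final class BlockCacheStatsSketch {
    // Read a block-cache statistic through a short-lived handle and release the
    // native reference deterministically once the value has been captured.
    static long blockCacheUsage(final BlockBasedTableConfigWithAccessibleCache tableFormatConfig) {
        try (final Cache blockCache = tableFormatConfig.blockCache()) {
            return blockCache.getUsage();
        }
    }
}

The same shape applies to getPinnedUsage() in the pinned-usage test above.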
verify(statisticsToAdd2, times(2)).getHistogramData(isA(HistogramType.class)); verify(bytesWrittenToDatabaseSensor).record(expectedBytesWrittenToDatabaseSensor, now); diff --git a/streams/src/test/java/org/apache/kafka/streams/utils/TestUtils.java b/streams/src/test/java/org/apache/kafka/streams/utils/TestUtils.java index 1a8f8d896ddd1..96d19dbb47e2d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/utils/TestUtils.java +++ b/streams/src/test/java/org/apache/kafka/streams/utils/TestUtils.java @@ -18,19 +18,39 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.processor.api.FixedKeyProcessor; +import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.ProcessorWrapper; +import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier; +import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier; +import org.apache.kafka.streams.processor.internals.StoreFactory; +import org.apache.kafka.streams.state.StoreBuilder; +import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper.WrapperRecorder; import org.junit.jupiter.api.TestInfo; +import org.mockito.Mockito; import java.lang.reflect.Method; import java.time.Duration; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; +import static org.apache.kafka.streams.StreamsConfig.APPLICATION_ID_CONFIG; +import static org.apache.kafka.streams.StreamsConfig.BOOTSTRAP_SERVERS_CONFIG; import static org.apache.kafka.test.TestUtils.retryOnExceptionWithTimeout; import static org.hamcrest.MatcherAssert.assertThat; public class TestUtils { + + public static final String PROCESSOR_WRAPPER_COUNTER_CONFIG = "wrapped.counter"; + /** * Waits for the given {@link KafkaStreams} instances to all be in a specific {@link KafkaStreams.State}. * This method uses polling, which can be more error prone and slightly slower. @@ -85,4 +105,169 @@ private static String sanitize(final String str) { .replace(' ', '_') .replace('=', '_'); } + + /** + * Quick method of generating a config map prepopulated with the required + * StreamsConfig properties + */ + public static Map dummyStreamsConfigMap() { + final Map baseConfigs = new HashMap<>(); + baseConfigs.put(APPLICATION_ID_CONFIG, "dummy-app-id"); + baseConfigs.put(BOOTSTRAP_SERVERS_CONFIG, "local"); + return baseConfigs; + } + + public static StoreFactory mockStoreFactory(final String name) { + final StoreFactory storeFactory = Mockito.mock(StoreFactory.class); + Mockito.when(storeFactory.storeName()).thenReturn(name); + return storeFactory; + } + + /** + * Simple pass-through processor wrapper that counts the number of processors + * it wraps. + * To retrieve the current count, pass an instance of AtomicInteger into the configs + * alongside the wrapper itself. 
Use the config key defined with {@link #PROCESSOR_WRAPPER_COUNTER_CONFIG} + */ + public static class RecordingProcessorWrapper implements ProcessorWrapper { + + private WrapperRecorder recorder; + + @SuppressWarnings("unchecked") + @Override + public void configure(final Map configs) { + if (configs.containsKey(PROCESSOR_WRAPPER_COUNTER_CONFIG)) { + recorder = (WrapperRecorder) configs.get(PROCESSOR_WRAPPER_COUNTER_CONFIG); + } else { + recorder = new WrapperRecorder(); + } + } + + public static class WrapperRecorder { + private final Set uniqueStores = new HashSet<>(); + private final Set processorStoresCounted = new HashSet<>(); + private final Set wrappedProcessorNames = Collections.synchronizedSet(new HashSet<>()); + + public void wrapProcessorSupplier(final String name) { + wrappedProcessorNames.add(name); + } + + public void wrapStateStore(final String processorName, final String storeName) { + if (!uniqueStores.contains(storeName)) { + uniqueStores.add(storeName); + } + + final String processorStoreKey = processorName + storeName; + if (!processorStoresCounted.contains(processorStoreKey)) { + processorStoresCounted.add(processorStoreKey); + } + } + + public int numWrappedProcessors() { + return wrappedProcessorNames.size(); + } + + // Number of unique state stores in the topology connected to their processors via the + // ProcessorSupplier#stores method. State stores connected to more than one processor are + // counted only once + public int numUniqueStateStores() { + return uniqueStores.size(); + } + + // Number of stores connected to a processor via the ProcessorSupplier#stores method (ie the size + // of the set returned by #stores), summed across all processors in the topology. + // Equal to the number of unique - + // pairings. Will be greater than or equal to the value of #numUniqueStateStores, as this method + // will double count any stores connected to more than one processor + public int numConnectedStateStores() { + return processorStoresCounted.size(); + } + + public Set wrappedProcessorNames() { + return wrappedProcessorNames; + } + + } + + @Override + public WrappedProcessorSupplier wrapProcessorSupplier(final String processorName, + final ProcessorSupplier processorSupplier) { + + return new CountingDelegatingProcessorSupplier<>(recorder, processorName, processorSupplier); + } + + @Override + public WrappedFixedKeyProcessorSupplier wrapFixedKeyProcessorSupplier(final String processorName, + final FixedKeyProcessorSupplier processorSupplier) { + return new CountingDelegatingFixedKeyProcessorSupplier<>(recorder, processorName, processorSupplier); + } + } + + private static class CountingDelegatingProcessorSupplier + implements WrappedProcessorSupplier { + + private final WrapperRecorder counter; + private final String processorName; + private final ProcessorSupplier delegate; + + public CountingDelegatingProcessorSupplier(final WrapperRecorder counter, + final String processorName, + final ProcessorSupplier processorSupplier) { + this.counter = counter; + this.processorName = processorName; + this.delegate = processorSupplier; + + counter.wrapProcessorSupplier(processorName); + } + + @Override + public Set> stores() { + final Set> stores = delegate.stores(); + if (stores != null) { + for (final StoreBuilder store : stores) { + counter.wrapStateStore(processorName, store.name()); + } + } + return stores; + } + + @Override + public Processor get() { + return delegate.get(); + } + } + + private static class CountingDelegatingFixedKeyProcessorSupplier + implements 
WrappedFixedKeyProcessorSupplier { + + private final WrapperRecorder counter; + private final String processorName; + private final FixedKeyProcessorSupplier delegate; + + public CountingDelegatingFixedKeyProcessorSupplier(final WrapperRecorder counter, + final String processorName, + final FixedKeyProcessorSupplier processorSupplier) { + this.counter = counter; + this.processorName = processorName; + this.delegate = processorSupplier; + + counter.wrapProcessorSupplier(processorName); + } + + @Override + public Set> stores() { + final Set> stores = delegate.stores(); + if (stores != null) { + for (final StoreBuilder store : stores) { + counter.wrapStateStore(processorName, store.name()); + } + } + return stores; + } + + @Override + public FixedKeyProcessor get() { + return delegate.get(); + } + } } diff --git a/streams/src/test/java/org/apache/kafka/test/GenericInMemoryKeyValueStore.java b/streams/src/test/java/org/apache/kafka/test/GenericInMemoryKeyValueStore.java index 97e1f095a4540..f13add1dc414e 100644 --- a/streams/src/test/java/org/apache/kafka/test/GenericInMemoryKeyValueStore.java +++ b/streams/src/test/java/org/apache/kafka/test/GenericInMemoryKeyValueStore.java @@ -59,9 +59,9 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { if (root != null) { - context.register(root, null); + stateStoreContext.register(root, null); } this.open = true; diff --git a/streams/src/test/java/org/apache/kafka/test/GenericInMemoryTimestampedKeyValueStore.java b/streams/src/test/java/org/apache/kafka/test/GenericInMemoryTimestampedKeyValueStore.java index b8acffb81d49f..e5d599032a53a 100644 --- a/streams/src/test/java/org/apache/kafka/test/GenericInMemoryTimestampedKeyValueStore.java +++ b/streams/src/test/java/org/apache/kafka/test/GenericInMemoryTimestampedKeyValueStore.java @@ -61,9 +61,9 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { if (root != null) { - context.register(root, null); + stateStoreContext.register(root, null); } this.open = true; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestBackgroundThreadExceptionHandler.java b/streams/src/test/java/org/apache/kafka/test/MockCachedKeyValueStore.java similarity index 56% rename from connect/runtime/src/test/java/org/apache/kafka/connect/util/TestBackgroundThreadExceptionHandler.java rename to streams/src/test/java/org/apache/kafka/test/MockCachedKeyValueStore.java index 8726d5c87f980..ee55757f7bf7a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TestBackgroundThreadExceptionHandler.java +++ b/streams/src/test/java/org/apache/kafka/test/MockCachedKeyValueStore.java @@ -14,23 +14,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.connect.util; +package org.apache.kafka.test; -/** - * An UncaughtExceptionHandler that can be registered with one or more threads which tracks the - * first exception so the main thread can check for uncaught exceptions. 
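For the RecordingProcessorWrapper added to the streams TestUtils above: a test shares a single WrapperRecorder with the wrapper through the configs map (under PROCESSOR_WRAPPER_COUNTER_CONFIG) and then asserts on the recorded counts once the topology has been built. A hedged sketch of that wiring; the processor-wrapper class config key is assumed from KIP-1112 and does not appear in this diff:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper;
import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper.WrapperRecorder;

import static org.apache.kafka.streams.utils.TestUtils.PROCESSOR_WRAPPER_COUNTER_CONFIG;

final class WrapperWiringSketch {
    // Builds a config map that registers the wrapper on the topology and shares a
    // recorder instance with it. PROCESSOR_WRAPPER_CLASS_CONFIG is an assumption
    // (KIP-1112); it is not part of the hunks shown here.
    static Map<String, Object> configsWith(final WrapperRecorder recorder) {
        final Map<String, Object> configs = new HashMap<>();
        configs.put(StreamsConfig.APPLICATION_ID_CONFIG, "dummy-app-id");
        configs.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "local");
        configs.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, RecordingProcessorWrapper.class);
        configs.put(PROCESSOR_WRAPPER_COUNTER_CONFIG, recorder);
        return configs;
    }
}

After the topology is built with these configs, the test can check recorder.numWrappedProcessors(), recorder.numUniqueStateStores(), and recorder.numConnectedStateStores().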
- */ -public class TestBackgroundThreadExceptionHandler implements Thread.UncaughtExceptionHandler { - private Throwable firstException = null; +import org.apache.kafka.streams.state.internals.CacheFlushListener; +import org.apache.kafka.streams.state.internals.CachedStateStore; + +public class MockCachedKeyValueStore extends MockKeyValueStore implements CachedStateStore { + + public MockCachedKeyValueStore(String name, boolean persistent) { + super(name, persistent); + } + + @Override + public boolean setFlushListener(CacheFlushListener listener, boolean sendOldValues) { + return false; + } @Override - public void uncaughtException(Thread t, Throwable e) { - if (this.firstException == null) - this.firstException = e; + public void flushCache() { + } - public void verifyNoExceptions() { - if (this.firstException != null) - throw new AssertionError(this.firstException); + @Override + public void clearCache() { + } } diff --git a/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java b/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java index 84ad3e3f80071..17eb34d803b3e 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java +++ b/streams/src/test/java/org/apache/kafka/test/MockClientSupplier.java @@ -20,7 +20,7 @@ import org.apache.kafka.clients.admin.MockAdminClient; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerConfig; @@ -45,8 +45,8 @@ public class MockClientSupplier implements KafkaClientSupplier { public MockAdminClient adminClient = new MockAdminClient(); private final List> preparedProducers = new LinkedList<>(); public final List> producers = new LinkedList<>(); - public final MockConsumer consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); - public final MockConsumer restoreConsumer = new MockConsumer<>(OffsetResetStrategy.LATEST); + public final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); + public final MockConsumer restoreConsumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); public void setApplicationIdForProducer(final String applicationId) { this.applicationId = applicationId; @@ -76,7 +76,7 @@ public Producer getProducer(final Map config) { final MockProducer producer; if (preparedProducers.isEmpty()) { - producer = new MockProducer<>(cluster, true, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER); + producer = new MockProducer<>(cluster, true, null, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER); } else { producer = preparedProducers.remove(0); } diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java b/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java deleted file mode 100644 index a3c7194680d79..0000000000000 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.test; - -import org.apache.kafka.common.header.Headers; -import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.processor.CommitCallback; -import org.apache.kafka.streams.processor.StateRestoreCallback; -import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.TaskId; -import org.apache.kafka.streams.processor.To; -import org.apache.kafka.streams.processor.api.FixedKeyRecord; -import org.apache.kafka.streams.processor.api.MockProcessorContext; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.processor.internals.ProcessorMetadata; -import org.apache.kafka.streams.processor.internals.ProcessorNode; -import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.RecordCollector; -import org.apache.kafka.streams.processor.internals.StreamTask; -import org.apache.kafka.streams.processor.internals.Task.TaskType; -import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.query.Position; -import org.apache.kafka.streams.state.StoreBuilder; -import org.apache.kafka.streams.state.internals.ThreadCache; -import org.apache.kafka.streams.state.internals.ThreadCache.DirtyEntryFlushListener; - -import java.io.File; -import java.util.Objects; -import java.util.Properties; - -public class MockInternalNewProcessorContext extends MockProcessorContext implements InternalProcessorContext { - - private ProcessorNode currentNode; - private long currentSystemTimeMs; - private final TaskType taskType = TaskType.ACTIVE; - - private long timestamp = 0; - private Headers headers = new RecordHeaders(); - private ProcessorMetadata processorMetadata; - - public MockInternalNewProcessorContext() { - processorMetadata = new ProcessorMetadata(); - } - - public MockInternalNewProcessorContext(final Properties config, final TaskId taskId, final File stateDir) { - super(config, taskId, stateDir); - processorMetadata = new ProcessorMetadata(); - } - - @Override - public void setSystemTimeMs(long timeMs) { - currentSystemTimeMs = timeMs; - } - - @Override - public long currentSystemTimeMs() { - return currentSystemTimeMs; - } - - @Override - public long currentStreamTimeMs() { - return 0; - } - - @Override - public StreamsMetricsImpl metrics() { - return (StreamsMetricsImpl) super.metrics(); - } - - @Override - public ProcessorRecordContext recordContext() { - return new ProcessorRecordContext(timestamp(), offset(), partition(), topic(), headers()); - } - - @Override - public void setRecordContext(final ProcessorRecordContext recordContext) { - setRecordMetadata( - recordContext.topic(), - recordContext.partition(), - recordContext.offset() - ); - this.headers = 
recordContext.headers(); - this.timestamp = recordContext.timestamp(); - } - - public void setTimestamp(final long timestamp) { - this.timestamp = timestamp; - } - - public void setHeaders(final Headers headers) { - this.headers = headers; - } - - @Override - public void setCurrentNode(final ProcessorNode currentNode) { - this.currentNode = currentNode; - } - - @Override - public ProcessorNode currentNode() { - return currentNode; - } - - @Override - public ThreadCache cache() { - return null; - } - - @Override - public void initialize() {} - - @Override - public void uninitialize() {} - - @Override - public void register(final StateStore store, - final StateRestoreCallback stateRestoreCallback) { - addStateStore(store); - } - - @Override - public void register(final StateStore store, - final StateRestoreCallback stateRestoreCallback, - final CommitCallback checkpoint) { - addStateStore(store); - } - - @Override - public void forward(K key, V value) { - throw new UnsupportedOperationException("Migrate to new implementation"); - } - - @Override - public void forward(K key, V value, To to) { - throw new UnsupportedOperationException("Migrate to new implementation"); - } - - @Override - public String topic() { - if (recordMetadata().isPresent()) return recordMetadata().get().topic(); - else return null; - } - - @Override - public int partition() { - if (recordMetadata().isPresent()) return recordMetadata().get().partition(); - else return 0; - } - - @Override - public long offset() { - if (recordMetadata().isPresent()) return recordMetadata().get().offset(); - else return 0; - } - - @Override - public Headers headers() { - return headers; - } - - @Override - public long timestamp() { - return timestamp; - } - - @Override - public TaskType taskType() { - return taskType; - } - - @Override - public void logChange(final String storeName, - final Bytes key, - final byte[] value, - final long timestamp, - final Position position) { - } - - @Override - public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) { - } - - @Override - public void transitionToStandby(final ThreadCache newCache) { - } - - @Override - public void registerCacheFlushListener(final String namespace, final DirtyEntryFlushListener listener) { - } - - @Override - public T getStateStore(StoreBuilder builder) { - return getStateStore(builder.name()); - } - - @Override - public String changelogFor(final String storeName) { - return "mock-changelog"; - } - - @Override - public void addProcessorMetadataKeyValue(final String key, final long value) { - processorMetadata.put(key, value); - } - - @Override - public Long processorMetadataForKey(final String key) { - return processorMetadata.get(key); - } - - @Override - public void setProcessorMetadata(final ProcessorMetadata metadata) { - Objects.requireNonNull(metadata); - processorMetadata = metadata; - } - - @Override - public ProcessorMetadata processorMetadata() { - return processorMetadata; - } - - @Override - public void forward(final FixedKeyRecord record) { - forward(new Record<>(record.key(), record.value(), record.timestamp(), record.headers())); - } - - @Override - public void forward(final FixedKeyRecord record, - final String childName) { - forward( - new Record<>(record.key(), record.value(), record.timestamp(), record.headers()), - childName - ); - } -} \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java 
b/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java index 4d4ad0e4dc088..019410642fcf8 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java +++ b/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.test; +import org.apache.kafka.common.header.Headers; +import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.processor.CommitCallback; import org.apache.kafka.streams.processor.StateRestoreCallback; @@ -23,8 +25,8 @@ import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.To; import org.apache.kafka.streams.processor.api.FixedKeyRecord; +import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.api.RecordMetadata; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.ProcessorMetadata; import org.apache.kafka.streams.processor.internals.ProcessorNode; @@ -34,6 +36,7 @@ import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.query.Position; +import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.internals.ThreadCache; import org.apache.kafka.streams.state.internals.ThreadCache.DirtyEntryFlushListener; @@ -41,17 +44,18 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Properties; -@SuppressWarnings("deprecation") -public class MockInternalProcessorContext extends org.apache.kafka.streams.processor.MockProcessorContext implements InternalProcessorContext { +public class MockInternalProcessorContext extends MockProcessorContext implements InternalProcessorContext { - private final Map restoreCallbacks = new LinkedHashMap<>(); - private ProcessorNode currentNode; + private ProcessorNode currentNode; private RecordCollector recordCollector; + private final Map restoreCallbacks = new LinkedHashMap<>(); private long currentSystemTimeMs; private final TaskType taskType = TaskType.ACTIVE; + + private long timestamp = 0; + private Headers headers = new RecordHeaders(); private ProcessorMetadata processorMetadata; public MockInternalProcessorContext() { @@ -74,18 +78,13 @@ public long currentSystemTimeMs() { } @Override - public StreamsMetricsImpl metrics() { - return (StreamsMetricsImpl) super.metrics(); + public long currentStreamTimeMs() { + return 0; } @Override - public void forward(final Record record) { - forward(record.key(), record.value(), To.all().withTimestamp(record.timestamp())); - } - - @Override - public void forward(final Record record, final String childName) { - forward(record.key(), record.value(), To.child(childName).withTimestamp(record.timestamp())); + public StreamsMetricsImpl metrics() { + return (StreamsMetricsImpl) super.metrics(); } @Override @@ -93,29 +92,32 @@ public ProcessorRecordContext recordContext() { return new ProcessorRecordContext(timestamp(), offset(), partition(), topic(), headers()); } - @Override - public Optional recordMetadata() { - return Optional.of(recordContext()); - } - @Override public void setRecordContext(final ProcessorRecordContext recordContext) { setRecordMetadata( 
recordContext.topic(), recordContext.partition(), - recordContext.offset(), - recordContext.headers(), - recordContext.timestamp() + recordContext.offset() ); + this.headers = recordContext.headers(); + this.timestamp = recordContext.timestamp(); + } + + public void setTimestamp(final long timestamp) { + this.timestamp = timestamp; + } + + public void setHeaders(final Headers headers) { + this.headers = headers; } @Override - public void setCurrentNode(final ProcessorNode currentNode) { + public void setCurrentNode(final ProcessorNode currentNode) { this.currentNode = currentNode; } @Override - public ProcessorNode currentNode() { + public ProcessorNode currentNode() { return currentNode; } @@ -143,7 +145,7 @@ public void setRecordCollector(final RecordCollector recordCollector) { public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback) { restoreCallbacks.put(store.name(), stateRestoreCallback); - super.register(store, stateRestoreCallback); + addStateStore(store); } @Override @@ -151,13 +153,51 @@ public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback, final CommitCallback checkpoint) { restoreCallbacks.put(store.name(), stateRestoreCallback); - super.register(store, stateRestoreCallback); + addStateStore(store); } public StateRestoreCallback stateRestoreCallback(final String storeName) { return restoreCallbacks.get(storeName); } + @Override + public void forward(K key, V value) { + throw new UnsupportedOperationException("Migrate to new implementation"); + } + + @Override + public void forward(K key, V value, To to) { + throw new UnsupportedOperationException("Migrate to new implementation"); + } + + @Override + public String topic() { + if (recordMetadata().isPresent()) return recordMetadata().get().topic(); + else return null; + } + + @Override + public int partition() { + if (recordMetadata().isPresent()) return recordMetadata().get().partition(); + else return 0; + } + + @Override + public long offset() { + if (recordMetadata().isPresent()) return recordMetadata().get().offset(); + else return 0; + } + + @Override + public Headers headers() { + return headers; + } + + @Override + public long timestamp() { + return timestamp; + } + @Override public TaskType taskType() { return taskType; @@ -183,6 +223,11 @@ public void transitionToStandby(final ThreadCache newCache) { public void registerCacheFlushListener(final String namespace, final DirtyEntryFlushListener listener) { } + @Override + public T getStateStore(StoreBuilder builder) { + return getStateStore(builder.name()); + } + @Override public String changelogFor(final String storeName) { return "mock-changelog"; @@ -210,12 +255,13 @@ public ProcessorMetadata processorMetadata() { } @Override - public void forward(final FixedKeyRecord record) { + public void forward(final FixedKeyRecord record) { forward(new Record<>(record.key(), record.value(), record.timestamp(), record.headers())); } @Override - public void forward(final FixedKeyRecord record, final String childName) { + public void forward(final FixedKeyRecord record, + final String childName) { forward( new Record<>(record.key(), record.value(), record.timestamp(), record.headers()), childName diff --git a/streams/src/test/java/org/apache/kafka/test/MockKeyValueStore.java b/streams/src/test/java/org/apache/kafka/test/MockKeyValueStore.java index 1a7db989fed8d..a2a6ac43dfd76 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockKeyValueStore.java +++ 
b/streams/src/test/java/org/apache/kafka/test/MockKeyValueStore.java @@ -57,9 +57,9 @@ public String name() { } @Override - public void init(final StateStoreContext context, + public void init(final StateStoreContext stateStoreContext, final StateStore root) { - context.register(root, stateRestoreCallback); + stateStoreContext.register(root, stateRestoreCallback); initialized = true; closed = false; } diff --git a/streams/src/test/java/org/apache/kafka/test/MockKeyValueStoreBuilder.java b/streams/src/test/java/org/apache/kafka/test/MockKeyValueStoreBuilder.java index 2faf89b16223d..15c896ad07629 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockKeyValueStoreBuilder.java +++ b/streams/src/test/java/org/apache/kafka/test/MockKeyValueStoreBuilder.java @@ -39,6 +39,6 @@ public MockKeyValueStore build() { } public StoreFactory asFactory() { - return new StoreBuilderWrapper(this); + return StoreBuilderWrapper.wrapStoreBuilder(this); } } diff --git a/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java b/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java index 345a5824a20e3..cb59b642db047 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java +++ b/streams/src/test/java/org/apache/kafka/test/MockRestoreConsumer.java @@ -19,7 +19,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.MockConsumer; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.Serializer; @@ -42,7 +42,7 @@ public final class MockRestoreConsumer extends MockConsumer> recordBuffer = new ArrayList<>(); public MockRestoreConsumer(final Serializer keySerializer, final Serializer valueSerializer) { - super(OffsetResetStrategy.EARLIEST); + super(AutoOffsetResetStrategy.EARLIEST.name()); reset(); this.keySerializer = keySerializer; diff --git a/streams/src/test/java/org/apache/kafka/test/NoOpReadOnlyStore.java b/streams/src/test/java/org/apache/kafka/test/NoOpReadOnlyStore.java index 4bba4b057516b..aa8f4a5b8bcd6 100644 --- a/streams/src/test/java/org/apache/kafka/test/NoOpReadOnlyStore.java +++ b/streams/src/test/java/org/apache/kafka/test/NoOpReadOnlyStore.java @@ -77,15 +77,15 @@ public String name() { } @Override - public void init(final StateStoreContext context, final StateStore root) { + public void init(final StateStoreContext stateStoreContext, final StateStore root) { if (rocksdbStore) { // cf. 
RocksDBStore - new File(context.stateDir() + File.separator + "rocksdb" + File.separator + name).mkdirs(); + new File(stateStoreContext.stateDir() + File.separator + "rocksdb" + File.separator + name).mkdirs(); } else { - new File(context.stateDir() + File.separator + name).mkdir(); + new File(stateStoreContext.stateDir() + File.separator + name).mkdir(); } this.initialized = true; - context.register(root, (k, v) -> { }); + stateStoreContext.register(root, (k, v) -> { }); } @Override diff --git a/streams/src/test/java/org/apache/kafka/test/ReadOnlySessionStoreStub.java b/streams/src/test/java/org/apache/kafka/test/ReadOnlySessionStoreStub.java index e9c03e161e91b..928566fa7790b 100644 --- a/streams/src/test/java/org/apache/kafka/test/ReadOnlySessionStoreStub.java +++ b/streams/src/test/java/org/apache/kafka/test/ReadOnlySessionStoreStub.java @@ -184,7 +184,7 @@ public String name() { } @Override - public void init(StateStoreContext context, StateStore root) {} + public void init(StateStoreContext stateStoreContext, StateStore root) {} @Override public void flush() { diff --git a/streams/src/test/java/org/apache/kafka/test/StreamsTestUtils.java b/streams/src/test/java/org/apache/kafka/test/StreamsTestUtils.java index 6fb19fd5c724f..6833f023cbd82 100644 --- a/streams/src/test/java/org/apache/kafka/test/StreamsTestUtils.java +++ b/streams/src/test/java/org/apache/kafka/test/StreamsTestUtils.java @@ -35,8 +35,6 @@ import org.mockito.quality.Strictness; -import java.io.Closeable; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -116,20 +114,16 @@ public static Properties getStreamsConfig() { return getStreamsConfig(UUID.randomUUID().toString()); } - public static List> toList(final Iterator> iterator) { - final List> results = new ArrayList<>(); + public static List> toListAndCloseIterator(final KeyValueIterator iterator) { + try (iterator) { + final List> results = new ArrayList<>(); - while (iterator.hasNext()) { - results.add(iterator.next()); - } + while (iterator.hasNext()) { + results.add(iterator.next()); + } - if (iterator instanceof Closeable) { - try { - ((Closeable) iterator).close(); - } catch (IOException e) { /* do nothing */ } + return results; } - - return results; } public static Set> toSet(final Iterator> iterator) { @@ -141,7 +135,7 @@ public static Set> toSet(final Iterator> it return results; } - public static Set valuesToSet(final Iterator> iterator) { + public static Set valuesToSet(final KeyValueIterator iterator) { final Set results = new HashSet<>(); while (iterator.hasNext()) { @@ -150,6 +144,12 @@ public static Set valuesToSet(final Iterator> iterator) return results; } + public static Set valuesToSetAndCloseIterator(final KeyValueIterator iterator) { + try (iterator) { + return valuesToSet(iterator); + } + } + public static void verifyKeyValueList(final List> expected, final List> actual) { assertThat(actual.size(), equalTo(expected.size())); for (int i = 0; i < actual.size(); i++) { @@ -254,8 +254,8 @@ public static boolean containsMetric(final Metrics metrics, } /** - * Used to keep tests simple, and ignore calls from {@link org.apache.kafka.streams.internals.ApiUtils#checkSupplier(Supplier)} )}. - * @return true if the stack context is within a {@link org.apache.kafka.streams.internals.ApiUtils#checkSupplier(Supplier)} )} call + * Used to keep tests simple, and ignore calls from {@link org.apache.kafka.streams.internals.ApiUtils#checkSupplier(Supplier)}. 
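The generic type parameters in the StreamsTestUtils hunk above were stripped in this rendering of the patch; reconstructed, the new drain-and-close helper reads roughly as below (the type parameters are a reconstruction, while the try-with-resources body matches the hunk):

import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.state.KeyValueIterator;

final class IteratorDrainSketch {
    // Drain a KeyValueIterator into a list and close it even if iteration throws.
    // KeyValueIterator extends Closeable, so try-with-resources handles the close
    // that the old toList() only attempted on a best-effort basis.
    static <K, V> List<KeyValue<K, V>> toListAndCloseIterator(final KeyValueIterator<K, V> iterator) {
        try (iterator) {
            final List<KeyValue<K, V>> results = new ArrayList<>();
            while (iterator.hasNext()) {
                results.add(iterator.next());
            }
            return results;
        }
    }
}

This is the helper the caching window store tests earlier in the patch migrate to, so fetch() and backwardFetch() iterators are no longer left open after the assertions.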
+ * @return true if the stack context is within a {@link org.apache.kafka.streams.internals.ApiUtils#checkSupplier(Supplier)} call */ public static boolean isCheckSupplierCall() { return Arrays.stream(Thread.currentThread().getStackTrace()) diff --git a/streams/src/test/resources/log4j.properties b/streams/src/test/resources/log4j.properties deleted file mode 100644 index b7e1fb2d60ea4..0000000000000 --- a/streams/src/test/resources/log4j.properties +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.kafka=ERROR -log4j.logger.state.change.logger=ERROR -log4j.logger.org.apache.kafka=ERROR -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.apache.kafka.clients=ERROR - -# These are the only logs we will likely ever find anything useful in to debug Streams test failures -log4j.logger.org.apache.kafka.clients.consumer=INFO -log4j.logger.org.apache.kafka.clients.producer=INFO -log4j.logger.org.apache.kafka.streams=INFO - -# printing out the configs takes up a huge amount of the allotted characters, -# and provides little value as we can always figure out the test configs without the logs -log4j.logger.org.apache.kafka.clients.producer.ProducerConfig=ERROR -log4j.logger.org.apache.kafka.clients.consumer.ConsumerConfig=ERROR -log4j.logger.org.apache.kafka.clients.admin.AdminClientConfig=ERROR -log4j.logger.org.apache.kafka.streams.StreamsConfig=ERROR diff --git a/streams/src/test/resources/log4j2.yaml b/streams/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..0942036a33c80 --- /dev/null +++ b/streams/src/test/resources/log4j2.yaml @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: kafka + level: ERROR + + - name: state.change.logger + level: ERROR + + - name: org.apache.kafka + level: ERROR + + - name: org.apache.kafka.clients + level: ERROR + + - name: org.apache.kafka.clients.consumer + level: INFO + + - name: org.apache.kafka.clients.producer + level: INFO + + - name: org.apache.kafka.streams + level: INFO + + - name: org.apache.kafka.clients.producer.ProducerConfig + level: ERROR + + - name: org.apache.kafka.clients.consumer.ConsumerConfig + level: ERROR + + - name: org.apache.kafka.clients.admin.AdminClientConfig + level: ERROR + + - name: org.apache.kafka.streams.StreamsConfig + level: ERROR diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala index 9a8034bac5af3..89f461a8fea89 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala @@ -18,7 +18,7 @@ package org.apache.kafka.streams.scala.kstream import org.apache.kafka.common.serialization.Serde import org.apache.kafka.streams.kstream.{Consumed => ConsumedJ} -import org.apache.kafka.streams.Topology +import org.apache.kafka.streams.{AutoOffsetReset, Topology} import org.apache.kafka.streams.processor.TimestampExtractor object Consumed { @@ -36,12 +36,32 @@ object Consumed { * @param valueSerde the value serde to use. * @return a new instance of [[Consumed]] */ + @deprecated("Use `with` method that accepts `AutoOffsetReset` instead", "4.0.0") def `with`[K, V]( timestampExtractor: TimestampExtractor, resetPolicy: Topology.AutoOffsetReset )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = ConsumedJ.`with`(keySerde, valueSerde, timestampExtractor, resetPolicy) + /** + * Create an instance of [[Consumed]] with the supplied arguments. `null` values are acceptable. + * + * @tparam K key type + * @tparam V value type + * @param timestampExtractor the timestamp extractor to used. If `null` the default timestamp extractor from + * config will be used + * @param resetPolicy the offset reset policy to be used. If `null` the default reset policy from config + * will be used + * @param keySerde the key serde to use. + * @param valueSerde the value serde to use. + * @return a new instance of [[Consumed]] + */ + def `with`[K, V]( + timestampExtractor: TimestampExtractor, + resetPolicy: AutoOffsetReset + )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = + ConsumedJ.`with`(keySerde, valueSerde, timestampExtractor, resetPolicy) + /** * Create an instance of [[Consumed]] with key and value Serdes. * @@ -74,8 +94,22 @@ object Consumed { * @param resetPolicy the offset reset policy to be used. 
If `null` the default reset policy from config will be used * @return a new instance of [[Consumed]] */ + @deprecated("Use `with` method that accepts `AutoOffsetReset` instead", "4.0.0") def `with`[K, V]( resetPolicy: Topology.AutoOffsetReset )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = ConsumedJ.`with`(resetPolicy).withKeySerde(keySerde).withValueSerde(valueSerde) + + /** + * Create an instance of [[Consumed]] with a `org.apache.kafka.streams.AutoOffsetReset`. + * + * @tparam K key type + * @tparam V value type + * @param resetPolicy the offset reset policy to be used. If `null` the default reset policy from config will be used + * @return a new instance of [[Consumed]] + */ + def `with`[K, V]( + resetPolicy: AutoOffsetReset + )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = + ConsumedJ.`with`(resetPolicy).withKeySerde(keySerde).withValueSerde(valueSerde) } diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala index 5e6cc4f3f2205..76918a6f742e2 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala @@ -17,14 +17,7 @@ package org.apache.kafka.streams.scala package kstream -import org.apache.kafka.streams.kstream.{ - GlobalKTable, - JoinWindows, - KStream => KStreamJ, - Printed, - ValueTransformerSupplier, - ValueTransformerWithKeySupplier -} +import org.apache.kafka.streams.kstream.{GlobalKTable, JoinWindows, KStream => KStreamJ, Printed} import org.apache.kafka.streams.processor.TopicNameExtractor import org.apache.kafka.streams.processor.api.{FixedKeyProcessorSupplier, ProcessorSupplier} import org.apache.kafka.streams.scala.FunctionsCompatConversions.{ @@ -35,9 +28,7 @@ import org.apache.kafka.streams.scala.FunctionsCompatConversions.{ MapperFromFunction, PredicateFromFunction, ValueMapperFromFunction, - ValueMapperWithKeyFromFunction, - ValueTransformerSupplierAsJava, - ValueTransformerSupplierWithKeyAsJava + ValueMapperWithKeyFromFunction } import scala.jdk.CollectionConverters._ @@ -492,230 +483,6 @@ class KStream[K, V](val inner: KStreamJ[K, V]) { def toTable(named: Named, materialized: Materialized[K, V, ByteArrayKeyValueStore]): KTable[K, V] = new KTable(inner.toTable(named, materialized)) - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, Iterable[VR]], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, Iterable[VR]], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, named, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, Iterable[VR]], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, Iterable[VR]], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, named, stateStoreNames: _*)) - - /** - * Transform the value of each input record into a new value (with possible new type) of the output record. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, String*) instead.") - def transformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, VR], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.transformValues[VR](valueTransformerSupplier, stateStoreNames: _*)) - - /** - * Transform the value of each input record into a new value (with possible new type) of the output record. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def transformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, VR], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.transformValues[VR](valueTransformerSupplier, named, stateStoreNames: _*)) - - /** - * Transform the value of each input record into a new value (with possible new type) of the output record. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, String*) instead.") - def transformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, VR], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.transformValues[VR](valueTransformerSupplier, stateStoreNames: _*)) - - /** - * Transform the value of each input record into a new value (with possible new type) of the output record. 
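The deprecated transformValues/flatTransformValues wrappers removed in these hunks all point, through their deprecation messages, at processValues(FixedKeyProcessorSupplier, ...). A minimal sketch of that replacement path in the Scala DSL, mirroring the FixedKeyProcessor pattern used by the rewritten KStreamTest later in this diff; the topic names and the upper-casing logic are illustrative, not part of the patch:

import org.apache.kafka.streams.processor.api
import org.apache.kafka.streams.processor.api.FixedKeyRecord
import org.apache.kafka.streams.scala.ImplicitConversions._
import org.apache.kafka.streams.scala.serialization.Serdes._
import org.apache.kafka.streams.scala.StreamsBuilder

val builder = new StreamsBuilder()

// A FixedKeyProcessor may replace the value but never the key, which is the
// guarantee transformValues()/flatTransformValues() used to provide.
val upperCase: api.FixedKeyProcessorSupplier[String, String, String] =
  () =>
    new api.FixedKeyProcessor[String, String, String] {
      private var context: api.FixedKeyProcessorContext[String, String] = _

      override def init(context: api.FixedKeyProcessorContext[String, String]): Unit =
        this.context = context

      override def process(record: FixedKeyRecord[String, String]): Unit =
        context.forward(record.withValue(record.value().toUpperCase))
    }

builder
  .stream[String, String]("source")   // illustrative topic name
  .processValues(upperCase)
  .to("sink")                         // illustrative topic name

Forwarding zero or several times from process() reproduces the one-to-many shape that flatTransformValues used to offer.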
- * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def transformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, VR], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.transformValues[VR](valueTransformerSupplier, named, stateStoreNames: _*)) - - /** - * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given - * `processorSupplier`). - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `Processor`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param processorSupplier a function that generates a `org.apache.kafka.streams.processor.Processor` - * @param stateStoreNames the names of the state store used by the processor - * @see `org.apache.kafka.streams.kstream.KStream#process` - */ - @deprecated(since = "3.0", message = "Use process(ProcessorSupplier, String*) instead.") - def process( - processorSupplier: () => org.apache.kafka.streams.processor.Processor[K, V], - stateStoreNames: String* - ): Unit = { - val processorSupplierJ: org.apache.kafka.streams.processor.ProcessorSupplier[K, V] = () => processorSupplier() - inner.process(processorSupplierJ, stateStoreNames: _*) - } - - /** - * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given - * `processorSupplier`). - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `Processor`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
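The old process overloads removed here took a () => org.apache.kafka.streams.processor.Processor[K, V] factory; their deprecation message points at process(ProcessorSupplier, String*) built on the processor.api package. A rough sketch of that shape, assuming the non-deprecated Scala overload accepts an api.ProcessorSupplier as the Java DSL does; the topic name and the println side effect are illustrative:

import org.apache.kafka.streams.processor.api.{Processor, ProcessorSupplier, Record}
import org.apache.kafka.streams.scala.ImplicitConversions._
import org.apache.kafka.streams.scala.serialization.Serdes._
import org.apache.kafka.streams.scala.StreamsBuilder

val builder = new StreamsBuilder()

// A terminal processor: it reads full records (key and value) and forwards nothing,
// which is the typical use of the old Unit-returning process().
val printer: ProcessorSupplier[String, String, Void, Void] =
  () =>
    new Processor[String, String, Void, Void] {
      override def process(record: Record[String, String]): Unit =
        println(s"${record.key()} -> ${record.value()}")
    }

builder
  .stream[String, String]("source")   // illustrative topic name
  .process(printer)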
- * - * @param processorSupplier a function that generates a `org.apache.kafka.streams.processor.Processor` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state store used by the processor - * @see `org.apache.kafka.streams.kstream.KStream#process` - */ - @deprecated(since = "3.0", message = "Use process(ProcessorSupplier, String*) instead.") - def process( - processorSupplier: () => org.apache.kafka.streams.processor.Processor[K, V], - named: Named, - stateStoreNames: String* - ): Unit = { - val processorSupplierJ: org.apache.kafka.streams.processor.ProcessorSupplier[K, V] = () => processorSupplier() - inner.process(processorSupplierJ, named, stateStoreNames: _*) - } - /** * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given * `processorSupplier`). diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KTable.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KTable.scala index 590e4b08fbdb2..6a7f42285a6db 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KTable.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KTable.scala @@ -17,6 +17,7 @@ package org.apache.kafka.streams.scala package kstream +import scala.jdk.FunctionWrappers.AsJavaBiFunction import org.apache.kafka.common.utils.Bytes import org.apache.kafka.streams.kstream.{KTable => KTableJ, TableJoined, ValueJoiner, ValueTransformerWithKeySupplier} import org.apache.kafka.streams.scala.FunctionsCompatConversions.{ @@ -643,6 +644,26 @@ class KTable[K, V](val inner: KTableJ[K, V]) { ): KTable[K, VR] = new KTable(inner.join(other.inner, keyExtractor.asJavaFunction, joiner, materialized)) + /** + * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed inner join. Records from this + * table are joined according to the result of keyExtractor on the other KTable. + * + * @param other the other [[KTable]] to be joined with this [[KTable]], keyed on the value obtained from keyExtractor + * @param keyExtractor a function that extracts the foreign key from this table's key and value + * @param joiner a function that computes the join result for a pair of matching records + * @param materialized a `Materialized` that describes how the `StateStore` for the resulting [[KTable]] + * should be materialized. + * @return a [[KTable]] that contains join-records for each key and values computed by the given joiner, + * one for each matched record-pair with the same key + */ + def join[VR, KO, VO]( + other: KTable[KO, VO], + keyExtractor: (K, V) => KO, + joiner: ValueJoiner[V, VO, VR], + materialized: Materialized[K, VR, KeyValueStore[Bytes, Array[Byte]]] + ): KTable[K, VR] = + new KTable(inner.join(other.inner, AsJavaBiFunction[K, V, KO](keyExtractor), joiner, materialized)) + /** * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed inner join. Records from this * table are joined according to the result of keyExtractor on the other KTable. @@ -666,6 +687,29 @@ class KTable[K, V](val inner: KTableJ[K, V]) { ): KTable[K, VR] = new KTable(inner.join(other.inner, keyExtractor.asJavaFunction, joiner, tableJoined, materialized)) + /** + * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed inner join. 
Records from this + * table are joined according to the result of keyExtractor on the other KTable. + * + * @param other the other [[KTable]] to be joined with this [[KTable]], keyed on the value obtained from keyExtractor + * @param keyExtractor a function that extracts the foreign key from this table's key and value + * @param joiner a function that computes the join result for a pair of matching records + * @param tableJoined a `org.apache.kafka.streams.kstream.TableJoined` used to configure + * partitioners and names of internal topics and stores + * @param materialized a `Materialized` that describes how the `StateStore` for the resulting [[KTable]] + * should be materialized. + * @return a [[KTable]] that contains join-records for each key and values computed by the given joiner, + * one for each matched record-pair with the same key + */ + def join[VR, KO, VO]( + other: KTable[KO, VO], + keyExtractor: (K, V) => KO, + joiner: ValueJoiner[V, VO, VR], + tableJoined: TableJoined[K, KO], + materialized: Materialized[K, VR, KeyValueStore[Bytes, Array[Byte]]] + ): KTable[K, VR] = + new KTable(inner.join(other.inner, AsJavaBiFunction[K, V, KO](keyExtractor), joiner, tableJoined, materialized)) + /** * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed left join. Records from this * table are joined according to the result of keyExtractor on the other KTable. @@ -686,6 +730,26 @@ class KTable[K, V](val inner: KTableJ[K, V]) { ): KTable[K, VR] = new KTable(inner.leftJoin(other.inner, keyExtractor.asJavaFunction, joiner, materialized)) + /** + * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed left join. Records from this + * table are joined according to the result of keyExtractor on the other KTable. + * + * @param other the other [[KTable]] to be joined with this [[KTable]], keyed on the value obtained from keyExtractor + * @param keyExtractor a function that extracts the foreign key from this table's key and value + * @param joiner a function that computes the join result for a pair of matching records + * @param materialized a `Materialized` that describes how the `StateStore` for the resulting [[KTable]] + * should be materialized. + * @return a [[KTable]] that contains join-records for each key and values computed by the given joiner, + * one for each matched record-pair with the same key + */ + def leftJoin[VR, KO, VO]( + other: KTable[KO, VO], + keyExtractor: (K, V) => KO, + joiner: ValueJoiner[V, VO, VR], + materialized: Materialized[K, VR, KeyValueStore[Bytes, Array[Byte]]] + ): KTable[K, VR] = + new KTable(inner.leftJoin(other.inner, AsJavaBiFunction[K, V, KO](keyExtractor), joiner, materialized)) + /** * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed left join. Records from this * table are joined according to the result of keyExtractor on the other KTable. @@ -709,6 +773,29 @@ class KTable[K, V](val inner: KTableJ[K, V]) { ): KTable[K, VR] = new KTable(inner.leftJoin(other.inner, keyExtractor.asJavaFunction, joiner, tableJoined, materialized)) + /** + * Join records of this [[KTable]] with another [[KTable]]'s records using non-windowed left join. Records from this + * table are joined according to the result of keyExtractor on the other KTable. 
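These new overloads accept a plain Scala (K, V) => KO extractor and adapt it with AsJavaBiFunction, so callers no longer need to supply a Java BiFunction themselves; leftJoin gains the same pair of overloads just below. A condensed usage sketch, modelled on the testJoinWithBiFunctionKeyExtractor case added to KTableTest later in this diff; the topic names and the string-concatenation extractor are illustrative:

import org.apache.kafka.streams.scala.ImplicitConversions._
import org.apache.kafka.streams.scala.serialization.Serdes._
import org.apache.kafka.streams.scala.{ByteArrayKeyValueStore, StreamsBuilder}
import org.apache.kafka.streams.scala.kstream.Materialized

val builder = new StreamsBuilder()
val orders = builder.stream[String, String]("orders").toTable
val users  = builder.stream[String, String]("users").toTable

orders
  .join[String, String, String](                               // type params are [VR, KO, VO]
    users,
    (key: String, value: String) => s"$key-$value",            // foreign key derived from key and value
    joiner = (order: String, user: String) => s"$order+$user",
    materialized = Materialized.`with`[String, String, ByteArrayKeyValueStore]
  )
  .toStream
  .to("orders-with-users")                                     // illustrative topic name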
+ * + * @param other the other [[KTable]] to be joined with this [[KTable]], keyed on the value obtained from keyExtractor + * @param keyExtractor a function that extracts the foreign key from this table's key and value + * @param joiner a function that computes the join result for a pair of matching records + * @param tableJoined a `org.apache.kafka.streams.kstream.TableJoined` used to configure + * partitioners and names of internal topics and stores + * @param materialized a `Materialized` that describes how the `StateStore` for the resulting [[KTable]] + * should be materialized. + * @return a [[KTable]] that contains join-records for each key and values computed by the given joiner, + * one for each matched record-pair with the same key + */ + def leftJoin[VR, KO, VO]( + other: KTable[KO, VO], + keyExtractor: (K, V) => KO, + joiner: ValueJoiner[V, VO, VR], + tableJoined: TableJoined[K, KO], + materialized: Materialized[K, VR, KeyValueStore[Bytes, Array[Byte]]] + ): KTable[K, VR] = + new KTable(inner.leftJoin(other.inner, AsJavaBiFunction[K, V, KO](keyExtractor), joiner, tableJoined, materialized)) + /** * Get the name of the local state store used that can be used to query this [[KTable]]. * diff --git a/streams/streams-scala/src/test/resources/log4j.properties b/streams/streams-scala/src/test/resources/log4j.properties deleted file mode 100644 index 93ffc165654a2..0000000000000 --- a/streams/streams-scala/src/test/resources/log4j.properties +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018 Lightbend Inc. -# Copyright (C) 2017-2018 Alexis Seigneurin. -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Set root logger level to DEBUG and its only appender to A1. -log4j.rootLogger=INFO, R - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender - -log4j.appender.R=org.apache.log4j.RollingFileAppender -log4j.appender.R.File=logs/kafka-streams-scala.log - -log4j.appender.R.MaxFileSize=100KB -# Keep one backup file -log4j.appender.R.MaxBackupIndex=1 - -# A1 uses PatternLayout. -log4j.appender.R.layout=org.apache.log4j.PatternLayout -log4j.appender.R.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n diff --git a/streams/streams-scala/src/test/resources/log4j2.yaml b/streams/streams-scala/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..6e1d18834bed8 --- /dev/null +++ b/streams/streams-scala/src/test/resources/log4j2.yaml @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "%-4r [%t] %-5p %c %x - %m%n" + + Appenders: + Console: + name: A1 + RollingFile: + - name: R + fileName: logs/kafka-streams-scala.log + filePattern: "streams-scala-%d{yyyy-MM-dd}.log" + PatternLayout: + pattern: "${logPattern}" + SizeBasedTriggeringPolicy: + size: "100KB" + DefaultRolloverStrategy: + max: 1 + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: R diff --git a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala index 0b44165164b93..4656a4d12fcd6 100644 --- a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala +++ b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala @@ -16,7 +16,8 @@ */ package org.apache.kafka.streams.scala.kstream -import org.apache.kafka.streams.Topology +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy +import org.apache.kafka.streams.AutoOffsetReset import org.apache.kafka.streams.kstream.internals.ConsumedInternal import org.apache.kafka.streams.processor.FailOnInvalidTimestamp import org.apache.kafka.streams.scala.serialization.Serdes @@ -38,15 +39,15 @@ class ConsumedTest { @Test def testCreateConsumedWithTimestampExtractorAndResetPolicy(): Unit = { val timestampExtractor = new FailOnInvalidTimestamp() - val resetPolicy = Topology.AutoOffsetReset.LATEST + val resetPolicy = AutoOffsetReset.latest() val consumed: Consumed[String, Long] = - Consumed.`with`[String, Long](timestampExtractor, resetPolicy) + Consumed.`with`(timestampExtractor, resetPolicy) val internalConsumed = new ConsumedInternal(consumed) assertEquals(Serdes.stringSerde.getClass, internalConsumed.keySerde.getClass) assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) assertEquals(timestampExtractor, internalConsumed.timestampExtractor) - assertEquals(resetPolicy, internalConsumed.offsetResetPolicy) + assertEquals(AutoOffsetResetStrategy.StrategyType.LATEST, internalConsumed.offsetResetPolicy.offsetResetStrategy()) } @Test @@ -59,14 +60,15 @@ class ConsumedTest { assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) assertEquals(timestampExtractor, internalConsumed.timestampExtractor) } + @Test def testCreateConsumedWithResetPolicy(): Unit = { - val resetPolicy = Topology.AutoOffsetReset.LATEST + val resetPolicy = AutoOffsetReset.latest() val consumed: Consumed[String, Long] = Consumed.`with`[String, Long](resetPolicy) val internalConsumed = new ConsumedInternal(consumed) assertEquals(Serdes.stringSerde.getClass, internalConsumed.keySerde.getClass) assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) - assertEquals(resetPolicy, internalConsumed.offsetResetPolicy) + assertEquals(AutoOffsetResetStrategy.StrategyType.LATEST, 
internalConsumed.offsetResetPolicy.offsetResetStrategy()) } } diff --git a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala index 88fe8e8980d15..6a0b6c1b0e988 100644 --- a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala +++ b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala @@ -18,17 +18,9 @@ package org.apache.kafka.streams.scala.kstream import java.time.Duration.ofSeconds import java.time.{Duration, Instant} -import org.apache.kafka.streams.kstream.{ - JoinWindows, - Named, - ValueTransformer, - ValueTransformerSupplier, - ValueTransformerWithKey, - ValueTransformerWithKeySupplier -} +import org.apache.kafka.streams.kstream.{JoinWindows, Named} import org.apache.kafka.streams.processor.api -import org.apache.kafka.streams.processor.ProcessorContext -import org.apache.kafka.streams.processor.api.{Processor, ProcessorSupplier} +import org.apache.kafka.streams.processor.api.{FixedKeyRecord, Processor, ProcessorSupplier} import org.apache.kafka.streams.scala.ImplicitConversions._ import org.apache.kafka.streams.scala.serialization.Serdes._ import org.apache.kafka.streams.scala.StreamsBuilder @@ -40,7 +32,6 @@ import org.junit.jupiter.api.Test import java.util import java.util.Collections -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ class KStreamTest extends TestDriver { @@ -287,64 +278,29 @@ class KStreamTest extends TestDriver { testDriver.close() } - @nowarn - @Test - def testCorrectlyFlatTransformValuesInRecords(): Unit = { - class TestTransformer extends ValueTransformer[String, Iterable[String]] { - override def init(context: ProcessorContext): Unit = {} - - override def transform(value: String): Iterable[String] = - Array(s"$value-transformed") - - override def close(): Unit = {} - } - val builder = new StreamsBuilder() - val sourceTopic = "source" - val sinkTopic = "sink" - - val stream = builder.stream[String, String](sourceTopic) - stream - .flatTransformValues(new ValueTransformerSupplier[String, Iterable[String]] { - def get(): ValueTransformer[String, Iterable[String]] = - new TestTransformer - }) - .to(sinkTopic) - - val now = Instant.now() - val testDriver = createTestDriver(builder, now) - val testInput = testDriver.createInput[String, String](sourceTopic) - val testOutput = testDriver.createOutput[String, String](sinkTopic) - - testInput.pipeInput("1", "value", now) - - assertEquals("value-transformed", testOutput.readValue) - - assertTrue(testOutput.isEmpty) - - testDriver.close() - } - - @nowarn @Test - def testCorrectlyFlatTransformValuesInRecordsWithKey(): Unit = { - class TestTransformer extends ValueTransformerWithKey[String, String, Iterable[String]] { - override def init(context: ProcessorContext): Unit = {} - - override def transform(key: String, value: String): Iterable[String] = - Array(s"$value-transformed-$key") + def testProcessValuesCorrectlyRecords(): Unit = { + val processorSupplier: api.FixedKeyProcessorSupplier[String, String, String] = + () => + new api.FixedKeyProcessor[String, String, String] { + private var context: api.FixedKeyProcessorContext[String, String] = _ + + override def init(context: api.FixedKeyProcessorContext[String, String]): Unit = + this.context = context + + override def process(record: FixedKeyRecord[String, String]): Unit = { + val processedValue = s"${record.value()}-processed" 
+ context.forward(record.withValue(processedValue)) + } + } - override def close(): Unit = {} - } val builder = new StreamsBuilder() val sourceTopic = "source" val sinkTopic = "sink" val stream = builder.stream[String, String](sourceTopic) stream - .flatTransformValues(new ValueTransformerWithKeySupplier[String, String, Iterable[String]] { - def get(): ValueTransformerWithKey[String, String, Iterable[String]] = - new TestTransformer - }) + .processValues(processorSupplier) .to(sinkTopic) val now = Instant.now() @@ -354,7 +310,9 @@ class KStreamTest extends TestDriver { testInput.pipeInput("1", "value", now) - assertEquals("value-transformed-1", testOutput.readValue) + val result = testOutput.readKeyValue() + assertEquals("value-processed", result.value) + assertEquals("1", result.key) assertTrue(testOutput.isEmpty) diff --git a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KTableTest.scala b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KTableTest.scala index 36031907339a4..e473c6579af8e 100644 --- a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KTableTest.scala +++ b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KTableTest.scala @@ -534,4 +534,84 @@ class KTableTest extends TestDriver { testDriver.close() } + + @Test + def testJoinWithBiFunctionKeyExtractor(): Unit = { + val builder = new StreamsBuilder() + val sourceTopic1 = "source1" + val sourceTopic2 = "source2" + val sinkTopic = "sink" + + val table1 = builder.stream[String, String](sourceTopic1).toTable + val table2 = builder.stream[String, String](sourceTopic2).toTable + + table1 + .join[String, String, String]( + table2, + (key: String, value: String) => s"$key-$value", + joiner = (v1: String, v2: String) => s"$v1+$v2", + materialized = Materialized.`with`[String, String, ByteArrayKeyValueStore] + ) + .toStream + .to(sinkTopic) + + val testDriver = createTestDriver(builder) + val testInput1 = testDriver.createInput[String, String](sourceTopic1) + val testInput2 = testDriver.createInput[String, String](sourceTopic2) + val testOutput = testDriver.createOutput[String, String](sinkTopic) + + testInput1.pipeInput("k1", "v1") + testInput2.pipeInput("k1-v1", "v2") + + val record = testOutput.readKeyValue + assertEquals("k1", record.key) + assertEquals("v1+v2", record.value) + + testDriver.close() + } + + @Test + def testLeftJoinWithBiFunctionKeyExtractor(): Unit = { + val builder = new StreamsBuilder() + val sourceTopic1 = "source1" + val sourceTopic2 = "source2" + val sinkTopic = "sink" + + val table1 = builder.stream[String, String](sourceTopic1).toTable + val table2 = builder.stream[String, String](sourceTopic2).toTable + + table1 + .leftJoin[String, String, String]( + table2, + (key: String, value: String) => s"$key-$value", + joiner = (v1: String, v2: String) => s"${v1}+${Option(v2).getOrElse("null")}", + materialized = Materialized.`with`[String, String, ByteArrayKeyValueStore] + ) + .toStream + .to(sinkTopic) + + val testDriver = createTestDriver(builder) + val testInput1 = testDriver.createInput[String, String](sourceTopic1) + val testInput2 = testDriver.createInput[String, String](sourceTopic2) + val testOutput = testDriver.createOutput[String, String](sinkTopic) + + // First insert into the foreign key table (table2) + testInput2.pipeInput("k1-v1", "v2") + + // Then insert into the primary table (table1) + testInput1.pipeInput("k1", "v1") + + val record1 = testOutput.readKeyValue + assertEquals("k1", 
record1.key) + assertEquals("v1+v2", record1.value) + + // Test with non-matching foreign key (should still output due to left join) + testInput1.pipeInput("k2", "v3") + + val record2 = testOutput.readKeyValue + assertEquals("k2", record2.key) + assertEquals("v3+null", record2.value) + + testDriver.close() + } } diff --git a/streams/test-utils/src/main/java/org/apache/kafka/streams/TopologyTestDriver.java b/streams/test-utils/src/main/java/org/apache/kafka/streams/TopologyTestDriver.java index cbf314e4426bb..2fc8400239d61 100644 --- a/streams/test-utils/src/main/java/org/apache/kafka/streams/TopologyTestDriver.java +++ b/streams/test-utils/src/main/java/org/apache/kafka/streams/TopologyTestDriver.java @@ -21,10 +21,11 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; @@ -44,13 +45,13 @@ import org.apache.kafka.streams.errors.TopologyException; import org.apache.kafka.streams.internals.StreamsConfigUtils; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.PunctuationType; import org.apache.kafka.streams.processor.Punctuator; import org.apache.kafka.streams.processor.StateRestoreListener; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; +import org.apache.kafka.streams.processor.api.ProcessorContext; import org.apache.kafka.streams.processor.internals.ChangelogRegister; import org.apache.kafka.streams.processor.internals.ClientUtils; import org.apache.kafka.streams.processor.internals.GlobalProcessorContextImpl; @@ -335,9 +336,9 @@ private TopologyTestDriver(final InternalTopologyBuilder builder, streamsMetrics ); - consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST); + consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); final Serializer bytesSerializer = new ByteArraySerializer(); - producer = new MockProducer<>(true, bytesSerializer, bytesSerializer) { + producer = new MockProducer<>(Cluster.empty(), true, null, bytesSerializer, bytesSerializer) { @Override public List partitionsFor(final String topic) { return Collections.singletonList(new PartitionInfo(topic, PARTITION_ID, null, null, null)); @@ -411,7 +412,7 @@ private void setupGlobalTask(final Time mockWallClockTime, final StreamsMetricsImpl streamsMetrics, final ThreadCache cache) { if (globalTopology != null) { - final MockConsumer globalConsumer = new MockConsumer<>(OffsetResetStrategy.NONE); + final MockConsumer globalConsumer = new MockConsumer<>(AutoOffsetResetStrategy.NONE.name()); for (final String topicName : globalTopology.sourceTopics()) { final TopicPartition partition = new TopicPartition(topicName, 0); globalPartitionsByInputTopic.put(topicName, partition); @@ -1207,19 +1208,17 @@ public KeyValueStoreFacade(final TimestampedKeyValueStore inner) { } @Override - public void init(final 
StateStoreContext context, final StateStore root) { - inner.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + inner.init(stateStoreContext, root); } @Override - public void put(final K key, - final V value) { + public void put(final K key, final V value) { inner.put(key, ValueAndTimestamp.make(value, ConsumerRecord.NO_TIMESTAMP)); } @Override - public V putIfAbsent(final K key, - final V value) { + public V putIfAbsent(final K key, final V value) { return getValueOrNull(inner.putIfAbsent(key, ValueAndTimestamp.make(value, ConsumerRecord.NO_TIMESTAMP))); } @@ -1273,8 +1272,8 @@ public WindowStoreFacade(final TimestampedWindowStore store) { } @Override - public void init(final StateStoreContext context, final StateStore root) { - inner.init(context, root); + public void init(final StateStoreContext stateStoreContext, final StateStore root) { + inner.init(stateStoreContext, root); } @Override diff --git a/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/MockProcessorContext.java b/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/MockProcessorContext.java index aa4fa139d5f0e..fc7d27a3bb792 100644 --- a/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/MockProcessorContext.java +++ b/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/MockProcessorContext.java @@ -25,8 +25,6 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.StreamsMetrics; -import org.apache.kafka.streams.Topology; -import org.apache.kafka.streams.TopologyTestDriver; import org.apache.kafka.streams.internals.ApiUtils; import org.apache.kafka.streams.processor.internals.ClientUtils; import org.apache.kafka.streams.processor.internals.RecordCollector; @@ -43,17 +41,14 @@ import java.util.Properties; /** - * {@link MockProcessorContext} is a mock of {@link ProcessorContext} for users to test their {@link Processor}, - * {@link org.apache.kafka.streams.kstream.Transformer}, and {@link org.apache.kafka.streams.kstream.ValueTransformer} - * implementations. + * {@link MockProcessorContext} is a mock of {@link ProcessorContext} for users to test their + * {@link org.apache.kafka.streams.kstream.ValueTransformer} implementations. *

 * The tests for this class (org.apache.kafka.streams.MockProcessorContextTest) include several behavioral
 * tests that serve as example usage.
 *
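Since the class is now documented purely as a mock for hand-driving deprecated old-API transformers, a compact sketch of that workflow may help. It is a Scala rendering of the Transformer-based pattern the rewritten MockProcessorContextTest below uses, not an excerpt from this patch, and the keys and values are illustrative:

import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams.kstream.Transformer
import org.apache.kafka.streams.processor.{MockProcessorContext, ProcessorContext}

val transformer = new Transformer[String, String, KeyValue[String, String]] {
  private var context: ProcessorContext = _

  override def init(context: ProcessorContext): Unit = this.context = context

  override def transform(key: String, value: String): KeyValue[String, String] = {
    context.forward(key, s"$value-forwarded")   // side output captured by the mock
    null                                        // nothing returned directly
  }

  override def close(): Unit = {}
}

val context = new MockProcessorContext()
transformer.init(context)
transformer.transform("a", "1")

// The mock only records what it witnessed; no punctuators fire and nothing is sent anywhere.
val captured = context.forwarded()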

                * Note that this class does not take any automated actions (such as firing scheduled punctuators). * It simply captures any data it witnesses. - * If you require more automated tests, we recommend wrapping your {@link Processor} in a minimal source-processor-sink - * {@link Topology} and using the {@link TopologyTestDriver}. * * @deprecated Since 4.0. Use {@link org.apache.kafka.streams.processor.api.MockProcessorContext} instead. */ @@ -426,8 +421,8 @@ public long offset() { *

                Note, that headers should never be {@code null} in the actual Kafka Streams runtime, * even if they could be empty. However, this mock does not guarantee non-{@code null} headers. * Thus, you either need to add a {@code null} check to your production code to use this mock - * for testing or you always need to set headers manually via {@link #setHeaders(Headers)} to - * avoid a {@link NullPointerException} from your {@link Processor} implementation. + * for testing, or you always need to set headers manually via {@link #setHeaders(Headers)} to + * avoid a {@link NullPointerException} from your {@link org.apache.kafka.streams.kstream.ValueTransformer}implementation. * * @return the headers */ diff --git a/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/api/MockProcessorContext.java b/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/api/MockProcessorContext.java index 5a506163bb211..52a2308dafe4c 100644 --- a/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/api/MockProcessorContext.java +++ b/streams/test-utils/src/main/java/org/apache/kafka/streams/processor/api/MockProcessorContext.java @@ -419,7 +419,7 @@ public void forward(final Record public List> forwarded(final String childName) { final LinkedList> result = new LinkedList<>(); for (final CapturedForward capture : capturedForwards) { - if (!capture.childName().isPresent() || capture.childName().equals(Optional.of(childName))) { + if (capture.childName().isEmpty() || capture.childName().equals(Optional.of(childName))) { result.add(capture); } } diff --git a/streams/test-utils/src/test/java/org/apache/kafka/streams/MockProcessorContextTest.java b/streams/test-utils/src/test/java/org/apache/kafka/streams/MockProcessorContextTest.java index bb2800dcc8681..ebb38dd773ad6 100644 --- a/streams/test-utils/src/test/java/org/apache/kafka/streams/MockProcessorContextTest.java +++ b/streams/test-utils/src/test/java/org/apache/kafka/streams/MockProcessorContextTest.java @@ -21,6 +21,8 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.streams.kstream.Transformer; +import org.apache.kafka.streams.processor.MockProcessorContext; import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.PunctuationType; import org.apache.kafka.streams.processor.Punctuator; @@ -53,24 +55,36 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -@SuppressWarnings("deprecation") // this is a test of a deprecated API +@Deprecated public class MockProcessorContextTest { + @Test public void shouldCaptureOutputRecords() { - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer> transformer = new Transformer<>() { + private ProcessorContext context; + + @Override + public void init(final ProcessorContext context) { + this.context = context; + } + @Override - public void process(final String key, final Long value) { - context().forward(key + value, key.length() + value); + public KeyValue transform(final String key, final Long value) { + context.forward(key + value, key.length() + value); + return null; } + + @Override + public void close() { } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(); - processor.init(context); + final 
MockProcessorContext context = new MockProcessorContext(); + transformer.init(context); - processor.process("foo", 5L); - processor.process("barbaz", 50L); + transformer.transform("foo", 5L); + transformer.transform("barbaz", 50L); - final Iterator forwarded = context.forwarded().iterator(); + final Iterator forwarded = context.forwarded().iterator(); assertEquals(new KeyValue<>("foo5", 8L), forwarded.next().keyValue()); assertEquals(new KeyValue<>("barbaz50", 56L), forwarded.next().keyValue()); assertFalse(forwarded.hasNext()); @@ -82,21 +96,32 @@ public void process(final String key, final Long value) { @Test public void shouldCaptureOutputRecordsUsingTo() { - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer> transformer = new Transformer<>() { + private ProcessorContext context; + @Override - public void process(final String key, final Long value) { - context().forward(key + value, key.length() + value, To.all()); + public void init(final ProcessorContext context) { + this.context = context; } + + @Override + public KeyValue transform(final String key, final Long value) { + context.forward(key + value, key.length() + value); + return null; + } + + @Override + public void close() { } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(); + final MockProcessorContext context = new MockProcessorContext(); - processor.init(context); + transformer.init(context); - processor.process("foo", 5L); - processor.process("barbaz", 50L); + transformer.transform("foo", 5L); + transformer.transform("barbaz", 50L); - final Iterator forwarded = context.forwarded().iterator(); + final Iterator forwarded = context.forwarded().iterator(); assertEquals(new KeyValue<>("foo5", 8L), forwarded.next().keyValue()); assertEquals(new KeyValue<>("barbaz50", 56L), forwarded.next().keyValue()); assertFalse(forwarded.hasNext()); @@ -108,39 +133,50 @@ public void process(final String key, final Long value) { @Test public void shouldCaptureRecordsOutputToChildByName() { - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer> transformer = new Transformer<>() { + private ProcessorContext context; private int count = 0; @Override - public void process(final String key, final Long value) { + public void init(final ProcessorContext context) { + this.context = context; + } + + @Override + public KeyValue transform(final String key, final Long value) { if (count == 0) { - context().forward("start", -1L, To.all()); // broadcast + context.forward("start", -1L, To.all()); // broadcast } final To toChild = count % 2 == 0 ? 
To.child("george") : To.child("pete"); - context().forward(key + value, key.length() + value, toChild); + context.forward(key + value, key.length() + value, toChild); count++; + + return null; } + + @Override + public void close() { } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(); + final MockProcessorContext context = new MockProcessorContext(); - processor.init(context); + transformer.init(context); - processor.process("foo", 5L); - processor.process("barbaz", 50L); + transformer.transform("foo", 5L); + transformer.transform("barbaz", 50L); { - final Iterator forwarded = context.forwarded().iterator(); + final Iterator forwarded = context.forwarded().iterator(); - final org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward forward1 = forwarded.next(); + final MockProcessorContext.CapturedForward forward1 = forwarded.next(); assertEquals(new KeyValue<>("start", -1L), forward1.keyValue()); assertNull(forward1.childName()); - final org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward forward2 = forwarded.next(); + final MockProcessorContext.CapturedForward forward2 = forwarded.next(); assertEquals(new KeyValue<>("foo5", 8L), forward2.keyValue()); assertEquals("george", forward2.childName()); - final org.apache.kafka.streams.processor.MockProcessorContext.CapturedForward forward3 = forwarded.next(); + final MockProcessorContext.CapturedForward forward3 = forwarded.next(); assertEquals(new KeyValue<>("barbaz50", 56L), forward3.keyValue()); assertEquals("pete", forward3.childName()); @@ -148,21 +184,21 @@ public void process(final String key, final Long value) { } { - final Iterator forwarded = context.forwarded("george").iterator(); + final Iterator forwarded = context.forwarded("george").iterator(); assertEquals(new KeyValue<>("start", -1L), forwarded.next().keyValue()); assertEquals(new KeyValue<>("foo5", 8L), forwarded.next().keyValue()); assertFalse(forwarded.hasNext()); } { - final Iterator forwarded = context.forwarded("pete").iterator(); + final Iterator forwarded = context.forwarded("pete").iterator(); assertEquals(new KeyValue<>("start", -1L), forwarded.next().keyValue()); assertEquals(new KeyValue<>("barbaz50", 56L), forwarded.next().keyValue()); assertFalse(forwarded.hasNext()); } { - final Iterator forwarded = context.forwarded("steve").iterator(); + final Iterator forwarded = context.forwarded("steve").iterator(); assertEquals(new KeyValue<>("start", -1L), forwarded.next().keyValue()); assertFalse(forwarded.hasNext()); } @@ -170,27 +206,37 @@ public void process(final String key, final Long value) { @Test public void shouldCaptureCommitsAndAllowReset() { - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer transformer = new Transformer<>() { + private ProcessorContext context; private int count = 0; @Override - public void process(final String key, final Long value) { + public void init(final ProcessorContext context) { + this.context = context; + } + + @Override + public Object transform(final String key, final Long value) { if (++count > 2) { - context().commit(); + context.commit(); } + return null; } + + @Override + public void close() { } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(); + final MockProcessorContext context = new MockProcessorContext(); - 
processor.init(context); + transformer.init(context); - processor.process("foo", 5L); - processor.process("barbaz", 50L); + transformer.transform("foo", 5L); + transformer.transform("barbaz", 50L); assertFalse(context.committed()); - processor.process("foobar", 500L); + transformer.transform("foobar", 500L); assertTrue(context.committed()); @@ -201,19 +247,31 @@ public void process(final String key, final Long value) { @Test public void shouldStoreAndReturnStateStores() { - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer transformer = new Transformer<>() { + private ProcessorContext context; + @Override - public void process(final String key, final Long value) { - final KeyValueStore stateStore = context().getStateStore("my-state"); + public void init(final ProcessorContext context) { + this.context = context; + } + + @Override + public Object transform(final String key, final Long value) { + final KeyValueStore stateStore = context.getStateStore("my-state"); stateStore.put(key, (stateStore.get(key) == null ? 0 : stateStore.get(key)) + value); stateStore.put("all", (stateStore.get("all") == null ? 0 : stateStore.get("all")) + value); + + return null; } + + @Override + public void close() { } }; final StoreBuilder> storeBuilder = Stores.keyValueStoreBuilder( - Stores.inMemoryKeyValueStore("my-state"), - Serdes.String(), - Serdes.Long()).withLoggingDisabled(); + Stores.inMemoryKeyValueStore("my-state"), + Serdes.String(), + Serdes.Long()).withLoggingDisabled(); final KeyValueStore store = storeBuilder.build(); @@ -239,10 +297,10 @@ public void process(final String key, final Long value) { store.init(mockInternalProcessorContext, store); - processor.init(mockInternalProcessorContext); + transformer.init(mockInternalProcessorContext); - processor.process("foo", 5L); - processor.process("bar", 50L); + transformer.transform("foo", 5L); + transformer.transform("bar", 50L); assertEquals(5L, (long) store.get("foo")); assertEquals(50L, (long) store.get("bar")); @@ -254,27 +312,39 @@ public void shouldCaptureApplicationAndRecordMetadata() { final Properties config = new Properties(); config.put(StreamsConfig.APPLICATION_ID_CONFIG, "testMetadata"); - final org.apache.kafka.streams.processor.AbstractProcessor processor = new org.apache.kafka.streams.processor.AbstractProcessor() { + final Transformer> transformer = new Transformer<>() { + private ProcessorContext context; + @Override - public void process(final String key, final Object value) { - context().forward("appId", context().applicationId()); - context().forward("taskId", context().taskId()); + public void init(final ProcessorContext context) { + this.context = context; + } - context().forward("topic", context().topic()); - context().forward("partition", context().partition()); - context().forward("offset", context().offset()); - context().forward("timestamp", context().timestamp()); + @Override + public KeyValue transform(final String key, final Long value) { + context.forward("appId", context.applicationId()); + context.forward("taskId", context.taskId()); - context().forward("key", key); - context().forward("value", value); + context.forward("topic", context.topic()); + context.forward("partition", context.partition()); + context.forward("offset", context.offset()); + context.forward("timestamp", context.timestamp()); + + context.forward("key", key); + context.forward("value", value); + + return null; } + + @Override + public void close() 
{ } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(config); - processor.init(context); + final MockProcessorContext context = new MockProcessorContext(config); + transformer.init(context); try { - processor.process("foo", 5L); + transformer.transform("foo", 5L); fail("Should have thrown an exception."); } catch (final IllegalStateException expected) { // expected, since the record metadata isn't initialized @@ -284,8 +354,8 @@ public void process(final String key, final Object value) { context.setRecordMetadata("t1", 0, 0L, new RecordHeaders(), 0L); { - processor.process("foo", 5L); - final Iterator forwarded = context.forwarded().iterator(); + transformer.transform("foo", 5L); + final Iterator forwarded = context.forwarded().iterator(); assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue()); assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue()); assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue()); @@ -305,8 +375,8 @@ public void process(final String key, final Object value) { context.setCurrentStreamTimeMs(30L); { - processor.process("bar", 50L); - final Iterator forwarded = context.forwarded().iterator(); + transformer.transform("bar", 50L); + final Iterator forwarded = context.forwarded().iterator(); assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue()); assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue()); assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue()); @@ -325,8 +395,8 @@ public void process(final String key, final Object value) { context.setPartition(30); { - processor.process("baz", 500L); - final Iterator forwarded = context.forwarded().iterator(); + transformer.transform("baz", 500L); + final Iterator forwarded = context.forwarded().iterator(); assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue()); assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue()); assertEquals(new KeyValue<>("topic", "t2"), forwarded.next().keyValue()); @@ -340,7 +410,7 @@ public void process(final String key, final Object value) { @Test public void shouldCapturePunctuator() { - final org.apache.kafka.streams.processor.Processor processor = new org.apache.kafka.streams.processor.Processor() { + final Transformer> transformer = new Transformer<>() { @Override public void init(final ProcessorContext context) { context.schedule( @@ -351,19 +421,19 @@ public void init(final ProcessorContext context) { } @Override - public void process(final String key, final Long value) { + public KeyValue transform(final String key, final Long value) { + return null; } @Override - public void close() { - } + public void close() { } }; - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(); + final MockProcessorContext context = new MockProcessorContext(); - processor.init(context); + transformer.init(context); - final org.apache.kafka.streams.processor.MockProcessorContext.CapturedPunctuator capturedPunctuator = context.scheduledPunctuators().get(0); + final MockProcessorContext.CapturedPunctuator capturedPunctuator = context.scheduledPunctuators().get(0); assertEquals(1000L, capturedPunctuator.getIntervalMs()); assertEquals(PunctuationType.WALL_CLOCK_TIME, capturedPunctuator.getType()); assertFalse(capturedPunctuator.cancelled()); @@ 
-374,6 +444,7 @@ public void close() { assertTrue(context.committed()); } + @SuppressWarnings("resource") @Test public void fullConstructorShouldSetAllExpectedAttributes() { final Properties config = new Properties(); @@ -382,7 +453,7 @@ public void fullConstructorShouldSetAllExpectedAttributes() { config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.LongSerde.class); final File dummyFile = new File(""); - final org.apache.kafka.streams.processor.MockProcessorContext context = new org.apache.kafka.streams.processor.MockProcessorContext(config, new TaskId(1, 1), dummyFile); + final MockProcessorContext context = new MockProcessorContext(config, new TaskId(1, 1), dummyFile); assertEquals("testFullConstructor", context.applicationId()); assertEquals(new TaskId(1, 1), context.taskId()); diff --git a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextAPITest.java b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextAPITest.java index cde1e8f1a9858..929b5d6533d4c 100644 --- a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextAPITest.java +++ b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextAPITest.java @@ -51,7 +51,7 @@ public class MockProcessorContextAPITest { @Test public void shouldCaptureOutputRecords() { - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { private ProcessorContext context; @Override @@ -87,7 +87,7 @@ public void process(final Record record) { @Test public void shouldCaptureRecordsOutputToChildByName() { - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { private ProcessorContext context; @Override @@ -158,7 +158,7 @@ public void init(final ProcessorContext context) { @Test public void shouldCaptureCommitsAndAllowReset() { - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { private ProcessorContext context; private int count = 0; @@ -195,7 +195,7 @@ public void process(final Record record) { @Test public void shouldStoreAndReturnStateStores() { - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { private ProcessorContext context; @Override @@ -246,7 +246,7 @@ public void shouldCaptureApplicationAndRecordMetadata() { ) ); - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { private ProcessorContext context; @Override @@ -302,7 +302,7 @@ public void process(final Record record) { @Test public void shouldCapturePunctuator() { - final Processor processor = new Processor() { + final Processor processor = new Processor<>() { @Override public void init(final ProcessorContext context) { context.schedule( @@ -331,13 +331,14 @@ public void process(final Record record) {} assertThat(context.committed(), is(true)); } + @SuppressWarnings("resource") @Test public void fullConstructorShouldSetAllExpectedAttributes() { final Properties config = new Properties(); config.put(StreamsConfig.APPLICATION_ID_CONFIG, "testFullConstructor"); config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ""); - config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass()); + config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class.getName()); + config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, 
Serdes.LongSerde.class.getName()); final File dummyFile = new File(""); final MockProcessorContext context = @@ -347,8 +348,8 @@ public void fullConstructorShouldSetAllExpectedAttributes() { assertThat(context.taskId(), is(new TaskId(1, 1))); assertThat(context.appConfigs().get(StreamsConfig.APPLICATION_ID_CONFIG), is("testFullConstructor")); assertThat(context.appConfigsWithPrefix("application.").get("id"), is("testFullConstructor")); - assertThat(context.keySerde().getClass(), is(Serdes.String().getClass())); - assertThat(context.valueSerde().getClass(), is(Serdes.Long().getClass())); + assertThat(context.keySerde().getClass(), is(Serdes.StringSerde.class)); + assertThat(context.valueSerde().getClass(), is(Serdes.LongSerde.class)); assertThat(context.stateDir(), is(dummyFile)); } } diff --git a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextStateStoreTest.java b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextStateStoreTest.java index d06cd44b900b7..521a39d692b9f 100644 --- a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextStateStoreTest.java +++ b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/MockProcessorContextStateStoreTest.java @@ -155,7 +155,7 @@ public static Stream parameters() { return values.stream(); } - @ParameterizedTest(name = "builder = {0}, timestamped = {1}, caching = {2}, logging = {3}") + @ParameterizedTest @MethodSource(value = "parameters") public void shouldEitherInitOrThrow(final StoreBuilder builder, final boolean timestamped, @@ -178,7 +178,7 @@ public void shouldEitherInitOrThrow(final StoreBuilder builder, () -> store.init(context.getStateStoreContext(), store) ); } else { - final InternalProcessorContext internalProcessorContext = mock(InternalProcessorContext.class); + final InternalProcessorContext internalProcessorContext = mock(InternalProcessorContext.class); when(internalProcessorContext.taskId()).thenReturn(context.taskId()); when(internalProcessorContext.stateDir()).thenReturn(stateDir); when(internalProcessorContext.metrics()).thenReturn((StreamsMetricsImpl) context.metrics()); diff --git a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/wordcount/WindowedWordCountProcessorTest.java b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/wordcount/WindowedWordCountProcessorTest.java index 8186c9652161e..ddd120adb5def 100644 --- a/streams/test-utils/src/test/java/org/apache/kafka/streams/test/wordcount/WindowedWordCountProcessorTest.java +++ b/streams/test-utils/src/test/java/org/apache/kafka/streams/test/wordcount/WindowedWordCountProcessorTest.java @@ -62,7 +62,7 @@ public void shouldWorkWithInMemoryStore() { .withLoggingDisabled() // Changelog is not supported by MockProcessorContext. .withCachingDisabled() // Caching is not supported by MockProcessorContext. .build(); - final InternalProcessorContext internalProcessorContext = mockInternalProcessorContext(context); + final InternalProcessorContext internalProcessorContext = mockInternalProcessorContext(context); store.init(internalProcessorContext, store); internalProcessorContext.register(store, null); @@ -119,7 +119,7 @@ public void shouldWorkWithPersistentStore() throws IOException { .withLoggingDisabled() // Changelog is not supported by MockProcessorContext. .withCachingDisabled() // Caching is not supported by MockProcessorContext. 
.build(); - final InternalProcessorContext internalProcessorContext = mockInternalProcessorContext(context, stateDir); + final InternalProcessorContext internalProcessorContext = mockInternalProcessorContext(context, stateDir); store.init(internalProcessorContext, store); internalProcessorContext.register(store, null); @@ -157,13 +157,13 @@ public void shouldWorkWithPersistentStore() throws IOException { } } - private InternalProcessorContext mockInternalProcessorContext(final MockProcessorContext context) { + private InternalProcessorContext mockInternalProcessorContext(final MockProcessorContext context) { return mockInternalProcessorContext(context, null); } - private InternalProcessorContext mockInternalProcessorContext(final MockProcessorContext context, + private InternalProcessorContext mockInternalProcessorContext(final MockProcessorContext context, final File stateDir) { - final InternalProcessorContext internalProcessorContext = mock(InternalProcessorContext.class); + final InternalProcessorContext internalProcessorContext = mock(InternalProcessorContext.class); when(internalProcessorContext.taskId()).thenReturn(context.taskId()); when(internalProcessorContext.metrics()).thenReturn((StreamsMetricsImpl) context.metrics()); when(internalProcessorContext.appConfigs()).thenReturn(context.appConfigs()); diff --git a/streams/test-utils/src/test/resources/log4j.properties b/streams/test-utils/src/test/resources/log4j.properties deleted file mode 100644 index be36f90299a77..0000000000000 --- a/streams/test-utils/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka=INFO diff --git a/streams/test-utils/src/test/resources/log4j2.yaml b/streams/test-utils/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..be546a18b55e6 --- /dev/null +++ b/streams/test-utils/src/test/resources/log4j2.yaml @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: INFO diff --git a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 27712cc5ace4e..0000000000000 --- a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? 
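For context on the logging change above: the deleted `log4j.properties` is replaced by a `log4j2.yaml` with the same pattern and levels. The sketch below shows a log statement that the new configuration would format on STDOUT, assuming the SLF4J API and a Log4j 2 binding are on the test classpath; the class name `LogPatternSketch` is hypothetical.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogPatternSketch {
    private static final Logger LOG =
        LoggerFactory.getLogger("org.apache.kafka.streams.test.LogPatternSketch");

    public static void main(final String[] args) {
        // With both the Root and org.apache.kafka loggers at INFO, this line is printed
        // using the "[%d] %p %m (%c:%L)%n" pattern; a LOG.debug(...) call would be filtered out.
        LOG.info("MockProcessorContext test logging is now configured via log4j2.yaml");
    }
}
```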
args[0] + " " : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.0)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - System.out.println("closing Kafka Streams instance"); - System.out.flush(); - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.0] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 1528b2c472bbb..0000000000000 --- a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? args[0] : "")); - } - - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.0)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 379720b956273..0000000000000 --- a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - /** - * This test cannot be executed, as long as Kafka 0.10.1.2 is not released - */ - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? args[0] + " " : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.1)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - System.out.println("closing Kafka Streams instance"); - System.out.flush(); - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.1] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java 
b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 4efe70911abe5..0000000000000 --- a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? 
args[0] : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.1)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 75e548439ceb5..0000000000000 --- a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 1) { - System.err.println("StreamsUpgradeTest requires one argument (properties-file) but provided none"); - } - final String propFileName = args[0]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.2)"); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.2] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 1cc115f3c061d..0000000000000 --- a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 1) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires one argument (properties-file) but none provided"); - } - final String propFileName = args[0]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.2)"); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestClient.java b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestClient.java new file mode 100644 index 0000000000000..dc0ad4d5601c8 --- /dev/null +++ b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestClient.java @@ -0,0 +1,299 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.tests; + +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.common.utils.KafkaThread; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.Grouped; +import org.apache.kafka.streams.kstream.KGroupedStream; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.Materialized; +import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.streams.kstream.Suppressed.BufferConfig; +import org.apache.kafka.streams.kstream.TimeWindows; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.state.Stores; +import org.apache.kafka.streams.state.WindowStore; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.time.Duration; +import java.time.Instant; +import java.util.Properties; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.apache.kafka.streams.kstream.Suppressed.untilWindowCloses; + +public class SmokeTestClient extends SmokeTestUtil { + + private final String name; + + private KafkaStreams streams; + private boolean uncaughtException = false; + private boolean started; + private volatile boolean closed; + + private static void addShutdownHook(final String name, final Runnable runnable) { + if (name != null) { + Runtime.getRuntime().addShutdownHook(KafkaThread.nonDaemon(name, runnable)); + } else { + Runtime.getRuntime().addShutdownHook(new Thread(runnable)); + } + } + + private static File tempDirectory() { + final String prefix = "kafka-"; + final File file; + try { + file = Files.createTempDirectory(prefix).toFile(); + } catch (final IOException ex) { + throw new RuntimeException("Failed to create a temp dir", ex); + } + file.deleteOnExit(); + + addShutdownHook("delete-temp-file-shutdown-hook", () -> { + try { + Utils.delete(file); + } catch (final IOException e) { + System.out.println("Error deleting " + file.getAbsolutePath()); + e.printStackTrace(System.out); + } + }); + + return file; + } + + public SmokeTestClient(final String name) { + this.name = name; + } + + public boolean started() { + return started; + } + + public boolean closed() { + return closed; + } + + public void start(final Properties streamsProperties) { + final Topology build = getTopology(); + streams = new KafkaStreams(build, getStreamsConfig(streamsProperties)); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + 
streams.setStateListener((newState, oldState) -> { + System.out.printf("%s %s: %s -> %s%n", name, Instant.now(), oldState, newState); + if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) { + started = true; + countDownLatch.countDown(); + } + + if (newState == KafkaStreams.State.NOT_RUNNING) { + closed = true; + } + }); + + streams.setUncaughtExceptionHandler(e -> { + System.out.println(name + ": SMOKE-TEST-CLIENT-EXCEPTION"); + System.out.println(name + ": FATAL: An unexpected exception is encountered: " + e); + e.printStackTrace(System.out); + uncaughtException = true; + return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT; + }); + + addShutdownHook("streams-shutdown-hook", this::close); + + streams.start(); + try { + if (!countDownLatch.await(1, TimeUnit.MINUTES)) { + System.out.println(name + ": SMOKE-TEST-CLIENT-EXCEPTION: Didn't start in one minute"); + } + } catch (final InterruptedException e) { + System.out.println(name + ": SMOKE-TEST-CLIENT-EXCEPTION: " + e); + e.printStackTrace(System.out); + } + System.out.println(name + ": SMOKE-TEST-CLIENT-STARTED"); + System.out.println(name + " started at " + Instant.now()); + } + + public void closeAsync() { + streams.close(Duration.ZERO); + } + + public void close() { + final boolean closed = streams.close(Duration.ofMinutes(1)); + + if (closed && !uncaughtException) { + System.out.println(name + ": SMOKE-TEST-CLIENT-CLOSED"); + } else if (closed) { + System.out.println(name + ": SMOKE-TEST-CLIENT-EXCEPTION"); + } else { + System.out.println(name + ": SMOKE-TEST-CLIENT-EXCEPTION: Didn't close"); + } + } + + private Properties getStreamsConfig(final Properties props) { + final Properties fullProps = new Properties(props); + fullProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "SmokeTest"); + fullProps.put(StreamsConfig.CLIENT_ID_CONFIG, "SmokeTest-" + name); + fullProps.put(StreamsConfig.STATE_DIR_CONFIG, tempDirectory().getAbsolutePath()); + fullProps.putAll(props); + return fullProps; + } + + public Topology getTopology() { + final StreamsBuilder builder = new StreamsBuilder(); + final Consumed stringIntConsumed = Consumed.with(stringSerde, intSerde); + final KStream source = builder.stream("data", stringIntConsumed); + source.filterNot((k, v) -> k.equals("flush")) + .to("echo", Produced.with(stringSerde, intSerde)); + final KStream data = source.filter((key, value) -> value == null || value != END); + data.process(SmokeTestUtil.printProcessorSupplier("data", name)); + + // min + final KGroupedStream groupedData = data.groupByKey(Grouped.with(stringSerde, intSerde)); + + final KTable, Integer> minAggregation = groupedData + .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofDays(1), Duration.ofMinutes(1))) + .aggregate( + () -> Integer.MAX_VALUE, + (aggKey, value, aggregate) -> (value < aggregate) ? 
value : aggregate, + Materialized + .>as("uwin-min") + .withValueSerde(intSerde) + .withRetention(Duration.ofHours(25)) + ); + + streamify(minAggregation, "min-raw"); + + streamify(minAggregation.suppress(untilWindowCloses(BufferConfig.unbounded())), "min-suppressed"); + + minAggregation + .toStream(new Unwindow<>()) + .filterNot((k, v) -> k.equals("flush")) + .to("min", Produced.with(stringSerde, intSerde)); + + final KTable, Integer> smallWindowSum = groupedData + .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofSeconds(2), Duration.ofSeconds(30)).advanceBy(Duration.ofSeconds(1))) + .reduce(Integer::sum); + + streamify(smallWindowSum, "sws-raw"); + streamify(smallWindowSum.suppress(untilWindowCloses(BufferConfig.unbounded())), "sws-suppressed"); + + final KTable minTable = builder.table( + "min", + Consumed.with(stringSerde, intSerde), + Materialized.as("minStoreName")); + + minTable.toStream().process(SmokeTestUtil.printProcessorSupplier("min", name)); + + // max + groupedData + .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofDays(2))) + .aggregate( + () -> Integer.MIN_VALUE, + (aggKey, value, aggregate) -> (value > aggregate) ? value : aggregate, + Materialized.>as("uwin-max").withValueSerde(intSerde)) + .toStream(new Unwindow<>()) + .filterNot((k, v) -> k.equals("flush")) + .to("max", Produced.with(stringSerde, intSerde)); + + final KTable maxTable = builder.table( + "max", + Consumed.with(stringSerde, intSerde), + Materialized.as("maxStoreName")); + maxTable.toStream().process(SmokeTestUtil.printProcessorSupplier("max", name)); + + // sum + groupedData + .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofDays(2))) + .aggregate( + () -> 0L, + (aggKey, value, aggregate) -> (long) value + aggregate, + Materialized.>as("win-sum").withValueSerde(longSerde)) + .toStream(new Unwindow<>()) + .filterNot((k, v) -> k.equals("flush")) + .to("sum", Produced.with(stringSerde, longSerde)); + + final Consumed stringLongConsumed = Consumed.with(stringSerde, longSerde); + final KTable sumTable = builder.table("sum", stringLongConsumed); + sumTable.toStream().process(SmokeTestUtil.printProcessorSupplier("sum", name)); + + // cnt + groupedData + .windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofDays(2))) + .count(Materialized.as("uwin-cnt")) + .toStream(new Unwindow<>()) + .filterNot((k, v) -> k.equals("flush")) + .to("cnt", Produced.with(stringSerde, longSerde)); + + final KTable cntTable = builder.table( + "cnt", + Consumed.with(stringSerde, longSerde), + Materialized.as("cntStoreName")); + cntTable.toStream().process(SmokeTestUtil.printProcessorSupplier("cnt", name)); + + // dif + maxTable + .join( + minTable, + (value1, value2) -> value1 - value2) + .toStream() + .filterNot((k, v) -> k.equals("flush")) + .to("dif", Produced.with(stringSerde, intSerde)); + + // avg + sumTable + .join( + cntTable, + (value1, value2) -> (double) value1 / (double) value2) + .toStream() + .filterNot((k, v) -> k.equals("flush")) + .to("avg", Produced.with(stringSerde, doubleSerde)); + + // test repartition + final Agg agg = new Agg(); + cntTable.groupBy(agg.selector(), Grouped.with(stringSerde, longSerde)) + .aggregate(agg.init(), agg.adder(), agg.remover(), + Materialized.as(Stores.inMemoryKeyValueStore("cntByCnt")) + .withKeySerde(Serdes.String()) + .withValueSerde(Serdes.Long())) + .toStream() + .to("tagg", Produced.with(stringSerde, longSerde)); + + return builder.build(); + } + + private static void streamify(final KTable, Integer> windowedTable, final String topic) { + windowedTable + .toStream() + 
.filterNot((k, v) -> k.key().equals("flush")) + .map((key, value) -> new KeyValue<>(key.toString(), value)) + .to(topic, Produced.with(stringSerde, intSerde)); + } +} diff --git a/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestDriver.java b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestDriver.java new file mode 100644 index 0000000000000..8ab48f7cf5f6f --- /dev/null +++ b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestDriver.java @@ -0,0 +1,670 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.tests; + +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.Callback; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.PartitionInfo; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.common.utils.Utils; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; +import static org.apache.kafka.common.utils.Utils.mkEntry; + +public class SmokeTestDriver extends SmokeTestUtil { + private static final String[] NUMERIC_VALUE_TOPICS = { + "data", + "echo", + "max", + "min", "min-suppressed", "min-raw", + "dif", + "sum", + "sws-raw", "sws-suppressed", + "cnt", + "avg", + "tagg" + }; + private static final String[] STRING_VALUE_TOPICS = { + "fk" + }; + + private static final 
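The `SmokeTestClient` topology above relies on `suppress(untilWindowCloses(...))` so that the `min-suppressed` and `sws-suppressed` topics carry exactly one final result per window. A stripped-down sketch of that pattern follows, under the assumption of a simple String/Integer input topic; the class and topic names here are illustrative and not part of the patch.

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.TimeWindows;

import java.time.Duration;

public class SuppressedMinSketch {
    public static Topology build() {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("data", Consumed.with(Serdes.String(), Serdes.Integer()))
               .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
               .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofDays(1), Duration.ofMinutes(1)))
               .reduce(Math::min, Materialized.with(Serdes.String(), Serdes.Integer()))
               // Emit exactly one result per window and key, once the grace period has elapsed.
               .suppress(Suppressed.untilWindowCloses(Suppressed.BufferConfig.unbounded()))
               .toStream((windowedKey, value) -> windowedKey.key())
               .to("min-final", Produced.with(Serdes.String(), Serdes.Integer()));
        return builder.build();
    }

    public static void main(final String[] args) {
        System.out.println(build().describe());
    }
}
```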
String[] TOPICS = new String[NUMERIC_VALUE_TOPICS.length + STRING_VALUE_TOPICS.length]; + static { + System.arraycopy(NUMERIC_VALUE_TOPICS, 0, TOPICS, 0, NUMERIC_VALUE_TOPICS.length); + System.arraycopy(STRING_VALUE_TOPICS, 0, TOPICS, NUMERIC_VALUE_TOPICS.length, STRING_VALUE_TOPICS.length); + } + + private static final int MAX_RECORD_EMPTY_RETRIES = 30; + + private static class ValueList { + public final String key; + private final int[] values; + private int index; + + ValueList(final int min, final int max) { + key = min + "-" + max; + + values = new int[max - min + 1]; + for (int i = 0; i < values.length; i++) { + values[i] = min + i; + } + // We want to randomize the order of data to test not completely predictable processing order + // However, values are also use as a timestamp of the record. (TODO: separate data and timestamp) + // We keep some correlation of time and order. Thus, the shuffling is done with a sliding window + shuffle(values, 10); + + index = 0; + } + + int next() { + return (index < values.length) ? values[index++] : -1; + } + } + + public static String[] topics() { + return Arrays.copyOf(TOPICS, TOPICS.length); + } + + static void generatePerpetually(final String kafka, + final int numKeys, + final int maxRecordsPerKey) { + final Properties producerProps = generatorProperties(kafka); + + int numRecordsProduced = 0; + + final ValueList[] data = new ValueList[numKeys]; + for (int i = 0; i < numKeys; i++) { + data[i] = new ValueList(i, i + maxRecordsPerKey - 1); + } + + final Random rand = new Random(); + + try (final KafkaProducer producer = new KafkaProducer<>(producerProps)) { + while (true) { + final int index = rand.nextInt(numKeys); + final String key = data[index].key; + final int value = data[index].next(); + + final ProducerRecord record = + new ProducerRecord<>( + "data", + stringSerde.serializer().serialize("", key), + intSerde.serializer().serialize("", value) + ); + producer.send(record); + + final ProducerRecord fkRecord = + new ProducerRecord<>( + "fk", + intSerde.serializer().serialize("", value), + stringSerde.serializer().serialize("", key) + ); + producer.send(fkRecord); + + numRecordsProduced++; + if (numRecordsProduced % 100 == 0) { + System.out.println(Instant.now() + " " + numRecordsProduced + " records produced"); + } + Utils.sleep(2); + } + } + } + + public static Map> generate(final String kafka, + final int numKeys, + final int maxRecordsPerKey, + final Duration timeToSpend) { + final Properties producerProps = generatorProperties(kafka); + + int numRecordsProduced = 0; + + final Map> allData = new HashMap<>(); + final ValueList[] data = new ValueList[numKeys]; + for (int i = 0; i < numKeys; i++) { + data[i] = new ValueList(i, i + maxRecordsPerKey - 1); + allData.put(data[i].key, new HashSet<>()); + } + final Random rand = new Random(); + + int remaining = data.length; + + final long recordPauseTime = timeToSpend.toMillis() / numKeys / maxRecordsPerKey; + + final List> dataNeedRetry = new ArrayList<>(); + final List> fkNeedRetry = new ArrayList<>(); + + try (final KafkaProducer producer = new KafkaProducer<>(producerProps)) { + while (remaining > 0) { + final int index = rand.nextInt(remaining); + final String key = data[index].key; + final int value = data[index].next(); + + if (value < 0) { + remaining--; + data[index] = data[remaining]; + } else { + final ProducerRecord record = + new ProducerRecord<>( + "data", + stringSerde.serializer().serialize("", key), + intSerde.serializer().serialize("", value) + ); + + producer.send(record, new 
TestCallback(record, dataNeedRetry)); + + final ProducerRecord fkRecord = + new ProducerRecord<>( + "fk", + intSerde.serializer().serialize("", value), + stringSerde.serializer().serialize("", key) + ); + + producer.send(fkRecord, new TestCallback(fkRecord, fkNeedRetry)); + + numRecordsProduced++; + allData.get(key).add(value); + if (numRecordsProduced % 100 == 0) { + System.out.println(Instant.now() + " " + numRecordsProduced + " records produced"); + } + Utils.sleep(Math.max(recordPauseTime, 2)); + } + } + producer.flush(); + + retry(producer, dataNeedRetry, stringSerde); + retry(producer, fkNeedRetry, intSerde); + + flush(producer, + "data", + stringSerde.serializer().serialize("", "flush"), + intSerde.serializer().serialize("", 0) + ); + flush(producer, + "fk", + intSerde.serializer().serialize("", 0), + stringSerde.serializer().serialize("", "flush") + ); + } + return Collections.unmodifiableMap(allData); + } + + private static void retry(final KafkaProducer producer, + List> needRetry, + final Serde keySerde) { + int remainingRetries = 5; + while (!needRetry.isEmpty()) { + final List> needRetry2 = new ArrayList<>(); + for (final ProducerRecord record : needRetry) { + System.out.println( + "retry producing " + keySerde.deserializer().deserialize("", record.key())); + producer.send(record, new TestCallback(record, needRetry2)); + } + producer.flush(); + needRetry = needRetry2; + if (--remainingRetries == 0 && !needRetry.isEmpty()) { + System.err.println("Failed to produce all records after multiple retries"); + Exit.exit(1); + } + } + } + + private static void flush(final KafkaProducer producer, + final String topic, + final byte[] keyBytes, + final byte[] valBytes) { + // now that we've sent everything, we'll send some final records with a timestamp high enough to flush out + // all suppressed records. 
+ final List partitions = producer.partitionsFor(topic); + for (final PartitionInfo partition : partitions) { + producer.send(new ProducerRecord<>( + partition.topic(), + partition.partition(), + System.currentTimeMillis() + Duration.ofDays(2).toMillis(), + keyBytes, + valBytes + )); + } + } + + private static Properties generatorProperties(final String kafka) { + final Properties producerProps = new Properties(); + producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "SmokeTest"); + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka); + producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + producerProps.put(ProducerConfig.ACKS_CONFIG, "all"); + return producerProps; + } + + private static class TestCallback implements Callback { + private final ProducerRecord originalRecord; + private final List> needRetry; + + TestCallback(final ProducerRecord originalRecord, + final List> needRetry) { + this.originalRecord = originalRecord; + this.needRetry = needRetry; + } + + @Override + public void onCompletion(final RecordMetadata metadata, final Exception exception) { + if (exception != null) { + if (exception instanceof TimeoutException) { + needRetry.add(originalRecord); + } else { + exception.printStackTrace(); + Exit.exit(1); + } + } + } + } + + private static void shuffle(final int[] data, @SuppressWarnings("SameParameterValue") final int windowSize) { + final Random rand = new Random(); + for (int i = 0; i < data.length; i++) { + // we shuffle data within windowSize + final int j = rand.nextInt(Math.min(data.length - i, windowSize)) + i; + + // swap + final int tmp = data[i]; + data[i] = data[j]; + data[j] = tmp; + } + } + + public static class NumberDeserializer implements Deserializer { + @Override + public Number deserialize(final String topic, final byte[] data) { + final Number value; + switch (topic) { + case "data": + case "echo": + case "min": + case "min-raw": + case "min-suppressed": + case "sws-raw": + case "sws-suppressed": + case "max": + case "dif": + value = intSerde.deserializer().deserialize(topic, data); + break; + case "sum": + case "cnt": + case "tagg": + value = longSerde.deserializer().deserialize(topic, data); + break; + case "avg": + value = doubleSerde.deserializer().deserialize(topic, data); + break; + default: + throw new RuntimeException("unknown topic: " + topic); + } + return value; + } + } + + public static VerificationResult verify(final String kafka, + final Map> inputs, + final int maxRecordsPerKey) { + final Properties props = new Properties(); + props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier"); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka); + props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, NumberDeserializer.class); + props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); + + final KafkaConsumer consumer = new KafkaConsumer<>(props); + final List partitions = getAllPartitions(consumer, NUMERIC_VALUE_TOPICS); + consumer.assign(partitions); + consumer.seekToBeginning(partitions); + + final int recordsGenerated = inputs.size() * maxRecordsPerKey; + int recordsProcessed = 0; + final Map processed = + Stream.of(NUMERIC_VALUE_TOPICS) + .collect(Collectors.toMap(t -> t, t -> new AtomicInteger(0))); + + final Map>>> events = new HashMap<>(); + + VerificationResult verificationResult = new 
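The `flush(...)` helper above closes open windows by producing records stamped two days in the future, which pushes stream time past every grace period so suppressed results are finally emitted. Below is a minimal standalone sketch of the same trick, assuming a local broker at `localhost:9092` and String serialization; the class name `WindowFlushSketch` is hypothetical.

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.Properties;

public class WindowFlushSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // A record stamped two days ahead advances stream time past every open
            // window's grace period, so suppressed results downstream are emitted.
            final long futureTimestamp = System.currentTimeMillis() + Duration.ofDays(2).toMillis();
            producer.send(new ProducerRecord<>("data", null, futureTimestamp, "flush", "flush"));
            producer.flush();
        }
    }
}
```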
VerificationResult(false, "no results yet"); + int retry = 0; + final long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < TimeUnit.MINUTES.toMillis(6)) { + final ConsumerRecords records = consumer.poll(Duration.ofSeconds(5)); + if (records.isEmpty() && recordsProcessed >= recordsGenerated) { + verificationResult = verifyAll(inputs, events, false); + if (verificationResult.passed()) { + break; + } else if (retry++ > MAX_RECORD_EMPTY_RETRIES) { + System.out.println(Instant.now() + " Didn't get any more results, verification hasn't passed, and out of retries."); + break; + } else { + System.out.println(Instant.now() + " Didn't get any more results, but verification hasn't passed (yet). Retrying..." + retry); + } + } else { + System.out.println(Instant.now() + " Get some more results from " + records.partitions() + ", resetting retry."); + + retry = 0; + for (final ConsumerRecord record : records) { + final String key = record.key(); + + final String topic = record.topic(); + processed.get(topic).incrementAndGet(); + + if (topic.equals("echo")) { + recordsProcessed++; + if (recordsProcessed % 100 == 0) { + System.out.println("Echo records processed = " + recordsProcessed); + } + } + + events.computeIfAbsent(topic, t -> new HashMap<>()) + .computeIfAbsent(key, k -> new LinkedList<>()) + .add(record); + } + + System.out.println(processed); + } + } + consumer.close(); + final long finished = System.currentTimeMillis() - start; + System.out.println("Verification time=" + finished); + System.out.println("-------------------"); + System.out.println("Result Verification"); + System.out.println("-------------------"); + System.out.println("recordGenerated=" + recordsGenerated); + System.out.println("recordProcessed=" + recordsProcessed); + + if (recordsProcessed > recordsGenerated) { + System.out.println("PROCESSED-MORE-THAN-GENERATED"); + } else if (recordsProcessed < recordsGenerated) { + System.out.println("PROCESSED-LESS-THAN-GENERATED"); + } + + boolean success; + + final Map> received = + events.get("echo") + .entrySet() + .stream() + .map(entry -> mkEntry( + entry.getKey(), + entry.getValue().stream().map(ConsumerRecord::value).collect(Collectors.toSet())) + ) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + success = inputs.equals(received); + + if (success) { + System.out.println("ALL-RECORDS-DELIVERED"); + } else { + int missedCount = 0; + for (final Map.Entry> entry : inputs.entrySet()) { + missedCount += received.get(entry.getKey()).size(); + } + System.out.println("missedRecords=" + missedCount); + } + + // give it one more try if it's not already passing. + if (!verificationResult.passed()) { + verificationResult = verifyAll(inputs, events, true); + } + success &= verificationResult.passed(); + + System.out.println(verificationResult.result()); + + System.out.println(success ? 
"SUCCESS" : "FAILURE"); + return verificationResult; + } + + public static class VerificationResult { + private final boolean passed; + private final String result; + + VerificationResult(final boolean passed, final String result) { + this.passed = passed; + this.result = result; + } + + public boolean passed() { + return passed; + } + + public String result() { + return result; + } + } + + private static VerificationResult verifyAll(final Map> inputs, + final Map>>> events, + final boolean printResults) { + final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + boolean pass; + try (final PrintStream resultStream = new PrintStream(byteArrayOutputStream)) { + pass = verifyTAgg(resultStream, inputs, events.get("tagg"), printResults); + pass &= verifySuppressed(resultStream, "min-suppressed", events, printResults); + pass &= verify(resultStream, "min-suppressed", inputs, events, windowedKey -> { + final String unwindowedKey = windowedKey.substring(1, windowedKey.length() - 1).replaceAll("@.*", ""); + return getMin(unwindowedKey); + }, printResults); + pass &= verifySuppressed(resultStream, "sws-suppressed", events, printResults); + pass &= verify(resultStream, "min", inputs, events, SmokeTestDriver::getMin, printResults); + pass &= verify(resultStream, "max", inputs, events, SmokeTestDriver::getMax, printResults); + pass &= verify(resultStream, "dif", inputs, events, key -> getMax(key).intValue() - getMin(key).intValue(), printResults); + pass &= verify(resultStream, "sum", inputs, events, SmokeTestDriver::getSum, printResults); + pass &= verify(resultStream, "cnt", inputs, events, key1 -> getMax(key1).intValue() - getMin(key1).intValue() + 1L, printResults); + pass &= verify(resultStream, "avg", inputs, events, SmokeTestDriver::getAvg, printResults); + } + return new VerificationResult(pass, new String(byteArrayOutputStream.toByteArray(), StandardCharsets.UTF_8)); + } + + private static boolean verify(final PrintStream resultStream, + final String topic, + final Map> inputData, + final Map>>> events, + final Function keyToExpectation, + final boolean printResults) { + final Map>> observedInputEvents = events.get("data"); + final Map>> outputEvents = events.getOrDefault(topic, emptyMap()); + if (outputEvents.isEmpty()) { + resultStream.println(topic + " is empty"); + return false; + } else { + resultStream.printf("verifying %s with %d keys%n", topic, outputEvents.size()); + + if (outputEvents.size() != inputData.size()) { + resultStream.printf("fail: resultCount=%d expectedCount=%s%n\tresult=%s%n\texpected=%s%n", + outputEvents.size(), inputData.size(), outputEvents.keySet(), inputData.keySet()); + return false; + } + for (final Map.Entry>> entry : outputEvents.entrySet()) { + final String key = entry.getKey(); + final Number expected = keyToExpectation.apply(key); + final Number actual = entry.getValue().getLast().value(); + if (!expected.equals(actual)) { + resultStream.printf("%s fail: key=%s actual=%s expected=%s%n", topic, key, actual, expected); + + if (printResults) { + resultStream.printf("\t inputEvents=%n%s%n\t" + + "echoEvents=%n%s%n\tmaxEvents=%n%s%n\tminEvents=%n%s%n\tdifEvents=%n%s%n\tcntEvents=%n%s%n\ttaggEvents=%n%s%n", + indent("\t\t", observedInputEvents.get(key)), + indent("\t\t", events.getOrDefault("echo", emptyMap()).getOrDefault(key, new LinkedList<>())), + indent("\t\t", events.getOrDefault("max", emptyMap()).getOrDefault(key, new LinkedList<>())), + indent("\t\t", events.getOrDefault("min", emptyMap()).getOrDefault(key, new 
LinkedList<>())), + indent("\t\t", events.getOrDefault("dif", emptyMap()).getOrDefault(key, new LinkedList<>())), + indent("\t\t", events.getOrDefault("cnt", emptyMap()).getOrDefault(key, new LinkedList<>())), + indent("\t\t", events.getOrDefault("tagg", emptyMap()).getOrDefault(key, new LinkedList<>()))); + + if (!Set.of("echo", "max", "min", "dif", "cnt", "tagg").contains(topic)) + resultStream.printf("%sEvents=%n%s%n", topic, indent("\t\t", entry.getValue())); + } + + return false; + } + } + return true; + } + } + + + private static boolean verifySuppressed(final PrintStream resultStream, + @SuppressWarnings("SameParameterValue") final String topic, + final Map>>> events, + final boolean printResults) { + resultStream.println("verifying suppressed " + topic); + final Map>> topicEvents = events.getOrDefault(topic, emptyMap()); + for (final Map.Entry>> entry : topicEvents.entrySet()) { + if (entry.getValue().size() != 1) { + final String unsuppressedTopic = topic.replace("-suppressed", "-raw"); + final String key = entry.getKey(); + final String unwindowedKey = key.substring(1, key.length() - 1).replaceAll("@.*", ""); + resultStream.printf("fail: key=%s%n\tnon-unique result:%n%s%n", + key, + indent("\t\t", entry.getValue())); + + if (printResults) + resultStream.printf("\tresultEvents:%n%s%n\tinputEvents:%n%s%n", + indent("\t\t", events.get(unsuppressedTopic).get(key)), + indent("\t\t", events.get("data").get(unwindowedKey))); + + return false; + } + } + return true; + } + + private static String indent(@SuppressWarnings("SameParameterValue") final String prefix, + final Iterable> list) { + final StringBuilder stringBuilder = new StringBuilder(); + for (final ConsumerRecord record : list) { + stringBuilder.append(prefix).append(record).append('\n'); + } + return stringBuilder.toString(); + } + + private static Long getSum(final String key) { + final int min = getMin(key).intValue(); + final int max = getMax(key).intValue(); + return ((long) min + max) * (max - min + 1L) / 2L; + } + + private static Double getAvg(final String key) { + final int min = getMin(key).intValue(); + final int max = getMax(key).intValue(); + return ((long) min + max) / 2.0; + } + + + private static boolean verifyTAgg(final PrintStream resultStream, + final Map> allData, + final Map>> taggEvents, + final boolean printResults) { + if (taggEvents == null) { + resultStream.println("tagg is missing"); + return false; + } else if (taggEvents.isEmpty()) { + resultStream.println("tagg is empty"); + return false; + } else { + resultStream.println("verifying tagg"); + + // generate expected answer + final Map expected = new HashMap<>(); + for (final String key : allData.keySet()) { + final int min = getMin(key).intValue(); + final int max = getMax(key).intValue(); + final String cnt = Long.toString(max - min + 1L); + + expected.put(cnt, expected.getOrDefault(cnt, 0L) + 1); + } + + // check the result + for (final Map.Entry>> entry : taggEvents.entrySet()) { + final String key = entry.getKey(); + Long expectedCount = expected.remove(key); + if (expectedCount == null) { + expectedCount = 0L; + } + + if (entry.getValue().getLast().value().longValue() != expectedCount) { + resultStream.println("fail: key=" + key + " tagg=" + entry.getValue() + " expected=" + expectedCount); + + if (printResults) + resultStream.println("\t taggEvents: " + entry.getValue()); + return false; + } + } + + } + return true; + } + + private static Number getMin(final String key) { + return Integer.parseInt(key.split("-")[0]); + } + + private static 
Number getMax(final String key) { + return Integer.parseInt(key.split("-")[1]); + } + + private static List getAllPartitions(final KafkaConsumer consumer, final String... topics) { + final List partitions = new ArrayList<>(); + + for (final String topic : topics) { + for (final PartitionInfo info : consumer.partitionsFor(topic)) { + partitions.add(new TopicPartition(info.topic(), info.partition())); + } + } + return partitions; + } + +} diff --git a/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestUtil.java b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestUtil.java new file mode 100644 index 0000000000000..e4a711313aaf4 --- /dev/null +++ b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/SmokeTestUtil.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.tests; + +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.kstream.Aggregator; +import org.apache.kafka.streams.kstream.Initializer; +import org.apache.kafka.streams.kstream.KeyValueMapper; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.processor.api.ContextualProcessor; +import org.apache.kafka.streams.processor.api.ProcessorContext; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.Record; + +import java.time.Instant; + +public class SmokeTestUtil { + + static final int END = Integer.MAX_VALUE; + + static ProcessorSupplier printProcessorSupplier(final String topic) { + return printProcessorSupplier(topic, ""); + } + + static ProcessorSupplier printProcessorSupplier(final String topic, final String name) { + return () -> new ContextualProcessor() { + private int numRecordsProcessed = 0; + private long smallestOffset = Long.MAX_VALUE; + private long largestOffset = Long.MIN_VALUE; + + @Override + public void init(final ProcessorContext context) { + super.init(context); + System.out.println("[3.9] initializing processor: topic=" + topic + " taskId=" + context.taskId()); + System.out.flush(); + numRecordsProcessed = 0; + smallestOffset = Long.MAX_VALUE; + largestOffset = Long.MIN_VALUE; + } + + @Override + public void process(final Record record) { + numRecordsProcessed++; + if (numRecordsProcessed % 100 == 0) { + System.out.printf("%s: %s%n", name, Instant.now()); + System.out.println("processed " + numRecordsProcessed + " records from topic=" + topic); + } + + context().recordMetadata().ifPresent(recordMetadata -> { + if (smallestOffset > recordMetadata.offset()) { + smallestOffset = recordMetadata.offset(); + 
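                        // Offset-range bookkeeping: smallestOffset/largestOffset track the extremes seen by this
                        // task, and close() below reports "processed = 1 + largestOffset - smallestOffset"
                        // whenever at least one record (and hence one offset) was observed.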
} + if (largestOffset < recordMetadata.offset()) { + largestOffset = recordMetadata.offset(); + } + }); + } + + @Override + public void close() { + System.out.printf("Close processor for task %s%n", context().taskId()); + System.out.println("processed " + numRecordsProcessed + " records"); + final long processed; + if (largestOffset >= smallestOffset) { + processed = 1L + largestOffset - smallestOffset; + } else { + processed = 0L; + } + System.out.println("offset " + smallestOffset + " to " + largestOffset + " -> processed " + processed); + System.out.flush(); + } + }; + } + + public static final class Unwindow implements KeyValueMapper, V, K> { + @Override + public K apply(final Windowed winKey, final V value) { + return winKey.key(); + } + } + + public static class Agg { + + KeyValueMapper> selector() { + return (key, value) -> new KeyValue<>(value == null ? null : Long.toString(value), 1L); + } + + public Initializer init() { + return () -> 0L; + } + + Aggregator adder() { + return (aggKey, value, aggregate) -> aggregate + value; + } + + Aggregator remover() { + return (aggKey, value, aggregate) -> aggregate - value; + } + } + + public static Serde stringSerde = Serdes.String(); + + public static Serde intSerde = Serdes.Integer(); + + static Serde longSerde = Serdes.Long(); + + static Serde doubleSerde = Serdes.Double(); + + public static void sleep(final long duration) { + try { + Thread.sleep(duration); + } catch (final Exception ignore) { } + } + +} diff --git a/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsSmokeTest.java b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsSmokeTest.java new file mode 100644 index 0000000000000..5803b2fbd0217 --- /dev/null +++ b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsSmokeTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.streams.tests; + +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.streams.StreamsConfig; + +import java.io.IOException; +import java.time.Duration; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; + +import static org.apache.kafka.streams.tests.SmokeTestDriver.generate; +import static org.apache.kafka.streams.tests.SmokeTestDriver.generatePerpetually; + +public class StreamsSmokeTest { + + /** + * args ::= kafka propFileName command disableAutoTerminate + * command := "run" | "process" + * + * @param args + */ + public static void main(final String[] args) throws IOException { + if (args.length < 2) { + System.err.println("StreamsSmokeTest are expecting two parameters: propFile, command; but only see " + args.length + " parameter"); + Exit.exit(1); + } + + final String propFileName = args[0]; + final String command = args[1]; + final boolean disableAutoTerminate = args.length > 2; + + final Properties streamsProperties = Utils.loadProps(propFileName); + final String kafka = streamsProperties.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG); + final String processingGuarantee = streamsProperties.getProperty(StreamsConfig.PROCESSING_GUARANTEE_CONFIG); + + if (kafka == null) { + System.err.println("No bootstrap kafka servers specified in " + StreamsConfig.BOOTSTRAP_SERVERS_CONFIG); + Exit.exit(1); + } + + if ("process".equals(command)) { + if (!StreamsConfig.AT_LEAST_ONCE.equals(processingGuarantee) && + !StreamsConfig.EXACTLY_ONCE_V2.equals(processingGuarantee)) { + + System.err.println("processingGuarantee must be either " + StreamsConfig.AT_LEAST_ONCE + " or " + + StreamsConfig.EXACTLY_ONCE_V2); + + Exit.exit(1); + } + } + + System.out.println("StreamsTest instance started (StreamsSmokeTest)"); + System.out.println("command=" + command); + System.out.println("props=" + streamsProperties); + System.out.println("disableAutoTerminate=" + disableAutoTerminate); + + switch (command) { + case "run": + // this starts the driver (data generation and result verification) + final int numKeys = 10; + final int maxRecordsPerKey = 500; + if (disableAutoTerminate) { + generatePerpetually(kafka, numKeys, maxRecordsPerKey); + } else { + // slow down data production to span 30 seconds so that system tests have time to + // do their bounces, etc. + final Map> allData = + generate(kafka, numKeys, maxRecordsPerKey, Duration.ofSeconds(30)); + SmokeTestDriver.verify(kafka, allData, maxRecordsPerKey); + } + break; + case "process": + // this starts the stream processing app + new SmokeTestClient(UUID.randomUUID().toString()).start(streamsProperties); + break; + default: + System.out.println("unknown command: " + command); + } + } + +} diff --git a/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java new file mode 100644 index 0000000000000..462f8358774fe --- /dev/null +++ b/streams/upgrade-system-tests-39/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.tests; + +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; +import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.streams.processor.api.ContextualProcessor; +import org.apache.kafka.streams.processor.api.ProcessorContext; +import org.apache.kafka.streams.processor.api.ProcessorSupplier; +import org.apache.kafka.streams.processor.api.Record; + +import java.util.Properties; + +import static org.apache.kafka.streams.tests.SmokeTestUtil.intSerde; +import static org.apache.kafka.streams.tests.SmokeTestUtil.stringSerde; + + +public class StreamsUpgradeTest { + + @SuppressWarnings("unchecked") + public static void main(final String[] args) throws Exception { + if (args.length < 1) { + System.err.println("StreamsUpgradeTest requires one argument (properties-file) but provided none"); + } + final String propFileName = args[0]; + + final Properties streamsProperties = Utils.loadProps(propFileName); + + System.out.println("StreamsTest instance started (StreamsUpgradeTest v3.7)"); + System.out.println("props=" + streamsProperties); + + final StreamsBuilder builder = new StreamsBuilder(); + final KTable dataTable = builder.table( + "data", Consumed.with(stringSerde, intSerde)); + final KStream dataStream = dataTable.toStream(); + dataStream.process(printProcessorSupplier("data")); + dataStream.to("echo"); + + final boolean runFkJoin = Boolean.parseBoolean(streamsProperties.getProperty( + "test.run_fk_join", + "false")); + if (runFkJoin) { + try { + final KTable fkTable = builder.table( + "fk", Consumed.with(intSerde, stringSerde)); + buildFKTable(dataStream, fkTable); + } catch (final Exception e) { + System.err.println("Caught " + e.getMessage()); + } + } + + final Properties config = new Properties(); + config.setProperty( + StreamsConfig.APPLICATION_ID_CONFIG, + "StreamsUpgradeTest"); + config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000); + config.putAll(streamsProperties); + + final KafkaStreams streams = new KafkaStreams(builder.build(), config); + streams.start(); + + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + streams.close(); + System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); + System.out.flush(); + })); + } + + private static void buildFKTable(final KStream primaryTable, + final KTable otherTable) { + final KStream kStream = primaryTable.toTable() + .join(otherTable, v -> v, (k0, v0) -> v0) + .toStream(); + kStream.process(printProcessorSupplier("fk")); + kStream.to("fk-result", Produced.with(stringSerde, stringSerde)); + } + + private static ProcessorSupplier printProcessorSupplier(final String topic) { + return () -> new ContextualProcessor() { + 
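            // Logging-only processor attached via process(): it counts records per task and prints a
            // line every 100 records so the system-test harness can see that the upgraded application
            // is still making progress on the given topic.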
private int numRecordsProcessed = 0; + + @Override + public void init(final ProcessorContext context) { + System.out.println("[3.9] initializing processor: topic=" + topic + "taskId=" + context.taskId()); + numRecordsProcessed = 0; + } + + @Override + public void process(final Record record) { + numRecordsProcessed++; + if (numRecordsProcessed % 100 == 0) { + System.out.println("processed " + numRecordsProcessed + " records from topic=" + topic); + } + } + + @Override + public void close() {} + }; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java b/test-common/src/main/java/org/apache/kafka/common/test/JaasModule.java similarity index 50% rename from clients/src/main/java/org/apache/kafka/common/ShareGroupState.java rename to test-common/src/main/java/org/apache/kafka/common/test/JaasModule.java index ad73a6bd09519..8d70b36dc23fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/ShareGroupState.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/JaasModule.java @@ -14,43 +14,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.kafka.common.test; -package org.apache.kafka.common; - -import java.util.Arrays; -import java.util.Locale; +import java.util.HashMap; import java.util.Map; -import java.util.function.Function; import java.util.stream.Collectors; -/** - * The share group state. - */ -public enum ShareGroupState { - UNKNOWN("Unknown"), - STABLE("Stable"), - DEAD("Dead"), - EMPTY("Empty"); - - private static final Map NAME_TO_ENUM = Arrays.stream(values()) - .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); +public record JaasModule(String name, boolean debug, Map entries) { - private final String name; + public static JaasModule plainLoginModule(String username, String password, boolean debug, Map validUsers) { + String name = "org.apache.kafka.common.security.plain.PlainLoginModule"; - ShareGroupState(String name) { - this.name = name; - } + Map entries = new HashMap<>(); + entries.put("username", username); + entries.put("password", password); + validUsers.forEach((user, pass) -> entries.put("user_" + user, pass)); - /** - * Case-insensitive share group state lookup by string name. - */ - public static ShareGroupState parse(String name) { - ShareGroupState state = NAME_TO_ENUM.get(name.toUpperCase(Locale.ROOT)); - return state == null ? UNKNOWN : state; + return new JaasModule( + name, + debug, + entries + ); } @Override public String toString() { - return name; + return String.format("%s required%n debug=%b%n %s;%n", name, debug, entries.entrySet().stream() + .map(e -> e.getKey() + "=\"" + e.getValue() + "\"") + .collect(Collectors.joining("\n "))); } } diff --git a/test-common/src/main/java/org/apache/kafka/common/test/JaasUtils.java b/test-common/src/main/java/org/apache/kafka/common/test/JaasUtils.java new file mode 100644 index 0000000000000..77e904784f670 --- /dev/null +++ b/test-common/src/main/java/org/apache/kafka/common/test/JaasUtils.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.security.auth.login.Configuration; + +public class JaasUtils { + public record JaasSection(String contextName, List modules) { + @Override + public String toString() { + return String.format( + "%s {%n %s%n};%n", + contextName, + modules.stream().map(Object::toString).collect(Collectors.joining("\n ")) + ); + } + } + + public static final String KAFKA_SERVER_CONTEXT_NAME = "KafkaServer"; + + public static final String KAFKA_PLAIN_USER1 = "plain-user1"; + public static final String KAFKA_PLAIN_USER1_PASSWORD = "plain-user1-secret"; + public static final String KAFKA_PLAIN_ADMIN = "plain-admin"; + public static final String KAFKA_PLAIN_ADMIN_PASSWORD = "plain-admin-secret"; + + public static File writeJaasContextsToFile(Set jaasSections) throws IOException { + File jaasFile = TestUtils.tempFile(); + try (FileOutputStream fileStream = new FileOutputStream(jaasFile); + OutputStreamWriter writer = new OutputStreamWriter(fileStream, StandardCharsets.UTF_8);) { + writer.write(String.join("", jaasSections.stream().map(Object::toString).toArray(String[]::new))); + } + return jaasFile; + } + + public static void refreshJavaLoginConfigParam(File file) { + System.setProperty(org.apache.kafka.common.security.JaasUtils.JAVA_LOGIN_CONFIG_PARAM, file.getAbsolutePath()); + // This will cause a reload of the Configuration singleton when `getConfiguration` is called + Configuration.setConfiguration(null); + } +} diff --git a/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java b/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java index db857e5bcc879..097f8c3e26d77 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java @@ -27,13 +27,15 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.AdminClientConfig; -import org.apache.kafka.common.Node; +import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.ThreadUtils; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.controller.Controller; +import org.apache.kafka.metadata.authorizer.StandardAuthorizer; import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble; import org.apache.kafka.metadata.storage.Formatter; import org.apache.kafka.network.SocketServerConfigs; @@ -52,7 +54,6 @@ import java.io.File; import java.io.IOException; -import java.net.InetSocketAddress; import java.nio.file.Files; import java.nio.file.Paths; import 
java.util.AbstractMap.SimpleImmutableEntry; @@ -65,7 +66,7 @@ import java.util.Map.Entry; import java.util.Optional; import java.util.Properties; -import java.util.TreeMap; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -82,47 +83,6 @@ public class KafkaClusterTestKit implements AutoCloseable { private static final Logger log = LoggerFactory.getLogger(KafkaClusterTestKit.class); - /** - * This class manages a future which is completed with the proper value for - * controller.quorum.voters once the randomly assigned ports for all the controllers are - * known. - */ - private static class ControllerQuorumVotersFutureManager implements AutoCloseable { - private final int expectedControllers; - private final CompletableFuture> future = new CompletableFuture<>(); - private final Map controllerPorts = new TreeMap<>(); - - ControllerQuorumVotersFutureManager(int expectedControllers) { - this.expectedControllers = expectedControllers; - } - - synchronized void registerPort(int nodeId, int port) { - controllerPorts.put(nodeId, port); - if (controllerPorts.size() >= expectedControllers) { - future.complete( - controllerPorts - .entrySet() - .stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - entry -> new InetSocketAddress("localhost", entry.getValue()) - ) - ) - ); - } - } - - void fail(Throwable e) { - future.completeExceptionally(e); - } - - @Override - public void close() { - future.cancel(true); - } - } - static class SimpleFaultHandlerFactory implements FaultHandlerFactory { private final MockFaultHandler fatalFaultHandler = new MockFaultHandler("fatalFaultHandler"); private final MockFaultHandler nonFatalFaultHandler = new MockFaultHandler("nonFatalFaultHandler"); @@ -155,7 +115,6 @@ public static class Builder { private final String brokerSecurityProtocol; private final String controllerSecurityProtocol; - public Builder(TestKitNodes nodes) { this.nodes = nodes; this.brokerListenerName = nodes.brokerListenerName().value(); @@ -183,6 +142,7 @@ private KafkaConfig createNodeConfig(TestKitNode node) throws IOException { if (controllerNode != null) { props.put(KRaftConfigs.METADATA_LOG_DIR_CONFIG, controllerNode.metadataDirectory()); + setSecurityProtocolProps(props, controllerSecurityProtocol); } else { props.put(KRaftConfigs.METADATA_LOG_DIR_CONFIG, node.metadataDirectory()); @@ -191,6 +151,7 @@ private KafkaConfig createNodeConfig(TestKitNode node) throws IOException { // Set the log.dirs according to the broker node setting (if there is a broker node) props.put(LOG_DIRS_CONFIG, String.join(",", brokerNode.logDataDirectories())); + setSecurityProtocolProps(props, brokerSecurityProtocol); } else { // Set log.dirs equal to the metadata directory if there is just a controller. props.put(LOG_DIRS_CONFIG, @@ -213,13 +174,13 @@ private KafkaConfig createNodeConfig(TestKitNode node) throws IOException { append("@"). append("localhost"). append(":"). 
- append(socketFactoryManager.getOrCreatePortForListener(nodeId, "CONTROLLER")); + append(socketFactoryManager.getOrCreatePortForListener(nodeId, controllerListenerName)); prefix = ","; } props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, quorumVoterStringBuilder.toString()); // reduce log cleaner offset map memory usage - props.put(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "2097152"); + props.putIfAbsent(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, "2097152"); // Add associated broker node property overrides if (brokerNode != null) { @@ -234,27 +195,55 @@ private KafkaConfig createNodeConfig(TestKitNode node) throws IOException { return new KafkaConfig(props, false); } + private void setSecurityProtocolProps(Map props, String securityProtocol) { + if (securityProtocol.equals(SecurityProtocol.SASL_PLAINTEXT.name)) { + props.putIfAbsent(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, "PLAIN"); + props.putIfAbsent(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, "PLAIN"); + props.putIfAbsent(KRaftConfigs.SASL_MECHANISM_CONTROLLER_PROTOCOL_CONFIG, "PLAIN"); + props.putIfAbsent(ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, StandardAuthorizer.class.getName()); + props.putIfAbsent(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "false"); + props.putIfAbsent(StandardAuthorizer.SUPER_USERS_CONFIG, "User:" + JaasUtils.KAFKA_PLAIN_ADMIN); + } + } + public KafkaClusterTestKit build() throws Exception { Map controllers = new HashMap<>(); Map brokers = new HashMap<>(); Map jointServers = new HashMap<>(); - ControllerQuorumVotersFutureManager connectFutureManager = - new ControllerQuorumVotersFutureManager(nodes.controllerNodes().size()); File baseDirectory = null; + File jaasFile = null; + + if (brokerSecurityProtocol.equals(SecurityProtocol.SASL_PLAINTEXT.name)) { + jaasFile = JaasUtils.writeJaasContextsToFile(Set.of( + new JaasUtils.JaasSection(JaasUtils.KAFKA_SERVER_CONTEXT_NAME, + List.of( + JaasModule.plainLoginModule( + JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD, + true, + Map.of( + JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD, + JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD) + ) + ) + ) + )); + JaasUtils.refreshJavaLoginConfigParam(jaasFile); + } try { baseDirectory = new File(nodes.baseDirectory()); for (TestKitNode node : nodes.controllerNodes().values()) { - socketFactoryManager.getOrCreatePortForListener(node.id(), "CONTROLLER"); + socketFactoryManager.getOrCreatePortForListener(node.id(), controllerListenerName); } for (TestKitNode node : nodes.controllerNodes().values()) { setupNodeDirectories(baseDirectory, node.metadataDirectory(), Collections.emptyList()); + KafkaConfig config = createNodeConfig(node); SharedServer sharedServer = new SharedServer( - createNodeConfig(node), + config, node.initialMetaPropertiesEnsemble(), Time.SYSTEM, new Metrics(), - connectFutureManager.future, + CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(config.quorumConfig().voters())), Collections.emptyList(), faultHandlerFactory, socketFactoryManager.getOrCreateSocketFactory(node.id()) @@ -271,24 +260,18 @@ public KafkaClusterTestKit build() throws Exception { throw e; } controllers.put(node.id(), controller); - controller.socketServerFirstBoundPortFuture().whenComplete((port, e) -> { - if (e != null) { - connectFutureManager.fail(e); - } else { - connectFutureManager.registerPort(node.id(), port); - } - }); jointServers.put(node.id(), sharedServer); } for (TestKitNode node : 
nodes.brokerNodes().values()) { SharedServer sharedServer = jointServers.get(node.id()); if (sharedServer == null) { + KafkaConfig config = createNodeConfig(node); sharedServer = new SharedServer( - createNodeConfig(node), + config, node.initialMetaPropertiesEnsemble(), Time.SYSTEM, new Metrics(), - connectFutureManager.future, + CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(config.quorumConfig().voters())), Collections.emptyList(), faultHandlerFactory, socketFactoryManager.getOrCreateSocketFactory(node.id()) @@ -312,7 +295,6 @@ public KafkaClusterTestKit build() throws Exception { for (ControllerServer controller : controllers.values()) { controller.shutdown(); } - connectFutureManager.close(); if (baseDirectory != null) { Utils.delete(baseDirectory); } @@ -323,10 +305,10 @@ public KafkaClusterTestKit build() throws Exception { nodes, controllers, brokers, - connectFutureManager, baseDirectory, faultHandlerFactory, - socketFactoryManager); + socketFactoryManager, + jaasFile == null ? Optional.empty() : Optional.of(jaasFile)); } private String listeners(int node) { @@ -366,19 +348,20 @@ private static void setupNodeDirectories(File baseDirectory, private final TestKitNodes nodes; private final Map controllers; private final Map brokers; - private final ControllerQuorumVotersFutureManager controllerQuorumVotersFutureManager; private final File baseDirectory; private final SimpleFaultHandlerFactory faultHandlerFactory; private final PreboundSocketFactoryManager socketFactoryManager; + private final String controllerListenerName; + private final Optional jaasFile; private KafkaClusterTestKit( TestKitNodes nodes, Map controllers, Map brokers, - ControllerQuorumVotersFutureManager controllerQuorumVotersFutureManager, File baseDirectory, SimpleFaultHandlerFactory faultHandlerFactory, - PreboundSocketFactoryManager socketFactoryManager + PreboundSocketFactoryManager socketFactoryManager, + Optional jaasFile ) { /* Number of threads = Total number of brokers + Total number of controllers + Total number of Raft Managers @@ -390,10 +373,11 @@ private KafkaClusterTestKit( this.nodes = nodes; this.controllers = controllers; this.brokers = brokers; - this.controllerQuorumVotersFutureManager = controllerQuorumVotersFutureManager; this.baseDirectory = baseDirectory; this.faultHandlerFactory = faultHandlerFactory; this.socketFactoryManager = socketFactoryManager; + this.controllerListenerName = nodes.controllerListenerName().value(); + this.jaasFile = jaasFile; } public void format() throws Exception { @@ -445,7 +429,7 @@ private void formatNode( nodes.bootstrapMetadata().featureLevel(KRaftVersion.FEATURE_NAME)); formatter.setUnstableFeatureVersionsEnabled(true); formatter.setIgnoreFormatted(false); - formatter.setControllerListenerName("CONTROLLER"); + formatter.setControllerListenerName(controllerListenerName); if (writeMetadataDirectory) { formatter.setMetadataLogDirectory(ensemble.metadataLogDir().get()); } else { @@ -456,7 +440,7 @@ private void formatNode( String prefix = ""; for (TestKitNode controllerNode : nodes.controllerNodes().values()) { int port = socketFactoryManager. 
- getOrCreatePortForListener(controllerNode.id(), "CONTROLLER"); + getOrCreatePortForListener(controllerNode.id(), controllerListenerName); dynamicVotersBuilder.append(prefix); prefix = ","; dynamicVotersBuilder.append(String.format("%d@localhost:%d:%s", @@ -509,20 +493,6 @@ public void waitForReadyBrokers() throws ExecutionException, InterruptedExceptio "Failed to wait for publisher to publish the metadata update to each broker."); } - public String quorumVotersConfig() throws ExecutionException, InterruptedException { - Collection controllerNodes = QuorumConfig.voterConnectionsToNodes( - controllerQuorumVotersFutureManager.future.get() - ); - StringBuilder bld = new StringBuilder(); - String prefix = ""; - for (Node node : controllerNodes) { - bld.append(prefix).append(node.id()).append('@'); - bld.append(node.host()).append(":").append(node.port()); - prefix = ","; - } - return bld.toString(); - } - public class ClientPropertiesBuilder { private Properties properties; private boolean usingBootstrapControllers = false; @@ -608,15 +578,14 @@ public Map controllers() { public Controller waitForActiveController() throws InterruptedException { AtomicReference active = new AtomicReference<>(null); - TestUtils.retryOnExceptionWithTimeout(() -> { + TestUtils.waitForCondition(() -> { for (ControllerServer controllerServer : controllers.values()) { if (controllerServer.controller().isActive()) { active.set(controllerServer.controller()); } } - if (active.get() == null) - throw new RuntimeException("Controller not active"); - }); + return active.get() != null; + }, 60_000, "Controller not active"); return active.get(); } @@ -653,11 +622,8 @@ public MockFaultHandler nonFatalFaultHandler() { public void close() throws Exception { List>> futureEntries = new ArrayList<>(); try { - controllerQuorumVotersFutureManager.close(); - // Note the shutdown order here is chosen to be consistent with // `KafkaRaftServer`. See comments in that class for an explanation. - for (Entry entry : brokers.entrySet()) { int brokerId = entry.getKey(); BrokerServer broker = entry.getValue(); @@ -675,6 +641,9 @@ public void close() throws Exception { waitForAllFutures(futureEntries); futureEntries.clear(); Utils.delete(baseDirectory); + if (jaasFile.isPresent()) { + Utils.delete(jaasFile.get()); + } } catch (Exception e) { for (Entry> entry : futureEntries) { entry.getValue().cancel(true); diff --git a/test-common/src/main/java/org/apache/kafka/common/test/PreboundSocketFactoryManager.java b/test-common/src/main/java/org/apache/kafka/common/test/PreboundSocketFactoryManager.java index 5358b211c7750..fa082001d648e 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/PreboundSocketFactoryManager.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/PreboundSocketFactoryManager.java @@ -49,7 +49,22 @@ public ServerSocketChannel openServerSocket( ServerSocketChannel socketChannel = getSocketForListenerAndMarkAsUsed( nodeId, listenerName); + if (socketChannel != null) { + if (socketChannel.isOpen()) { + return socketChannel; + } + // When restarting components(e.g. controllers, brokers) in tests, we want to reuse the same + // port that was previously allocated to maintain consistent addressing + // so the client can reconnect to the same port. + // Since those components would close the socket when they are restarted, + // we need to rebind the socket to the same port. 
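            // ServerSocket.getLocalPort() keeps returning the port a socket was bound to even after the
            // socket has been closed, which is what lets the rebind below reuse the previously allocated
            // port for the restarted component.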
+ socketAddress = new InetSocketAddress(socketAddress.getHostString(), socketChannel.socket().getLocalPort()); + socketChannel = ServerSocketFactory.INSTANCE.openServerSocket( + listenerName, + socketAddress, + listenBacklogSize, + recvBufferSize); return socketChannel; } return ServerSocketFactory.INSTANCE.openServerSocket( diff --git a/test-common/src/main/java/org/apache/kafka/common/test/TestKitNodes.java b/test-common/src/main/java/org/apache/kafka/common/test/TestKitNodes.java index 42a621c9bbb9a..b6d6f9f69fc6c 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/TestKitNodes.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/TestKitNodes.java @@ -30,7 +30,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -48,6 +47,8 @@ public class TestKitNodes { public static final int BROKER_ID_OFFSET = 0; public static final SecurityProtocol DEFAULT_BROKER_SECURITY_PROTOCOL = SecurityProtocol.PLAINTEXT; public static final String DEFAULT_BROKER_LISTENER_NAME = "EXTERNAL"; + public static final SecurityProtocol DEFAULT_CONTROLLER_SECURITY_PROTOCOL = SecurityProtocol.PLAINTEXT; + public static final String DEFAULT_CONTROLLER_LISTENER_NAME = "CONTROLLER"; public static class Builder { private boolean combined; @@ -66,10 +67,12 @@ public Builder() { public Builder(BootstrapMetadata bootstrapMetadata) { this.bootstrapMetadata = bootstrapMetadata; } - // The brokerListenerName and brokerSecurityProtocol configurations must + // The broker and controller listener name and SecurityProtocol configurations must // be kept in sync with the default values in ClusterTest. private ListenerName brokerListenerName = ListenerName.normalised(DEFAULT_BROKER_LISTENER_NAME); private SecurityProtocol brokerSecurityProtocol = DEFAULT_BROKER_SECURITY_PROTOCOL; + private ListenerName controllerListenerName = ListenerName.normalised(DEFAULT_CONTROLLER_LISTENER_NAME); + private SecurityProtocol controllerSecurityProtocol = DEFAULT_CONTROLLER_SECURITY_PROTOCOL; public Builder setClusterId(String clusterId) { this.clusterId = clusterId; @@ -115,7 +118,7 @@ public Builder setNumDisksPerBroker(int numDisksPerBroker) { public Builder setPerServerProperties(Map> perServerProperties) { this.perServerProperties = Collections.unmodifiableMap( perServerProperties.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> Collections.unmodifiableMap(new HashMap<>(e.getValue()))))); + .collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue())))); return this; } @@ -134,6 +137,16 @@ public Builder setBrokerSecurityProtocol(SecurityProtocol securityProtocol) { return this; } + public Builder setControllerListenerName(ListenerName listenerName) { + this.controllerListenerName = listenerName; + return this; + } + + public Builder setControllerSecurityProtocol(SecurityProtocol securityProtocol) { + this.controllerSecurityProtocol = securityProtocol; + return this; + } + public TestKitNodes build() { if (numControllerNodes < 0) { throw new IllegalArgumentException("Invalid negative value for numControllerNodes"); @@ -145,8 +158,9 @@ public TestKitNodes build() { throw new IllegalArgumentException("Invalid value for numDisksPerBroker"); } // TODO: remove this assertion after https://issues.apache.org/jira/browse/KAFKA-16680 is finished - if (brokerSecurityProtocol != SecurityProtocol.PLAINTEXT) { - throw new IllegalArgumentException("Currently 
only support PLAINTEXT security protocol"); + if ((brokerSecurityProtocol != SecurityProtocol.PLAINTEXT && brokerSecurityProtocol != SecurityProtocol.SASL_PLAINTEXT) || + (controllerSecurityProtocol != SecurityProtocol.PLAINTEXT && controllerSecurityProtocol != SecurityProtocol.SASL_PLAINTEXT)) { + throw new IllegalArgumentException("Currently only support PLAINTEXT / SASL_PLAINTEXT security protocol"); } if (baseDirectory == null) { this.baseDirectory = TestUtils.tempDirectory().toPath(); @@ -203,7 +217,7 @@ public TestKitNodes build() { } return new TestKitNodes(baseDirectory.toFile().getAbsolutePath(), clusterId, bootstrapMetadata, controllerNodes, brokerNodes, - brokerListenerName, brokerSecurityProtocol, new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT); + brokerListenerName, brokerSecurityProtocol, controllerListenerName, controllerSecurityProtocol); } } @@ -364,4 +378,4 @@ public Map propertyOverrides() { } }; } -} \ No newline at end of file +} diff --git a/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java b/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java index b818a083c8423..d5f98be24b740 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java @@ -32,6 +32,7 @@ import java.nio.file.Files; import java.util.Collections; import java.util.Optional; +import java.util.Random; import java.util.function.BiFunction; import java.util.function.Supplier; @@ -43,9 +44,15 @@ public class TestUtils { private static final Logger log = LoggerFactory.getLogger(TestUtils.class); + /* A consistent random number generator to make tests repeatable */ + public static final Random SEEDED_RANDOM = new Random(192348092834L); + + public static final String LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + public static final String DIGITS = "0123456789"; + public static final String LETTERS_AND_DIGITS = LETTERS + DIGITS; + private static final long DEFAULT_POLL_INTERVAL_MS = 100; private static final long DEFAULT_MAX_WAIT_MS = 15_000; - private static final long DEFAULT_TIMEOUT_MS = 60_000; /** * Create an empty file in the default temporary-file directory, using `kafka` as the prefix and `tmp` as the @@ -57,6 +64,19 @@ public static File tempFile() throws IOException { return file; } + /** + * Generate a random string of letters and digits of the given length + * + * @param len The length of the string + * @return The random string + */ + public static String randomString(final int len) { + final StringBuilder b = new StringBuilder(); + for (int i = 0; i < len; i++) + b.append(LETTERS_AND_DIGITS.charAt(SEEDED_RANDOM.nextInt(LETTERS_AND_DIGITS.length()))); + return b.toString(); + } + /** * Create a temporary relative directory in the specified parent directory with the given prefix. * @@ -96,40 +116,26 @@ public static void waitForCondition(final Supplier testCondition, final */ public static void waitForCondition(final Supplier testCondition, final long maxWaitMs, - String conditionDetails - ) throws InterruptedException { - retryOnExceptionWithTimeout(() -> { - String conditionDetail = conditionDetails == null ? "" : conditionDetails; - if (!testCondition.get()) - throw new TimeoutException("Condition not met within timeout " + maxWaitMs + ". " + conditionDetail); - }); - } - - /** - * Wait for the given runnable to complete successfully, i.e. 
throw now {@link Exception}s or - * {@link AssertionError}s, or for the given timeout to expire. If the timeout expires then the - * last exception or assertion failure will be thrown thus providing context for the failure. - * - * @param runnable the code to attempt to execute successfully. - * @throws InterruptedException if the current thread is interrupted while waiting for {@code runnable} to complete successfully. - */ - static void retryOnExceptionWithTimeout(final Runnable runnable) throws InterruptedException { - final long expectedEnd = System.currentTimeMillis() + DEFAULT_TIMEOUT_MS; + String conditionDetails) throws InterruptedException { + final long expectedEnd = System.currentTimeMillis() + maxWaitMs; while (true) { try { - runnable.run(); - return; + if (testCondition.get()) { + return; + } + String conditionDetail = conditionDetails == null ? "" : conditionDetails; + throw new TimeoutException("Condition not met: " + conditionDetail); } catch (final AssertionError t) { if (expectedEnd <= System.currentTimeMillis()) { throw t; } } catch (final Exception e) { if (expectedEnd <= System.currentTimeMillis()) { - throw new AssertionError(format("Assertion failed with an exception after %s ms", DEFAULT_TIMEOUT_MS), e); + throw new AssertionError(format("Assertion failed with an exception after %s ms", maxWaitMs), e); } } - Thread.sleep(DEFAULT_POLL_INTERVAL_MS); + Thread.sleep(Math.min(DEFAULT_POLL_INTERVAL_MS, maxWaitMs)); } } diff --git a/test-common/src/main/resources/log4j2.yaml b/test-common/src/main/resources/log4j2.yaml new file mode 100644 index 0000000000000..be546a18b55e6 --- /dev/null +++ b/test-common/src/main/resources/log4j2.yaml @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
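# This Log4j2 YAML configuration is the functional equivalent of the removed test-common
# log4j.properties file (its deletion appears later in this patch): a single console (STDOUT)
# appender using the "logPattern" layout, with the root logger and org.apache.kafka at INFO.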
+ +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: INFO diff --git a/test-common/src/test/java/org/apache/kafka/common/test/KafkaClusterTestKitTest.java b/test-common/src/test/java/org/apache/kafka/common/test/KafkaClusterTestKitTest.java index 1ee8bc2628f3b..683de7e930f32 100644 --- a/test-common/src/test/java/org/apache/kafka/common/test/KafkaClusterTestKitTest.java +++ b/test-common/src/test/java/org/apache/kafka/common/test/KafkaClusterTestKitTest.java @@ -32,8 +32,10 @@ import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class KafkaClusterTestKitTest { @ParameterizedTest @@ -138,4 +140,17 @@ public void testCreateClusterWithSpecificBaseDir() throws Exception { assertTrue(Paths.get(broker.metadataDirectory()).startsWith(baseDirectory))); } } + @Test + public void testExposedFaultHandlers() { + TestKitNodes nodes = new TestKitNodes.Builder() + .setNumBrokerNodes(1) + .setNumControllerNodes(1) + .build(); + try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder(nodes).build()) { + assertNotNull(cluster.fatalFaultHandler(), "Fatal fault handler should not be null"); + assertNotNull(cluster.nonFatalFaultHandler(), "Non-fatal fault handler should not be null"); + } catch (Exception e) { + fail("Failed to initialize cluster", e); + } + } } diff --git a/test-common/src/test/java/org/apache/kafka/common/test/TestKitNodeTest.java b/test-common/src/test/java/org/apache/kafka/common/test/TestKitNodeTest.java index 1ba7e58abe87e..b0bb8afa22cb3 100644 --- a/test-common/src/test/java/org/apache/kafka/common/test/TestKitNodeTest.java +++ b/test-common/src/test/java/org/apache/kafka/common/test/TestKitNodeTest.java @@ -32,22 +32,29 @@ public class TestKitNodeTest { @ParameterizedTest @EnumSource(SecurityProtocol.class) public void testSecurityProtocol(SecurityProtocol securityProtocol) { - if (securityProtocol != SecurityProtocol.PLAINTEXT) { - assertEquals("Currently only support PLAINTEXT security protocol", + if (securityProtocol != SecurityProtocol.PLAINTEXT && securityProtocol != SecurityProtocol.SASL_PLAINTEXT) { + assertEquals("Currently only support PLAINTEXT / SASL_PLAINTEXT security protocol", assertThrows(IllegalArgumentException.class, () -> new TestKitNodes.Builder().setBrokerSecurityProtocol(securityProtocol).build()).getMessage()); + assertEquals("Currently only support PLAINTEXT / SASL_PLAINTEXT security protocol", + assertThrows(IllegalArgumentException.class, + () -> new TestKitNodes.Builder().setControllerSecurityProtocol(securityProtocol).build()).getMessage()); } } @Test public void testListenerName() { - ListenerName listenerName = ListenerName.normalised("FOOBAR"); + ListenerName brokerListenerName = ListenerName.normalised("FOOBAR"); + ListenerName controllerListenerName = ListenerName.normalised("BAZQUX"); TestKitNodes testKitNodes = new TestKitNodes.Builder() .setNumBrokerNodes(1) .setNumControllerNodes(1) - .setBrokerListenerName(listenerName) + .setBrokerListenerName(brokerListenerName) 
.setBrokerSecurityProtocol(SecurityProtocol.PLAINTEXT) + .setControllerListenerName(controllerListenerName) + .setControllerSecurityProtocol(SecurityProtocol.PLAINTEXT) .build(); - assertEquals(listenerName, testKitNodes.brokerListenerName()); + assertEquals(brokerListenerName, testKitNodes.brokerListenerName()); + assertEquals(controllerListenerName, testKitNodes.controllerListenerName()); } } diff --git a/test-common/src/test/resources/log4j.properties b/test-common/src/test/resources/log4j.properties deleted file mode 100644 index 91c909b99adaf..0000000000000 --- a/test-common/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka=INFO \ No newline at end of file diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterConfig.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterConfig.java index 4d5e07dfa05ec..6708297c85b9f 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterConfig.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterConfig.java @@ -19,14 +19,11 @@ import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.MetadataVersion; import java.io.File; -import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -38,6 +35,8 @@ import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_BROKER_LISTENER_NAME; import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_BROKER_SECURITY_PROTOCOL; +import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_CONTROLLER_LISTENER_NAME; +import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_CONTROLLER_SECURITY_PROTOCOL; /** * Represents an immutable requested configuration of a Kafka cluster for integration testing. 
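 *
 * <p>For example (an illustrative sketch using only builder methods shown in this patch), a test
 * that wants a SASL_PLAINTEXT controller listener can request it via the new controller options:
 * <pre>
 * ClusterConfig config = ClusterConfig.defaultBuilder()
 *     .setControllerListenerName(ListenerName.normalised("CONTROLLER"))
 *     .setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
 *     .build();
 * </pre>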
@@ -51,6 +50,8 @@ public class ClusterConfig { private final boolean autoStart; private final SecurityProtocol brokerSecurityProtocol; private final ListenerName brokerListenerName; + private final SecurityProtocol controllerSecurityProtocol; + private final ListenerName controllerListenerName; private final File trustStoreFile; private final MetadataVersion metadataVersion; @@ -62,15 +63,16 @@ public class ClusterConfig { private final Map saslClientProperties; private final List tags; private final Map> perServerProperties; - private final Map features; + private final Map features; @SuppressWarnings("checkstyle:ParameterNumber") private ClusterConfig(Set types, int brokers, int controllers, int disksPerBroker, boolean autoStart, - SecurityProtocol brokerSecurityProtocol, ListenerName brokerListenerName, File trustStoreFile, + SecurityProtocol brokerSecurityProtocol, ListenerName brokerListenerName, + SecurityProtocol controllerSecurityProtocol, ListenerName controllerListenerName, File trustStoreFile, MetadataVersion metadataVersion, Map serverProperties, Map producerProperties, Map consumerProperties, Map adminClientProperties, Map saslServerProperties, Map saslClientProperties, Map> perServerProperties, List tags, - Map features) { + Map features) { // do fail fast. the following values are invalid for kraft modes. if (brokers < 0) throw new IllegalArgumentException("Number of brokers must be greater or equal to zero."); if (controllers < 0) throw new IllegalArgumentException("Number of controller must be greater or equal to zero."); @@ -83,6 +85,8 @@ private ClusterConfig(Set types, int brokers, int controllers, int disksPe this.autoStart = autoStart; this.brokerSecurityProtocol = Objects.requireNonNull(brokerSecurityProtocol); this.brokerListenerName = Objects.requireNonNull(brokerListenerName); + this.controllerSecurityProtocol = Objects.requireNonNull(controllerSecurityProtocol); + this.controllerListenerName = Objects.requireNonNull(controllerListenerName); this.trustStoreFile = trustStoreFile; this.metadataVersion = Objects.requireNonNull(metadataVersion); this.serverProperties = Objects.requireNonNull(serverProperties); @@ -144,6 +148,14 @@ public SecurityProtocol brokerSecurityProtocol() { return brokerSecurityProtocol; } + public ListenerName controllerListenerName() { + return controllerListenerName; + } + + public SecurityProtocol controllerSecurityProtocol() { + return controllerSecurityProtocol; + } + public ListenerName brokerListenerName() { return brokerListenerName; } @@ -164,7 +176,7 @@ public List tags() { return tags; } - public Map features() { + public Map features() { return features; } @@ -173,6 +185,8 @@ public Set displayTags() { displayTags.add("MetadataVersion=" + metadataVersion); displayTags.add("BrokerSecurityProtocol=" + brokerSecurityProtocol.name()); displayTags.add("BrokerListenerName=" + brokerListenerName); + displayTags.add("ControllerSecurityProtocol=" + controllerSecurityProtocol.name()); + displayTags.add("ControllerListenerName=" + controllerListenerName); return displayTags; } @@ -185,6 +199,8 @@ public static Builder defaultBuilder() { .setAutoStart(true) .setBrokerSecurityProtocol(DEFAULT_BROKER_SECURITY_PROTOCOL) .setBrokerListenerName(ListenerName.normalised(DEFAULT_BROKER_LISTENER_NAME)) + .setControllerSecurityProtocol(DEFAULT_CONTROLLER_SECURITY_PROTOCOL) + .setControllerListenerName(ListenerName.normalised(DEFAULT_CONTROLLER_LISTENER_NAME)) .setMetadataVersion(MetadataVersion.latestTesting()); } @@ -201,6 +217,8 @@ public static 
Builder builder(ClusterConfig clusterConfig) { .setAutoStart(clusterConfig.autoStart) .setBrokerSecurityProtocol(clusterConfig.brokerSecurityProtocol) .setBrokerListenerName(clusterConfig.brokerListenerName) + .setControllerSecurityProtocol(clusterConfig.controllerSecurityProtocol) + .setControllerListenerName(clusterConfig.controllerListenerName) .setTrustStoreFile(clusterConfig.trustStoreFile) .setMetadataVersion(clusterConfig.metadataVersion) .setServerProperties(clusterConfig.serverProperties) @@ -222,6 +240,8 @@ public static class Builder { private boolean autoStart; private SecurityProtocol brokerSecurityProtocol; private ListenerName brokerListenerName; + private SecurityProtocol controllerSecurityProtocol; + private ListenerName controllerListenerName; private File trustStoreFile; private MetadataVersion metadataVersion; private Map serverProperties = Collections.emptyMap(); @@ -232,12 +252,12 @@ public static class Builder { private Map saslClientProperties = Collections.emptyMap(); private Map> perServerProperties = Collections.emptyMap(); private List tags = Collections.emptyList(); - private Map features = Collections.emptyMap(); + private Map features = Collections.emptyMap(); private Builder() {} public Builder setTypes(Set types) { - this.types = Collections.unmodifiableSet(new HashSet<>(types)); + this.types = Set.copyOf(types); return this; } @@ -271,6 +291,16 @@ public Builder setBrokerListenerName(ListenerName listenerName) { return this; } + public Builder setControllerSecurityProtocol(SecurityProtocol securityProtocol) { + this.controllerSecurityProtocol = securityProtocol; + return this; + } + + public Builder setControllerListenerName(ListenerName listenerName) { + this.controllerListenerName = listenerName; + return this; + } + public Builder setTrustStoreFile(File trustStoreFile) { this.trustStoreFile = trustStoreFile; return this; @@ -282,54 +312,55 @@ public Builder setMetadataVersion(MetadataVersion metadataVersion) { } public Builder setServerProperties(Map serverProperties) { - this.serverProperties = Collections.unmodifiableMap(new HashMap<>(serverProperties)); + this.serverProperties = Map.copyOf(serverProperties); return this; } public Builder setConsumerProperties(Map consumerProperties) { - this.consumerProperties = Collections.unmodifiableMap(new HashMap<>(consumerProperties)); + this.consumerProperties = Map.copyOf(consumerProperties); return this; } public Builder setProducerProperties(Map producerProperties) { - this.producerProperties = Collections.unmodifiableMap(new HashMap<>(producerProperties)); + this.producerProperties = Map.copyOf(producerProperties); return this; } public Builder setAdminClientProperties(Map adminClientProperties) { - this.adminClientProperties = Collections.unmodifiableMap(new HashMap<>(adminClientProperties)); + this.adminClientProperties = Map.copyOf(adminClientProperties); return this; } public Builder setSaslServerProperties(Map saslServerProperties) { - this.saslServerProperties = Collections.unmodifiableMap(new HashMap<>(saslServerProperties)); + this.saslServerProperties = Map.copyOf(saslServerProperties); return this; } public Builder setSaslClientProperties(Map saslClientProperties) { - this.saslClientProperties = Collections.unmodifiableMap(new HashMap<>(saslClientProperties)); + this.saslClientProperties = Map.copyOf(saslClientProperties); return this; } public Builder setPerServerProperties(Map> perServerProperties) { this.perServerProperties = Collections.unmodifiableMap( perServerProperties.entrySet().stream() 
- .collect(Collectors.toMap(Map.Entry::getKey, e -> Collections.unmodifiableMap(new HashMap<>(e.getValue()))))); + .collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue())))); return this; } public Builder setTags(List tags) { - this.tags = Collections.unmodifiableList(new ArrayList<>(tags)); + this.tags = List.copyOf(tags); return this; } - public Builder setFeatures(Map features) { + public Builder setFeatures(Map features) { this.features = Collections.unmodifiableMap(features); return this; } public ClusterConfig build() { - return new ClusterConfig(types, brokers, controllers, disksPerBroker, autoStart, brokerSecurityProtocol, brokerListenerName, + return new ClusterConfig(types, brokers, controllers, disksPerBroker, autoStart, + brokerSecurityProtocol, brokerListenerName, controllerSecurityProtocol, controllerListenerName, trustStoreFile, metadataVersion, serverProperties, producerProperties, consumerProperties, adminClientProperties, saslServerProperties, saslClientProperties, perServerProperties, tags, features); diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterFeature.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterFeature.java index ba0eee508c420..ab1893ab05b4d 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterFeature.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterFeature.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.test.api; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; @@ -28,6 +28,6 @@ @Target({ElementType.ANNOTATION_TYPE}) @Retention(RetentionPolicy.RUNTIME) public @interface ClusterFeature { - Features feature(); + Feature feature(); short version(); } diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstance.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstance.java index 020f3ed7753ad..1c8551bf9e6de 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstance.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstance.java @@ -23,15 +23,30 @@ import kafka.server.ControllerServer; import kafka.server.KafkaBroker; +import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.GroupProtocol; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.acl.AccessControlEntry; import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; +import 
org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.test.JaasUtils; import org.apache.kafka.common.test.TestUtils; import org.apache.kafka.server.authorizer.Authorizer; +import org.apache.kafka.server.fault.FaultHandlerException; import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile; import java.io.File; @@ -40,13 +55,14 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Properties; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -60,10 +76,6 @@ public interface ClusterInstance { Type type(); - default boolean isKRaftTest() { - return type() == Type.KRAFT || type() == Type.CO_KRAFT; - } - Map brokers(); default Map aliveBrokers() { @@ -148,29 +160,89 @@ default SocketServer anyControllerSocketServer() { String clusterId(); - /** - * The underlying object which is responsible for setting up and tearing down the cluster. - */ - Object getUnderlying(); + //---------------------------[producer/consumer/admin]---------------------------// + + default Producer producer(Map configs) { + Map props = new HashMap<>(configs); + props.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); + props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); + props.putIfAbsent(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); + return new KafkaProducer<>(setClientSaslConfig(props)); + } + + default Producer producer() { + return producer(Map.of()); + } + + default Consumer consumer(Map configs) { + Map props = new HashMap<>(configs); + props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + props.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "group_" + TestUtils.randomString(5)); + props.putIfAbsent(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); + return new KafkaConsumer<>(setClientSaslConfig(props)); + } + + default Consumer consumer() { + return consumer(Map.of()); + } + + default Admin admin(Map configs, boolean usingBootstrapControllers) { + Map props = new HashMap<>(configs); + if (usingBootstrapControllers) { + props.putIfAbsent(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, bootstrapControllers()); + props.remove(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); + } else { + props.putIfAbsent(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()); + props.remove(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG); + } + return Admin.create(setClientSaslConfig(props)); + } - default T getUnderlying(Class asClass) { - return asClass.cast(getUnderlying()); + default Map setClientSaslConfig(Map configs) { + Map props = new HashMap<>(configs); + if (config().brokerSecurityProtocol() == SecurityProtocol.SASL_PLAINTEXT) { + props.putIfAbsent(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); + props.putIfAbsent(SaslConfigs.SASL_MECHANISM, "PLAIN"); + props.putIfAbsent( + SaslConfigs.SASL_JAAS_CONFIG, + String.format( + 
"org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", + JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD + ) + ); + } + return props; } - Admin createAdminClient(Properties configOverrides); + default Admin admin(Map configs) { + return admin(configs, false); + } - default Admin createAdminClient() { - return createAdminClient(new Properties()); + default Admin admin() { + return admin(Map.of(), false); } default Set supportedGroupProtocols() { - if (isKRaftTest() && brokers().values().stream().allMatch(b -> b.dataPlaneRequestProcessor().isConsumerGroupProtocolEnabled())) { + if (brokers().values().stream().allMatch(b -> b.dataPlaneRequestProcessor().isConsumerGroupProtocolEnabled())) { return Set.of(CLASSIC, CONSUMER); } else { return Collections.singleton(CLASSIC); } } + /** + * Returns the first recorded fatal exception, if any. + * + */ + Optional firstFatalException(); + + /** + * Return the first recorded non-fatal exception, if any. + */ + Optional firstNonFatalException(); + //---------------------------[modify]---------------------------// void start(); @@ -188,7 +260,7 @@ default void waitTopicDeletion(String topic) throws InterruptedException { } default void createTopic(String topicName, int partitions, short replicas) throws InterruptedException { - try (Admin admin = createAdminClient()) { + try (Admin admin = admin()) { admin.createTopics(Collections.singletonList(new NewTopic(topicName, partitions, replicas))); waitForTopic(topicName, partitions); } @@ -284,4 +356,20 @@ default void waitAcls(AclBindingFilter filter, Collection en }, "expected acls: " + entries + ", actual acls: " + actualEntries.get()); } } + + /** + * Returns the broker id of leader partition. + */ + default int getLeaderBrokerId(TopicPartition topicPartition) throws ExecutionException, InterruptedException { + try (var admin = admin()) { + String topic = topicPartition.topic(); + TopicDescription description = admin.describeTopics(List.of(topic)).topicNameValues().get(topic).get(); + + return description.partitions().stream() + .filter(tp -> tp.partition() == topicPartition.partition()) + .mapToInt(tp -> tp.leader().id()) + .findFirst() + .orElseThrow(() -> new RuntimeException("Leader not found for tp " + topicPartition)); + } + } } diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstanceParameterResolver.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstanceParameterResolver.java index 6b3c2339d2e1e..5582eb379ba1e 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstanceParameterResolver.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterInstanceParameterResolver.java @@ -50,7 +50,7 @@ public boolean supportsParameter(ParameterContext parameterContext, ExtensionCon return false; } - if (!extensionContext.getTestMethod().isPresent()) { + if (extensionContext.getTestMethod().isEmpty()) { // Allow this to be injected into the class extensionContext.getRequiredTestClass(); return true; diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTest.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTest.java index 574ae85abb394..86aba1030d878 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTest.java +++ 
b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTest.java @@ -31,6 +31,7 @@ import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.RetentionPolicy.RUNTIME; import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_BROKER_LISTENER_NAME; +import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_CONTROLLER_LISTENER_NAME; @Documented @Target({METHOD}) @@ -44,11 +45,13 @@ int controllers() default 0; int disksPerBroker() default 0; AutoStart autoStart() default AutoStart.DEFAULT; - // The brokerListenerName and brokerSecurityProtocol configurations must + // The broker/controller listener name and SecurityProtocol configurations must // be kept in sync with the default values in TestKitNodes, as many tests // directly use TestKitNodes without relying on the ClusterTest annotation. SecurityProtocol brokerSecurityProtocol() default SecurityProtocol.PLAINTEXT; String brokerListener() default DEFAULT_BROKER_LISTENER_NAME; + SecurityProtocol controllerSecurityProtocol() default SecurityProtocol.PLAINTEXT; + String controllerListener() default DEFAULT_CONTROLLER_LISTENER_NAME; MetadataVersion metadataVersion() default MetadataVersion.IBP_4_0_IV3; ClusterConfigProperty[] serverProperties() default {}; // users can add tags that they want to display in test diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTestExtensions.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTestExtensions.java index 827fb0cf67c5b..127f8a4b7bedf 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTestExtensions.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/ClusterTestExtensions.java @@ -17,7 +17,7 @@ package org.apache.kafka.common.test.api; import org.apache.kafka.common.network.ListenerName; -import org.apache.kafka.server.common.Features; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.util.timer.SystemTimer; import org.junit.jupiter.api.extension.AfterEachCallback; @@ -32,7 +32,6 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -102,10 +101,9 @@ public class ClusterTestExtensions implements TestTemplateInvocationContextProvi private static final String PROCESS_REAPER_THREAD_PREFIX = "process reaper"; private static final String RMI_THREAD_PREFIX = "RMI"; private static final String DETECT_THREAD_LEAK_KEY = "detectThreadLeak"; - private static final Set SKIPPED_THREAD_PREFIX = Collections.unmodifiableSet(Stream.of( - METRICS_METER_TICK_THREAD_PREFIX, SCALA_THREAD_PREFIX, FORK_JOIN_POOL_THREAD_PREFIX, JUNIT_THREAD_PREFIX, - ATTACH_LISTENER_THREAD_PREFIX, PROCESS_REAPER_THREAD_PREFIX, RMI_THREAD_PREFIX, SystemTimer.SYSTEM_TIMER_THREAD_PREFIX) - .collect(Collectors.toSet())); + private static final Set SKIPPED_THREAD_PREFIX = Set.of(METRICS_METER_TICK_THREAD_PREFIX, SCALA_THREAD_PREFIX, + FORK_JOIN_POOL_THREAD_PREFIX, JUNIT_THREAD_PREFIX, ATTACH_LISTENER_THREAD_PREFIX, PROCESS_REAPER_THREAD_PREFIX, + RMI_THREAD_PREFIX, SystemTimer.SYSTEM_TIMER_THREAD_PREFIX); @Override public boolean supportsTestTemplate(ExtensionContext context) { @@ -240,7 +238,7 @@ private List processClusterTestInternal( .collect(Collectors.groupingBy(ClusterConfigProperty::id, Collectors.mapping(Function.identity(), 
Collectors.toMap(ClusterConfigProperty::key, ClusterConfigProperty::value, (a, b) -> b)))); - Map features = Arrays.stream(clusterTest.features()) + Map features = Arrays.stream(clusterTest.features()) .collect(Collectors.toMap(ClusterFeature::feature, ClusterFeature::version)); ClusterConfig config = ClusterConfig.builder() @@ -250,9 +248,11 @@ private List processClusterTestInternal( .setDisksPerBroker(clusterTest.disksPerBroker() == 0 ? defaults.disksPerBroker() : clusterTest.disksPerBroker()) .setAutoStart(clusterTest.autoStart() == AutoStart.DEFAULT ? defaults.autoStart() : clusterTest.autoStart() == AutoStart.YES) .setBrokerListenerName(ListenerName.normalised(clusterTest.brokerListener())) + .setBrokerSecurityProtocol(clusterTest.brokerSecurityProtocol()) + .setControllerListenerName(ListenerName.normalised(clusterTest.controllerListener())) + .setControllerSecurityProtocol(clusterTest.controllerSecurityProtocol()) .setServerProperties(serverProperties) .setPerServerProperties(perServerProperties) - .setBrokerSecurityProtocol(clusterTest.brokerSecurityProtocol()) .setMetadataVersion(clusterTest.metadataVersion()) .setTags(Arrays.asList(clusterTest.tags())) .setFeatures(features) diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/README.md b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/README.md index 0821d3f1f4b22..7a3ea14dc6663 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/README.md +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/README.md @@ -14,9 +14,12 @@ This annotation has fields for a set of cluster types and number of brokers, as Arbitrary server properties can also be provided in the annotation: ```java -@ClusterTest(types = {Type.KRAFT}, securityProtocol = "PLAINTEXT", properties = { - @ClusterProperty(key = "inter.broker.protocol.version", value = "2.7-IV2"), - @ClusterProperty(key = "socket.send.buffer.bytes", value = "10240"), +@ClusterTest( + types = {Type.KRAFT}, + brokerSecurityProtocol = SecurityProtocol.PLAINTEXT, + properties = { + @ClusterProperty(key = "inter.broker.protocol.version", value = "2.7-IV2"), + @ClusterProperty(key = "socket.send.buffer.bytes", value = "10240"), }) void testSomething() { ... } ``` @@ -25,8 +28,8 @@ Multiple `@ClusterTest` annotations can be given to generate more than one test ```scala @ClusterTests(Array( - @ClusterTest(securityProtocol = "PLAINTEXT"), - @ClusterTest(securityProtocol = "SASL_PLAINTEXT") + new ClusterTest(brokerSecurityProtocol = SecurityProtocol.PLAINTEXT), + new ClusterTest(brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT) )) def testSomething(): Unit = { ... } ``` @@ -45,18 +48,18 @@ produce any number of test configurations using a fluent builder style API. import java.util.Arrays; @ClusterTemplate("generateConfigs") -void testSomething() { ...} +void testSomething() { ... 
} static List generateConfigs() { ClusterConfig config1 = ClusterConfig.defaultClusterBuilder() .name("Generated Test 1") .serverProperties(props1) - .ibp("2.7-IV1") + .setMetadataVersion(MetadataVersion.IBP_2_7_IV1) .build(); ClusterConfig config2 = ClusterConfig.defaultClusterBuilder() .name("Generated Test 2") .serverProperties(props2) - .ibp("2.7-IV2") + .setMetadataVersion(MetadataVersion.IBP_2_7_IV2) .build(); ClusterConfig config3 = ClusterConfig.defaultClusterBuilder() .name("Generated Test 3") diff --git a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/RaftClusterInvocationContext.java b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/RaftClusterInvocationContext.java index ff46a99cd2b63..22a009b394e6e 100644 --- a/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/RaftClusterInvocationContext.java +++ b/test-common/test-common-api/src/main/java/org/apache/kafka/common/test/api/RaftClusterInvocationContext.java @@ -21,7 +21,6 @@ import kafka.server.ControllerServer; import kafka.server.KafkaBroker; -import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.test.KafkaClusterTestKit; import org.apache.kafka.common.test.TestKitNodes; @@ -30,9 +29,10 @@ import org.apache.kafka.metadata.BrokerState; import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; import org.apache.kafka.metadata.storage.FormatterException; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.FeatureVersion; -import org.apache.kafka.server.common.Features; import org.apache.kafka.server.common.MetadataVersion; +import org.apache.kafka.server.fault.FaultHandlerException; import org.junit.jupiter.api.extension.AfterTestExecutionCallback; import org.junit.jupiter.api.extension.BeforeTestExecutionCallback; @@ -45,10 +45,8 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Properties; import java.util.Set; import java.util.TreeMap; -import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -100,13 +98,12 @@ public List getAdditionalExtensions() { ); } - public static class RaftClusterInstance implements ClusterInstance { + private static class RaftClusterInstance implements ClusterInstance { private final ClusterConfig clusterConfig; final AtomicBoolean started = new AtomicBoolean(false); final AtomicBoolean stopped = new AtomicBoolean(false); final AtomicBoolean formated = new AtomicBoolean(false); - private final ConcurrentLinkedQueue admins = new ConcurrentLinkedQueue<>(); private KafkaClusterTestKit clusterTestKit; private final boolean isCombined; private final ListenerName listenerName; @@ -169,18 +166,10 @@ public Set controllerIds() { return controllers().keySet(); } - @Override public KafkaClusterTestKit getUnderlying() { return clusterTestKit; } - @Override - public Admin createAdminClient(Properties configOverrides) { - Admin admin = Admin.create(clusterTestKit.newClientPropertiesBuilder(configOverrides).build()); - admins.add(admin); - return admin; - } - @Override public void start() { try { @@ -200,8 +189,6 @@ public void start() { @Override public void stop() { if (stopped.compareAndSet(false, true)) { - admins.forEach(admin -> Utils.closeQuietly(admin, "admin")); - admins.clear(); Utils.closeQuietly(clusterTestKit, "cluster"); } } @@ -216,6 
+203,16 @@ public void startBroker(int brokerId) { findBrokerOrThrow(brokerId).startup(); } + @Override + public Optional firstFatalException() { + return Optional.ofNullable(clusterTestKit.fatalFaultHandler().firstException()); + } + + @Override + public Optional firstNonFatalException() { + return Optional.ofNullable(clusterTestKit.nonFatalFaultHandler().firstException()); + } + @Override public void waitForReadyBrokers() throws InterruptedException { try { @@ -239,12 +236,12 @@ public Map controllers() { public void format() throws Exception { if (formated.compareAndSet(false, true)) { - Map nameToSupportedFeature = new TreeMap<>(); - Features.PRODUCTION_FEATURES.forEach(feature -> nameToSupportedFeature.put(feature.featureName(), feature)); + Map nameToSupportedFeature = new TreeMap<>(); + Feature.PRODUCTION_FEATURES.forEach(feature -> nameToSupportedFeature.put(feature.featureName(), feature)); Map newFeatureLevels = new TreeMap<>(); // Verify that all specified features are known to us. - for (Map.Entry entry : clusterConfig.features().entrySet()) { + for (Map.Entry entry : clusterConfig.features().entrySet()) { String featureName = entry.getKey().featureName(); short level = entry.getValue(); if (!featureName.equals(MetadataVersion.FEATURE_NAME)) { @@ -258,10 +255,10 @@ public void format() throws Exception { newFeatureLevels.put(MetadataVersion.FEATURE_NAME, clusterConfig.metadataVersion().featureLevel()); // Add default values for features that were not specified. - Features.PRODUCTION_FEATURES.forEach(supportedFeature -> { + Feature.PRODUCTION_FEATURES.forEach(supportedFeature -> { if (!newFeatureLevels.containsKey(supportedFeature.featureName())) { newFeatureLevels.put(supportedFeature.featureName(), - supportedFeature.defaultValue(clusterConfig.metadataVersion())); + supportedFeature.defaultLevel(clusterConfig.metadataVersion())); } }); @@ -271,10 +268,10 @@ public void format() throws Exception { String featureName = entry.getKey(); if (!featureName.equals(MetadataVersion.FEATURE_NAME)) { short level = entry.getValue(); - Features supportedFeature = nameToSupportedFeature.get(featureName); + Feature supportedFeature = nameToSupportedFeature.get(featureName); FeatureVersion featureVersion = supportedFeature.fromFeatureLevel(level, true); - Features.validateVersion(featureVersion, newFeatureLevels); + Feature.validateVersion(featureVersion, newFeatureLevels); } } @@ -287,11 +284,12 @@ public void format() throws Exception { .setNumControllerNodes(clusterConfig.numControllers()) .setBrokerListenerName(listenerName) .setBrokerSecurityProtocol(clusterConfig.brokerSecurityProtocol()) + .setControllerListenerName(clusterConfig.controllerListenerName()) + .setControllerSecurityProtocol(clusterConfig.controllerSecurityProtocol()) .build(); KafkaClusterTestKit.Builder builder = new KafkaClusterTestKit.Builder(nodes); // Copy properties into the TestKit builder clusterConfig.serverProperties().forEach(builder::setConfigProp); - // KAFKA-12512 need to pass security protocol and listener name here this.clusterTestKit = builder.build(); this.clusterTestKit.format(); } diff --git a/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterConfigTest.java b/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterConfigTest.java index 4e5e2e6b2cec8..555514774793a 100644 --- a/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterConfigTest.java +++ 
b/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterConfigTest.java @@ -37,6 +37,8 @@ import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_BROKER_LISTENER_NAME; import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_BROKER_SECURITY_PROTOCOL; +import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_CONTROLLER_LISTENER_NAME; +import static org.apache.kafka.common.test.TestKitNodes.DEFAULT_CONTROLLER_SECURITY_PROTOCOL; public class ClusterConfigTest { @@ -60,8 +62,10 @@ public void testCopy() throws IOException { .setTags(Arrays.asList("name", "Generated Test")) .setBrokerSecurityProtocol(SecurityProtocol.PLAINTEXT) .setBrokerListenerName(ListenerName.normalised("EXTERNAL")) + .setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT) + .setControllerListenerName(ListenerName.normalised("CONTROLLER")) .setTrustStoreFile(trustStoreFile) - .setMetadataVersion(MetadataVersion.IBP_0_8_0) + .setMetadataVersion(MetadataVersion.IBP_3_0_IV1) .setServerProperties(Collections.singletonMap("broker", "broker_value")) .setConsumerProperties(Collections.singletonMap("consumer", "consumer_value")) .setProducerProperties(Collections.singletonMap("producer", "producer_value")) @@ -116,5 +120,7 @@ public void testDisplayTags() { Assertions.assertTrue(expectedDisplayTags.contains("MetadataVersion=" + MetadataVersion.latestTesting())); Assertions.assertTrue(expectedDisplayTags.contains("BrokerSecurityProtocol=" + DEFAULT_BROKER_SECURITY_PROTOCOL)); Assertions.assertTrue(expectedDisplayTags.contains("BrokerListenerName=" + ListenerName.normalised(DEFAULT_BROKER_LISTENER_NAME))); + Assertions.assertTrue(expectedDisplayTags.contains("ControllerSecurityProtocol=" + DEFAULT_CONTROLLER_SECURITY_PROTOCOL)); + Assertions.assertTrue(expectedDisplayTags.contains("ControllerListenerName=" + ListenerName.normalised(DEFAULT_CONTROLLER_LISTENER_NAME))); } } diff --git a/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterTestExtensionsTest.java b/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterTestExtensionsTest.java index 8e02aa7bd9880..6020364927400 100644 --- a/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterTestExtensionsTest.java +++ b/test-common/test-common-api/src/test/java/org/apache/kafka/common/test/api/ClusterTestExtensionsTest.java @@ -17,19 +17,41 @@ package org.apache.kafka.common.test.api; +import kafka.server.ControllerServer; + import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.DescribeLogDirsResult; import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.GroupProtocol; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.acl.AclBindingFilter; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.errors.ClusterAuthorizationException; +import org.apache.kafka.common.errors.SaslAuthenticationException; +import org.apache.kafka.common.errors.TopicAuthorizationException; +import 
org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.JaasUtils; +import org.apache.kafka.common.test.TestUtils; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.extension.ExtendWith; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -37,12 +59,26 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.GroupProtocol.CLASSIC; import static org.apache.kafka.clients.consumer.GroupProtocol.CONSUMER; +import static org.apache.kafka.clients.producer.ProducerConfig.ACKS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG; import static org.apache.kafka.coordinator.group.GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; @ClusterTestDefaults(types = {Type.KRAFT}, serverProperties = { @ClusterConfigProperty(key = "default.key", value = "default.value"), @@ -61,10 +97,10 @@ public class ClusterTestExtensionsTest { static List generate1() { Map serverProperties = new HashMap<>(); serverProperties.put("foo", "bar"); - return Collections.singletonList(ClusterConfig.defaultBuilder() - .setTypes(Collections.singleton(Type.KRAFT)) + return singletonList(ClusterConfig.defaultBuilder() + .setTypes(singleton(Type.KRAFT)) .setServerProperties(serverProperties) - .setTags(Collections.singletonList("Generated Test")) + .setTags(singletonList("Generated Test")) .build()); } @@ -72,17 +108,17 @@ static List generate1() { @ClusterTest public void testClusterTest(ClusterInstance clusterInstance) { Assertions.assertSame(this.clusterInstance, clusterInstance, "Injected objects should be the same"); - Assertions.assertEquals(Type.KRAFT, clusterInstance.type()); // From the class level default - Assertions.assertEquals("default.value", clusterInstance.config().serverProperties().get("default.key")); + assertEquals(Type.KRAFT, clusterInstance.type()); // From the class level default + assertEquals("default.value", clusterInstance.config().serverProperties().get("default.key")); } // generate1 is a template method which generates any number of cluster configs 
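For reference, `generate1` above assembles its config through `ClusterConfig.defaultBuilder()`. A hypothetical variant that also exercises the new controller listener and security-protocol setters might look like the sketch below; the class wrapper, tag, and property values are illustrative, and the generic parameters elided by the flattened diff are assumed.

```java
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.test.api.ClusterConfig;
import org.apache.kafka.common.test.api.Type;

import java.util.List;
import java.util.Map;
import java.util.Set;

// Hypothetical template class, not part of the patch: exercises the new
// controller-side builder setters alongside the existing broker-side ones.
public class ControllerConfigTemplates {

    static List<ClusterConfig> generateControllerConfigs() {
        return List.of(ClusterConfig.defaultBuilder()
                .setTypes(Set.of(Type.KRAFT))
                .setBrokerSecurityProtocol(SecurityProtocol.PLAINTEXT)
                .setBrokerListenerName(ListenerName.normalised("EXTERNAL"))
                .setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
                .setControllerListenerName(ListenerName.normalised("CONTROLLER"))
                .setServerProperties(Map.of("queued.max.requests", "200"))
                .setTags(List.of("controller-sasl-sketch"))
                .build());
    }
}
```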
@ClusterTemplate("generate1") public void testClusterTemplate() { - Assertions.assertEquals(Type.KRAFT, clusterInstance.type(), + assertEquals(Type.KRAFT, clusterInstance.type(), "generate1 provided a KRAFT cluster, so we should see that here"); - Assertions.assertEquals("bar", clusterInstance.config().serverProperties().get("foo")); - Assertions.assertEquals(Collections.singletonList("Generated Test"), clusterInstance.config().tags()); + assertEquals("bar", clusterInstance.config().serverProperties().get("foo")); + assertEquals(singletonList("Generated Test"), clusterInstance.config().tags()); } // Multiple @ClusterTest can be used with @ClusterTests @@ -96,7 +132,7 @@ public void testClusterTemplate() { @ClusterConfigProperty(key = "spam", value = "eggs"), @ClusterConfigProperty(key = "default.key", value = "overwrite.value") }, tags = { - "default.display.key1", "default.display.key2" + "default.display.key1", "default.display.key2" }), @ClusterTest(types = {Type.CO_KRAFT}, serverProperties = { @ClusterConfigProperty(key = "foo", value = "baz"), @@ -106,31 +142,31 @@ public void testClusterTemplate() { @ClusterConfigProperty(key = "spam", value = "eggs"), @ClusterConfigProperty(key = "default.key", value = "overwrite.value") }, tags = { - "default.display.key1", "default.display.key2" + "default.display.key1", "default.display.key2" }) }) public void testClusterTests() throws ExecutionException, InterruptedException { - Assertions.assertEquals("baz", clusterInstance.config().serverProperties().get("foo")); - Assertions.assertEquals("eggs", clusterInstance.config().serverProperties().get("spam")); - Assertions.assertEquals("overwrite.value", clusterInstance.config().serverProperties().get("default.key")); - Assertions.assertEquals(Arrays.asList("default.display.key1", "default.display.key2"), clusterInstance.config().tags()); + assertEquals("baz", clusterInstance.config().serverProperties().get("foo")); + assertEquals("eggs", clusterInstance.config().serverProperties().get("spam")); + assertEquals("overwrite.value", clusterInstance.config().serverProperties().get("default.key")); + assertEquals(Arrays.asList("default.display.key1", "default.display.key2"), clusterInstance.config().tags()); // assert broker server 0 contains property queued.max.requests 200 from ClusterTest which overrides // the value 100 in server property in ClusterTestDefaults - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, "0"); - Map configs = admin.describeConfigs(Collections.singletonList(configResource)).all().get(); - Assertions.assertEquals(1, configs.size()); - Assertions.assertEquals("200", configs.get(configResource).get("queued.max.requests").value()); + Map configs = admin.describeConfigs(singletonList(configResource)).all().get(); + assertEquals(1, configs.size()); + assertEquals("200", configs.get(configResource).get("queued.max.requests").value()); } // In KRaft cluster non-combined mode, assert the controller server 3000 contains the property queued.max.requests 300 if (clusterInstance.type() == Type.KRAFT) { try (Admin admin = Admin.create(Collections.singletonMap( AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, clusterInstance.bootstrapControllers()))) { ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, "3000"); - Map configs = admin.describeConfigs(Collections.singletonList(configResource)).all().get(); - Assertions.assertEquals(1, 
configs.size()); - Assertions.assertEquals("300", configs.get(configResource).get("queued.max.requests").value()); + Map configs = admin.describeConfigs(singletonList(configResource)).all().get(); + assertEquals(1, configs.size()); + assertEquals("300", configs.get(configResource).get("queued.max.requests").value()); } } } @@ -140,24 +176,24 @@ public void testClusterTests() throws ExecutionException, InterruptedException { @ClusterTest(types = {Type.KRAFT, Type.CO_KRAFT}, disksPerBroker = 2), }) public void testClusterTestWithDisksPerBroker() throws ExecutionException, InterruptedException { - Admin admin = clusterInstance.createAdminClient(); - - DescribeLogDirsResult result = admin.describeLogDirs(clusterInstance.brokerIds()); - result.allDescriptions().get().forEach((brokerId, logDirDescriptionMap) -> { - Assertions.assertEquals(clusterInstance.config().numDisksPerBroker(), logDirDescriptionMap.size()); - }); + try (Admin admin = clusterInstance.admin()) { + DescribeLogDirsResult result = admin.describeLogDirs(clusterInstance.brokerIds()); + result.allDescriptions().get().forEach((brokerId, logDirDescriptionMap) -> { + assertEquals(clusterInstance.config().numDisksPerBroker(), logDirDescriptionMap.size()); + }); + } } @ClusterTest(autoStart = AutoStart.NO) public void testNoAutoStart() { Assertions.assertThrows(RuntimeException.class, clusterInstance::anyBrokerSocketServer); clusterInstance.start(); - Assertions.assertNotNull(clusterInstance.anyBrokerSocketServer()); + assertNotNull(clusterInstance.anyBrokerSocketServer()); } @ClusterTest public void testDefaults(ClusterInstance clusterInstance) { - Assertions.assertEquals(MetadataVersion.latestTesting(), clusterInstance.config().metadataVersion()); + assertEquals(MetadataVersion.latestTesting(), clusterInstance.config().metadataVersion()); } @ClusterTest(types = {Type.KRAFT, Type.CO_KRAFT}) @@ -165,7 +201,7 @@ public void testSupportedNewGroupProtocols(ClusterInstance clusterInstance) { Set supportedGroupProtocols = new HashSet<>(); supportedGroupProtocols.add(CLASSIC); supportedGroupProtocols.add(CONSUMER); - Assertions.assertEquals(supportedGroupProtocols, clusterInstance.supportedGroupProtocols()); + assertEquals(supportedGroupProtocols, clusterInstance.supportedGroupProtocols()); } @ClusterTests({ @@ -177,7 +213,7 @@ public void testSupportedNewGroupProtocols(ClusterInstance clusterInstance) { }) }) public void testNotSupportedNewGroupProtocols(ClusterInstance clusterInstance) { - Assertions.assertEquals(Collections.singleton(CLASSIC), clusterInstance.supportedGroupProtocols()); + assertEquals(singleton(CLASSIC), clusterInstance.supportedGroupProtocols()); } @@ -189,11 +225,11 @@ public void testCreateTopic(ClusterInstance clusterInstance) throws Exception { short numReplicas = 3; clusterInstance.createTopic(topicName, numPartition, numReplicas); - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { Assertions.assertTrue(admin.listTopics().listings().get().stream().anyMatch(s -> s.name().equals(topicName))); - List partitions = admin.describeTopics(Collections.singleton(topicName)).allTopicNames().get() + List partitions = admin.describeTopics(singleton(topicName)).allTopicNames().get() .get(topicName).partitions(); - Assertions.assertEquals(numPartition, partitions.size()); + assertEquals(numPartition, partitions.size()); Assertions.assertTrue(partitions.stream().allMatch(partition -> partition.replicas().size() == numReplicas)); } } @@ -233,15 +269,250 @@ public void 
testClusterAliveBrokers(ClusterInstance clusterInstance) throws Exce } ) public void testVerifyTopicDeletion(ClusterInstance clusterInstance) throws Exception { - try (Admin admin = clusterInstance.createAdminClient()) { + try (Admin admin = clusterInstance.admin()) { String testTopic = "testTopic"; - admin.createTopics(Collections.singletonList(new NewTopic(testTopic, 1, (short) 1))); + admin.createTopics(singletonList(new NewTopic(testTopic, 1, (short) 1))); clusterInstance.waitForTopic(testTopic, 1); - admin.deleteTopics(Collections.singletonList(testTopic)); + admin.deleteTopics(singletonList(testTopic)); clusterInstance.waitTopicDeletion(testTopic); Assertions.assertTrue(admin.listTopics().listings().get().stream().noneMatch( topic -> topic.name().equals(testTopic) )); } } + + @ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, brokers = 3) + public void testCreateProducerAndConsumer(ClusterInstance cluster) throws InterruptedException { + String topic = "topic"; + String key = "key"; + String value = "value"; + try (Admin adminClient = cluster.admin(); + Producer producer = cluster.producer(Map.of( + ACKS_CONFIG, "all", + KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName(), + VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName())); + Consumer consumer = cluster.consumer(Map.of( + KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), + VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName())) + ) { + adminClient.createTopics(singleton(new NewTopic(topic, 1, (short) 1))); + assertNotNull(producer); + assertNotNull(consumer); + producer.send(new ProducerRecord<>(topic, key, value)); + producer.flush(); + consumer.subscribe(singletonList(topic)); + List> records = new ArrayList<>(); + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)).forEach(records::add); + return records.size() == 1; + }, "Failed to receive message"); + assertEquals(key, records.get(0).key()); + assertEquals(value, records.get(0).value()); + } + } + + @ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, serverProperties = { + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + }) + public void testCreateDefaultProducerAndConsumer(ClusterInstance cluster) throws InterruptedException { + String topic = "topic"; + byte[] key = "key".getBytes(StandardCharsets.UTF_8); + byte[] value = "value".getBytes(StandardCharsets.UTF_8); + try (Admin adminClient = cluster.admin(); + Producer producer = cluster.producer(); + Consumer consumer = cluster.consumer() + ) { + adminClient.createTopics(singleton(new NewTopic(topic, 1, (short) 1))); + assertNotNull(producer); + assertNotNull(consumer); + producer.send(new ProducerRecord<>(topic, key, value)); + producer.flush(); + consumer.subscribe(singletonList(topic)); + List> records = new ArrayList<>(); + TestUtils.waitForCondition(() -> { + consumer.poll(Duration.ofMillis(100)).forEach(records::add); + return records.size() == 1; + }, "Failed to receive message"); + assertArrayEquals(key, records.get(0).key()); + assertArrayEquals(value, records.get(0).value()); + } + } + + @ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, controllerListener = "FOO") + public void testControllerListenerName(ClusterInstance cluster) throws ExecutionException, InterruptedException { + assertEquals("FOO", cluster.controllerListenerName().get().value()); + try (Admin admin = 
cluster.admin(Map.of(), true)) { + assertEquals(1, admin.describeMetadataQuorum().quorumInfo().get().nodes().size()); + } + } + + @ClusterTest(types = {Type.KRAFT}) + public void testControllerRestart(ClusterInstance cluster) throws ExecutionException, InterruptedException { + try (Admin admin = cluster.admin()) { + + ControllerServer controller = cluster.controllers().values().iterator().next(); + controller.shutdown(); + controller.awaitShutdown(); + + controller.startup(); + + assertEquals(1, admin.describeMetadataQuorum().quorumInfo().get().nodes().size()); + } + } + + @ClusterTest( + types = {Type.KRAFT, Type.CO_KRAFT}, + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT, + controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT, + serverProperties = { + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + } + ) + public void testSaslPlaintext(ClusterInstance clusterInstance) throws CancellationException, ExecutionException, InterruptedException { + Assertions.assertEquals(SecurityProtocol.SASL_PLAINTEXT, clusterInstance.config().brokerSecurityProtocol()); + + // default ClusterInstance#admin helper with admin credentials + try (Admin admin = clusterInstance.admin()) { + admin.describeAcls(AclBindingFilter.ANY).values().get(); + } + String topic = "sasl-plaintext-topic"; + clusterInstance.createTopic(topic, 1, (short) 1); + try (Producer producer = clusterInstance.producer()) { + producer.send(new ProducerRecord<>(topic, Utils.utf8("key"), Utils.utf8("value"))).get(); + producer.flush(); + } + try (Consumer consumer = clusterInstance.consumer()) { + consumer.subscribe(List.of(topic)); + TestUtils.waitForCondition(() -> { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + return records.count() == 1; + }, "Failed to receive message"); + } + + // client with non-admin credentials + Map nonAdminConfig = Map.of( + SaslConfigs.SASL_JAAS_CONFIG, + String.format( + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", + JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD + ) + ); + try (Admin admin = clusterInstance.admin(nonAdminConfig)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> admin.describeAcls(AclBindingFilter.ANY).values().get() + ); + assertInstanceOf(ClusterAuthorizationException.class, exception.getCause()); + } + try (Producer producer = clusterInstance.producer(nonAdminConfig)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> producer.send(new ProducerRecord<>(topic, Utils.utf8("key"), Utils.utf8("value"))).get() + ); + assertInstanceOf(TopicAuthorizationException.class, exception.getCause()); + } + try (Consumer consumer = clusterInstance.consumer(nonAdminConfig)) { + consumer.subscribe(List.of(topic)); + AtomicBoolean hasException = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + if (hasException.get()) { + return true; + } + try { + consumer.poll(Duration.ofMillis(100)); + } catch (TopicAuthorizationException e) { + hasException.set(true); + } + return false; + }, "Failed to get exception"); + } + + // client with unknown credentials + Map unknownUserConfig = Map.of( + SaslConfigs.SASL_JAAS_CONFIG, + String.format( + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", + "unknown", "unknown" + 
) + ); + try (Admin admin = clusterInstance.admin(unknownUserConfig)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> admin.describeAcls(AclBindingFilter.ANY).values().get() + ); + assertInstanceOf(SaslAuthenticationException.class, exception.getCause()); + } + try (Producer producer = clusterInstance.producer(unknownUserConfig)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> producer.send(new ProducerRecord<>(topic, Utils.utf8("key"), Utils.utf8("value"))).get() + ); + assertInstanceOf(SaslAuthenticationException.class, exception.getCause()); + } + try (Consumer consumer = clusterInstance.consumer(unknownUserConfig)) { + consumer.subscribe(List.of(topic)); + AtomicBoolean hasException = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + if (hasException.get()) { + return true; + } + try { + consumer.poll(Duration.ofMillis(100)); + } catch (SaslAuthenticationException e) { + hasException.set(true); + } + return false; + }, "Failed to get exception"); + } + } + + @ClusterTest( + types = {Type.KRAFT, Type.CO_KRAFT}, + brokerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT, + controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT, + serverProperties = { + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + } + ) + public void testSaslPlaintextWithController(ClusterInstance clusterInstance) throws CancellationException, ExecutionException, InterruptedException { + // default ClusterInstance#admin helper with admin credentials + try (Admin admin = clusterInstance.admin(Map.of(), true)) { + admin.describeAcls(AclBindingFilter.ANY).values().get(); + } + + // client with non-admin credentials + Map nonAdminConfig = Map.of( + SaslConfigs.SASL_JAAS_CONFIG, + String.format( + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", + JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD + ) + ); + try (Admin admin = clusterInstance.admin(nonAdminConfig, true)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> admin.describeAcls(AclBindingFilter.ANY).values().get() + ); + assertInstanceOf(ClusterAuthorizationException.class, exception.getCause()); + } + + // client with unknown credentials + Map unknownUserConfig = Map.of( + SaslConfigs.SASL_JAAS_CONFIG, + String.format( + "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", + "unknown", "unknown" + ) + ); + try (Admin admin = clusterInstance.admin(unknownUserConfig)) { + ExecutionException exception = assertThrows( + ExecutionException.class, + () -> admin.describeAcls(AclBindingFilter.ANY).values().get() + ); + assertInstanceOf(SaslAuthenticationException.class, exception.getCause()); + } + } } diff --git a/test-common/test-common-api/src/test/resources/log4j.properties b/test-common/test-common-api/src/test/resources/log4j.properties deleted file mode 100644 index 91c909b99adaf..0000000000000 --- a/test-common/test-common-api/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
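The tests above lean on the new `ClusterInstance` client helpers; two additions they do not exercise directly are `getLeaderBrokerId` and the fault-handler accessors. A hypothetical test using them (class name, topic name, and replica counts are illustrative) could look like this:

```java
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.test.api.ClusterInstance;
import org.apache.kafka.common.test.api.ClusterTest;
import org.apache.kafka.common.test.api.ClusterTestExtensions;
import org.apache.kafka.common.test.api.Type;

import org.junit.jupiter.api.extension.ExtendWith;

import static org.junit.jupiter.api.Assertions.assertTrue;

// Hypothetical test class, not part of the patch.
@ExtendWith(ClusterTestExtensions.class)
public class ClusterInstanceHelpersSketchTest {

    @ClusterTest(types = {Type.KRAFT}, brokers = 3)
    public void testLeaderLookupAndFaultHandlers(ClusterInstance cluster) throws Exception {
        cluster.createTopic("leader-topic", 1, (short) 3);

        // getLeaderBrokerId() resolves the partition leader via a short-lived admin client.
        int leaderId = cluster.getLeaderBrokerId(new TopicPartition("leader-topic", 0));
        assertTrue(cluster.brokerIds().contains(leaderId));

        // A healthy run should not have tripped either fault handler.
        assertTrue(cluster.firstFatalException().isEmpty());
        assertTrue(cluster.firstNonFatalException().isEmpty());
    }
}
```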
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka=INFO \ No newline at end of file diff --git a/test-common/test-common-api/src/test/resources/log4j2.yaml b/test-common/test-common-api/src/test/resources/log4j2.yaml new file mode 100644 index 0000000000000..be546a18b55e6 --- /dev/null +++ b/test-common/test-common-api/src/test/resources/log4j2.yaml @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c:%L)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: INFO + AppenderRef: + - ref: STDOUT + Logger: + - name: org.apache.kafka + level: INFO diff --git a/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilter.java b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilter.java new file mode 100644 index 0000000000000..a236c05b95778 --- /dev/null +++ b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilter.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.test.junit; + +import org.junit.platform.engine.Filter; +import org.junit.platform.engine.FilterResult; +import org.junit.platform.engine.TestDescriptor; +import org.junit.platform.engine.TestSource; +import org.junit.platform.engine.support.descriptor.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +public class AutoQuarantinedTestFilter implements Filter { + + private static final Filter INCLUDE_ALL_TESTS = testDescriptor -> FilterResult.included(null); + private static final Filter EXCLUDE_ALL_TESTS = testDescriptor -> FilterResult.excluded(null); + + private static final Logger log = LoggerFactory.getLogger(AutoQuarantinedTestFilter.class); + + private final Set testCatalog; + private final boolean includeQuarantined; + + AutoQuarantinedTestFilter(Set testCatalog, boolean includeQuarantined) { + this.testCatalog = Collections.unmodifiableSet(testCatalog); + this.includeQuarantined = includeQuarantined; + } + + @Override + public FilterResult apply(TestDescriptor testDescriptor) { + Optional sourceOpt = testDescriptor.getSource(); + if (sourceOpt.isEmpty()) { + return FilterResult.included(null); + } + + TestSource source = sourceOpt.get(); + if (!(source instanceof MethodSource)) { + return FilterResult.included(null); + } + + MethodSource methodSource = (MethodSource) source; + + TestAndMethod testAndMethod = new TestAndMethod(methodSource.getClassName(), methodSource.getMethodName()); + if (includeQuarantined) { + if (testCatalog.contains(testAndMethod)) { + return FilterResult.excluded("exclude non-quarantined"); + } else { + return FilterResult.included("auto-quarantined"); + } + } else { + if (testCatalog.contains(testAndMethod)) { + return FilterResult.included(null); + } else { + return FilterResult.excluded("auto-quarantined"); + } + } + } + + private static Filter defaultFilter(boolean includeQuarantined) { + if (includeQuarantined) { + return EXCLUDE_ALL_TESTS; + } else { + return INCLUDE_ALL_TESTS; + } + } + + /** + * Create a filter that excludes tests that are missing from a given test catalog file. + * If no test catalog is given, the default behavior depends on {@code includeQuarantined}. + * If true, this filter will exclude all tests. If false, this filter will include all tests. + *

                + * The format of the test catalog is a text file where each line has the format of:
                + *
                + * <pre>
                +     *     FullyQualifiedClassName "#" MethodName "\n"
                +     * </pre>
                + * + * @param testCatalogFileName path to a test catalog file + * @param includeQuarantined true if this filter should include only the auto-quarantined tests + */ + public static Filter create(String testCatalogFileName, boolean includeQuarantined) { + if (testCatalogFileName == null || testCatalogFileName.isEmpty()) { + log.debug("No test catalog specified, will not quarantine any recently added tests."); + return defaultFilter(includeQuarantined); + } + Path path = Paths.get(testCatalogFileName); + log.debug("Loading test catalog file {}.", path); + + if (!Files.exists(path)) { + log.error("Test catalog file {} does not exist, will not quarantine any recently added tests.", path); + return defaultFilter(includeQuarantined); + } + + Set allTests = new HashSet<>(); + try (BufferedReader reader = Files.newBufferedReader(path, Charset.defaultCharset())) { + String line = reader.readLine(); + while (line != null) { + String[] toks = line.split("#", 2); + allTests.add(new TestAndMethod(toks[0], toks[1])); + line = reader.readLine(); + } + } catch (IOException e) { + log.error("Error while reading test catalog file, will not quarantine any recently added tests.", e); + return defaultFilter(includeQuarantined); + } + + if (allTests.isEmpty()) { + log.error("Loaded an empty test catalog, will not quarantine any recently added tests."); + return defaultFilter(includeQuarantined); + } else { + log.debug("Loaded {} test methods from test catalog file {}.", allTests.size(), path); + return new AutoQuarantinedTestFilter(allTests, includeQuarantined); + } + } + + public static class TestAndMethod { + private final String testClass; + private final String testMethod; + + public TestAndMethod(String testClass, String testMethod) { + this.testClass = testClass; + this.testMethod = testMethod; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestAndMethod that = (TestAndMethod) o; + return Objects.equals(testClass, that.testClass) && Objects.equals(testMethod, that.testMethod); + } + + @Override + public int hashCode() { + return Objects.hash(testClass, testMethod); + } + + @Override + public String toString() { + return "TestAndMethod{" + + "testClass='" + testClass + '\'' + + ", testMethod='" + testMethod + '\'' + + '}'; + } + } +} diff --git a/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilter.java b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilter.java new file mode 100644 index 0000000000000..f56c44d36ec6c --- /dev/null +++ b/test-common/test-common-runtime/src/main/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilter.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
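The catalog consumed by `AutoQuarantinedTestFilter` is plain text, one `FullyQualifiedClassName#methodName` entry per line. A standalone sketch of constructing the filter against a hand-written catalog is below; the file contents and temp path are made up, and `Filter<TestDescriptor>` is assumed for the generic parameter that the flattened diff elides.

```java
import org.apache.kafka.common.test.junit.AutoQuarantinedTestFilter;

import org.junit.platform.engine.Filter;
import org.junit.platform.engine.TestDescriptor;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class AutoQuarantinedFilterSketch {
    public static void main(String[] args) throws Exception {
        // Hand-written catalog: one FullyQualifiedClassName#methodName per line.
        Path catalog = Files.createTempFile("kafka-test-catalog", ".txt");
        Files.write(catalog, List.of(
                "org.apache.kafka.common.test.api.ClusterTestExtensionsTest#testClusterTest",
                "org.apache.kafka.common.test.api.ClusterTestExtensionsTest#testDefaults"));

        // includeQuarantined=false: keep only tests already listed in the catalog.
        Filter<TestDescriptor> knownTests = AutoQuarantinedTestFilter.create(catalog.toString(), false);

        // includeQuarantined=true: keep only tests missing from the catalog,
        // i.e. newly added tests that get auto-quarantined.
        Filter<TestDescriptor> newTests = AutoQuarantinedTestFilter.create(catalog.toString(), true);

        System.out.println(knownTests + " / " + newTests);
    }
}
```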
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.test.junit; + +import org.junit.platform.engine.Filter; +import org.junit.platform.engine.FilterResult; +import org.junit.platform.engine.TestDescriptor; +import org.junit.platform.engine.TestTag; +import org.junit.platform.launcher.PostDiscoveryFilter; + +/** + * A JUnit test filter which can include or exclude discovered tests before + * they are sent off to the test engine for execution. The behavior of this + * filter is controlled by the system property "kafka.test.run.quarantined". + * If the property is set to "true", then only auto-quarantined and explicitly + * {@code @Flaky} tests will be included. If the property is set to "false", then + * only non-quarantined tests will be run. + *

                + * This filter is registered with JUnit using SPI. The test-common-runtime module + * includes a META-INF/services/org.junit.platform.launcher.PostDiscoveryFilter + * service file which registers this class. + */ +public class QuarantinedPostDiscoveryFilter implements PostDiscoveryFilter { + + private static final TestTag FLAKY_TEST_TAG = TestTag.create("flaky"); + + public static final String RUN_QUARANTINED_PROP = "kafka.test.run.quarantined"; + + public static final String CATALOG_FILE_PROP = "kafka.test.catalog.file"; + + private final Filter autoQuarantinedFilter; + private final boolean runQuarantined; + + // No-arg public constructor for SPI + @SuppressWarnings("unused") + public QuarantinedPostDiscoveryFilter() { + runQuarantined = System.getProperty(RUN_QUARANTINED_PROP, "false") + .equalsIgnoreCase("true"); + + String testCatalogFileName = System.getProperty(CATALOG_FILE_PROP); + autoQuarantinedFilter = AutoQuarantinedTestFilter.create(testCatalogFileName, runQuarantined); + } + + // Visible for tests + QuarantinedPostDiscoveryFilter(Filter autoQuarantinedFilter, boolean runQuarantined) { + this.autoQuarantinedFilter = autoQuarantinedFilter; + this.runQuarantined = runQuarantined; + } + + @Override + public FilterResult apply(TestDescriptor testDescriptor) { + boolean hasTag = testDescriptor.getTags().contains(FLAKY_TEST_TAG); + FilterResult result = autoQuarantinedFilter.apply(testDescriptor); + if (runQuarantined) { + // If selecting quarantined tests, we first check for explicitly flaky tests. If no + // flaky tag is set, check the auto-quarantined filter. In the case of a missing test + // catalog, the auto-quarantined filter will exclude all tests. + if (hasTag) { + return FilterResult.included("flaky"); + } else { + return result; + } + } else { + // If selecting non-quarantined tests, we exclude auto-quarantined tests and flaky tests + if (result.included() && hasTag) { + return FilterResult.excluded("flaky"); + } else { + return result; + } + } + } +} diff --git a/test-common/src/test/resources/junit-platform.properties b/test-common/test-common-runtime/src/main/resources/META-INF/services/org.junit.platform.launcher.PostDiscoveryFilter similarity index 90% rename from test-common/src/test/resources/junit-platform.properties rename to test-common/test-common-runtime/src/main/resources/META-INF/services/org.junit.platform.launcher.PostDiscoveryFilter index 05069923a7f21..45209e1fde44e 100644 --- a/test-common/src/test/resources/junit-platform.properties +++ b/test-common/test-common-runtime/src/main/resources/META-INF/services/org.junit.platform.launcher.PostDiscoveryFilter @@ -12,4 +12,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
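Editor's note: the include/exclude behaviour above is split between the auto-quarantined filter and the flaky-tag check, so a compact restatement of the resulting decision table may help reviewers. This is an illustrative Python sketch, not part of the patch, and it assumes the test catalog loaded successfully (with a missing catalog, a quarantined run falls back to flaky-tagged tests only, as the tests below show).

```python
# Illustrative sketch (not part of the patch) of the combined decision the two
# filters above make, assuming a test catalog was loaded successfully.
# "in_catalog" means the test appears in the catalog; "flaky" means it carries
# the @Flaky tag.
def test_is_included(run_quarantined: bool, flaky: bool, in_catalog: bool) -> bool:
    if run_quarantined:
        # quarantined run: explicitly flaky tests plus tests missing from the catalog
        return flaky or not in_catalog
    # regular run: only known, non-flaky tests
    return in_catalog and not flaky

assert test_is_included(run_quarantined=True, flaky=False, in_catalog=False)     # newly added test
assert not test_is_included(run_quarantined=False, flaky=True, in_catalog=True)  # flaky test skipped
```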
-junit.jupiter.params.displayname.default = "{displayName}.{argumentsWithNames}" + +org.apache.kafka.common.test.junit.QuarantinedPostDiscoveryFilter \ No newline at end of file diff --git a/clients/src/test/resources/junit-platform.properties b/test-common/test-common-runtime/src/main/resources/junit-platform.properties similarity index 94% rename from clients/src/test/resources/junit-platform.properties rename to test-common/test-common-runtime/src/main/resources/junit-platform.properties index 05069923a7f21..551f6c42cb8aa 100644 --- a/clients/src/test/resources/junit-platform.properties +++ b/test-common/test-common-runtime/src/main/resources/junit-platform.properties @@ -13,3 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. junit.jupiter.params.displayname.default = "{displayName}.{argumentsWithNames}" +junit.jupiter.extensions.autodetection.enabled = true \ No newline at end of file diff --git a/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilterTest.java b/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilterTest.java new file mode 100644 index 0000000000000..390132d348449 --- /dev/null +++ b/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilterTest.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
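Editor's note: the catalog file consumed by AutoQuarantinedTestFilter.create() above is plain text, one "FullyQualifiedClassName#methodName" entry per line, as the test below also demonstrates. The following is a rough Python equivalent of that parsing step; the file name and entries are placeholders, and unlike the Java code it skips blank lines.

```python
# Rough Python equivalent of the catalog parsing in AutoQuarantinedTestFilter.create():
# one "FullyQualifiedClassName#methodName" entry per line, split on the first '#'.
def load_catalog(path: str) -> set:
    entries = set()
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                cls, method = line.split("#", 1)
                entries.add((cls, method))
    return entries

# e.g. a catalog containing the line "o.a.k.Foo#testBar1" yields {("o.a.k.Foo", "testBar1")}
```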
+ */ + +package org.apache.kafka.common.test.junit; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.platform.engine.Filter; +import org.junit.platform.engine.TestDescriptor; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AutoQuarantinedTestFilterTest { + + private TestDescriptor descriptor(String className, String methodName) { + return new QuarantinedPostDiscoveryFilterTest.MockTestDescriptor(className, methodName); + } + + @Test + public void testLoadCatalog(@TempDir Path tempDir) throws IOException { + Path catalog = tempDir.resolve("catalog.txt"); + List lines = new ArrayList<>(); + lines.add("o.a.k.Foo#testBar1"); + lines.add("o.a.k.Foo#testBar2"); + lines.add("o.a.k.Spam#testEggs"); + Files.write(catalog, lines); + + Filter filter = AutoQuarantinedTestFilter.create(catalog.toString(), false); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar1")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar2")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testEggs")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testNew")).excluded()); + + filter = AutoQuarantinedTestFilter.create(catalog.toString(), true); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar1")).excluded()); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar2")).excluded()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testEggs")).excluded()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testNew")).included()); + } + + @Test + public void testEmptyCatalog(@TempDir Path tempDir) throws IOException { + Path catalog = tempDir.resolve("catalog.txt"); + Files.write(catalog, Collections.emptyList()); + + Filter filter = AutoQuarantinedTestFilter.create(catalog.toString(), false); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar1")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar2")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testEggs")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testNew")).included()); + } + + @Test + public void testMissingCatalog() { + Filter filter = AutoQuarantinedTestFilter.create("does-not-exist.txt", false); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar1")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Foo", "testBar2")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testEggs")).included()); + assertTrue(filter.apply(descriptor("o.a.k.Spam", "testNew")).included()); + } +} diff --git a/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilterTest.java b/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilterTest.java new file mode 100644 index 0000000000000..4ce628594f503 --- /dev/null +++ b/test-common/test-common-runtime/src/test/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilterTest.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.test.junit; + +import org.junit.jupiter.api.Test; +import org.junit.platform.engine.TestDescriptor; +import org.junit.platform.engine.TestSource; +import org.junit.platform.engine.TestTag; +import org.junit.platform.engine.UniqueId; +import org.junit.platform.engine.support.descriptor.MethodSource; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class QuarantinedPostDiscoveryFilterTest { + + static class MockTestDescriptor implements TestDescriptor { + + private final MethodSource methodSource; + private final Set testTags; + + MockTestDescriptor(String className, String methodName, String... tags) { + this.methodSource = MethodSource.from(className, methodName); + this.testTags = new HashSet<>(); + Arrays.stream(tags).forEach(tag -> testTags.add(TestTag.create(tag))); + } + + @Override + public UniqueId getUniqueId() { + return null; + } + + @Override + public String getDisplayName() { + return ""; + } + + @Override + public Set getTags() { + return this.testTags; + } + + @Override + public Optional getSource() { + return Optional.of(this.methodSource); + } + + @Override + public Optional getParent() { + return Optional.empty(); + } + + @Override + public void setParent(TestDescriptor testDescriptor) { + + } + + @Override + public Set getChildren() { + return Set.of(); + } + + @Override + public void addChild(TestDescriptor testDescriptor) { + + } + + @Override + public void removeChild(TestDescriptor testDescriptor) { + + } + + @Override + public void removeFromHierarchy() { + + } + + @Override + public Type getType() { + return null; + } + + @Override + public Optional findByUniqueId(UniqueId uniqueId) { + return Optional.empty(); + } + } + + QuarantinedPostDiscoveryFilter setupFilter(boolean runQuarantined) { + Set testCatalog = new HashSet<>(); + testCatalog.add(new AutoQuarantinedTestFilter.TestAndMethod("o.a.k.Foo", "testBar1")); + testCatalog.add(new AutoQuarantinedTestFilter.TestAndMethod("o.a.k.Foo", "testBar2")); + testCatalog.add(new AutoQuarantinedTestFilter.TestAndMethod("o.a.k.Spam", "testEggs")); + + AutoQuarantinedTestFilter autoQuarantinedTestFilter = new AutoQuarantinedTestFilter(testCatalog, runQuarantined); + return new QuarantinedPostDiscoveryFilter(autoQuarantinedTestFilter, runQuarantined); + } + + @Test + public void testQuarantinedExistingTestNonFlaky() { + QuarantinedPostDiscoveryFilter filter = setupFilter(true); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar1")).excluded()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar2")).excluded()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggs")).excluded()); + } + + @Test + public void testQuarantinedExistingTestFlaky() { + QuarantinedPostDiscoveryFilter filter = setupFilter(true); + 
assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar1", "flaky")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar2", "flaky")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggs", "flaky", "integration")).included()); + } + + @Test + public void testQuarantinedNewTest() { + QuarantinedPostDiscoveryFilter filter = setupFilter(true); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar3")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggz", "flaky")).included()); + } + + @Test + public void testExistingTestNonFlaky() { + QuarantinedPostDiscoveryFilter filter = setupFilter(false); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar1")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar2")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggs")).included()); + } + + + @Test + public void testExistingTestFlaky() { + QuarantinedPostDiscoveryFilter filter = setupFilter(false); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar1", "flaky")).excluded()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar2", "flaky")).excluded()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggs", "flaky", "integration")).excluded()); + } + + @Test + public void testNewTest() { + QuarantinedPostDiscoveryFilter filter = setupFilter(false); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar3")).excluded()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggz", "flaky")).excluded()); + } + + @Test + public void testNoCatalogQuarantinedTest() { + QuarantinedPostDiscoveryFilter filter = new QuarantinedPostDiscoveryFilter( + AutoQuarantinedTestFilter.create(null, true), + true + ); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar1", "flaky")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Foo", "testBar2", "flaky")).included()); + assertTrue(filter.apply(new MockTestDescriptor("o.a.k.Spam", "testEggs")).excluded()); + } +} diff --git a/tests/README.md b/tests/README.md index fe71cf4503145..7a1d4e733945b 100644 --- a/tests/README.md +++ b/tests/README.md @@ -31,6 +31,10 @@ TC_PATHS="tests/kafkatest/tests/streams tests/kafkatest/tests/tools" bash tests/ ``` TC_PATHS="tests/kafkatest/tests/client/pluggable_test.py" bash tests/docker/run_tests.sh ``` +* Run multiple test files +``` +TC_PATHS="tests/kafkatest/tests/client/pluggable_test.py tests/kafkatest/services/console_consumer.py" bash tests/docker/run_tests.sh +``` * Run a specific test class ``` TC_PATHS="tests/kafkatest/tests/client/pluggable_test.py::PluggableConsumerTest" bash tests/docker/run_tests.sh @@ -161,7 +165,7 @@ https://cwiki.apache.org/confluence/display/KAFKA/tutorial+-+set+up+and+run+Kafk $ cd kafka/tests $ virtualenv -p python3 venv $ . ./venv/bin/activate - $ python3 setup.py develop + $ python3 -m pip install --editable . $ cd .. 
# back to base kafka directory * Run the bootstrap script to set up Vagrant for testing diff --git a/tests/docker/Dockerfile b/tests/docker/Dockerfile index 198ffde1b7c52..7dce5ef86d839 100644 --- a/tests/docker/Dockerfile +++ b/tests/docker/Dockerfile @@ -65,7 +65,8 @@ LABEL ducker.creator=$ducker_creator RUN apt update && apt install -y sudo git netcat iptables rsync unzip wget curl jq coreutils openssh-server net-tools vim python3-pip python3-dev libffi-dev libssl-dev cmake pkg-config libfuse-dev iperf traceroute iproute2 iputils-ping && apt-get -y clean RUN python3 -m pip install -U pip==21.1.1; # NOTE: ducktape 0.12.0 supports py 3.9, 3.10, 3.11 and 3.12 -RUN pip3 install --upgrade cffi virtualenv pyasn1 boto3 pycrypto pywinrm ipaddress enum34 debugpy psutil && pip3 install --upgrade "ducktape==0.12.0" +COPY requirements.txt requirements.txt +RUN pip3 install --upgrade -r requirements.txt COPY --from=build-native-image /build/kafka-binary/ /opt/kafka-binary/ # Set up ssh @@ -77,6 +78,12 @@ RUN echo 'PermitUserEnvironment yes' >> /etc/ssh/sshd_config # Install binary test dependencies. # we use the same versions as in vagrant/base.sh ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" +# The versions between 0.11.0.3 and 2.0.1 are used to run client code, because zookeeper in these versions is not compatible with JDK 17. +# See KAFKA-17888 for more details. +RUN mkdir -p "/opt/kafka-0.11.0.3" && chmod a+rw /opt/kafka-0.11.0.3 && curl -s "$KAFKA_MIRROR/kafka_2.11-0.11.0.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-0.11.0.3" +RUN mkdir -p "/opt/kafka-1.0.2" && chmod a+rw /opt/kafka-1.0.2 && curl -s "$KAFKA_MIRROR/kafka_2.11-1.0.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-1.0.2" +RUN mkdir -p "/opt/kafka-1.1.1" && chmod a+rw /opt/kafka-1.1.1 && curl -s "$KAFKA_MIRROR/kafka_2.11-1.1.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-1.1.1" +RUN mkdir -p "/opt/kafka-2.0.1" && chmod a+rw /opt/kafka-2.0.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.0.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.0.1" RUN mkdir -p "/opt/kafka-2.1.1" && chmod a+rw /opt/kafka-2.1.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.1.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.1.1" RUN mkdir -p "/opt/kafka-2.2.2" && chmod a+rw /opt/kafka-2.2.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.2.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.2.2" RUN mkdir -p "/opt/kafka-2.3.1" && chmod a+rw /opt/kafka-2.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.3.1" @@ -92,11 +99,16 @@ RUN mkdir -p "/opt/kafka-3.3.2" && chmod a+rw /opt/kafka-3.3.2 && curl -s "$KAFK RUN mkdir -p "/opt/kafka-3.4.1" && chmod a+rw /opt/kafka-3.4.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.4.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.4.1" RUN mkdir -p "/opt/kafka-3.5.2" && chmod a+rw /opt/kafka-3.5.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.5.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.5.2" RUN mkdir -p "/opt/kafka-3.6.2" && chmod a+rw /opt/kafka-3.6.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.6.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.6.2" -RUN mkdir -p "/opt/kafka-3.7.1" && chmod a+rw /opt/kafka-3.7.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.7.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.7.1" +RUN mkdir -p "/opt/kafka-3.7.2" && chmod a+rw /opt/kafka-3.7.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.7.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.7.2" RUN mkdir -p "/opt/kafka-3.8.1" && chmod a+rw /opt/kafka-3.8.1 && curl -s 
"$KAFKA_MIRROR/kafka_2.12-3.8.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.8.1" +RUN mkdir -p "/opt/kafka-3.9.0" && chmod a+rw /opt/kafka-3.9.0 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.9.0.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.9.0" # Streams test dependencies +RUN curl -s "$KAFKA_MIRROR/kafka-streams-0.11.0.3-test.jar" -o /opt/kafka-0.11.0.3/libs/kafka-streams-0.11.0.3-test.jar +RUN curl -s "$KAFKA_MIRROR/kafka-streams-1.0.2-test.jar" -o /opt/kafka-1.0.2/libs/kafka-streams-1.0.2-test.jar +RUN curl -s "$KAFKA_MIRROR/kafka-streams-1.1.1-test.jar" -o /opt/kafka-1.1.1/libs/kafka-streams-1.1.1-test.jar +RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.0.1-test.jar" -o /opt/kafka-2.0.1/libs/kafka-streams-2.0.1-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.1.1-test.jar" -o /opt/kafka-2.1.1/libs/kafka-streams-2.1.1-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.2.2-test.jar" -o /opt/kafka-2.2.2/libs/kafka-streams-2.2.2-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.3.1-test.jar" -o /opt/kafka-2.3.1/libs/kafka-streams-2.3.1-test.jar @@ -112,9 +124,22 @@ RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.3.2-test.jar" -o /opt/kafka-3.3.2/lib RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.4.1-test.jar" -o /opt/kafka-3.4.1/libs/kafka-streams-3.4.1-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.5.2-test.jar" -o /opt/kafka-3.5.2/libs/kafka-streams-3.5.2-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.6.2-test.jar" -o /opt/kafka-3.6.2/libs/kafka-streams-3.6.2-test.jar -RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.7.1-test.jar" -o /opt/kafka-3.7.1/libs/kafka-streams-3.7.1-test.jar +RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.7.2-test.jar" -o /opt/kafka-3.7.2/libs/kafka-streams-3.7.2-test.jar RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.8.1-test.jar" -o /opt/kafka-3.8.1/libs/kafka-streams-3.8.1-test.jar - +RUN curl -s "$KAFKA_MIRROR/kafka-streams-3.9.0-test.jar" -o /opt/kafka-3.9.0/libs/kafka-streams-3.9.0-test.jar + +# To ensure the Kafka cluster starts successfully under JDK 17, we need to update the Zookeeper +# client from version 3.4.x to 3.5.7 in Kafka versions 2.1.1, 2.2.2, and 2.3.1, as the older Zookeeper +# client is incompatible with JDK 17. See KAFKA-17888 for more details. +RUN curl -s "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.7/zookeeper-3.5.7.jar" -o /opt/zookeeper-3.5.7.jar +RUN curl -s "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.7/zookeeper-jute-3.5.7.jar" -o /opt/zookeeper-jute-3.5.7.jar +RUN rm -f /opt/kafka-2.1.1/libs/zookeeper-* && rm -f /opt/kafka-2.2.2/libs/zookeeper-* && rm -f /opt/kafka-2.3.1/libs/zookeeper-* +RUN cp /opt/zookeeper-3.5.7.jar /opt/kafka-2.1.1/libs/zookeeper-3.5.7.jar && chmod a+rw /opt/kafka-2.1.1/libs/zookeeper-3.5.7.jar +RUN cp /opt/zookeeper-3.5.7.jar /opt/kafka-2.2.2/libs/zookeeper-3.5.7.jar && chmod a+rw /opt/kafka-2.2.2/libs/zookeeper-3.5.7.jar +RUN cp /opt/zookeeper-3.5.7.jar /opt/kafka-2.3.1/libs/zookeeper-3.5.7.jar && chmod a+rw /opt/kafka-2.3.1/libs/zookeeper-3.5.7.jar +RUN cp /opt/zookeeper-jute-3.5.7.jar /opt/kafka-2.1.1/libs/zookeeper-jute-3.5.7.jar && chmod a+rw /opt/kafka-2.1.1/libs/zookeeper-jute-3.5.7.jar +RUN cp /opt/zookeeper-jute-3.5.7.jar /opt/kafka-2.2.2/libs/zookeeper-jute-3.5.7.jar && chmod a+rw /opt/kafka-2.2.2/libs/zookeeper-jute-3.5.7.jar +RUN cp /opt/zookeeper-jute-3.5.7.jar /opt/kafka-2.3.1/libs/zookeeper-jute-3.5.7.jar && chmod a+rw /opt/kafka-2.3.1/libs/zookeeper-jute-3.5.7.jar # The version of Kibosh to use for testing. 
# If you update this, also update vagrant/base.sh ARG KIBOSH_VERSION="8841dd392e6fbf02986e2fb1f1ebf04df344b65a" diff --git a/tests/docker/ducker-ak b/tests/docker/ducker-ak index b632604145ec0..8283438476196 100755 --- a/tests/docker/ducker-ak +++ b/tests/docker/ducker-ak @@ -38,7 +38,7 @@ tmp_native_dir=${ducker_dir}/native # This does not include swap. docker_build_memory_limit="3200m" -# The maximum mmemory consumption to allow in containers. +# The maximum memory consumption to allow in containers. docker_run_memory_limit="2000m" # The default number of cluster nodes to bring up if a number is not specified. @@ -489,6 +489,18 @@ EOF exec 3>&- } +correct_latest_link() { + local result_dir="${kafka_dir}/results" + local latest_link="${result_dir}/latest" + + # Correct the link if it's a symbolic link and broken. + if [[ -L "${latest_link}" ]] && [[ ! -e "${latest_link}" ]]; then + local latest_test_dirname=$(basename "$(readlink "${latest_link}")") + unlink "${latest_link}" + ln -s "${result_dir}/${latest_test_dirname}" "${latest_link}" + fi +} + ducker_test() { require_commands docker docker inspect ducker01 &>/dev/null || \ @@ -529,7 +541,10 @@ ducker_test() { cmd="cd /opt/kafka-dev && ${ducktape_cmd} --cluster-file /opt/kafka-dev/tests/docker/build/cluster.json $test_names $ducktape_args" echo "docker exec ducker01 bash -c \"${cmd}\"" - exec docker exec --user=ducker ducker01 bash -c "${cmd}" + docker exec --user=ducker ducker01 bash -c "${cmd}" + docker_status=$? + correct_latest_link + exit "${docker_status}" } ducker_ssh() { diff --git a/bin/zookeeper-security-migration.sh b/tests/docker/requirements.txt old mode 100755 new mode 100644 similarity index 89% rename from bin/zookeeper-security-migration.sh rename to tests/docker/requirements.txt index 722bde7cc4c62..91eaae441b46b --- a/bin/zookeeper-security-migration.sh +++ b/tests/docker/requirements.txt @@ -1,4 +1,3 @@ -#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -14,4 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
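Editor's note: the `correct_latest_link` helper added to `ducker-ak` above repairs the `results/latest` symlink that is left dangling when the link target was created inside the container. A rough Python rendering of the same idea, assuming the same `<kafka_dir>/results/latest` layout as the shell script:

```python
# Rough Python rendering of the correct_latest_link shell helper added to ducker-ak
# above: if results/latest is a dangling symlink, re-point it at the directory of
# the same name under results/.
import os

def correct_latest_link(kafka_dir: str) -> None:
    result_dir = os.path.join(kafka_dir, "results")
    latest = os.path.join(result_dir, "latest")
    if os.path.islink(latest) and not os.path.exists(latest):
        target_name = os.path.basename(os.readlink(latest))
        os.unlink(latest)
        os.symlink(os.path.join(result_dir, target_name), latest)
```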
-exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@" +cffi +virtualenv +pyasn1 +boto3 +pycrypto +pywinrm +ipaddress +debugpy +psutil +ducktape==0.12.0 \ No newline at end of file diff --git a/tests/kafkatest/__init__.py b/tests/kafkatest/__init__.py index cf5a5d17f5929..99c9dfe22e7f8 100644 --- a/tests/kafkatest/__init__.py +++ b/tests/kafkatest/__init__.py @@ -22,4 +22,4 @@ # Instead, in development branches, the version should have a suffix of the form ".devN" # # For example, when Kafka is at version 1.0.0-SNAPSHOT, this should be something like "1.0.0.dev0" -__version__ = '4.0.0.dev0' +__version__ = '4.1.0.dev0' diff --git a/tests/kafkatest/services/connect.py b/tests/kafkatest/services/connect.py index c84a3ec43c31e..e09eba30b3e03 100644 --- a/tests/kafkatest/services/connect.py +++ b/tests/kafkatest/services/connect.py @@ -25,7 +25,7 @@ from ducktape.utils.util import wait_until from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_connect class ConnectServiceBase(KafkaPathResolverMixin, Service): @@ -38,7 +38,6 @@ class ConnectServiceBase(KafkaPathResolverMixin, Service): LOG_FILE = os.path.join(PERSISTENT_ROOT, "connect.log") STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "connect.stdout") STDERR_FILE = os.path.join(PERSISTENT_ROOT, "connect.stderr") - LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect-log4j.properties") PID_FILE = os.path.join(PERSISTENT_ROOT, "connect.pid") EXTERNAL_CONFIGS_FILE = os.path.join(PERSISTENT_ROOT, "connect-external-configs.properties") CONNECT_REST_PORT = 8083 @@ -340,7 +339,8 @@ def node(self): return self.nodes[0] def start_cmd(self, node, connector_configs): - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE + cmd = "( export KAFKA_LOG4J_OPTS=\"%s%s\"; " % \ + (get_log4j_config_param(node), os.path.join(self.PERSISTENT_ROOT, get_log4j_config_for_connect(node))) heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["connect_heap_dump_file"]["path"] other_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -364,7 +364,8 @@ def start_node(self, node, **kwargs): if self.external_config_template_func: node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node)) node.account.create_file(self.CONFIG_FILE, self.config_template_func(node)) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(os.path.join(self.PERSISTENT_ROOT, get_log4j_config_for_connect(node)), + self.render(get_log4j_config_for_connect(node), log_file=self.LOG_FILE)) remote_connector_configs = [] for idx, template in enumerate(self.connector_config_templates): target_file = os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties") @@ -400,7 +401,8 @@ def __init__(self, context, num_nodes, kafka, files, offsets_topic="connect-offs # connector_configs argument is intentionally ignored in distributed service. 
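Editor's note: the Connect service (and the other services touched below) now builds `KAFKA_LOG4J_OPTS` from the version-aware helpers `get_log4j_config_param` and `get_log4j_config_for_connect` rather than a hard-coded log4j 1.x flag. A self-contained sketch of the switch those helpers perform; the string-based version comparison and the `/mnt/connect` directory are simplifications for the example (the real code compares kafkatest version objects and uses the service's PERSISTENT_ROOT).

```python
# Self-contained sketch of the version switch performed by the new
# get_log4j_config_param / get_log4j_config_for_connect helpers (simplified:
# the real code compares kafkatest version objects, not strings).
def connect_log4j_opts(node_version: str, config_dir: str = "/mnt/connect") -> str:
    uses_log4j2 = tuple(int(p) for p in node_version.split(".")[:2]) >= (4, 0)
    param = "-Dlog4j2.configurationFile=file:" if uses_log4j2 else "-Dlog4j.configuration=file:"
    config = "connect_log4j2.yaml" if uses_log4j2 else "connect_log4j.properties"
    return param + config_dir + "/" + config

# connect_log4j_opts("3.8.1") -> '-Dlog4j.configuration=file:/mnt/connect/connect_log4j.properties'
# connect_log4j_opts("4.0.0") -> '-Dlog4j2.configurationFile=file:/mnt/connect/connect_log4j2.yaml'
```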
def start_cmd(self, node, connector_configs): - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE + cmd = ("( export KAFKA_LOG4J_OPTS=\"%s%s\"; " % + (get_log4j_config_param(node), os.path.join(self.PERSISTENT_ROOT, get_log4j_config_for_connect(node)))) heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["connect_heap_dump_file"]["path"] other_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -421,7 +423,8 @@ def start_node(self, node, **kwargs): if self.external_config_template_func: node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node)) node.account.create_file(self.CONFIG_FILE, self.config_template_func(node)) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(os.path.join(self.PERSISTENT_ROOT, get_log4j_config_for_connect(node)), + self.render(get_log4j_config_for_connect(node), log_file=self.LOG_FILE)) if self.connector_config_templates: raise DucktapeError("Config files are not valid in distributed mode, submit connectors via the REST API") diff --git a/tests/kafkatest/services/console_consumer.py b/tests/kafkatest/services/console_consumer.py index cc8a0d319970e..9755faa19696d 100644 --- a/tests/kafkatest/services/console_consumer.py +++ b/tests/kafkatest/services/console_consumer.py @@ -21,8 +21,8 @@ from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin from kafkatest.services.monitor.jmx import JmxMixin, JmxTool -from kafkatest.version import DEV_BRANCH, LATEST_3_7 -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from kafkatest.version import DEV_BRANCH, LATEST_3_7, get_version, LATEST_4_0 +from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_tools """ The console consumer is a tool that reads data from Kafka and outputs it to standard output. 
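Editor's note: with the `new_consumer` flag and the ZooKeeper connect path removed, the console consumer service is always driven by bootstrap servers. A hedged usage sketch follows; `test_context` and `kafka` stand in for objects a real ducktape test already has, and the argument values are illustrative only.

```python
# Hypothetical ducktape-test usage of the updated ConsoleConsumer (the new_consumer
# flag no longer exists; the consumer always connects via bootstrap servers).
# `test_context` and `kafka` stand in for objects a real ducktape test provides.
from kafkatest.services.console_consumer import ConsoleConsumer

consumer = ConsoleConsumer(test_context, num_nodes=1, kafka=kafka, topic="test-topic",
                           from_beginning=True, consumer_timeout_ms=10000)
consumer.start()
```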
@@ -36,7 +36,6 @@ class ConsoleConsumer(KafkaPathResolverMixin, JmxMixin, BackgroundThreadService) STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stderr") LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs") LOG_FILE = os.path.join(LOG_DIR, "console_consumer.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "console_consumer.properties") JMX_TOOL_LOG = os.path.join(PERSISTENT_ROOT, "jmx_tool.log") JMX_TOOL_ERROR_LOG = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log") @@ -59,7 +58,7 @@ class ConsoleConsumer(KafkaPathResolverMixin, JmxMixin, BackgroundThreadService) "collect_default": False} } - def __init__(self, context, num_nodes, kafka, topic, group_id="test-consumer-group", new_consumer=True, + def __init__(self, context, num_nodes, kafka, topic, group_id="test-consumer-group", message_validator=None, from_beginning=True, consumer_timeout_ms=None, version=DEV_BRANCH, client_id="console-consumer", print_key=False, jmx_object_names=None, jmx_attributes=None, enable_systest_events=False, stop_timeout_sec=35, print_timestamp=False, print_partition=False, @@ -72,7 +71,6 @@ def __init__(self, context, num_nodes, kafka, topic, group_id="test-consumer-gro num_nodes: number of nodes to use (this should be 1) kafka: kafka service topic: consume from this topic - new_consumer: use new Kafka consumer if True message_validator: function which returns message or None from_beginning: consume from beginning if True, else from the end consumer_timeout_ms: corresponds to consumer.timeout.ms. consumer process ends if time between @@ -96,7 +94,6 @@ def __init__(self, context, num_nodes, kafka, topic, group_id="test-consumer-gro root=ConsoleConsumer.PERSISTENT_ROOT) BackgroundThreadService.__init__(self, context, num_nodes) self.kafka = kafka - self.new_consumer = new_consumer self.group_id = group_id self.args = { 'topic': topic, @@ -145,12 +142,11 @@ def start_cmd(self, node): """Return the start command appropriate for the given node.""" args = self.args.copy() args['broker_list'] = self.kafka.bootstrap_servers(self.security_config.security_protocol) - if not self.new_consumer: - args['zk_connect'] = self.kafka.zk_connect_setting() args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE args['stderr'] = ConsoleConsumer.STDERR_CAPTURE args['log_dir'] = ConsoleConsumer.LOG_DIR - args['log4j_config'] = ConsoleConsumer.LOG4J_CONFIG + args['log4j_param'] = get_log4j_config_param(node) + args['log4j_config'] = get_log4j_config_for_tools(node) args['config_file'] = ConsoleConsumer.CONFIG_FILE args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE args['jmx_port'] = self.jmx_port @@ -164,7 +160,7 @@ def start_cmd(self, node): cmd = fix_opts_for_new_jvm(node) cmd += "export JMX_PORT=%(jmx_port)s; " \ "export LOG_DIR=%(log_dir)s; " \ - "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j_config)s\"; " \ + "export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j_config)s\"; " \ "export KAFKA_OPTS=%(kafka_opts)s; " \ "%(console_consumer)s " \ "--topic %(topic)s " \ @@ -230,8 +226,8 @@ def _worker(self, idx, node): node.account.create_file(ConsoleConsumer.CONFIG_FILE, prop_file) # Create and upload log properties - log_config = self.render('tools_log4j.properties', log_file=ConsoleConsumer.LOG_FILE) - node.account.create_file(ConsoleConsumer.LOG4J_CONFIG, log_config) + log_config = self.render(get_log4j_config_for_tools(node), log_file=ConsoleConsumer.LOG_FILE) + node.account.create_file(get_log4j_config_for_tools(node), log_config) # Run and 
capture output cmd = self.start_cmd(node) diff --git a/tests/kafkatest/services/kafka/config_property.py b/tests/kafkatest/services/kafka/config_property.py index c9b7e037e70e9..70b69e1185737 100644 --- a/tests/kafkatest/services/kafka/config_property.py +++ b/tests/kafkatest/services/kafka/config_property.py @@ -157,8 +157,6 @@ val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol" val InterBrokerProtocolVersionProp = "inter.broker.protocol.version" /** ********* Controlled shutdown configuration ***********/ - val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries" - val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms" val ControlledShutdownEnableProp = "controlled.shutdown.enable" /** ********* Consumer coordinator configuration ***********/ val ConsumerMinSessionTimeoutMsProp = "consumer.min.session.timeout.ms" diff --git a/tests/kafkatest/services/kafka/kafka.py b/tests/kafkatest/services/kafka/kafka.py index 1d9444da204c8..018bf38252976 100644 --- a/tests/kafkatest/services/kafka/kafka.py +++ b/tests/kafkatest/services/kafka/kafka.py @@ -33,7 +33,8 @@ from kafkatest.services.security.security_config import SecurityConfig from kafkatest.version import DEV_BRANCH from kafkatest.version import KafkaVersion -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from kafkatest.version import get_version +from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config class KafkaListener: @@ -145,7 +146,6 @@ class for details. """ PERSISTENT_ROOT = "/mnt/kafka" STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties") # Logs such as controller.log, server.log, etc all go here OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs") OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info") @@ -206,6 +206,7 @@ def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAI use_new_coordinator=None, consumer_group_migration_policy=None, dynamicRaftQuorum=False, + use_transactions_v2=False ): """ :param context: test context @@ -254,7 +255,7 @@ def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAI :param jmx_attributes: :param int zk_connect_timeout: :param int zk_session_timeout: - :param list[list] server_prop_overrides: overrides for kafka.properties file + :param list[list] server_prop_overrides: overrides for kafka.properties file, if the second value is None or "", it will be filtered e.g: [["config1", "true"], ["config2", "1000"]] :param str zk_chroot: :param bool zk_client_secure: connect to Zookeeper over secure client port (TLS) when True @@ -268,7 +269,8 @@ def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAI :param quorum_info_provider: A function that takes this KafkaService as an argument and returns a ServiceQuorumInfo. If this is None, then the ServiceQuorumInfo is generated from the test context :param use_new_coordinator: When true, use the new implementation of the group coordinator as per KIP-848. If this is None, the default existing group coordinator is used. :param consumer_group_migration_policy: The config that enables converting the non-empty classic group using the consumer embedded protocol to the non-empty consumer group using the consumer group protocol and vice versa. 
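Editor's note: the new `server_prop_overrides` semantics described above (entries whose value is `None` or `""` are dropped before `kafka.properties` is rendered) amount to a one-line dict comprehension. A small illustration with made-up property names:

```python
# Illustration of the override filtering described above; property names are made
# up for the example. Entries whose value is None or "" are dropped before
# kafka.properties is rendered.
overrides = [["transaction.state.log.replication.factor", "3"],
             ["controlled.shutdown.enable", None],
             ["log.retention.ms", ""]]
configs = dict(overrides)
filtered = {k: v for k, v in configs.items() if v not in [None, ""]}
assert filtered == {"transaction.state.log.replication.factor": "3"}
```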
- :param dynamicRaftQuorum: When true, the quorum uses kraft.version=1, controller_quorum_bootstrap_servers, and bootstraps the first controller using the standalone flag + :param dynamicRaftQuorum: When true, controller_quorum_bootstrap_servers, and bootstraps the first controller using the standalone flag + :param use_transactions_v2: When true, uses transaction.version=2 which utilizes the new transaction protocol introduced in KIP-890 """ self.zk = zk @@ -293,6 +295,7 @@ def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAI # Assign the determined value. self.use_new_coordinator = use_new_coordinator + self.use_transactions_v2 = use_transactions_v2 # Set consumer_group_migration_policy based on context and arguments. if consumer_group_migration_policy is None: @@ -599,12 +602,6 @@ def start_minikdc_if_necessary(self, add_principals=""): nodes_for_kdc += other_service.nodes self.minikdc = MiniKdc(self.context, nodes_for_kdc, extra_principals = add_principals) self.minikdc.start() - else: - self.minikdc = None - if self.quorum_info.using_kraft: - self.controller_quorum.minikdc = None - if self.isolated_kafka: - self.isolated_kafka.minikdc = None def alive(self, node): return len(self.pids(node)) > 0 @@ -784,7 +781,8 @@ def prop_file(self, node): #update template configs with test override configs configs.update(override_configs) - prop_file = self.render_configs(configs) + filtered_configs = {k: v for k, v in configs.items() if v not in [None, ""]} + prop_file = self.render_configs(filtered_configs) return prop_file def render_configs(self, configs): @@ -805,7 +803,7 @@ def start_cmd(self, node): kafka_mode = self.context.globals.get("kafka_mode", "") cmd = f"export KAFKA_MODE={kafka_mode}; " cmd += "export JMX_PORT=%d; " % self.jmx_port - cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG + cmd += "export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), os.path.join(self.PERSISTENT_ROOT, get_log4j_config(node))) heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["kafka_heap_dump_file"]["path"] security_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -874,20 +872,25 @@ def start_node(self, node, timeout_sec=60, **kwargs): self.logger.info("kafka.properties:") self.logger.info(prop_file) node.account.create_file(KafkaService.CONFIG_FILE, prop_file) - node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR)) + node.account.create_file(os.path.join(self.PERSISTENT_ROOT, get_log4j_config(node)), + self.render(get_log4j_config(node), log_dir=KafkaService.OPERATIONAL_LOG_DIR)) if self.quorum_info.using_kraft: # format log directories if necessary kafka_storage_script = self.path.script("kafka-storage.sh", node) cmd = "%s format --ignore-formatted --config %s --cluster-id %s" % (kafka_storage_script, KafkaService.CONFIG_FILE, config_property.CLUSTER_ID) if self.dynamicRaftQuorum: - cmd += " --feature kraft.version=1" if self.node_quorum_info.has_controller_role: if self.standalone_controller_bootstrapped: cmd += " --no-initial-controllers" else: cmd += " --standalone" self.standalone_controller_bootstrapped = True + if self.use_transactions_v2: + cmd += " --feature transaction.version=2" + else: + if get_version(node).supports_feature_command(): + cmd += " --feature transaction.version=0" self.logger.info("Running log directory format command...\n%s" % cmd) node.account.ssh(cmd) @@ -921,24 +924,29 @@ def 
wait_for_start(self, node, monitor, timeout_sec=60): raise Exception("No process ids recorded on node %s" % node.account.hostname) def upgrade_metadata_version(self, new_version): - self.run_features_command("upgrade", new_version) + self.run_metadata_features_command("upgrade", new_version) def downgrade_metadata_version(self, new_version): - self.run_features_command("downgrade", new_version) + self.run_metadata_features_command("downgrade", new_version) - def run_features_command(self, op, new_version): + def run_metadata_features_command(self, op, new_version): cmd = self.path.script("kafka-features.sh ") cmd += "--bootstrap-server %s " % self.bootstrap_servers() cmd += "%s --metadata %s" % (op, new_version) self.logger.info("Running %s command...\n%s" % (op, cmd)) self.nodes[0].account.ssh(cmd) + def run_features_command(self, op, feature, new_version): + cmd = self.path.script("kafka-features.sh ") + cmd += "--bootstrap-server %s " % self.bootstrap_servers() + cmd += "%s --feature %s=%s" % (op, feature, new_version) + self.logger.info("Running %s command...\n%s" % (op, cmd)) + self.nodes[0].account.ssh(cmd) + def pids(self, node): """Return process ids associated with running processes on the given node.""" try: - cmd = "ps ax | grep -i %s | grep -v grep | awk '{print $1}'" % self.java_class_name() - pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)] - return pid_arr + return node.account.java_pids(self.java_class_name()) except (RemoteCommandError, ValueError) as e: return [] @@ -1920,4 +1928,4 @@ def get_offset_shell(self, time=None, topic=None, partitions=None, topic_partiti return output def java_class_name(self): - return "kafka.Kafka" + return "kafka\.Kafka" diff --git a/tests/kafkatest/services/kafka/templates/log4j2.yaml b/tests/kafkatest/services/kafka/templates/log4j2.yaml new file mode 100644 index 0000000000000..22e3f118f680d --- /dev/null +++ b/tests/kafkatest/services/kafka/templates/log4j2.yaml @@ -0,0 +1,283 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
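Editor's note: for the feature-flag plumbing added to `KafkaService` above, the split between `run_metadata_features_command` and the new per-feature `run_features_command` is easiest to see from the command each one builds. A hedged usage sketch; `kafka` stands in for a started `KafkaService` instance and the version values are example inputs.

```python
# Hedged usage sketch of the feature helpers added to KafkaService above; `kafka`
# stands in for a started KafkaService instance inside a ducktape test, and the
# version values are examples.
kafka.run_metadata_features_command("upgrade", "3.9")
# builds: kafka-features.sh --bootstrap-server <servers> upgrade --metadata 3.9

kafka.run_features_command("upgrade", "transaction.version", 2)
# builds: kafka-features.sh --bootstrap-server <servers> upgrade --feature transaction.version=2
```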
+Configuration: + Properties: + Property: + - name: "log_dir" + value: {{ log_dir }} + - name: "logPattern" + value: "[%d] %p %m (%c)%n" + + Appenders: + Console: + name: STDOUT + PatternLayout: + pattern: "${logPattern}" + + RollingFile: + - name: KafkaInfoAppender + fileName: "${log_dir}/info/server.log" + filePattern: "${log_dir}/info/server.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: StateChangeInfoAppender + fileName: "${log_dir}/info/state-change.log" + filePattern: "${log_dir}/info/state-change.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: RequestInfoAppender + fileName: "${log_dir}/info/kafka-request.log" + filePattern: "${log_dir}/info/kafka-request.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: CleanerInfoAppender + fileName: "${log_dir}/info/log-cleaner.log" + filePattern: "${log_dir}/info/log-cleaner.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: ControllerInfoAppender + fileName: "${log_dir}/info/controller.log" + filePattern: "${log_dir}/info/controller.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: AuthorizerInfoAppender + fileName: "${log_dir}/info/kafka-authorizer.log" + filePattern: "${log_dir}/info/kafka-authorizer.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: INFO + onMatch: ACCEPT + + - name: KafkaDebugAppender + fileName: "${log_dir}/debug/server.log" + filePattern: "${log_dir}/debug/server.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + - name: StateChangeDebugAppender + fileName: "${log_dir}/debug/state-change.log" + filePattern: "${log_dir}/debug/state-change.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + - name: RequestDebugAppender + fileName: "${log_dir}/debug/kafka-request.log" + filePattern: "${log_dir}/debug/kafka-request.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + - name: CleanerDebugAppender + fileName: "${log_dir}/debug/log-cleaner.log" + filePattern: "${log_dir}/debug/log-cleaner.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + - name: ControllerDebugAppender + fileName: "${log_dir}/debug/controller.log" + filePattern: "${log_dir}/debug/controller.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + - name: AuthorizerDebugAppender + fileName: 
"${log_dir}/debug/kafka-authorizer.log" + filePattern: "${log_dir}/debug/kafka-authorizer.log.%d{yyyy-MM-dd-HH}" + PatternLayout: + pattern: "${logPattern}" + TimeBasedTriggeringPolicy: + interval: 1 + Filters: + ThresholdFilter: + level: DEBUG + onMatch: ACCEPT + + Loggers: + Root: + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: STDOUT + + Logger: + - name: kafka.producer.async.DefaultEventHandler + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender + + - name: kafka.client.ClientUtils + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender + + - name: kafka.perf + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender + + - name: kafka.perf.ProducerPerformance$ProducerThread + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender + + - name: kafka + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender + + - name: kafka.network.RequestChannel$ + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: RequestInfoAppender + - ref: RequestDebugAppender + + - name: kafka.network.Processor + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: RequestInfoAppender + - ref: RequestDebugAppender + + - name: kafka.server.KafkaApis + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: RequestInfoAppender + - ref: RequestDebugAppender + + - name: kafka.request.logger + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: RequestInfoAppender + - ref: RequestDebugAppender + + - name: org.apache.kafka.raft + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: ControllerInfoAppender + - ref: ControllerDebugAppender + + - name: org.apache.kafka.controller + level: {{ log_level|default("DEBUG") }} + AppenderRef: + - ref: ControllerInfoAppender + - ref: ControllerDebugAppender + + - name: kafka.controller + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: ControllerInfoAppender + - ref: ControllerDebugAppender + + - name: kafka.log.LogCleaner + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: CleanerInfoAppender + - ref: CleanerDebugAppender + + - name: state.change.logger + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: StateChangeInfoAppender + - ref: StateChangeDebugAppender + + - name: kafka.authorizer.logger + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: AuthorizerInfoAppender + - ref: AuthorizerDebugAppender + + - name: org.apache.kafka.coordinator.group + level: {{ log_level|default("DEBUG") }} + additivity: false + AppenderRef: + - ref: KafkaInfoAppender + - ref: KafkaDebugAppender diff --git a/tests/kafkatest/services/kafka/util.py b/tests/kafkatest/services/kafka/util.py index 0965fd9d4e4c3..a2e22ac32b7c3 100644 --- a/tests/kafkatest/services/kafka/util.py +++ b/tests/kafkatest/services/kafka/util.py @@ -16,6 +16,7 @@ from collections import namedtuple from kafkatest.utils.remote_account import java_version +from kafkatest.version import LATEST_4_0, get_version TopicPartition = namedtuple('TopicPartition', ['topic', 'partition']) @@ -30,4 +31,20 @@ def fix_opts_for_new_jvm(node): return "" +def get_log4j_config_param(node): + return 
'-Dlog4j2.configurationFile=file:' if get_version(node) >= LATEST_4_0 else '-Dlog4j.configuration=file:' +def get_log4j_config(node): + return 'log4j2.yaml' if get_version(node) >= LATEST_4_0 else 'log4j.properties' + +def get_log4j_config_for_connect(node): + return 'connect_log4j2.yaml' if get_version(node) >= LATEST_4_0 else 'connect_log4j.properties' + +def get_log4j_config_for_tools(node): + return 'tools_log4j2.yaml' if get_version(node) >= LATEST_4_0 else 'tools_log4j.properties' + +def get_log4j_config_for_trogdor_coordinator(node): + return 'trogdor-coordinator-log4j2.yaml' if get_version(node) >= LATEST_4_0 else 'trogdor-coordinator-log4j.properties' + +def get_log4j_config_for_trogdor_agent(node): + return 'trogdor-agent-log4j2.yaml' if get_version(node) >= LATEST_4_0 else 'trogdor-agent-log4j.properties' diff --git a/tests/kafkatest/services/performance/consumer_performance.py b/tests/kafkatest/services/performance/consumer_performance.py index eea91cbfd90c5..28086e8281887 100644 --- a/tests/kafkatest/services/performance/consumer_performance.py +++ b/tests/kafkatest/services/performance/consumer_performance.py @@ -16,7 +16,7 @@ import os -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_tools from kafkatest.services.performance import PerformanceService from kafkatest.version import V_2_5_0, DEV_BRANCH @@ -49,7 +49,6 @@ class ConsumerPerformanceService(PerformanceService): STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "consumer_performance.stdout") STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "consumer_performance.stderr") LOG_FILE = os.path.join(LOG_DIR, "consumer_performance.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "consumer.properties") logs = { @@ -111,7 +110,7 @@ def start_cmd(self, node): cmd = fix_opts_for_new_jvm(node) cmd += "export LOG_DIR=%s;" % ConsumerPerformanceService.LOG_DIR cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts - cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\";" % ConsumerPerformanceService.LOG4J_CONFIG + cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\";" % (get_log4j_config_param(node), get_log4j_config_for_tools(node)) cmd += " %s" % self.path.script("kafka-consumer-perf-test.sh", node) for key, value in self.args(node.version).items(): cmd += " --%s %s" % (key, value) @@ -128,8 +127,8 @@ def start_cmd(self, node): def _worker(self, idx, node): node.account.ssh("mkdir -p %s" % ConsumerPerformanceService.PERSISTENT_ROOT, allow_fail=False) - log_config = self.render('tools_log4j.properties', log_file=ConsumerPerformanceService.LOG_FILE) - node.account.create_file(ConsumerPerformanceService.LOG4J_CONFIG, log_config) + log_config = self.render(get_log4j_config_for_tools(node), log_file=ConsumerPerformanceService.LOG_FILE) + node.account.create_file(get_log4j_config_for_tools(node), log_config) node.account.create_file(ConsumerPerformanceService.CONFIG_FILE, str(self.security_config)) self.security_config.setup_node(node) diff --git a/tests/kafkatest/services/performance/end_to_end_latency.py b/tests/kafkatest/services/performance/end_to_end_latency.py index e7e0100e5114c..1591555770594 100644 --- a/tests/kafkatest/services/performance/end_to_end_latency.py +++ b/tests/kafkatest/services/performance/end_to_end_latency.py @@ -15,9 +15,8 @@ import os -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from 
kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_tools from kafkatest.services.performance import PerformanceService -from kafkatest.services.security.security_config import SecurityConfig from kafkatest.version import get_version, V_3_4_0, DEV_BRANCH @@ -31,7 +30,6 @@ class EndToEndLatencyService(PerformanceService): STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "end_to_end_latency.stdout") STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "end_to_end_latency.stderr") LOG_FILE = os.path.join(LOG_DIR, "end_to_end_latency.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "client.properties") logs = { @@ -76,7 +74,7 @@ def start_cmd(self, node): }) cmd = fix_opts_for_new_jvm(node) - cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % EndToEndLatencyService.LOG4J_CONFIG + cmd += "export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node)) cmd += "KAFKA_OPTS=%(kafka_opts)s %(kafka_run_class)s %(java_class_name)s " % args cmd += "%(bootstrap_servers)s %(topic)s %(num_records)d %(acks)d %(message_bytes)d %(config_file)s" % args @@ -88,9 +86,9 @@ def start_cmd(self, node): def _worker(self, idx, node): node.account.ssh("mkdir -p %s" % EndToEndLatencyService.PERSISTENT_ROOT, allow_fail=False) - log_config = self.render('tools_log4j.properties', log_file=EndToEndLatencyService.LOG_FILE) + log_config = self.render(get_log4j_config_for_tools(node), log_file=EndToEndLatencyService.LOG_FILE) - node.account.create_file(EndToEndLatencyService.LOG4J_CONFIG, log_config) + node.account.create_file(get_log4j_config_for_tools(node), log_config) client_config = str(self.security_config) client_config += "compression_type=%(compression_type)s" % self.args node.account.create_file(EndToEndLatencyService.CONFIG_FILE, client_config) diff --git a/tests/kafkatest/services/performance/producer_performance.py b/tests/kafkatest/services/performance/producer_performance.py index acb0aec865085..acfe4790d731c 100644 --- a/tests/kafkatest/services/performance/producer_performance.py +++ b/tests/kafkatest/services/performance/producer_performance.py @@ -19,7 +19,7 @@ from ducktape.cluster.remoteaccount import RemoteCommandError from kafkatest.directory_layout.kafka_path import TOOLS_JAR_NAME, TOOLS_DEPENDANT_TEST_LIBS_JAR_NAME -from kafkatest.services.kafka.util import fix_opts_for_new_jvm +from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_tools from kafkatest.services.monitor.http import HttpMetricsCollector from kafkatest.services.performance import PerformanceService from kafkatest.services.security.security_config import SecurityConfig @@ -33,7 +33,6 @@ class ProducerPerformanceService(HttpMetricsCollector, PerformanceService): STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "producer_performance.stderr") LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs") LOG_FILE = os.path.join(LOG_DIR, "producer_performance.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") def __init__(self, context, num_nodes, kafka, topic, num_records, record_size, throughput, version=DEV_BRANCH, settings=None, intermediate_stats=False, client_id="producer-performance"): @@ -90,7 +89,7 @@ def start_cmd(self, node): cmd += "for file in %s; do CLASSPATH=$CLASSPATH:$file; done; " % jar cmd += "export CLASSPATH; " - cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % 
ProducerPerformanceService.LOG4J_CONFIG + cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node)) cmd += "KAFKA_OPTS=%(kafka_opts)s KAFKA_HEAP_OPTS=\"-XX:+HeapDumpOnOutOfMemoryError\" %(kafka_run_class)s org.apache.kafka.tools.ProducerPerformance " \ "--topic %(topic)s --num-records %(num_records)d --record-size %(record_size)d --throughput %(throughput)d --producer-props bootstrap.servers=%(bootstrap_servers)s client.id=%(client_id)s %(metrics_props)s" % args @@ -119,8 +118,8 @@ def _worker(self, idx, node): node.account.ssh("mkdir -p %s" % ProducerPerformanceService.PERSISTENT_ROOT, allow_fail=False) # Create and upload log properties - log_config = self.render('tools_log4j.properties', log_file=ProducerPerformanceService.LOG_FILE) - node.account.create_file(ProducerPerformanceService.LOG4J_CONFIG, log_config) + log_config = self.render(get_log4j_config_for_tools(node), log_file=ProducerPerformanceService.LOG_FILE) + node.account.create_file(get_log4j_config_for_tools(node), log_config) cmd = self.start_cmd(node) self.logger.debug("Producer performance %d command: %s", idx, cmd) diff --git a/metadata/src/test/resources/log4j.properties b/tests/kafkatest/services/performance/templates/tools_log4j2.yaml similarity index 60% rename from metadata/src/test/resources/log4j.properties rename to tests/kafkatest/services/performance/templates/tools_log4j2.yaml index db3879386f10f..5c5e1099f94fd 100644 --- a/metadata/src/test/resources/log4j.properties +++ b/tests/kafkatest/services/performance/templates/tools_log4j2.yaml @@ -1,9 +1,9 @@ # Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with +# contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at +# the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # @@ -12,11 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
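The recurring edit in these service files replaces the hard-coded `export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:.../tools-log4j.properties"` with the version-aware helpers `get_log4j_config_param(node)` and `get_log4j_config_for_tools(node)` from kafkatest.services.kafka.util. Their implementation is not shown in this patch; below is a minimal sketch, assuming they dispatch on the node's Kafka version (the V_4_0_0 cutoff and the Log4j 2 property name are assumptions for illustration, not taken from this diff):

    # Hypothetical sketch only; the real helpers live in tests/kafkatest/services/kafka/util.py.
    from kafkatest.version import get_version, V_4_0_0

    def get_log4j_config_param(node):
        # Older tools are assumed to still run Log4j 1.x, newer ones Log4j 2.
        return '-Dlog4j.configuration=file:' if get_version(node) < V_4_0_0 \
            else '-Dlog4j2.configurationFile=file:'

    def get_log4j_config_for_tools(node):
        # The returned name doubles as the template to render and the file the
        # service uploads, matching the call sites in this diff.
        return 'tools_log4j.properties' if get_version(node) < V_4_0_0 \
            else 'tools_log4j2.yaml'

With helpers shaped like this, each service can keep building the export as `"%s%s" % (param, config)` regardless of which Log4j generation the node runs.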
-log4j.rootLogger=DEBUG, stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +Configuration: + Appenders: + File: + name: FILE + fileName: {{ log_file }} + append: true + immediateFlush: true + PatternLayout: + pattern: "[%d] %p %m (%c)%n" -log4j.logger.org.apache.kafka=DEBUG -log4j.logger.org.apache.zookeeper=WARN + Loggers: + Root: + level: {{ log_level|default("INFO") }} + AppenderRef: + - ref: FILE diff --git a/tests/kafkatest/services/streams.py b/tests/kafkatest/services/streams.py index 3848fea686dd2..df8a0b3923073 100644 --- a/tests/kafkatest/services/streams.py +++ b/tests/kafkatest/services/streams.py @@ -22,6 +22,7 @@ from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin from kafkatest.services.kafka import KafkaConfig from kafkatest.services.monitor.jmx import JmxMixin +from .kafka.util import get_log4j_config_param, get_log4j_config_for_tools STATE_DIR = "state.dir" @@ -37,7 +38,6 @@ class StreamsTestBaseService(KafkaPathResolverMixin, JmxMixin, Service): STDERR_FILE = os.path.join(PERSISTENT_ROOT, "streams.stderr") JMX_LOG_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.log") JMX_ERR_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log") - LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") PID_FILE = os.path.join(PERSISTENT_ROOT, "streams.pid") CLEAN_NODE_ENABLED = True @@ -285,10 +285,11 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \ " %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \ " %(user_test_args4)s & echo $! 
>&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args @@ -305,7 +306,7 @@ def start_node(self, node): node.account.mkdirs(self.PERSISTENT_ROOT) prop_file = self.prop_file() node.account.create_file(self.CONFIG_FILE, prop_file) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(get_log4j_config_for_tools(node), self.render(get_log4j_config_for_tools(node), log_file=self.LOG_FILE)) self.logger.info("Starting StreamsTest process on " + str(node.account)) with node.account.monitor_log(self.STDOUT_FILE) as monitor: @@ -363,11 +364,12 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['version'] = self.KAFKA_STREAMS_VERSION args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\";" \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\";" \ " INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s" \ " %(kafka_run_class)s %(streams_class_name)s" \ " %(config_file)s %(user_test_args1)s" \ @@ -419,11 +421,12 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['disable_auto_terminate'] = self.DISABLE_AUTO_TERMINATE args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \ " %(config_file)s %(user_test_args1)s %(disable_auto_terminate)s" \ " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args @@ -496,10 +499,11 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \ " %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \ " %(user_test_args4)s & echo $! 
>&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args @@ -535,12 +539,13 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['application.id'] = self.applicationId args['input.topics'] = self.topic args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "(export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "(export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\";" \ "%(kafka_run_class)s %(streams_class_name)s " \ "--bootstrap-server %(bootstrap.servers)s " \ "--force " \ @@ -630,11 +635,12 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['version'] = self.KAFKA_STREAMS_VERSION args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \ " %(kafka_run_class)s %(streams_class_name)s %(config_file)s " \ " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args @@ -730,11 +736,12 @@ def start_cmd(self, node): args['stdout'] = self.STDOUT_FILE args['stderr'] = self.STDERR_FILE args['pidfile'] = self.PID_FILE - args['log4j'] = self.LOG4J_CONFIG_FILE + args['log4j_param'] = get_log4j_config_param(node) + args['log4j'] = get_log4j_config_for_tools(node) args['version'] = self.KAFKA_STREAMS_VERSION args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + cmd = "( export KAFKA_LOG4J_OPTS=\"%(log4j_param)s%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \ " %(kafka_run_class)s %(streams_class_name)s %(config_file)s " \ " & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args diff --git a/tests/kafkatest/services/templates/connect_log4j.properties b/tests/kafkatest/services/templates/connect_log4j.properties index 9c90543ebe982..51bcb6cd22763 100644 --- a/tests/kafkatest/services/templates/connect_log4j.properties +++ b/tests/kafkatest/services/templates/connect_log4j.properties @@ -24,3 +24,10 @@ log4j.appender.FILE.ImmediateFlush=true log4j.appender.FILE.Append=true log4j.appender.FILE.layout=org.apache.log4j.PatternLayout log4j.appender.FILE.layout.conversionPattern=[%d] %p %m (%c)%n + +# After removing org.reflections, the initial logger of worker only contains "root" logger. +# The test_dynamic_logging e2e test in ConnectDistributedTest requires at least one non-root logger +# to verify logger operations (assert len(initial_loggers) >= 2). +# Adding this logger configuration ensures admin/logger endpoint returns both "root" and +# "org.apache.kafka.clients.consumer.ConsumerConfig" loggers. 
+log4j.logger.org.apache.kafka.clients.consumer.ConsumerConfig=ERROR \ No newline at end of file diff --git a/tests/kafkatest/services/templates/connect_log4j2.yaml b/tests/kafkatest/services/templates/connect_log4j2.yaml new file mode 100644 index 0000000000000..9605970341866 --- /dev/null +++ b/tests/kafkatest/services/templates/connect_log4j2.yaml @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c)%n" + + Appenders: + File: + - name: FILE + fileName: {{ log_file }} + append: true + immediateFlush: true + PatternLayout: + pattern: "${logPattern}" + + Loggers: + Root: + level: {{ log_level|default("INFO") }} + AppenderRef: + - ref: FILE + + Logger: + - name: org.apache.kafka.clients.consumer.ConsumerConfig + level: ERROR diff --git a/tests/kafkatest/services/templates/tools_log4j2.yaml b/tests/kafkatest/services/templates/tools_log4j2.yaml new file mode 100644 index 0000000000000..2f41025d4850d --- /dev/null +++ b/tests/kafkatest/services/templates/tools_log4j2.yaml @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
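The tools_log4j2.yaml template that follows accepts, besides `log_file` and an optional `log_level`, an optional `loggers` mapping that is expanded into extra per-logger levels. None of the services in this patch pass it; a hypothetical render call mirroring the render()/create_file() pattern used above (the hard-coded template name, logger name, and levels are illustrative only):

    # Hypothetical usage sketch; the optional 'loggers' mapping adds per-logger levels.
    log_config = self.render('tools_log4j2.yaml',
                             log_file=self.LOG_FILE,
                             log_level='DEBUG',
                             loggers={'org.apache.kafka.clients.consumer': 'TRACE'})
    node.account.create_file(get_log4j_config_for_tools(node), log_config)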
+ +Configuration: + Appenders: + File: + name: FILE + fileName: {{ log_file }} + append: true + immediateFlush: true + PatternLayout: + pattern: "[%d] %p %m (%c)%n" + + Loggers: + Root: + level: {{ log_level|default("INFO") }} + AppenderRef: + - ref: FILE + + {% if loggers is defined %} + Logger: + # Add additional loggers dynamically if defined + {% for logger, log_level in loggers.items() %} + - name: {{ logger }} + level: {{ log_level }} + {% endfor %} + {% endif %} \ No newline at end of file diff --git a/tests/kafkatest/services/transactional_message_copier.py b/tests/kafkatest/services/transactional_message_copier.py index 564a23fdcc389..9a84f2b4c4285 100644 --- a/tests/kafkatest/services/transactional_message_copier.py +++ b/tests/kafkatest/services/transactional_message_copier.py @@ -22,6 +22,9 @@ from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin from ducktape.cluster.remoteaccount import RemoteCommandError +from kafkatest.services.kafka.util import get_log4j_config_param, get_log4j_config_for_tools + + class TransactionalMessageCopier(KafkaPathResolverMixin, BackgroundThreadService): """This service wraps org.apache.kafka.tools.TransactionalMessageCopier for use in system testing. @@ -31,7 +34,6 @@ class TransactionalMessageCopier(KafkaPathResolverMixin, BackgroundThreadService STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "transactional_message_copier.stderr") LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs") LOG_FILE = os.path.join(LOG_DIR, "transactional_message_copier.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") logs = { "transactional_message_copier_stdout": { @@ -75,9 +77,9 @@ def _worker(self, idx, node): node.account.ssh("mkdir -p %s" % TransactionalMessageCopier.PERSISTENT_ROOT, allow_fail=False) # Create and upload log properties - log_config = self.render('tools_log4j.properties', + log_config = self.render(get_log4j_config_for_tools(node), log_file=TransactionalMessageCopier.LOG_FILE) - node.account.create_file(TransactionalMessageCopier.LOG4J_CONFIG, log_config) + node.account.create_file(get_log4j_config_for_tools(node), log_config) # Configure security self.security_config = self.kafka.security_config.client_config(node=node) self.security_config.setup_node(node) @@ -114,7 +116,7 @@ def _worker(self, idx, node): def start_cmd(self, node, idx): cmd = "export LOG_DIR=%s;" % TransactionalMessageCopier.LOG_DIR cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts - cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % TransactionalMessageCopier.LOG4J_CONFIG + cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node)) cmd += self.path.script("kafka-run-class.sh", node) + " org.apache.kafka.tools." + "TransactionalMessageCopier" cmd += " --broker-list %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol) cmd += " --transactional-id %s" % self.transactional_id diff --git a/tests/kafkatest/services/trogdor/templates/log4j2.yaml b/tests/kafkatest/services/trogdor/templates/log4j2.yaml new file mode 100644 index 0000000000000..42c1aa281e783 --- /dev/null +++ b/tests/kafkatest/services/trogdor/templates/log4j2.yaml @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Configuration: + Properties: + Property: + - name: "logPattern" + value: "[%d] %p %m (%c)%n" + + Appenders: + File: + - name: MyFileLogger + fileName: {{ log_path }} + PatternLayout: + pattern: "${logPattern}" + Loggers: + Root: + level: DEBUG + AppenderRef: + - ref: MyFileLogger + + Logger: + - name: kafka + level: DEBUG + + - name: org.apache.kafka + level: DEBUG + + - name: org.eclipse + level: INFO diff --git a/tests/kafkatest/services/trogdor/trogdor.py b/tests/kafkatest/services/trogdor/trogdor.py index 3b941fe9059eb..adda231fbf88f 100644 --- a/tests/kafkatest/services/trogdor/trogdor.py +++ b/tests/kafkatest/services/trogdor/trogdor.py @@ -22,6 +22,8 @@ from ducktape.services.service import Service from ducktape.utils.util import wait_until from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin +from kafkatest.services.kafka.util import get_log4j_config_param, get_log4j_config, \ + get_log4j_config_for_trogdor_coordinator, get_log4j_config_for_trogdor_agent class TrogdorService(KafkaPathResolverMixin, Service): @@ -48,8 +50,6 @@ class TrogdorService(KafkaPathResolverMixin, Service): AGENT_STDOUT_STDERR = os.path.join(PERSISTENT_ROOT, "trogdor-agent-stdout-stderr.log") COORDINATOR_LOG = os.path.join(PERSISTENT_ROOT, "trogdor-coordinator.log") AGENT_LOG = os.path.join(PERSISTENT_ROOT, "trogdor-agent.log") - COORDINATOR_LOG4J_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-coordinator-log4j.properties") - AGENT_LOG4J_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-agent-log4j.properties") CONFIG_PATH = os.path.join(PERSISTENT_ROOT, "trogdor.conf") DEFAULT_AGENT_PORT=8888 DEFAULT_COORDINATOR_PORT=8889 @@ -141,26 +141,26 @@ def start_node(self, node): self._start_agent_node(node) def _start_coordinator_node(self, node): - node.account.create_file(TrogdorService.COORDINATOR_LOG4J_PROPERTIES, - self.render('log4j.properties', + node.account.create_file(get_log4j_config_for_trogdor_coordinator(node), + self.render(get_log4j_config(node), log_path=TrogdorService.COORDINATOR_LOG)) self._start_trogdor_daemon("coordinator", TrogdorService.COORDINATOR_STDOUT_STDERR, - TrogdorService.COORDINATOR_LOG4J_PROPERTIES, + get_log4j_config_for_trogdor_coordinator(node), TrogdorService.COORDINATOR_LOG, node) self.logger.info("Started trogdor coordinator on %s." % node.name) def _start_agent_node(self, node): - node.account.create_file(TrogdorService.AGENT_LOG4J_PROPERTIES, - self.render('log4j.properties', + node.account.create_file(get_log4j_config_for_trogdor_agent(node), + self.render(get_log4j_config(node), log_path=TrogdorService.AGENT_LOG)) self._start_trogdor_daemon("agent", TrogdorService.AGENT_STDOUT_STDERR, - TrogdorService.AGENT_LOG4J_PROPERTIES, + get_log4j_config_for_trogdor_agent(node), TrogdorService.AGENT_LOG, node) self.logger.info("Started trogdor agent on %s." 
% node.name) def _start_trogdor_daemon(self, daemon_name, stdout_stderr_capture_path, log4j_properties_path, log_path, node): - cmd = "export KAFKA_LOG4J_OPTS='-Dlog4j.configuration=file:%s'; " % log4j_properties_path + cmd = "export KAFKA_LOG4J_OPTS='%s%s'; " % (get_log4j_config_param(node), log4j_properties_path) cmd += "%s %s --%s.config %s --node-name %s 1>> %s 2>> %s &" % \ (self.path.script("trogdor.sh", node), daemon_name, @@ -170,7 +170,7 @@ def _start_trogdor_daemon(self, daemon_name, stdout_stderr_capture_path, stdout_stderr_capture_path, stdout_stderr_capture_path) node.account.ssh(cmd) - with node.account.monitor_log(log_path) as monitor: + with node.account.monitor_log(stdout_stderr_capture_path) as monitor: monitor.wait_until("Starting %s process." % daemon_name, timeout_sec=60, backoff_sec=.10, err_msg=("%s on %s didn't finish startup" % (daemon_name, node.name))) diff --git a/tests/kafkatest/services/verifiable_client.py b/tests/kafkatest/services/verifiable_client.py index 4971136a64e78..16617d621aab1 100644 --- a/tests/kafkatest/services/verifiable_client.py +++ b/tests/kafkatest/services/verifiable_client.py @@ -70,7 +70,7 @@ * `--group-id ` * `--topic ` * `--broker-list ` - * `--session-timeout ` + * `--session-timeout ` - note that this configuration is not supported when group protocol is consumer * `--enable-autocommit` * `--max-messages ` * `--assignment-strategy ` @@ -142,10 +142,10 @@ def create_verifiable_client_implementation(context, parent): """Factory for generating a verifiable client implementation class instance - :param parent: parent class instance, either VerifiableConsumer or VerifiableProducer + :param parent: parent class instance, either VerifiableConsumer, VerifiableProducer or VerifiableShareConsumer This will first check for a fully qualified client implementation class name - in context.globals as "Verifiable" where is "Producer" or "Consumer", + in context.globals as "Verifiable" where is "Producer" or "Consumer" or "ShareConsumer", followed by "VerifiableClient" (which should implement both). The global object layout is: {"class": "", "..anything..": ..}. @@ -232,11 +232,11 @@ def kill_signal (self, clean_shutdown=True): class VerifiableClientJava (VerifiableClient): """ - Verifiable Consumer and Producer using the official Java client. + Verifiable Consumer, ShareConsumer and Producer using the official Java client. 
""" def __init__(self, parent, conf=None): """ - :param parent: The parent instance, either VerifiableConsumer or VerifiableProducer + :param parent: The parent instance, either VerifiableConsumer, VerifiableShareConsumer or VerifiableProducer :param conf: Optional conf object (the --globals VerifiableX object) """ super(VerifiableClientJava, self).__init__() @@ -267,7 +267,7 @@ class VerifiableClientDummy (VerifiableClient): """ def __init__(self, parent, conf=None): """ - :param parent: The parent instance, either VerifiableConsumer or VerifiableProducer + :param parent: The parent instance, either VerifiableConsumer, VerifiableShareConsumer or VerifiableProducer :param conf: Optional conf object (the --globals VerifiableX object) """ super(VerifiableClientDummy, self).__init__() diff --git a/tests/kafkatest/services/verifiable_consumer.py b/tests/kafkatest/services/verifiable_consumer.py index 7e81ca1f7ceea..8264566f1c2b9 100644 --- a/tests/kafkatest/services/verifiable_consumer.py +++ b/tests/kafkatest/services/verifiable_consumer.py @@ -20,6 +20,7 @@ from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin from kafkatest.services.kafka import TopicPartition, consumer_group +from kafkatest.services.kafka.util import get_log4j_config_param, get_log4j_config_for_tools from kafkatest.services.verifiable_client import VerifiableClientMixin from kafkatest.version import DEV_BRANCH, V_2_3_0, V_2_3_1, V_3_7_0, V_4_0_0 @@ -215,7 +216,6 @@ class VerifiableConsumer(KafkaPathResolverMixin, VerifiableClientMixin, Backgrou STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "verifiable_consumer.stderr") LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs") LOG_FILE = os.path.join(LOG_DIR, "verifiable_consumer.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties") CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "verifiable_consumer.properties") logs = { @@ -231,10 +231,10 @@ class VerifiableConsumer(KafkaPathResolverMixin, VerifiableClientMixin, Backgrou } def __init__(self, context, num_nodes, kafka, topic, group_id, - static_membership=False, max_messages=-1, session_timeout_sec=30, enable_autocommit=False, + static_membership=False, max_messages=-1, session_timeout_sec=0, enable_autocommit=False, assignment_strategy=None, group_protocol=None, group_remote_assignor=None, version=DEV_BRANCH, stop_timeout_sec=30, log_level="INFO", jaas_override_variables=None, - on_record_consumed=None, reset_policy="earliest", verify_offsets=True): + on_record_consumed=None, reset_policy="earliest", verify_offsets=True, prop_file=""): """ :param jaas_override_variables: A dict of variables to be used in the jaas.conf template file """ @@ -251,9 +251,7 @@ def __init__(self, context, num_nodes, kafka, topic, group_id, self.session_timeout_sec = session_timeout_sec self.enable_autocommit = enable_autocommit self.assignment_strategy = assignment_strategy - self.group_protocol = group_protocol - self.group_remote_assignor = group_remote_assignor - self.prop_file = "" + self.prop_file = prop_file self.stop_timeout_sec = stop_timeout_sec self.on_record_consumed = on_record_consumed self.verify_offsets = verify_offsets @@ -298,8 +296,8 @@ def _worker(self, idx, node): node.account.ssh("mkdir -p %s" % VerifiableConsumer.PERSISTENT_ROOT, allow_fail=False) # Create and upload log properties - log_config = self.render('tools_log4j.properties', log_file=VerifiableConsumer.LOG_FILE) - node.account.create_file(VerifiableConsumer.LOG4J_CONFIG, log_config) + log_config = 
self.render(get_log4j_config_for_tools(node), log_file=VerifiableConsumer.LOG_FILE)
+        node.account.create_file(get_log4j_config_for_tools(node), log_config)
 
         # Create and upload config file
         self.security_config = self.kafka.security_config.client_config(self.prop_file, node,
@@ -382,7 +380,7 @@ def start_cmd(self, node):
         cmd = ""
         cmd += "export LOG_DIR=%s;" % VerifiableConsumer.LOG_DIR
         cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts
-        cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % VerifiableConsumer.LOG4J_CONFIG
+        cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node))
         cmd += self.impl.exec_cmd(node)
         if self.on_record_consumed:
             cmd += " --verbose"
@@ -417,10 +415,12 @@ def start_cmd(self, node):
         else:
             cmd += " --bootstrap-server %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol)
 
-        cmd += " --reset-policy %s --group-id %s --topic %s --session-timeout %s" % \
-              (self.reset_policy, self.group_id, self.topic,
-               self.session_timeout_sec*1000)
-
+        cmd += " --reset-policy %s --group-id %s --topic %s" % \
+              (self.reset_policy, self.group_id, self.topic)
+
+        if self.session_timeout_sec > 0:
+            cmd += " --session-timeout %s" % (self.session_timeout_sec * 1000)
+
         if self.max_messages > 0:
             cmd += " --max-messages %s" % str(self.max_messages)
 
diff --git a/tests/kafkatest/services/verifiable_producer.py b/tests/kafkatest/services/verifiable_producer.py
index ea6292d57725e..6f473d8bb13c0 100644
--- a/tests/kafkatest/services/verifiable_producer.py
+++ b/tests/kafkatest/services/verifiable_producer.py
@@ -24,7 +24,7 @@
 from kafkatest.services.verifiable_client import VerifiableClientMixin
 from kafkatest.utils import is_int, is_int_with_prefix
 from kafkatest.version import get_version, V_2_5_0, DEV_BRANCH
-from kafkatest.services.kafka.util import fix_opts_for_new_jvm
+from kafkatest.services.kafka.util import fix_opts_for_new_jvm, get_log4j_config_param, get_log4j_config_for_tools
 
 
 class VerifiableProducer(KafkaPathResolverMixin, VerifiableClientMixin, BackgroundThreadService):
@@ -41,7 +41,6 @@ class VerifiableProducer(KafkaPathResolverMixin, VerifiableClientMixin, Backgrou
     STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "verifiable_producer.stderr")
     LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
     LOG_FILE = os.path.join(LOG_DIR, "verifiable_producer.log")
-    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
     CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "verifiable_producer.properties")
 
     logs = {
@@ -127,8 +126,8 @@ def _worker(self, idx, node):
         node.account.ssh("mkdir -p %s" % VerifiableProducer.PERSISTENT_ROOT, allow_fail=False)
 
         # Create and upload log properties
-        log_config = self.render('tools_log4j.properties', log_file=VerifiableProducer.LOG_FILE)
-        node.account.create_file(VerifiableProducer.LOG4J_CONFIG, log_config)
+        log_config = self.render(get_log4j_config_for_tools(node), log_file=VerifiableProducer.LOG_FILE)
+        node.account.create_file(get_log4j_config_for_tools(node), log_config)
 
         # Configure security
         self.security_config = self.kafka.security_config.client_config(node=node,
@@ -222,7 +221,7 @@ def start_cmd(self, node, idx):
         cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts
 
         cmd += fix_opts_for_new_jvm(node)
-        cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % VerifiableProducer.LOG4J_CONFIG
+        cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node))
         cmd += 
self.impl.exec_cmd(node) version = get_version(node) if version >= V_2_5_0: diff --git a/tests/kafkatest/services/verifiable_share_consumer.py b/tests/kafkatest/services/verifiable_share_consumer.py new file mode 100644 index 0000000000000..ff39e106cf0fa --- /dev/null +++ b/tests/kafkatest/services/verifiable_share_consumer.py @@ -0,0 +1,330 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +from ducktape.services.background_thread import BackgroundThreadService + +from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin +from kafkatest.services.kafka import TopicPartition +from kafkatest.services.kafka.util import get_log4j_config_param, get_log4j_config_for_tools +from kafkatest.services.verifiable_client import VerifiableClientMixin +from kafkatest.version import DEV_BRANCH + +class ShareConsumerState: + Started = 1 + Dead = 2 + +class ShareConsumerEventHandler(object): + + def __init__(self, node, idx, state=ShareConsumerState.Dead): + self.node = node + self.idx = idx + self.total_consumed = 0 + self.total_acknowledged = 0 + self.total_acknowledged_failed = 0 + self.consumed_per_partition = {} + self.acknowledged_per_partition = {} + self.acknowledged_per_partition_failed = {} + self.state = state + + def handle_shutdown_complete(self, node=None, logger=None): + self.state = ShareConsumerState.Dead + if node is not None and logger is not None: + logger.debug("Shut down %s" % node.account.hostname) + + def handle_startup_complete(self, node, logger): + self.state = ShareConsumerState.Started + logger.debug("Started %s" % node.account.hostname) + + def handle_offsets_acknowledged(self, event, node, logger): + if event["success"]: + self.total_acknowledged += event["count"] + for share_partition_data in event["partitions"]: + topic_partition = TopicPartition(share_partition_data["topic"], share_partition_data["partition"]) + self.acknowledged_per_partition[topic_partition] = self.acknowledged_per_partition.get(topic_partition, 0) + share_partition_data["count"] + logger.debug("Offsets acknowledged for %s" % (node.account.hostname)) + else: + self.total_acknowledged_failed += event["count"] + for share_partition_data in event["partitions"]: + topic_partition = TopicPartition(share_partition_data["topic"], share_partition_data["partition"]) + self.acknowledged_per_partition_failed[topic_partition] = self.acknowledged_per_partition_failed.get(topic_partition, 0) + share_partition_data["count"] + logger.debug("Offsets acknowledged for %s" % (node.account.hostname)) + logger.debug("Offset acknowledgement failed for: %s" % (node.account.hostname)) + + def handle_records_consumed(self, event, node, logger): + self.total_consumed += event["count"] + for share_partition_data in event["partitions"]: + topic_partition = 
TopicPartition(share_partition_data["topic"], share_partition_data["partition"]) + self.consumed_per_partition[topic_partition] = self.consumed_per_partition.get(topic_partition, 0) + share_partition_data["count"] + logger.debug("Offsets consumed for %s" % (node.account.hostname)) + + + def handle_kill_process(self, clean_shutdown): + # if the shutdown was clean, then we expect the explicit + # shutdown event from the share consumer + if not clean_shutdown: + self.handle_shutdown_complete() + +class VerifiableShareConsumer(KafkaPathResolverMixin, VerifiableClientMixin, BackgroundThreadService): + """This service wraps org.apache.kafka.tools.VerifiableShareConsumer for use in + system testing. + + NOTE: this class should be treated as a PUBLIC API. Downstream users use + this service both directly and through class extension, so care must be + taken to ensure compatibility. + """ + + PERSISTENT_ROOT = "/mnt/verifiable_share_consumer" + STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "verifiable_share_consumer.stdout") + STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "verifiable_share_consumer.stderr") + LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs") + LOG_FILE = os.path.join(LOG_DIR, "verifiable_share_consumer.log") + CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "verifiable_share_consumer.properties") + + logs = { + "verifiable_share_consumer_stdout": { + "path": STDOUT_CAPTURE, + "collect_default": False}, + "verifiable_share_consumer_stderr": { + "path": STDERR_CAPTURE, + "collect_default": False}, + "verifiable_share_consumer_log": { + "path": LOG_FILE, + "collect_default": True} + } + + def __init__(self, context, num_nodes, kafka, topic, group_id, + max_messages=-1, acknowledgement_mode="auto", offset_reset_strategy="", + version=DEV_BRANCH, stop_timeout_sec=60, log_level="INFO", jaas_override_variables=None, + on_record_consumed=None): + """ + :param jaas_override_variables: A dict of variables to be used in the jaas.conf template file + """ + super(VerifiableShareConsumer, self).__init__(context, num_nodes) + self.log_level = log_level + self.kafka = kafka + self.topic = topic + self.group_id = group_id + self.offset_reset_strategy = offset_reset_strategy + self.max_messages = max_messages + self.acknowledgement_mode = acknowledgement_mode + self.prop_file = "" + self.stop_timeout_sec = stop_timeout_sec + self.on_record_consumed = on_record_consumed + + self.event_handlers = {} + self.jaas_override_variables = jaas_override_variables or {} + + self.total_records_consumed = 0 + self.total_records_acknowledged = 0 + self.total_records_acknowledged_failed = 0 + self.consumed_records_offsets = set() + self.acknowledged_records_offsets = set() + self.is_offset_reset_strategy_set = False + + for node in self.nodes: + node.version = version + + def java_class_name(self): + return "VerifiableShareConsumer" + + def create_event_handler(self, idx, node): + return ShareConsumerEventHandler(node, idx) + + def _worker(self, idx, node): + with self.lock: + self.event_handlers[node] = self.create_event_handler(idx, node) + handler = self.event_handlers[node] + + node.account.ssh("mkdir -p %s" % VerifiableShareConsumer.PERSISTENT_ROOT, allow_fail=False) + + # Create and upload log properties + log_config = self.render(get_log4j_config_for_tools(node), log_file=VerifiableShareConsumer.LOG_FILE) + node.account.create_file(get_log4j_config_for_tools(node), log_config) + + # Create and upload config file + self.security_config = self.kafka.security_config.client_config(self.prop_file, node, + 
self.jaas_override_variables) + self.security_config.setup_node(node) + self.prop_file += str(self.security_config) + self.logger.info("verifiable_share_consumer.properties:") + self.logger.info(self.prop_file) + node.account.create_file(VerifiableShareConsumer.CONFIG_FILE, self.prop_file) + self.security_config.setup_node(node) + + cmd = self.start_cmd(node) + self.logger.debug("VerifiableShareConsumer %d command: %s" % (idx, cmd)) + + for line in node.account.ssh_capture(cmd): + event = self.try_parse_json(node, line.strip()) + if event is not None: + with self.lock: + name = event["name"] + if name == "shutdown_complete": + handler.handle_shutdown_complete(node, self.logger) + elif name == "startup_complete": + handler.handle_startup_complete(node, self.logger) + elif name == "offsets_acknowledged": + handler.handle_offsets_acknowledged(event, node, self.logger) + self._update_global_acknowledged(event) + elif name == "records_consumed": + handler.handle_records_consumed(event, node, self.logger) + self._update_global_consumed(event) + elif name == "record_data" and self.on_record_consumed: + self.on_record_consumed(event, node) + elif name == "offset_reset_strategy_set": + self._on_offset_reset_strategy_set() + else: + self.logger.debug("%s: ignoring unknown event: %s" % (str(node.account), event)) + + def _update_global_acknowledged(self, acknowledge_event): + if acknowledge_event["success"]: + self.total_records_acknowledged += acknowledge_event["count"] + else: + self.total_records_acknowledged_failed += acknowledge_event["count"] + for share_partition_data in acknowledge_event["partitions"]: + tpkey = str(share_partition_data["topic"]) + "-" + str(share_partition_data["partition"]) + for offset in share_partition_data["offsets"]: + key = tpkey + "-" + str(offset) + if key not in self.acknowledged_records_offsets: + self.acknowledged_records_offsets.add(key) + + def _update_global_consumed(self, consumed_event): + self.total_records_consumed += consumed_event["count"] + + for share_partition_data in consumed_event["partitions"]: + tpkey = str(share_partition_data["topic"]) + "-" + str(share_partition_data["partition"]) + for offset in share_partition_data["offsets"]: + key = tpkey + "-" + str(offset) + if key not in self.consumed_records_offsets: + self.consumed_records_offsets.add(key) + + def _on_offset_reset_strategy_set(self): + self.is_offset_reset_strategy_set = True + + def start_cmd(self, node): + cmd = "" + cmd += "export LOG_DIR=%s;" % VerifiableShareConsumer.LOG_DIR + cmd += " export KAFKA_OPTS=%s;" % self.security_config.kafka_opts + cmd += " export KAFKA_LOG4J_OPTS=\"%s%s\"; " % (get_log4j_config_param(node), get_log4j_config_for_tools(node)) + cmd += self.impl.exec_cmd(node) + if self.on_record_consumed: + cmd += " --verbose" + + cmd += " --acknowledgement-mode %s" % self.acknowledgement_mode + + cmd += " --offset-reset-strategy %s" % self.offset_reset_strategy + + cmd += " --bootstrap-server %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol) + + cmd += " --group-id %s --topic %s" % (self.group_id, self.topic) + + if self.max_messages > 0: + cmd += " --max-messages %s" % str(self.max_messages) + + cmd += " --consumer.config %s" % VerifiableShareConsumer.CONFIG_FILE + cmd += " 2>> %s | tee -a %s &" % (VerifiableShareConsumer.STDOUT_CAPTURE, VerifiableShareConsumer.STDOUT_CAPTURE) + return cmd + + def pids(self, node): + return self.impl.pids(node) + + def try_parse_json(self, node, string): + """Try to parse a string as json. 
Return None if not parseable.""" + try: + return json.loads(string) + except ValueError: + self.logger.debug("%s: Could not parse as json: %s" % (str(node.account), str(string))) + return None + + def stop_all(self): + for node in self.nodes: + self.stop_node(node) + + def kill_node(self, node, clean_shutdown=True, allow_fail=False): + sig = self.impl.kill_signal(clean_shutdown) + for pid in self.pids(node): + node.account.signal(pid, sig, allow_fail) + + with self.lock: + self.event_handlers[node].handle_kill_process(clean_shutdown) + + def stop_node(self, node, clean_shutdown=True): + self.kill_node(node, clean_shutdown=clean_shutdown) + + stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec) + assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \ + (str(node.account), str(self.stop_timeout_sec)) + + def clean_node(self, node): + self.kill_node(node, clean_shutdown=False) + node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False) + self.security_config.clean_node(node) + + def total_consumed(self): + with self.lock: + return self.total_records_consumed + + def total_unique_consumed(self): + with self.lock: + return len(self.consumed_records_offsets) + + def total_unique_acknowledged(self): + with self.lock: + return len(self.acknowledged_records_offsets) + + def total_acknowledged(self): + with self.lock: + return self.total_records_acknowledged + self.total_records_acknowledged_failed + + def total_successful_acknowledged(self): + with self.lock: + return self.total_records_acknowledged + + def total_failed_acknowledged(self): + with self.lock: + return self.total_records_acknowledged_failed + + def total_consumed_for_a_share_consumer(self, node): + with self.lock: + return self.event_handlers[node].total_consumed + + def total_acknowledged_for_a_share_consumer(self, node): + with self.lock: + return self.event_handlers[node].total_acknowledged + self.event_handlers[node].total_acknowledged_failed + + def total_successful_acknowledged_for_a_share_consumer(self, node): + with self.lock: + return self.event_handlers[node].total_acknowledged + + def total_failed_acknowledged_for_a_share_consumer(self, node): + with self.lock: + return self.event_handlers[node].total_acknowledged_failed + + def offset_reset_strategy_set(self): + with self.lock: + return self.is_offset_reset_strategy_set + + def dead_nodes(self): + with self.lock: + return [handler.node for handler in self.event_handlers.values() + if handler.state == ShareConsumerState.Dead] + + def alive_nodes(self): + with self.lock: + return [handler.node for handler in self.event_handlers.values() + if handler.state == ShareConsumerState.Started] \ No newline at end of file diff --git a/tests/kafkatest/services/zookeeper.py b/tests/kafkatest/services/zookeeper.py index 9c9e8c35a441e..3215ce783233a 100644 --- a/tests/kafkatest/services/zookeeper.py +++ b/tests/kafkatest/services/zookeeper.py @@ -23,7 +23,7 @@ from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin from kafkatest.services.security.security_config import SecurityConfig -from kafkatest.version import DEV_BRANCH +from kafkatest.version import LATEST_3_9 class ZookeeperService(KafkaPathResolverMixin, Service): @@ -43,8 +43,9 @@ class ZookeeperService(KafkaPathResolverMixin, Service): "collect_default": True} } + # After 4.0, zookeeper service is removed from source code. Using LATEST_3_9 for compatibility test cases. 
def __init__(self, context, num_nodes, zk_sasl = False, zk_client_port = True, zk_client_secure_port = False, - zk_tls_encrypt_only = False, version=DEV_BRANCH): + zk_tls_encrypt_only = False, version=LATEST_3_9): """ :type context """ @@ -186,7 +187,7 @@ def query(self, path, chroot=None): chroot_path = ('' if chroot is None else chroot) + path - kafka_run_class = self.path.script("kafka-run-class.sh", DEV_BRANCH) + kafka_run_class = self.path.script("kafka-run-class.sh", LATEST_3_9) cmd = "%s %s -server %s %s get %s" % \ (kafka_run_class, self.java_cli_class_name(), self.connect_setting(force_tls=self.zk_client_secure_port), self.zkTlsConfigFileOption(True), @@ -211,7 +212,7 @@ def get_children(self, path, chroot=None): chroot_path = ('' if chroot is None else chroot) + path - kafka_run_class = self.path.script("kafka-run-class.sh", DEV_BRANCH) + kafka_run_class = self.path.script("kafka-run-class.sh", LATEST_3_9) cmd = "%s %s -server %s %s ls %s" % \ (kafka_run_class, self.java_cli_class_name(), self.connect_setting(force_tls=self.zk_client_secure_port), self.zkTlsConfigFileOption(True), @@ -239,7 +240,7 @@ def delete(self, path, recursive, chroot=None): chroot_path = ('' if chroot is None else chroot) + path - kafka_run_class = self.path.script("kafka-run-class.sh", DEV_BRANCH) + kafka_run_class = self.path.script("kafka-run-class.sh", LATEST_3_9) if recursive: op = "deleteall" else: @@ -261,7 +262,7 @@ def create(self, path, chroot=None, value=""): chroot_path = ('' if chroot is None else chroot) + path - kafka_run_class = self.path.script("kafka-run-class.sh", DEV_BRANCH) + kafka_run_class = self.path.script("kafka-run-class.sh", LATEST_3_9) cmd = "%s %s -server %s %s create %s '%s'" % \ (kafka_run_class, self.java_cli_class_name(), self.connect_setting(force_tls=self.zk_client_secure_port), self.zkTlsConfigFileOption(True), @@ -275,7 +276,7 @@ def describeUsers(self): Describe the default user using the ConfigCommand CLI """ - kafka_run_class = self.path.script("kafka-run-class.sh", DEV_BRANCH) + kafka_run_class = self.path.script("kafka-run-class.sh", LATEST_3_9) cmd = "%s kafka.admin.ConfigCommand --zookeeper %s %s --describe --entity-type users --entity-default" % \ (kafka_run_class, self.connect_setting(force_tls=self.zk_client_secure_port), self.zkTlsConfigFileOption()) diff --git a/tests/kafkatest/tests/client/client_compatibility_features_test.py b/tests/kafkatest/tests/client/client_compatibility_features_test.py index d0bcd80a79115..dcb7146a298e6 100644 --- a/tests/kafkatest/tests/client/client_compatibility_features_test.py +++ b/tests/kafkatest/tests/client/client_compatibility_features_test.py @@ -28,7 +28,7 @@ from ducktape.tests.test import Test from kafkatest.version import DEV_BRANCH, \ LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7, LATEST_2_8, \ - LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, KafkaVersion + LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, LATEST_3_9, KafkaVersion def get_broker_features(broker_version): features = {} @@ -53,7 +53,6 @@ def run_command(node, cmd, ssh_log_file): print(e, flush=True) raise - class ClientCompatibilityFeaturesTest(Test): """ Tests clients for the presence or absence of specific features when communicating with brokers with various @@ -118,16 +117,20 @@ def invoke_compatibility_program(self, features): @parametrize(broker_version=str(LATEST_3_0)) 
@parametrize(broker_version=str(LATEST_3_1)) @parametrize(broker_version=str(LATEST_3_2)) - @parametrize(broker_version=str(LATEST_3_3)) - @parametrize(broker_version=str(LATEST_3_4)) - @parametrize(broker_version=str(LATEST_3_5)) - @parametrize(broker_version=str(LATEST_3_6)) - @parametrize(broker_version=str(LATEST_3_7)) - @parametrize(broker_version=str(LATEST_3_8)) + @parametrize(broker_version=str(LATEST_3_3), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_4), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_5), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_6), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_7), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_8), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_9), metadata_quorum=quorum.isolated_kraft) def run_compatibility_test(self, broker_version, metadata_quorum=quorum.zk): if self.zk: self.zk.start() self.kafka.set_version(KafkaVersion(broker_version)) + if metadata_quorum == quorum.isolated_kraft: + for node in self.kafka.controller_quorum.nodes: + node.version = KafkaVersion(broker_version) self.kafka.start() features = get_broker_features(broker_version) self.invoke_compatibility_program(features) diff --git a/tests/kafkatest/tests/client/client_compatibility_produce_consume_test.py b/tests/kafkatest/tests/client/client_compatibility_produce_consume_test.py index 74bd5563200b4..4c7bfef158f58 100644 --- a/tests/kafkatest/tests/client/client_compatibility_produce_consume_test.py +++ b/tests/kafkatest/tests/client/client_compatibility_produce_consume_test.py @@ -25,7 +25,7 @@ from kafkatest.utils import is_int_with_prefix from kafkatest.version import DEV_BRANCH, \ LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7, LATEST_2_8, \ - LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, KafkaVersion + LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, LATEST_3_9, KafkaVersion class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest): """ @@ -38,9 +38,17 @@ def __init__(self, test_context): self.topic = "test_topic" self.zk = ZookeeperService(test_context, num_nodes=3) if quorum.for_test(test_context) == quorum.zk else None - self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{ - "partitions": 10, - "replication-factor": 2}}) + self.kafka = KafkaService( + test_context, + num_nodes=3, + zk=self.zk, + topics={ + self.topic:{ + "partitions": 10, + "replication-factor": 2 + } + }, + ) self.num_partitions = 10 self.timeout_sec = 60 self.producer_throughput = 1000 @@ -69,15 +77,19 @@ def min_cluster_size(self): @parametrize(broker_version=str(LATEST_3_0)) @parametrize(broker_version=str(LATEST_3_1)) @parametrize(broker_version=str(LATEST_3_2)) - @parametrize(broker_version=str(LATEST_3_3)) - @parametrize(broker_version=str(LATEST_3_4)) - @parametrize(broker_version=str(LATEST_3_5)) - @parametrize(broker_version=str(LATEST_3_6)) - @parametrize(broker_version=str(LATEST_3_7)) - @parametrize(broker_version=str(LATEST_3_8)) + @parametrize(broker_version=str(LATEST_3_3), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_4), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_5), 
metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_6), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_7), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_8), metadata_quorum=quorum.isolated_kraft) + @parametrize(broker_version=str(LATEST_3_9), metadata_quorum=quorum.isolated_kraft) def test_produce_consume(self, broker_version, metadata_quorum=quorum.zk): print("running producer_consumer_compat with broker_version = %s" % broker_version, flush=True) self.kafka.set_version(KafkaVersion(broker_version)) + if metadata_quorum == quorum.isolated_kraft: + for node in self.kafka.controller_quorum.nodes: + node.version = KafkaVersion(broker_version) self.kafka.security_protocol = "PLAINTEXT" self.kafka.interbroker_security_protocol = self.kafka.security_protocol self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, diff --git a/tests/kafkatest/tests/client/consumer_protocol_migration_test.py b/tests/kafkatest/tests/client/consumer_protocol_migration_test.py index 41b21ee26558b..a03228b617a2a 100644 --- a/tests/kafkatest/tests/client/consumer_protocol_migration_test.py +++ b/tests/kafkatest/tests/client/consumer_protocol_migration_test.py @@ -20,7 +20,7 @@ from kafkatest.tests.verifiable_consumer_test import VerifiableConsumerTest from kafkatest.services.kafka import TopicPartition, quorum, consumer_group from kafkatest.version import LATEST_2_1, LATEST_2_3, LATEST_2_4, LATEST_2_5, \ - LATEST_3_2, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, DEV_BRANCH, KafkaVersion + LATEST_3_2, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, LATEST_3_9, DEV_BRANCH, KafkaVersion class ConsumerProtocolMigrationTest(VerifiableConsumerTest): """ @@ -42,7 +42,7 @@ class ConsumerProtocolMigrationTest(VerifiableConsumerTest): COOPERATIVE_STICKEY = "org.apache.kafka.clients.consumer.CooperativeStickyAssignor" all_consumer_versions = [LATEST_2_1, LATEST_2_3, LATEST_2_4, LATEST_2_5, \ - LATEST_3_2, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, DEV_BRANCH] + LATEST_3_2, LATEST_3_4, LATEST_3_5, LATEST_3_6, LATEST_3_7, LATEST_3_8, LATEST_3_9, DEV_BRANCH] consumer_versions_supporting_range_assignnor = [str(v) for v in all_consumer_versions] consumer_versions_supporting_static_membership = [str(v) for v in all_consumer_versions if v >= LATEST_2_3] consumer_versions_supporting_cooperative_sticky_assignor = [str(v) for v in all_consumer_versions if v >= LATEST_2_4] @@ -77,7 +77,7 @@ def rolling_bounce_consumers(self, consumer, clean_shutdown=True): consumer.stop_node(node, clean_shutdown) wait_until(lambda: len(consumer.dead_nodes()) == 1, - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") consumer.start_node(node) diff --git a/tests/kafkatest/tests/client/consumer_test.py b/tests/kafkatest/tests/client/consumer_test.py index 4bd680dd2a00e..5e5c5ff308d66 100644 --- a/tests/kafkatest/tests/client/consumer_test.py +++ b/tests/kafkatest/tests/client/consumer_test.py @@ -39,7 +39,7 @@ def rolling_bounce_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_ consumer.stop_node(node, clean_shutdown) wait_until(lambda: len(consumer.dead_nodes()) == 1, - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") consumer.start_node(node) @@ -84,12 +84,12 @@ def setup_consumer(self, topic, **kwargs): use_new_coordinator=[True], 
group_protocol=consumer_group.all_group_protocols ) - def test_broker_rolling_bounce(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_broker_rolling_bounce(self, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verify correct consumer behavior when the brokers are consecutively restarted. Setup: single Kafka cluster with one producer writing messages to a single topic with one - partition, an a set of consumers in the same group reading from the same topic. + partition, a set of consumers in the same group reading from the same topic. - Start a producer which continues producing new messages throughout the test. - Start up the consumers and wait until they've joined the group. @@ -101,15 +101,10 @@ def test_broker_rolling_bounce(self, metadata_quorum=quorum.zk, use_new_coordina partition = TopicPartition(self.TOPIC, 0) producer = self.setup_producer(self.TOPIC) - # The consumers' session timeouts must exceed the time it takes for a broker to roll. Consumers are likely - # to see cluster metadata consisting of just a single alive broker in the case where the cluster has just 2 - # brokers and the cluster is rolling (which is what is happening here). When the consumer sees a single alive - # broker, and then that broker rolls, the consumer will be unable to connect to the cluster until that broker - # completes its roll. In the meantime, the consumer group will move to the group coordinator on the other - # broker, and that coordinator will fail the consumer and trigger a group rebalance if its session times out. - # This test is asserting that no rebalances occur, so we increase the session timeout for this to be the case. - self.session_timeout_sec = 30 - consumer = self.setup_consumer(self.TOPIC, group_protocol=group_protocol) + # Due to KIP-899, which rebootstrap is performed when there are no available brokers in the current metadata. + # We disable rebootstrapping by setting `metadata.recovery.strategy=none` for the consumer, as the test expects no metadata changes. + # see KAFKA-18194 + consumer = self.setup_consumer(self.TOPIC, group_protocol=group_protocol, prop_file="metadata.recovery.strategy=none") producer.start() self.await_produced_messages(producer) @@ -148,7 +143,7 @@ def test_broker_rolling_bounce(self, metadata_quorum=quorum.zk, use_new_coordina use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_consumer_bounce(self, clean_shutdown, bounce_mode, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_consumer_bounce(self, clean_shutdown, bounce_mode, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verify correct consumer behavior when the consumers in the group are consecutively restarted. @@ -207,7 +202,7 @@ def test_consumer_bounce(self, clean_shutdown, bounce_mode, metadata_quorum=quor use_new_coordinator=[True], group_protocol=[consumer_group.classic_group_protocol] ) - def test_static_consumer_bounce_with_eager_assignment(self, clean_shutdown, static_membership, bounce_mode, num_bounces, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_static_consumer_bounce_with_eager_assignment(self, clean_shutdown, static_membership, bounce_mode, num_bounces, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verify correct static consumer behavior when the consumers in the group are restarted. 
In order to make sure the behavior of static members are different from dynamic ones, we take both static and dynamic @@ -229,7 +224,6 @@ def test_static_consumer_bounce_with_eager_assignment(self, clean_shutdown, stat producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=static_membership, group_protocol=group_protocol, assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor") @@ -281,7 +275,7 @@ def test_static_consumer_bounce_with_eager_assignment(self, clean_shutdown, stat use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_static_consumer_persisted_after_rejoin(self, bounce_mode, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_static_consumer_persisted_after_rejoin(self, bounce_mode, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verify that the updated member.id(updated_member_id) caused by static member rejoin would be persisted. If not, after the brokers rolling bounce, the migrated group coordinator would load the stale persisted member.id and @@ -295,7 +289,6 @@ def test_static_consumer_persisted_after_rejoin(self, bounce_mode, metadata_quor producer = self.setup_producer(self.TOPIC) producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=True, group_protocol=group_protocol) consumer.start() self.await_all_members(consumer) @@ -324,7 +317,7 @@ def test_static_consumer_persisted_after_rejoin(self, bounce_mode, metadata_quor use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verify correct static consumer behavior when there are conflicting consumers with same group.instance.id. @@ -340,7 +333,6 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=True, group_protocol=group_protocol) self.num_consumers = num_conflict_consumers @@ -372,7 +364,7 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me # Stop existing nodes, so conflicting ones should be able to join. 
consumer.stop_all() wait_until(lambda: len(consumer.dead_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") conflict_consumer.start() self.await_members(conflict_consumer, num_conflict_consumers) @@ -383,13 +375,13 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me conflict_consumer.start() wait_until(lambda: len(consumer.joined_nodes()) + len(conflict_consumer.joined_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec*2, + timeout_sec=60, err_msg="Timed out waiting for consumers to join, expected total %d joined, but only see %d joined from " "normal consumer group and %d from conflict consumer group" % \ (len(consumer.nodes), len(consumer.joined_nodes()), len(conflict_consumer.joined_nodes())) ) wait_until(lambda: len(consumer.dead_nodes()) + len(conflict_consumer.dead_nodes()) == len(conflict_consumer.nodes), - timeout_sec=self.session_timeout_sec*2, + timeout_sec=60, err_msg="Timed out waiting for fenced consumers to die, expected total %d dead, but only see %d dead in " "normal consumer group and %d dead in conflict consumer group" % \ (len(conflict_consumer.nodes), len(consumer.dead_nodes()), len(conflict_consumer.dead_nodes())) @@ -409,7 +401,7 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_consumer_failure(self, clean_shutdown, enable_autocommit, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_consumer_failure(self, clean_shutdown, enable_autocommit, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): partition = TopicPartition(self.TOPIC, 0) consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit, group_protocol=group_protocol) @@ -427,7 +419,7 @@ def test_consumer_failure(self, clean_shutdown, enable_autocommit, metadata_quor # stop the partition owner and await its shutdown consumer.kill_node(partition_owner, clean_shutdown=clean_shutdown) wait_until(lambda: len(consumer.joined_nodes()) == (self.num_consumers - 1) and consumer.owner(partition) is not None, - timeout_sec=self.session_timeout_sec*2+5, + timeout_sec=60, err_msg="Timed out waiting for consumer to close") # ensure that the remaining consumer does some work after rebalancing @@ -467,7 +459,7 @@ def test_consumer_failure(self, clean_shutdown, enable_autocommit, metadata_quor use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_broker_failure(self, clean_shutdown, enable_autocommit, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_broker_failure(self, clean_shutdown, enable_autocommit, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): partition = TopicPartition(self.TOPIC, 0) consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit, group_protocol=group_protocol) @@ -513,7 +505,7 @@ def test_broker_failure(self, clean_shutdown, enable_autocommit, metadata_quorum use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_group_consumption(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_group_consumption(self, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Verifies correct group rebalance behavior 
as consumers are started and stopped. In particular, this test verifies that the partition is readable after every @@ -578,7 +570,7 @@ def __init__(self, test_context): "org.apache.kafka.clients.consumer.CooperativeStickyAssignor"], metadata_quorum=[quorum.isolated_kraft], use_new_coordinator=[True], - group_protocol=[consumer_group.classic_group_protocol], + group_protocol=[consumer_group.classic_group_protocol] ) @matrix( metadata_quorum=[quorum.isolated_kraft], @@ -586,7 +578,7 @@ def __init__(self, test_context): group_protocol=[consumer_group.consumer_group_protocol], group_remote_assignor=consumer_group.all_remote_assignors ) - def test_valid_assignment(self, assignment_strategy=None, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None, group_remote_assignor=None): + def test_valid_assignment(self, assignment_strategy=None, metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None, group_remote_assignor=None): """ Verify assignment strategy correctness: each partition is assigned to exactly one consumer instance. diff --git a/tests/kafkatest/tests/client/pluggable_test.py b/tests/kafkatest/tests/client/pluggable_test.py index b2f726e016303..8f74ec1c8a52d 100644 --- a/tests/kafkatest/tests/client/pluggable_test.py +++ b/tests/kafkatest/tests/client/pluggable_test.py @@ -52,5 +52,5 @@ def test_start_stop(self, metadata_quorum=quorum.zk): self.logger.debug("Waiting for %d nodes to stop" % len(consumer.nodes)) wait_until(lambda: len(consumer.dead_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for consumers to shutdown") diff --git a/tests/kafkatest/tests/client/quota_test.py b/tests/kafkatest/tests/client/quota_test.py index d52f9b6a9441c..e89fea80eeeea 100644 --- a/tests/kafkatest/tests/client/quota_test.py +++ b/tests/kafkatest/tests/client/quota_test.py @@ -17,8 +17,7 @@ from ducktape.mark import matrix, parametrize from ducktape.mark.resource import cluster -from kafkatest.services.zookeeper import ZookeeperService -from kafkatest.services.kafka import KafkaService +from kafkatest.services.kafka import KafkaService, quorum from kafkatest.services.performance import ProducerPerformanceService from kafkatest.services.console_consumer import ConsoleConsumer from kafkatest.version import DEV_BRANCH @@ -77,10 +76,9 @@ def __init__(self, quota_type, override_quota, kafka): self.configure_quota(kafka, QuotaConfig.LARGE_QUOTA, QuotaConfig.LARGE_QUOTA, ['clients', None]) def configure_quota(self, kafka, producer_byte_rate, consumer_byte_rate, entity_args): - force_use_zk_connection = not kafka.all_nodes_configs_command_uses_bootstrap_server() node = kafka.nodes[0] cmd = "%s --alter --add-config producer_byte_rate=%d,consumer_byte_rate=%d" % \ - (kafka.kafka_configs_cmd_with_optional_security_settings(node, force_use_zk_connection), producer_byte_rate, consumer_byte_rate) + (kafka.kafka_configs_cmd_with_optional_security_settings(node, False), producer_byte_rate, consumer_byte_rate) cmd += " --entity-type " + entity_args[0] + self.entity_name_opt(entity_args[1]) if len(entity_args) > 2: cmd += " --entity-type " + entity_args[2] + self.entity_name_opt(entity_args[3]) @@ -108,8 +106,7 @@ def __init__(self, test_context): self.num_records = 50000 self.record_size = 3000 - self.zk = ZookeeperService(test_context, num_nodes=1) - self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, + self.kafka = KafkaService(test_context, num_nodes=1, zk=None, 
security_protocol='SSL', authorizer_class_name='', interbroker_security_protocol='SSL', topics={self.topic: {'partitions': 6, 'replication-factor': 1, 'configs': {'min.insync.replicas': 1}}}, @@ -119,17 +116,14 @@ def __init__(self, test_context): self.num_producers = 1 self.num_consumers = 2 - def setUp(self): - self.zk.start() - def min_cluster_size(self): """Override this since we're adding services outside of the constructor""" return super(QuotaTest, self).min_cluster_size() + self.num_producers + self.num_consumers @cluster(num_nodes=5) - @matrix(quota_type=[QuotaConfig.CLIENT_ID, QuotaConfig.USER, QuotaConfig.USER_CLIENT], override_quota=[True, False]) - @parametrize(quota_type=QuotaConfig.CLIENT_ID, consumer_num=2) - def test_quota(self, quota_type, override_quota=True, producer_num=1, consumer_num=1): + @matrix(quota_type=[QuotaConfig.CLIENT_ID, QuotaConfig.USER, QuotaConfig.USER_CLIENT], override_quota=[True, False], metadata_quorum=[quorum.isolated_kraft]) + @parametrize(quota_type=QuotaConfig.CLIENT_ID, consumer_num=2, metadata_quorum=quorum.isolated_kraft) + def test_quota(self, quota_type, override_quota=True, producer_num=1, consumer_num=1, metadata_quorum=quorum.isolated_kraft): self.kafka.start() self.quota_config = QuotaConfig(quota_type, override_quota, self.kafka) diff --git a/tests/kafkatest/tests/client/share_consumer_test.py b/tests/kafkatest/tests/client/share_consumer_test.py new file mode 100644 index 0000000000000..cc74234ac0f7c --- /dev/null +++ b/tests/kafkatest/tests/client/share_consumer_test.py @@ -0,0 +1,313 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ducktape.mark import matrix +from ducktape.mark.resource import cluster +from ducktape.utils.util import wait_until +from kafkatest.tests.verifiable_share_consumer_test import VerifiableShareConsumerTest + +from kafkatest.services.kafka import TopicPartition, quorum + +import signal + +class ShareConsumerTest(VerifiableShareConsumerTest): + TOPIC1 = {"name": "test_topic1", "partitions": 1,"replication_factor": 1} + TOPIC2 = {"name": "test_topic2", "partitions": 3,"replication_factor": 3} + TOPIC3 = {"name": "test_topic3", "partitions": 3,"replication_factor": 3} + + num_consumers = 3 + num_producers = 1 + num_brokers = 3 + + def __init__(self, test_context): + super(ShareConsumerTest, self).__init__(test_context, num_consumers=self.num_consumers, num_producers=self.num_producers, + num_zk=0, num_brokers=self.num_brokers, topics={ + self.TOPIC1["name"] : { 'partitions': self.TOPIC1["partitions"], 'replication-factor': self.TOPIC1["replication_factor"] }, + self.TOPIC2["name"] : { 'partitions': self.TOPIC2["partitions"], 'replication-factor': self.TOPIC2["replication_factor"] } + }) + + def setup_share_group(self, topic, **kwargs): + consumer = super(ShareConsumerTest, self).setup_share_group(topic, **kwargs) + self.mark_for_collect(consumer, 'verifiable_share_consumer_stdout') + return consumer + + def get_topic_partitions(self, topic): + return [TopicPartition(topic["name"], i) for i in range(topic["partitions"])] + + def wait_until_topic_replicas_settled(self, topic, expected_num_isr, timeout_sec=60): + for partition in range(0, topic["partitions"]): + wait_until(lambda: len(self.kafka.isr_idx_list(topic["name"], partition)) == expected_num_isr, + timeout_sec=timeout_sec, backoff_sec=1, err_msg="the expected number of ISRs did not settle in a reasonable amount of time") + + def wait_until_topic_partition_leaders_settled(self, topic, timeout_sec=60): + def leader_settled(partition_leader, topicName, partition): + try: + partition_leader(topicName, partition) + return True + except Exception: + return False + for partition in range(0, topic["partitions"]): + wait_until(lambda: leader_settled(self.kafka.leader, topic["name"], partition), + timeout_sec=timeout_sec, backoff_sec=1, err_msg="partition leaders did not settle in a reasonable amount of time") + + def rolling_bounce_brokers(self, topic, num_bounces=5, clean_shutdown=True, timeout_sec=60): + for _ in range(num_bounces): + for i in range(len(self.kafka.nodes)): + node = self.kafka.nodes[i] + self.kafka.restart_node(node, clean_shutdown=clean_shutdown) + self.wait_until_topic_replicas_settled(topic, expected_num_isr = topic["replication_factor"], timeout_sec=timeout_sec) + + def fail_brokers(self, topic, num_brokers=1, clean_shutdown=True, timeout_sec=60): + for i in range(num_brokers): + self.kafka.signal_node(self.kafka.nodes[i], signal.SIGTERM if clean_shutdown else signal.SIGKILL) + self.wait_until_topic_replicas_settled(topic, topic["replication_factor"] - (i + 1)) + self.wait_until_topic_partition_leaders_settled(topic, timeout_sec=timeout_sec) + + def rolling_bounce_share_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_shutdown=True, timeout_sec=60): + for _ in range(num_bounces): + num_consumers_killed = 0 + for node in consumer.nodes[keep_alive:]: + consumer.stop_node(node, clean_shutdown) + num_consumers_killed += 1 + wait_until(lambda: len(consumer.dead_nodes()) == 1, + timeout_sec=timeout_sec, + err_msg="Timed out waiting for the share consumer to shutdown") + + consumer.start_node(node) + + 
self.await_all_members(consumer, timeout_sec=timeout_sec) + self.await_consumed_messages_by_a_consumer(consumer, node, timeout_sec=timeout_sec) + + def bounce_all_share_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_shutdown=True, timeout_sec=60): + for _ in range(num_bounces): + for node in consumer.nodes[keep_alive:]: + consumer.stop_node(node, clean_shutdown) + + wait_until(lambda: len(consumer.dead_nodes()) == self.num_consumers - keep_alive, timeout_sec=timeout_sec, + err_msg="Timed out waiting for the share consumers to shutdown") + + num_alive_consumers = keep_alive + for node in consumer.nodes[keep_alive:]: + consumer.start_node(node) + num_alive_consumers += 1 + self.await_members(consumer, num_consumers=num_alive_consumers, timeout_sec=timeout_sec) + self.await_consumed_messages_by_a_consumer(consumer, node, timeout_sec=timeout_sec) + + def fail_share_consumers(self, consumer, num_consumers=1, clean_shutdown=True, timeout_sec=60): + for i in range(num_consumers): + consumer.kill_node(consumer.nodes[i], clean_shutdown=clean_shutdown) + wait_until(lambda: len(consumer.dead_nodes()) == (i + 1), + timeout_sec=timeout_sec, + err_msg="Timed out waiting for the share consumer to be killed") + + @cluster(num_nodes=10) + @matrix( + metadata_quorum=[quorum.isolated_kraft, quorum.combined_kraft] + ) + def test_share_single_topic_partition(self, metadata_quorum=quorum.isolated_kraft): + + total_messages = 100000 + producer = self.setup_producer(self.TOPIC1["name"], max_messages=total_messages) + + consumer = self.setup_share_group(self.TOPIC1["name"], offset_reset_strategy="earliest") + + producer.start() + + consumer.start() + self.await_all_members(consumer, timeout_sec=60) + + self.await_acknowledged_messages(consumer, min_messages=total_messages, timeout_sec=60) + + assert consumer.total_consumed() >= producer.num_acked + assert consumer.total_acknowledged() == producer.num_acked + + for event_handler in consumer.event_handlers.values(): + assert event_handler.total_consumed > 0 + assert event_handler.total_acknowledged > 0 + + producer.stop() + consumer.stop_all() + + @cluster(num_nodes=10) + @matrix( + metadata_quorum=[quorum.isolated_kraft, quorum.combined_kraft] + ) + def test_share_multiple_partitions(self, metadata_quorum=quorum.isolated_kraft): + + total_messages = 1000000 + producer = self.setup_producer(self.TOPIC2["name"], max_messages=total_messages, throughput=5000) + + consumer = self.setup_share_group(self.TOPIC2["name"], offset_reset_strategy="earliest") + + producer.start() + + consumer.start() + self.await_all_members(consumer, timeout_sec=60) + + self.await_acknowledged_messages(consumer, min_messages=total_messages, timeout_sec=60) + + assert consumer.total_consumed() >= producer.num_acked + assert consumer.total_acknowledged() == producer.num_acked + + for event_handler in consumer.event_handlers.values(): + assert event_handler.total_consumed > 0 + assert event_handler.total_acknowledged > 0 + for topic_partition in self.get_topic_partitions(self.TOPIC2): + assert topic_partition in event_handler.consumed_per_partition + assert event_handler.consumed_per_partition[topic_partition] > 0 + assert topic_partition in event_handler.acknowledged_per_partition + assert event_handler.acknowledged_per_partition[topic_partition] > 0 + + producer.stop() + consumer.stop_all() + + @cluster(num_nodes=10) + @matrix( + clean_shutdown=[True, False], + metadata_quorum=[quorum.isolated_kraft, quorum.combined_kraft] + ) + def test_broker_rolling_bounce(self, clean_shutdown, 
metadata_quorum=quorum.isolated_kraft): + + producer = self.setup_producer(self.TOPIC2["name"]) + consumer = self.setup_share_group(self.TOPIC2["name"], offset_reset_strategy="earliest") + + producer.start() + self.await_produced_messages(producer) + + consumer.start() + self.await_all_members(consumer) + + self.await_consumed_messages(consumer, timeout_sec=60) + self.rolling_bounce_brokers(self.TOPIC2, num_bounces=1, clean_shutdown=clean_shutdown) + + # ensure that the share consumers do some work after the broker bounces + self.await_consumed_messages(consumer, min_messages=1000) + + producer.stop() + + self.await_unique_consumed_messages(consumer, min_messages=producer.num_acked, timeout_sec=60) + + assert consumer.total_unique_consumed() >= producer.num_acked + + consumer.stop_all() + + @cluster(num_nodes=10) + @matrix( + clean_shutdown=[True, False], + metadata_quorum=[quorum.isolated_kraft], + num_failed_brokers=[1, 2] + ) + @matrix( + clean_shutdown=[True, False], + metadata_quorum=[quorum.combined_kraft], + num_failed_brokers=[1] + ) + def test_broker_failure(self, clean_shutdown, metadata_quorum=quorum.isolated_kraft, num_failed_brokers=1): + + producer = self.setup_producer(self.TOPIC2["name"]) + consumer = self.setup_share_group(self.TOPIC2["name"], offset_reset_strategy="earliest") + + producer.start() + self.await_produced_messages(producer) + + consumer.start() + self.await_all_members(consumer) + + # shutdown the required number of brokers + self.fail_brokers(self.TOPIC2, num_brokers=num_failed_brokers, clean_shutdown=clean_shutdown) + + # ensure that the share consumers do some work after the broker failure + self.await_consumed_messages(consumer, min_messages=1000) + + producer.stop() + + self.await_unique_consumed_messages(consumer, min_messages=producer.num_acked, timeout_sec=60) + + assert consumer.total_unique_consumed() >= producer.num_acked + + consumer.stop_all() + + @cluster(num_nodes=10) + @matrix( + clean_shutdown=[True, False], + bounce_mode=["all", "rolling"], + metadata_quorum=[quorum.isolated_kraft, quorum.combined_kraft] + ) + def test_share_consumer_bounce(self, clean_shutdown, bounce_mode, metadata_quorum=quorum.zk): + """ + Verify correct share consumer behavior when the share consumers in the group are consecutively restarted. + + Setup: single Kafka cluster with one producer and a set of share consumers in one group. + + - Start a producer which continues producing new messages throughout the test. + - Start up the share consumers and wait until they've joined the group. + - In a loop, restart each share consumer, waiting for each one to rejoin the group before + restarting the rest. + - Verify that the share consumers consume all messages produced by the producer at least once.
+ """ + + producer = self.setup_producer(self.TOPIC2["name"]) + consumer = self.setup_share_group(self.TOPIC2["name"], offset_reset_strategy="earliest") + + producer.start() + self.await_produced_messages(producer) + + consumer.start() + self.await_all_members(consumer) + + if bounce_mode == "all": + self.bounce_all_share_consumers(consumer, clean_shutdown=clean_shutdown) + else: + self.rolling_bounce_share_consumers(consumer, clean_shutdown=clean_shutdown) + + producer.stop() + + self.await_unique_consumed_messages(consumer, min_messages=producer.num_acked, timeout_sec=60) + + assert consumer.total_unique_consumed() >= producer.num_acked + + consumer.stop_all() + + @cluster(num_nodes=10) + @matrix( + clean_shutdown=[True, False], + num_failed_consumers=[1, 2], + metadata_quorum=[quorum.isolated_kraft, quorum.combined_kraft] + ) + def test_share_consumer_failure(self, clean_shutdown, metadata_quorum=quorum.zk, num_failed_consumers=1): + + producer = self.setup_producer(self.TOPIC2["name"]) + consumer = self.setup_share_group(self.TOPIC2["name"], offset_reset_strategy="earliest") + + # startup the producer and ensure that some records have been written + producer.start() + self.await_produced_messages(producer) + + consumer.start() + self.await_all_members(consumer) + + # stop the required number of share consumers + self.fail_share_consumers(consumer, num_failed_consumers, clean_shutdown=clean_shutdown) + + # ensure that the remaining consumer does some work + self.await_consumed_messages(consumer, min_messages=1000, timeout_sec=60) + + producer.stop() + + self.await_unique_consumed_messages(consumer, min_messages=producer.num_acked, timeout_sec=60) + + assert consumer.total_unique_consumed() >= producer.num_acked + + consumer.stop_all() \ No newline at end of file diff --git a/tests/kafkatest/tests/client/truncation_test.py b/tests/kafkatest/tests/client/truncation_test.py index 523bcbca985de..3a091c01a57eb 100644 --- a/tests/kafkatest/tests/client/truncation_test.py +++ b/tests/kafkatest/tests/client/truncation_test.py @@ -12,12 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from ducktape.mark import matrix from ducktape.mark.resource import cluster from ducktape.utils.util import wait_until from kafkatest.tests.verifiable_consumer_test import VerifiableConsumerTest -from kafkatest.services.kafka import TopicPartition +from kafkatest.services.kafka import TopicPartition, quorum from kafkatest.services.verifiable_consumer import VerifiableConsumer @@ -51,7 +51,8 @@ def print_record(event, node): return consumer @cluster(num_nodes=7) - def test_offset_truncate(self): + @matrix(metadata_quorum=quorum.all_non_upgrade, use_new_coordinator=[True]) + def test_offset_truncate(self, metadata_quorum, use_new_coordinator): """ Verify correct consumer behavior when the brokers are consecutively restarted. 
diff --git a/tests/kafkatest/tests/connect/connect_distributed_test.py b/tests/kafkatest/tests/connect/connect_distributed_test.py index e54118c3881be..f00ae31cbce4b 100644 --- a/tests/kafkatest/tests/connect/connect_distributed_test.py +++ b/tests/kafkatest/tests/connect/connect_distributed_test.py @@ -563,7 +563,16 @@ def test_dynamic_logging(self, metadata_quorum): # have been discarded self._restart_worker(worker) restarted_loggers = self.cc.get_all_loggers(worker) - assert initial_loggers == restarted_loggers + + for loggerName in restarted_loggers: + logger = self.cc.get_logger(worker, loggerName) + level = logger['level'] + # ConsumerConfig logger is pre-defined in log4j2 config with ERROR level, + # while other loggers should be set to DEBUG level + if loggerName == 'org.apache.kafka.clients.consumer.ConsumerConfig': + assert level == 'ERROR' + else: + assert level == 'DEBUG' def _different_level(self, current_level): return 'INFO' if current_level is None or current_level.upper() != 'INFO' else 'WARN' diff --git a/tests/kafkatest/tests/connect/connect_rest_test.py b/tests/kafkatest/tests/connect/connect_rest_test.py index 1bd340f25c7b7..0e5c90df0f468 100644 --- a/tests/kafkatest/tests/connect/connect_rest_test.py +++ b/tests/kafkatest/tests/connect/connect_rest_test.py @@ -37,12 +37,14 @@ class ConnectRestApiTest(KafkaTest): 'topic', 'file', 'transforms', 'config.action.reload', 'errors.retry.timeout', 'errors.retry.delay.max.ms', 'errors.tolerance', 'errors.log.enable', 'errors.log.include.messages', 'predicates', 'topic.creation.groups', 'exactly.once.support', 'transaction.boundary', 'transaction.boundary.interval.ms', 'offsets.storage.topic', - 'tasks.max.enforce'} + 'tasks.max.enforce', 'connector.plugin.version', 'key.converter.plugin.version', 'value.converter.plugin.version', + 'header.converter.plugin.version'} FILE_SINK_CONFIGS = {'name', 'connector.class', 'tasks.max', 'key.converter', 'value.converter', 'header.converter', 'topics', 'file', 'transforms', 'topics.regex', 'config.action.reload', 'errors.retry.timeout', 'errors.retry.delay.max.ms', 'errors.tolerance', 'errors.log.enable', 'errors.log.include.messages', 'errors.deadletterqueue.topic.name', 'errors.deadletterqueue.topic.replication.factor', 'errors.deadletterqueue.context.headers.enable', 'predicates', - 'tasks.max.enforce'} + 'tasks.max.enforce', 'connector.plugin.version', 'key.converter.plugin.version', 'value.converter.plugin.version', + 'header.converter.plugin.version'} INPUT_FILE = "/mnt/connect.input" INPUT_FILE2 = "/mnt/connect.input2" diff --git a/tests/kafkatest/tests/connect/templates/connect-distributed.properties b/tests/kafkatest/tests/connect/templates/connect-distributed.properties index fa2172edd7b97..132155852471d 100644 --- a/tests/kafkatest/tests/connect/templates/connect-distributed.properties +++ b/tests/kafkatest/tests/connect/templates/connect-distributed.properties @@ -52,11 +52,6 @@ offset.flush.interval.ms=5000 rest.advertised.host.name = {{ node.account.hostname }} - -# Reduce session timeouts so tests that kill workers don't need to wait as long to recover -session.timeout.ms=10000 -consumer.session.timeout.ms=10000 - # Reduce the admin client request timeouts so that we don't wait the default 120 sec before failing to connect the admin client request.timeout.ms=30000 diff --git a/tests/kafkatest/tests/core/authorizer_test.py b/tests/kafkatest/tests/core/authorizer_test.py index 1e7178f5f812b..60c0612f356b1 100644 --- a/tests/kafkatest/tests/core/authorizer_test.py +++ 
b/tests/kafkatest/tests/core/authorizer_test.py @@ -19,7 +19,6 @@ from ducktape.tests.test import Test from kafkatest.services.kafka import KafkaService, quorum -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.security.kafka_acls import ACLs class AuthorizerTest(Test): @@ -47,15 +46,8 @@ def setUp(self): def test_authorizer(self, metadata_quorum, authorizer_class): topics = {"test_topic": {"partitions": 1, "replication-factor": 1}} - if (authorizer_class == KafkaService.KRAFT_ACL_AUTHORIZER): - self.zk = None - else: - self.zk = ZookeeperService(self.test_context, num_nodes=1) - self.zk.start() - - self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, - topics=topics, controller_num_nodes_override=1, - allow_zk_with_kraft=True) + self.kafka = KafkaService(self.test_context, num_nodes=1, zk=None, + topics=topics, controller_num_nodes_override=1) broker_security_protocol = "SSL" broker_principal = "User:CN=systemtest" diff --git a/tests/kafkatest/tests/core/compatibility_test_new_broker_test.py b/tests/kafkatest/tests/core/compatibility_test_new_broker_test.py index c7f600a0f3e69..7a9d87c0968ae 100644 --- a/tests/kafkatest/tests/core/compatibility_test_new_broker_test.py +++ b/tests/kafkatest/tests/core/compatibility_test_new_broker_test.py @@ -18,12 +18,11 @@ from kafkatest.services.kafka import KafkaService, quorum from kafkatest.services.kafka import config_property from kafkatest.services.verifiable_producer import VerifiableProducer -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest from kafkatest.utils import is_int from kafkatest.version import LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, \ LATEST_2_7, LATEST_2_8, LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, LATEST_3_6, \ - LATEST_3_7, LATEST_3_8, DEV_BRANCH, KafkaVersion + LATEST_3_7, LATEST_3_8, LATEST_3_9, DEV_BRANCH, KafkaVersion # Compatibility tests for moving to a new broker (e.g., 0.10.x) and using a mix of old and new clients (e.g., 0.9.x) class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest): @@ -33,10 +32,6 @@ def __init__(self, test_context): def setUp(self): self.topic = "test_topic" - self.zk = ZookeeperService(self.test_context, num_nodes=1) if quorum.for_test(self.test_context) == quorum.zk else None - - if self.zk: - self.zk.start() # Producer and consumer self.producer_throughput = 10000 @@ -64,11 +59,10 @@ def setUp(self): @matrix(producer_version=[str(LATEST_3_6)], consumer_version=[str(LATEST_3_6)], compression_types=[["none"]], timestamp_type=[str("CreateTime")], metadata_quorum=quorum.all_non_upgrade) @matrix(producer_version=[str(LATEST_3_7)], consumer_version=[str(LATEST_3_7)], compression_types=[["none"]], timestamp_type=[str("CreateTime")], metadata_quorum=quorum.all_non_upgrade) @matrix(producer_version=[str(LATEST_3_8)], consumer_version=[str(LATEST_3_8)], compression_types=[["none"]], timestamp_type=[str("CreateTime")], metadata_quorum=quorum.all_non_upgrade) + @matrix(producer_version=[str(LATEST_3_9)], consumer_version=[str(LATEST_3_9)], compression_types=[["none"]], timestamp_type=[str("CreateTime")], metadata_quorum=quorum.all_non_upgrade) @matrix(producer_version=[str(LATEST_2_1)], consumer_version=[str(LATEST_2_1)], compression_types=[["zstd"]], timestamp_type=[str("CreateTime")], metadata_quorum=quorum.all_non_upgrade) - def test_compatibility(self, producer_version, consumer_version, 
compression_types, new_consumer=True, timestamp_type=None, metadata_quorum=quorum.zk): - if not new_consumer and metadata_quorum != quorum.zk: - raise Exception("ZooKeeper-based consumers are not supported when using a KRaft metadata quorum") - self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: { + def test_compatibility(self, producer_version, consumer_version, compression_types, timestamp_type=None, metadata_quorum=quorum.zk): + self.kafka = KafkaService(self.test_context, num_nodes=3, zk=None, version=DEV_BRANCH, topics={self.topic: { "partitions": 3, "replication-factor": 3, 'configs': {"min.insync.replicas": 2}}}, @@ -85,7 +79,7 @@ def test_compatibility(self, producer_version, consumer_version, compression_typ version=KafkaVersion(producer_version)) self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, - self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer, + self.topic, consumer_timeout_ms=30000, message_validator=is_int, version=KafkaVersion(consumer_version)) self.run_produce_consume_validate(lambda: wait_until( diff --git a/tests/kafkatest/tests/core/consume_bench_test.py b/tests/kafkatest/tests/core/consume_bench_test.py index c205604f8f6ce..c84f0dda59e5f 100644 --- a/tests/kafkatest/tests/core/consume_bench_test.py +++ b/tests/kafkatest/tests/core/consume_bench_test.py @@ -22,15 +22,13 @@ from kafkatest.services.trogdor.consume_bench_workload import ConsumeBenchWorkloadService, ConsumeBenchWorkloadSpec from kafkatest.services.trogdor.task_spec import TaskSpec from kafkatest.services.trogdor.trogdor import TrogdorService -from kafkatest.services.zookeeper import ZookeeperService class ConsumeBenchTest(Test): def __init__(self, test_context): """:type test_context: ducktape.tests.test.TestContext""" super(ConsumeBenchTest, self).__init__(test_context) - self.zk = ZookeeperService(test_context, num_nodes=3) if quorum.for_test(test_context) == quorum.zk else None - self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk) + self.kafka = KafkaService(test_context, num_nodes=3, zk=None) self.producer_workload_service = ProduceBenchWorkloadService(test_context, self.kafka) self.consumer_workload_service = ConsumeBenchWorkloadService(test_context, self.kafka) self.consumer_workload_service_2 = ConsumeBenchWorkloadService(test_context, self.kafka) @@ -42,15 +40,11 @@ def __init__(self, test_context): def setUp(self): self.trogdor.start() - if self.zk: - self.zk.start() self.kafka.start() def teardown(self): self.trogdor.stop() self.kafka.stop() - if self.zk: - self.zk.stop() def produce_messages(self, topics, max_messages=10000): produce_spec = ProduceBenchWorkloadSpec(0, TaskSpec.MAX_DURATION_MS, @@ -85,7 +79,7 @@ def produce_messages(self, topics, max_messages=10000): use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_consume_bench(self, topics, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_consume_bench(self, topics, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ Runs a ConsumeBench workload to consume messages """ @@ -115,7 +109,7 @@ def test_consume_bench(self, topics, metadata_quorum=quorum.zk, use_new_coordina use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_single_partition(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_single_partition(self, metadata_quorum, use_new_coordinator=False, 
group_protocol=None): """ Run a ConsumeBench against a single partition """ @@ -146,7 +140,7 @@ def test_single_partition(self, metadata_quorum=quorum.zk, use_new_coordinator=F use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_multiple_consumers_random_group_topics(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_multiple_consumers_random_group_topics(self, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ Runs multiple consumers group to read messages from topics. Since a consumerGroup isn't specified, each consumer should read from all topics independently @@ -178,7 +172,7 @@ def test_multiple_consumers_random_group_topics(self, metadata_quorum=quorum.zk, use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_two_consumers_specified_group_topics(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_two_consumers_specified_group_topics(self, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ Runs two consumers in the same consumer group to read messages from topics. Since a consumerGroup is specified, each consumer should dynamically get assigned a partition from group @@ -211,7 +205,7 @@ def test_two_consumers_specified_group_topics(self, metadata_quorum=quorum.zk, u use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_multiple_consumers_random_group_partitions(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_multiple_consumers_random_group_partitions(self, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ Runs multiple consumers in to read messages from specific partitions. Since a consumerGroup isn't specified, each consumer will get assigned a random group @@ -244,7 +238,7 @@ def test_multiple_consumers_random_group_partitions(self, metadata_quorum=quorum use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_multiple_consumers_specified_group_partitions_should_raise(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_multiple_consumers_specified_group_partitions_should_raise(self, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ Runs multiple consumers in the same group to read messages from specific partitions. It is an invalid configuration to provide a consumer group and specific partitions. 
diff --git a/tests/kafkatest/tests/core/consumer_group_command_test.py b/tests/kafkatest/tests/core/consumer_group_command_test.py index 2df53e3093a4f..57af320574b78 100644 --- a/tests/kafkatest/tests/core/consumer_group_command_test.py +++ b/tests/kafkatest/tests/core/consumer_group_command_test.py @@ -19,7 +19,6 @@ from ducktape.mark import matrix from ducktape.mark.resource import cluster -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.kafka import KafkaService, quorum, consumer_group from kafkatest.services.console_consumer import ConsoleConsumer from kafkatest.services.security.security_config import SecurityConfig @@ -40,23 +39,17 @@ class ConsumerGroupCommandTest(Test): def __init__(self, test_context): super(ConsumerGroupCommandTest, self).__init__(test_context) - self.num_zk = 1 self.num_brokers = 1 self.topics = { TOPIC: {'partitions': 1, 'replication-factor': 1} } - self.zk = ZookeeperService(test_context, self.num_zk) if quorum.for_test(test_context) == quorum.zk else None - - def setUp(self): - if self.zk: - self.zk.start() def start_kafka(self, security_protocol, interbroker_security_protocol): self.kafka = KafkaService( self.test_context, self.num_brokers, - self.zk, security_protocol=security_protocol, + None, security_protocol=security_protocol, interbroker_security_protocol=interbroker_security_protocol, topics=self.topics, - controller_num_nodes_override=self.num_zk) + controller_num_nodes_override=self.num_brokers) self.kafka.start() def start_consumer(self, group_protocol=None): @@ -102,7 +95,7 @@ def setup_and_verify(self, security_protocol, group=None, group_protocol=None): use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_list_consumer_groups(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_list_consumer_groups(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Tests if ConsumerGroupCommand is listing correct consumer groups :return: None @@ -121,7 +114,7 @@ def test_list_consumer_groups(self, security_protocol='PLAINTEXT', metadata_quor use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_describe_consumer_group(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_describe_consumer_group(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.isolated_kraft, use_new_coordinator=False, group_protocol=None): """ Tests if ConsumerGroupCommand is describing a consumer group correctly :return: None diff --git a/tests/kafkatest/tests/core/controller_mutation_quota_test.py b/tests/kafkatest/tests/core/controller_mutation_quota_test.py index bf8a3b874ed46..98f33deab1f17 100644 --- a/tests/kafkatest/tests/core/controller_mutation_quota_test.py +++ b/tests/kafkatest/tests/core/controller_mutation_quota_test.py @@ -17,15 +17,7 @@ from ducktape.mark import matrix from ducktape.tests.test import Test -from kafkatest.services.trogdor.produce_bench_workload import ProduceBenchWorkloadService, ProduceBenchWorkloadSpec -from kafkatest.services.trogdor.consume_bench_workload import ConsumeBenchWorkloadService, ConsumeBenchWorkloadSpec -from kafkatest.services.trogdor.task_spec import TaskSpec from kafkatest.services.kafka import KafkaService, quorum -from kafkatest.services.trogdor.trogdor import TrogdorService -from kafkatest.services.zookeeper 
import ZookeeperService - -import time - class ControllerMutationQuotaTest(Test): """Tests throttled partition changes via the kafka-topics CLI as follows: @@ -54,11 +46,10 @@ class ControllerMutationQuotaTest(Test): def __init__(self, test_context): super(ControllerMutationQuotaTest, self).__init__(test_context=test_context) self.test_context = test_context - self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None self.window_num = 10 self.window_size_seconds = 200 # must be long enough such that all CLI commands fit into it - self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, + self.kafka = KafkaService(self.test_context, num_nodes=1, zk=None, server_prop_overrides=[ ["quota.window.num", "%s" % self.window_num], ["controller.quota.window.size.seconds", "%s" % self.window_size_seconds] @@ -66,19 +57,15 @@ def __init__(self, test_context): controller_num_nodes_override=1) def setUp(self): - if self.zk: - self.zk.start() self.kafka.start() def teardown(self): # Need to increase the timeout due to partition count self.kafka.stop() - if self.zk: - self.zk.stop() @cluster(num_nodes=2) @matrix(metadata_quorum=quorum.all_kraft) - def test_controller_mutation_quota(self, metadata_quorum=quorum.zk): + def test_controller_mutation_quota(self, metadata_quorum): self.partition_count = 10 mutation_rate = 3 * self.partition_count / (self.window_num * self.window_size_seconds) diff --git a/tests/kafkatest/tests/core/delegation_token_test.py b/tests/kafkatest/tests/core/delegation_token_test.py index 4855572309891..4a1b1e83a9aaf 100644 --- a/tests/kafkatest/tests/core/delegation_token_test.py +++ b/tests/kafkatest/tests/core/delegation_token_test.py @@ -18,7 +18,6 @@ from ducktape.tests.test import Test from ducktape.utils.util import wait_until from kafkatest.services.kafka import config_property, KafkaService, quorum -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.console_consumer import ConsoleConsumer from kafkatest.services.delegation_tokens import DelegationTokens from kafkatest.services.verifiable_producer import VerifiableProducer @@ -35,8 +34,7 @@ def __init__(self, test_context): self.test_context = test_context self.topic = "topic" - self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None - self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka", + self.kafka = KafkaService(self.test_context, num_nodes=1, zk=None, topics={self.topic: {"partitions": 1, "replication-factor": 1}}, server_prop_overrides=[ [config_property.DELEGATION_TOKEN_MAX_LIFETIME_MS, "604800000"], @@ -66,11 +64,6 @@ def __init__(self, test_context): self.kafka.client_sasl_mechanism = 'GSSAPI,SCRAM-SHA-256' self.kafka.interbroker_sasl_mechanism = 'GSSAPI' - - def setUp(self): - if self.zk: - self.zk.start() - def tearDown(self): self.producer.nodes[0].account.remove(self.jaas_deleg_conf_path) self.consumer.nodes[0].account.remove(self.jaas_deleg_conf_path) @@ -114,7 +107,7 @@ def renew_delegation_token(self): @cluster(num_nodes=5) @matrix(metadata_quorum=quorum.all_non_upgrade) - def test_delegation_token_lifecycle(self, metadata_quorum=quorum.zk): + def test_delegation_token_lifecycle(self, metadata_quorum): self.kafka.start() self.delegation_tokens = DelegationTokens(self.kafka, self.test_context) diff --git a/tests/kafkatest/tests/core/downgrade_test.py b/tests/kafkatest/tests/core/downgrade_test.py deleted file mode 100644 index 
e35f6f7515421..0000000000000 --- a/tests/kafkatest/tests/core/downgrade_test.py +++ /dev/null @@ -1,166 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ducktape.mark import parametrize, matrix -from ducktape.mark.resource import cluster -from ducktape.utils.util import wait_until - -from kafkatest.services.kafka import config_property -from kafkatest.tests.end_to_end import EndToEndTest -from kafkatest.version import LATEST_2_4, LATEST_2_5, \ - LATEST_2_6, LATEST_2_7, LATEST_2_8, LATEST_3_0, LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, \ - LATEST_3_6, LATEST_3_7, LATEST_3_8, DEV_BRANCH, KafkaVersion - -class TestDowngrade(EndToEndTest): - PARTITIONS = 3 - REPLICATION_FACTOR = 3 - - TOPIC_CONFIG = { - "partitions": PARTITIONS, - "replication-factor": REPLICATION_FACTOR, - "configs": {"min.insync.replicas": 2} - } - - def __init__(self, test_context): - super(TestDowngrade, self).__init__(test_context=test_context, topic_config=self.TOPIC_CONFIG) - - def upgrade_from(self, kafka_version): - for node in self.kafka.nodes: - self.kafka.stop_node(node) - node.version = DEV_BRANCH - node.config[config_property.INTER_BROKER_PROTOCOL_VERSION] = str(kafka_version) - node.config[config_property.MESSAGE_FORMAT_VERSION] = str(kafka_version) - self.kafka.start_node(node) - self.wait_until_rejoin() - - def downgrade_to(self, kafka_version): - for node in self.kafka.nodes: - self.kafka.stop_node(node) - node.version = kafka_version - del node.config[config_property.INTER_BROKER_PROTOCOL_VERSION] - del node.config[config_property.MESSAGE_FORMAT_VERSION] - self.kafka.start_node(node) - self.wait_until_rejoin() - - def setup_services(self, kafka_version, compression_types, security_protocol, static_membership): - self.create_zookeeper_if_necessary() - self.zk.start() - - self.create_kafka(num_nodes=3, - security_protocol=security_protocol, - interbroker_security_protocol=security_protocol, - version=kafka_version) - self.kafka.start() - - self.create_producer(log_level="DEBUG", - compression_types=compression_types, - version=kafka_version) - self.producer.start() - - self.create_consumer(log_level="DEBUG", - version=kafka_version, - static_membership=static_membership) - - self.consumer.start() - - def wait_until_rejoin(self): - for partition in range(0, self.PARTITIONS): - wait_until(lambda: len(self.kafka.isr_idx_list(self.topic, partition)) == self.REPLICATION_FACTOR, - timeout_sec=60, backoff_sec=1, err_msg="Replicas did not rejoin the ISR in a reasonable amount of time") - - @cluster(num_nodes=7) - @parametrize(version=str(LATEST_3_8), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_8), compression_types=["zstd"], security_protocol="SASL_SSL") - @parametrize(version=str(LATEST_3_7), compression_types=["snappy"]) - 
@parametrize(version=str(LATEST_3_7), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_7)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_6), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_6), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_6)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_5), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_5), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_5)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_4), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_4), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_4)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_3), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_3), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_3)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_2), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_2), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_2)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_1), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_1), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_1)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_3_0), compression_types=["snappy"]) - @parametrize(version=str(LATEST_3_0), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_3_0)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_2_8), compression_types=["snappy"]) - @parametrize(version=str(LATEST_2_8), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_2_8)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_2_7), compression_types=["lz4"]) - @parametrize(version=str(LATEST_2_7), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_2_7)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_2_6), compression_types=["lz4"]) - @parametrize(version=str(LATEST_2_6), compression_types=["zstd"], security_protocol="SASL_SSL") - @matrix(version=[str(LATEST_2_6)], compression_types=[["none"]], static_membership=[False, True]) - @matrix(version=[str(LATEST_2_5)], compression_types=[["none"]], static_membership=[False, True]) - @parametrize(version=str(LATEST_2_5), compression_types=["zstd"], security_protocol="SASL_SSL") - # static membership was introduced with a buggy verifiable console consumer which - # required static membership to be enabled - @parametrize(version=str(LATEST_2_4), compression_types=["none"], static_membership=True) - @parametrize(version=str(LATEST_2_4), compression_types=["zstd"], security_protocol="SASL_SSL", static_membership=True) - def test_upgrade_and_downgrade(self, version, compression_types, security_protocol="PLAINTEXT", - static_membership=False): - """Test upgrade 
and downgrade of Kafka cluster from old versions to the current version - - `version` is the Kafka version to upgrade from and downgrade back to - - Downgrades are supported to any version which is at or above the current - `inter.broker.protocol.version` (IBP). For example, if a user upgrades from 1.1 to 2.3, - but they leave the IBP set to 1.1, then downgrading to any version at 1.1 or higher is - supported. - - This test case verifies that producers and consumers continue working during - the course of an upgrade and downgrade. - - - Start 3 node broker cluster on version 'kafka_version' - - Start producer and consumer in the background - - Roll the cluster to upgrade to the current version with IBP set to 'kafka_version' - - Roll the cluster to downgrade back to 'kafka_version' - - Finally, validate that every message acked by the producer was consumed by the consumer - """ - kafka_version = KafkaVersion(version) - - self.setup_services(kafka_version, compression_types, security_protocol, static_membership) - self.await_startup() - - start_topic_id = self.kafka.topic_id(self.topic) - - self.logger.info("First pass bounce - rolling upgrade") - self.upgrade_from(kafka_version) - self.await_consumed_records(min_records=5000) - - upgrade_topic_id = self.kafka.topic_id(self.topic) - assert start_topic_id == upgrade_topic_id - - self.logger.info("Second pass bounce - rolling downgrade") - num_records_acked = self.producer.num_acked - self.downgrade_to(kafka_version) - self.run_validation(min_records=num_records_acked+5000) - - downgrade_topic_id = self.kafka.topic_id(self.topic) - assert upgrade_topic_id == downgrade_topic_id - assert self.kafka.check_protocol_errors(self) diff --git a/tests/kafkatest/tests/core/fetch_from_follower_test.py b/tests/kafkatest/tests/core/fetch_from_follower_test.py index a4c810116ddc7..8db6e6d31105e 100644 --- a/tests/kafkatest/tests/core/fetch_from_follower_test.py +++ b/tests/kafkatest/tests/core/fetch_from_follower_test.py @@ -23,7 +23,6 @@ from kafkatest.services.kafka import KafkaService, quorum, consumer_group from kafkatest.services.monitor.jmx import JmxTool from kafkatest.services.verifiable_producer import VerifiableProducer -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest from kafkatest.utils import is_int @@ -37,10 +36,9 @@ def __init__(self, test_context): super(FetchFromFollowerTest, self).__init__(test_context=test_context) self.jmx_tool = JmxTool(test_context, jmx_poll_ms=100) self.topic = "test_topic" - self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None self.kafka = KafkaService(test_context, num_nodes=3, - zk=self.zk, + zk=None, topics={ self.topic: { "partitions": 1, @@ -65,8 +63,6 @@ def min_cluster_size(self): return super(FetchFromFollowerTest, self).min_cluster_size() + self.num_producers * 2 + self.num_consumers * 2 def setUp(self): - if self.zk: - self.zk.start() self.kafka.start() @cluster(num_nodes=9) @@ -79,7 +75,7 @@ def setUp(self): use_new_coordinator=[True], group_protocol=consumer_group.all_group_protocols ) - def test_consumer_preferred_read_replica(self, metadata_quorum=quorum.zk, use_new_coordinator=False, group_protocol=None): + def test_consumer_preferred_read_replica(self, metadata_quorum, use_new_coordinator=False, group_protocol=None): """ This test starts up brokers with "broker.rack" and "replica.selector.class" configurations set. 
The replica selector is set to the rack-aware implementation. One of the brokers has a different rack than the other two. diff --git a/tests/kafkatest/tests/core/get_offset_shell_test.py b/tests/kafkatest/tests/core/get_offset_shell_test.py index b48185d15d20f..c54dc565ff6df 100644 --- a/tests/kafkatest/tests/core/get_offset_shell_test.py +++ b/tests/kafkatest/tests/core/get_offset_shell_test.py @@ -20,7 +20,6 @@ from ducktape.mark.resource import cluster from kafkatest.services.verifiable_producer import VerifiableProducer -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.kafka import KafkaService, quorum from kafkatest.services.console_consumer import ConsoleConsumer @@ -51,7 +50,6 @@ class GetOffsetShellTest(Test): """ def __init__(self, test_context): super(GetOffsetShellTest, self).__init__(test_context) - self.num_zk = 1 self.num_brokers = 1 self.messages_received_count = 0 self.topics = { @@ -64,16 +62,10 @@ def __init__(self, test_context): TOPIC_TEST_TOPIC_PARTITIONS2: {'partitions': 2, 'replication-factor': REPLICATION_FACTOR} } - self.zk = ZookeeperService(test_context, self.num_zk) if quorum.for_test(test_context) == quorum.zk else None - - def setUp(self): - if self.zk: - self.zk.start() - def start_kafka(self, security_protocol, interbroker_security_protocol): self.kafka = KafkaService( self.test_context, self.num_brokers, - self.zk, security_protocol=security_protocol, + None, security_protocol=security_protocol, interbroker_security_protocol=interbroker_security_protocol, topics=self.topics) self.kafka.start() diff --git a/tests/kafkatest/tests/core/group_mode_transactions_test.py b/tests/kafkatest/tests/core/group_mode_transactions_test.py index 1ffab0413c9c9..2db9c62b46bd1 100644 --- a/tests/kafkatest/tests/core/group_mode_transactions_test.py +++ b/tests/kafkatest/tests/core/group_mode_transactions_test.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.kafka import KafkaService, quorum from kafkatest.services.console_consumer import ConsoleConsumer from kafkatest.services.verifiable_producer import VerifiableProducer @@ -61,14 +60,9 @@ def __init__(self, test_context): self.progress_timeout_sec = 60 self.consumer_group = "grouped-transactions-test-consumer-group" - self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None self.kafka = KafkaService(test_context, num_nodes=self.num_brokers, - zk=self.zk, controller_num_nodes_override=1) - - def setUp(self): - if self.zk: - self.zk.start() + zk=None, controller_num_nodes_override=1) def seed_messages(self, topic, num_seed_messages): seed_timeout_sec = 10000 @@ -98,16 +92,11 @@ def bounce_brokers(self, clean_shutdown): else: self.kafka.stop_node(node, clean_shutdown = False) gracePeriodSecs = 5 - if self.zk: - wait_until(lambda: not self.kafka.pids(node) and not self.kafka.is_registered(node), - timeout_sec=self.kafka.zk_session_timeout + gracePeriodSecs, - err_msg="Failed to see timely deregistration of hard-killed broker %s" % str(node.account)) - else: - brokerSessionTimeoutSecs = 18 - wait_until(lambda: not self.kafka.pids(node), - timeout_sec=brokerSessionTimeoutSecs + gracePeriodSecs, - err_msg="Failed to see timely disappearance of process for hard-killed broker %s" % str(node.account)) - time.sleep(brokerSessionTimeoutSecs + gracePeriodSecs) + brokerSessionTimeoutSecs = 18 + wait_until(lambda: not self.kafka.pids(node), + timeout_sec=brokerSessionTimeoutSecs + gracePeriodSecs, + err_msg="Failed to see timely disappearance of process for hard-killed broker %s" % str(node.account)) + time.sleep(brokerSessionTimeoutSecs + gracePeriodSecs) self.kafka.start_node(node) self.kafka.await_no_under_replicated_partitions() @@ -271,8 +260,9 @@ def setup_topics(self): @cluster(num_nodes=10) @matrix(failure_mode=["hard_bounce", "clean_bounce"], - bounce_target=["brokers", "clients"]) - def test_transactions(self, failure_mode, bounce_target, metadata_quorum=quorum.zk): + bounce_target=["brokers", "clients"], + metadata_quorum=quorum.all_non_upgrade) + def test_transactions(self, failure_mode, bounce_target, metadata_quorum): security_protocol = 'PLAINTEXT' self.kafka.security_protocol = security_protocol self.kafka.interbroker_security_protocol = security_protocol diff --git a/tests/kafkatest/tests/core/kraft_upgrade_test.py b/tests/kafkatest/tests/core/kraft_upgrade_test.py index 6975069b73e5f..ab724a91e9e93 100644 --- a/tests/kafkatest/tests/core/kraft_upgrade_test.py +++ b/tests/kafkatest/tests/core/kraft_upgrade_test.py @@ -17,13 +17,13 @@ from ducktape.mark.resource import cluster from ducktape.utils.util import wait_until from kafkatest.services.console_consumer import ConsoleConsumer -from kafkatest.services.kafka import KafkaService +from kafkatest.services.kafka import config_property, KafkaService from kafkatest.services.kafka.quorum import isolated_kraft, combined_kraft from kafkatest.services.verifiable_producer import VerifiableProducer from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest from kafkatest.utils import is_int from kafkatest.version import LATEST_3_1, LATEST_3_2, LATEST_3_3, LATEST_3_4, LATEST_3_5, \ - LATEST_3_6, LATEST_3_7, LATEST_3_8, DEV_BRANCH, KafkaVersion, LATEST_STABLE_METADATA_VERSION + LATEST_3_6, LATEST_3_7, LATEST_3_8, LATEST_3_9, DEV_BRANCH, KafkaVersion, LATEST_STABLE_METADATA_VERSION # # Test 
upgrading between different KRaft versions. @@ -53,7 +53,7 @@ def wait_until_rejoin(self): wait_until(lambda: len(self.kafka.isr_idx_list(self.topic, partition)) == self.replication_factor, timeout_sec=60, backoff_sec=1, err_msg="Replicas did not rejoin the ISR in a reasonable amount of time") - def perform_version_change(self, from_kafka_version): + def upgrade_to_dev_version(self, from_kafka_version, update_metadata_version): self.logger.info("Performing rolling upgrade.") for node in self.kafka.controller_quorum.nodes: self.logger.info("Stopping controller node %s" % node.account.hostname) @@ -71,8 +71,28 @@ def perform_version_change(self, from_kafka_version): self.kafka.start_node(node) self.wait_until_rejoin() self.logger.info("Successfully restarted broker node %s" % node.account.hostname) - self.logger.info("Changing metadata.version to %s" % LATEST_STABLE_METADATA_VERSION) - self.kafka.upgrade_metadata_version(LATEST_STABLE_METADATA_VERSION) + if update_metadata_version: + self.logger.info("Changing metadata.version to %s" % LATEST_STABLE_METADATA_VERSION) + self.kafka.upgrade_metadata_version(LATEST_STABLE_METADATA_VERSION) + + def downgrade_to_version(self, to_kafka_version): + self.logger.info("Performing rolling downgrade.") + for node in self.kafka.controller_quorum.nodes: + self.logger.info("Stopping controller node %s" % node.account.hostname) + self.kafka.controller_quorum.stop_node(node) + node.version = KafkaVersion(to_kafka_version) + self.logger.info("Restarting controller node %s" % node.account.hostname) + self.kafka.controller_quorum.start_node(node) + self.wait_until_rejoin() + self.logger.info("Successfully restarted controller node %s" % node.account.hostname) + for node in self.kafka.nodes: + self.logger.info("Stopping broker node %s" % node.account.hostname) + self.kafka.stop_node(node) + node.version = KafkaVersion(to_kafka_version) + self.logger.info("Restarting broker node %s" % node.account.hostname) + self.kafka.start_node(node) + self.wait_until_rejoin() + self.logger.info("Successfully restarted broker node %s" % node.account.hostname) def run_upgrade(self, from_kafka_version): """Test upgrade of Kafka broker cluster from various versions to the current version @@ -100,23 +120,78 @@ def run_upgrade(self, from_kafka_version): compression_types=["none"], version=KafkaVersion(from_kafka_version)) self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, - self.topic, new_consumer=True, consumer_timeout_ms=30000, + self.topic, consumer_timeout_ms=30000, message_validator=is_int, version=KafkaVersion(from_kafka_version)) - self.run_produce_consume_validate(core_test_action=lambda: self.perform_version_change(from_kafka_version)) + + self.run_produce_consume_validate(core_test_action=lambda: self.upgrade_to_dev_version(from_kafka_version, True)) + cluster_id = self.kafka.cluster_id() + assert cluster_id is not None + assert len(cluster_id) == 22 + assert self.kafka.check_protocol_errors(self) + + def run_upgrade_downgrade(self, starting_kafka_version): + """Test upgrade and downgrade of Kafka broker cluster from various versions to current version and back + + - Start 3 node broker cluster on version 'starting_kafka_version'. + - Perform rolling upgrade but do not update metadata.version. + - Start producer and consumer in the background. + - Perform rolling downgrade. + - Finally, validate that every message acked by the producer was consumed by the consumer. 
+ """ + + # Due to compatability issue with version 3.3, we need to use a single folder. Using multiple folders + # will cause broker to throw InconsistentBrokerMetadataException during startup. + # see https://github.com/apache/kafka/pull/13130 + server_prop_overrides = None + if starting_kafka_version == str(LATEST_3_3): + server_prop_overrides = [[config_property.LOG_DIRS, "/mnt/kafka/kafka-metadata-logs"], [config_property.METADATA_LOG_DIR, ""]] + + fromKafkaVersion = KafkaVersion(starting_kafka_version) + self.kafka = KafkaService(self.test_context, + num_nodes=3, + zk=None, + version=fromKafkaVersion, + topics={self.topic: {"partitions": self.partitions, + "replication-factor": self.replication_factor, + 'configs': {"min.insync.replicas": 2}}}, + server_prop_overrides = server_prop_overrides) + self.kafka.start() + self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, + self.topic, throughput=self.producer_throughput, + message_validator=is_int, + compression_types=["none"], + version=KafkaVersion(starting_kafka_version)) + self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, + self.topic, consumer_timeout_ms=30000, + message_validator=is_int, version=KafkaVersion(starting_kafka_version)) + self.upgrade_to_dev_version(starting_kafka_version, False) + + self.run_produce_consume_validate(core_test_action=lambda: self.downgrade_to_version(starting_kafka_version)) cluster_id = self.kafka.cluster_id() assert cluster_id is not None assert len(cluster_id) == 22 assert self.kafka.check_protocol_errors(self) @cluster(num_nodes=5) - @matrix(from_kafka_version=[str(LATEST_3_1), str(LATEST_3_2), str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(DEV_BRANCH)], + @matrix(from_kafka_version=[str(LATEST_3_1), str(LATEST_3_2), str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(LATEST_3_9), str(DEV_BRANCH)], metadata_quorum=[combined_kraft]) def test_combined_mode_upgrade(self, from_kafka_version, metadata_quorum, use_new_coordinator=False): self.run_upgrade(from_kafka_version) @cluster(num_nodes=8) - @matrix(from_kafka_version=[str(LATEST_3_1), str(LATEST_3_2), str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(DEV_BRANCH)], + @matrix(from_kafka_version=[str(LATEST_3_1), str(LATEST_3_2), str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(LATEST_3_9), str(DEV_BRANCH)], metadata_quorum=[isolated_kraft]) def test_isolated_mode_upgrade(self, from_kafka_version, metadata_quorum, use_new_coordinator=False): self.run_upgrade(from_kafka_version) + @cluster(num_nodes=5) + @matrix(from_kafka_version=[str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(LATEST_3_9), str(DEV_BRANCH)], + metadata_quorum=[combined_kraft]) + def test_combined_mode_upgrade_downgrade(self, from_kafka_version, metadata_quorum, use_new_coordinator=False): + self.run_upgrade_downgrade(from_kafka_version) + + @cluster(num_nodes=8) + @matrix(from_kafka_version=[str(LATEST_3_3), str(LATEST_3_4), str(LATEST_3_5), str(LATEST_3_6), str(LATEST_3_7), str(LATEST_3_8), str(LATEST_3_9), str(DEV_BRANCH)], + metadata_quorum=[isolated_kraft]) + def test_isolated_mode_upgrade_downgrade(self, from_kafka_version, metadata_quorum, use_new_coordinator=False): + self.run_upgrade_downgrade(from_kafka_version) \ No newline at end 
of file diff --git a/tests/kafkatest/tests/core/log_dir_failure_test.py b/tests/kafkatest/tests/core/log_dir_failure_test.py index ba8390ceb1c91..4bc453c517bff 100644 --- a/tests/kafkatest/tests/core/log_dir_failure_test.py +++ b/tests/kafkatest/tests/core/log_dir_failure_test.py @@ -17,7 +17,6 @@ from ducktape.mark import matrix from ducktape.mark.resource import cluster from kafkatest.services.kafka import config_property -from kafkatest.services.zookeeper import ZookeeperService from kafkatest.services.kafka import KafkaService, quorum from kafkatest.services.verifiable_producer import VerifiableProducer from kafkatest.services.console_consumer import ConsoleConsumer @@ -62,10 +61,9 @@ def __init__(self, test_context): self.topic1 = "test_topic_1" self.topic2 = "test_topic_2" - self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None self.kafka = KafkaService(test_context, num_nodes=3, - zk=self.zk, + zk=None, topics={ self.topic1: {"partitions": 1, "replication-factor": 3, "configs": {"min.insync.replicas": 1}}, self.topic2: {"partitions": 1, "replication-factor": 3, "configs": {"min.insync.replicas": 2}} @@ -83,10 +81,6 @@ def __init__(self, test_context): self.num_producers = 1 self.num_consumers = 1 - def setUp(self): - if self.zk: - self.zk.start() - def min_cluster_size(self): """Override this since we're adding services outside of the constructor""" return super(LogDirFailureTest, self).min_cluster_size() + self.num_producers * 2 + self.num_consumers * 2 diff --git a/tests/kafkatest/tests/core/network_degrade_test.py b/tests/kafkatest/tests/core/network_degrade_test.py index 68cce856528e7..1c55d9b7e061c 100644 --- a/tests/kafkatest/tests/core/network_degrade_test.py +++ b/tests/kafkatest/tests/core/network_degrade_test.py @@ -22,7 +22,7 @@ from kafkatest.services.trogdor.degraded_network_fault_spec import DegradedNetworkFaultSpec from kafkatest.services.trogdor.trogdor import TrogdorService -from kafkatest.services.zookeeper import ZookeeperService +from kafkatest.services.kafka import KafkaService, quorum class NetworkDegradeTest(Test): @@ -34,36 +34,37 @@ class NetworkDegradeTest(Test): def __init__(self, test_context): super(NetworkDegradeTest, self).__init__(test_context) - self.zk = ZookeeperService(test_context, num_nodes=3) - self.trogdor = TrogdorService(context=self.test_context, client_services=[self.zk]) + self.kafka = KafkaService(test_context, num_nodes=2, zk=None, controller_num_nodes_override=2) + self.trogdor = TrogdorService(context=self.test_context, client_services=[self.kafka.controller_quorum]) def setUp(self): - self.zk.start() + self.kafka.start() self.trogdor.start() def teardown(self): self.trogdor.stop() - self.zk.stop() + self.kafka.stop() - @cluster(num_nodes=5) - @parametrize(task_name="latency-100", device_name="eth0", latency_ms=50, rate_limit_kbit=0) - @parametrize(task_name="latency-100-rate-1000", device_name="eth0", latency_ms=50, rate_limit_kbit=1000) - def test_latency(self, task_name, device_name, latency_ms, rate_limit_kbit): + @cluster(num_nodes=3) + @parametrize(task_name="latency-100", device_name="eth0", latency_ms=50, rate_limit_kbit=0, metadata_quorum=quorum.combined_kraft) + @parametrize(task_name="latency-100-rate-1000", device_name="eth0", latency_ms=50, rate_limit_kbit=1000, metadata_quorum=quorum.combined_kraft) + def test_latency(self, task_name, device_name, latency_ms, rate_limit_kbit, metadata_quorum=quorum.combined_kraft): spec = DegradedNetworkFaultSpec(0, 10000) - for 
node in self.zk.nodes: + for node in self.kafka.controller_quorum.nodes: spec.add_node_spec(node.name, device_name, latency_ms, rate_limit_kbit) latency = self.trogdor.create_task(task_name, spec) - zk0 = self.zk.nodes[0] - zk1 = self.zk.nodes[1] + quorum0 = self.kafka.controller_quorum.nodes[0] + quorum1 = self.kafka.controller_quorum.nodes[1] + # Capture the ping times from the ping stdout # 64 bytes from ducker01 (172.24.0.2): icmp_seq=1 ttl=64 time=0.325 ms r = re.compile(r".*time=(?P